Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c501.c  4
-rw-r--r--  drivers/net/3c503.c  4
-rw-r--r--  drivers/net/3c507.c  6
-rw-r--r--  drivers/net/3c515.c  2
-rw-r--r--  drivers/net/3c527.c  6
-rw-r--r--  drivers/net/3c59x.c  6
-rw-r--r--  drivers/net/8139cp.c  10
-rw-r--r--  drivers/net/8139too.c  3
-rw-r--r--  drivers/net/82596.c  2
-rw-r--r--  drivers/net/Kconfig  261
-rw-r--r--  drivers/net/arm/am79c961a.c  9
-rw-r--r--  drivers/net/arm/w90p910_ether.c  2
-rw-r--r--  drivers/net/at1700.c  8
-rw-r--r--  drivers/net/atarilance.c  2
-rw-r--r--  drivers/net/atl1c/atl1c_hw.c  2
-rw-r--r--  drivers/net/atl1c/atl1c_main.c  2
-rw-r--r--  drivers/net/atl1e/atl1e_main.c  2
-rw-r--r--  drivers/net/atlx/atl1.c  3
-rw-r--r--  drivers/net/atlx/atl2.c  4
-rw-r--r--  drivers/net/au1000_eth.c  12
-rw-r--r--  drivers/net/ax88796.c  8
-rw-r--r--  drivers/net/b44.c  11
-rw-r--r--  drivers/net/bcm63xx_enet.c  2
-rw-r--r--  drivers/net/benet/be.h  41
-rw-r--r--  drivers/net/benet/be_cmds.c  146
-rw-r--r--  drivers/net/benet/be_cmds.h  42
-rw-r--r--  drivers/net/benet/be_ethtool.c  4
-rw-r--r--  drivers/net/benet/be_hw.h  39
-rw-r--r--  drivers/net/benet/be_main.c  260
-rw-r--r--  drivers/net/bnx2.c  104
-rw-r--r--  drivers/net/bnx2.h  2
-rw-r--r--  drivers/net/bnx2x/Makefile  2
-rw-r--r--  drivers/net/bnx2x/bnx2x.h  148
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c  199
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h  73
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.c  1491
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.h  193
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c  357
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h  336
-rw-r--r--  drivers/net/bnx2x/bnx2x_init_ops.h  4
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c  723
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h  56
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c  696
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h  52
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c  13
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.h  2
-rw-r--r--  drivers/net/bonding/Makefile  2
-rw-r--r--  drivers/net/bonding/bond_3ad.c  3
-rw-r--r--  drivers/net/bonding/bond_alb.c  36
-rw-r--r--  drivers/net/bonding/bond_alb.h  38
-rw-r--r--  drivers/net/bonding/bond_debugfs.c  146
-rw-r--r--  drivers/net/bonding/bond_ipv6.c  7
-rw-r--r--  drivers/net/bonding/bond_main.c  87
-rw-r--r--  drivers/net/bonding/bonding.h  27
-rw-r--r--  drivers/net/caif/caif_shm_u5500.c  2
-rw-r--r--  drivers/net/caif/caif_shmcore.c  2
-rw-r--r--  drivers/net/caif/caif_spi.c  61
-rw-r--r--  drivers/net/caif/caif_spi_slave.c  13
-rw-r--r--  drivers/net/can/Kconfig  21
-rw-r--r--  drivers/net/can/Makefile  1
-rw-r--r--  drivers/net/can/janz-ican3.c  9
-rw-r--r--  drivers/net/can/mscan/mscan.c  2
-rw-r--r--  drivers/net/can/pch_can.c  1348
-rw-r--r--  drivers/net/can/sja1000/plx_pci.c  2
-rw-r--r--  drivers/net/can/sja1000/sja1000_of_platform.c  8
-rw-r--r--  drivers/net/can/slcan.c  756
-rw-r--r--  drivers/net/cassini.c  6
-rw-r--r--  drivers/net/chelsio/sge.c  10
-rw-r--r--  drivers/net/cnic.c  2
-rw-r--r--  drivers/net/cris/eth_v10.c  34
-rw-r--r--  drivers/net/cxgb3/ael1002.c  24
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c  7
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c  6
-rw-r--r--  drivers/net/cxgb3/t3_hw.c  2
-rw-r--r--  drivers/net/cxgb4/cxgb4.h  4
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c  141
-rw-r--r--  drivers/net/cxgb4/sge.c  22
-rw-r--r--  drivers/net/cxgb4/t4_hw.c  95
-rw-r--r--  drivers/net/cxgb4/t4fw_api.h  1
-rw-r--r--  drivers/net/cxgb4vf/adapter.h  2
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c  163
-rw-r--r--  drivers/net/cxgb4vf/sge.c  131
-rw-r--r--  drivers/net/cxgb4vf/t4vf_common.h  1
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c  120
-rw-r--r--  drivers/net/dm9000.c  2
-rw-r--r--  drivers/net/e1000/e1000_hw.c  20
-rw-r--r--  drivers/net/e1000/e1000_main.c  30
-rw-r--r--  drivers/net/e1000/e1000_param.c  13
-rw-r--r--  drivers/net/e1000e/82571.c  189
-rw-r--r--  drivers/net/e1000e/defines.h  9
-rw-r--r--  drivers/net/e1000e/e1000.h  4
-rw-r--r--  drivers/net/e1000e/ethtool.c  29
-rw-r--r--  drivers/net/e1000e/ich8lan.c  18
-rw-r--r--  drivers/net/e1000e/lib.c  135
-rw-r--r--  drivers/net/e1000e/netdev.c  86
-rw-r--r--  drivers/net/e1000e/param.c  2
-rw-r--r--  drivers/net/e1000e/phy.c  25
-rw-r--r--  drivers/net/e2100.c  2
-rw-r--r--  drivers/net/eepro.c  11
-rw-r--r--  drivers/net/eexpress.c  2
-rw-r--r--  drivers/net/ehea/ehea.h  15
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c  27
-rw-r--r--  drivers/net/ehea/ehea_main.c  450
-rw-r--r--  drivers/net/ehea/ehea_phyp.c  40
-rw-r--r--  drivers/net/ehea/ehea_qmr.c  89
-rw-r--r--  drivers/net/enic/enic.h  6
-rw-r--r--  drivers/net/enic/enic_main.c  250
-rw-r--r--  drivers/net/enic/enic_res.h  1
-rw-r--r--  drivers/net/enic/vnic_vic.h  31
-rw-r--r--  drivers/net/ethoc.c  160
-rw-r--r--  drivers/net/fec_mpc52xx.c  19
-rw-r--r--  drivers/net/forcedeth.c  1134
-rw-r--r--  drivers/net/gianfar.c  17
-rw-r--r--  drivers/net/gianfar_ethtool.c  5
-rw-r--r--  drivers/net/hp.c  6
-rw-r--r--  drivers/net/ibm_newemac/core.c  3
-rw-r--r--  drivers/net/ibmveth.c  7
-rw-r--r--  drivers/net/ifb.c  41
-rw-r--r--  drivers/net/igb/e1000_defines.h  7
-rw-r--r--  drivers/net/igb/e1000_nvm.c  93
-rw-r--r--  drivers/net/igb/e1000_nvm.h  2
-rw-r--r--  drivers/net/igb/e1000_phy.c  11
-rw-r--r--  drivers/net/igb/igb_main.c  26
-rw-r--r--  drivers/net/igbvf/Makefile  2
-rw-r--r--  drivers/net/igbvf/defines.h  2
-rw-r--r--  drivers/net/igbvf/ethtool.c  9
-rw-r--r--  drivers/net/igbvf/igbvf.h  3
-rw-r--r--  drivers/net/igbvf/mbx.c  2
-rw-r--r--  drivers/net/igbvf/mbx.h  2
-rw-r--r--  drivers/net/igbvf/netdev.c  24
-rw-r--r--  drivers/net/igbvf/regs.h  2
-rw-r--r--  drivers/net/igbvf/vf.c  2
-rw-r--r--  drivers/net/igbvf/vf.h  2
-rw-r--r--  drivers/net/ipg.c  6
-rw-r--r--  drivers/net/irda/act200l-sir.c  2
-rw-r--r--  drivers/net/irda/donauboe.c  4
-rw-r--r--  drivers/net/irda/mcs7780.c  2
-rw-r--r--  drivers/net/irda/sh_sir.c  2
-rw-r--r--  drivers/net/iseries_veth.c  27
-rw-r--r--  drivers/net/ixgb/ixgb_main.c  61
-rw-r--r--  drivers/net/ixgb/ixgb_param.c  21
-rw-r--r--  drivers/net/ixgbe/Makefile  2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h  122
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c  58
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c  136
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c  192
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h  8
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c  17
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h  3
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c  12
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.c  12
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c  55
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  297
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c  15
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  2112
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.c  40
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.h  2
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c  52
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.h  5
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c  5
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h  32
-rw-r--r--  drivers/net/ixgbe/ixgbe_x540.c  722
-rw-r--r--  drivers/net/ixgbevf/Makefile  2
-rw-r--r--  drivers/net/ixgbevf/defines.h  2
-rw-r--r--  drivers/net/ixgbevf/ethtool.c  18
-rw-r--r--  drivers/net/ixgbevf/ixgbevf.h  2
-rw-r--r--  drivers/net/ixgbevf/ixgbevf_main.c  22
-rw-r--r--  drivers/net/ixgbevf/mbx.c  2
-rw-r--r--  drivers/net/ixgbevf/mbx.h  2
-rw-r--r--  drivers/net/ixgbevf/regs.h  2
-rw-r--r--  drivers/net/ixgbevf/vf.c  2
-rw-r--r--  drivers/net/ixgbevf/vf.h  2
-rw-r--r--  drivers/net/jme.c  24
-rw-r--r--  drivers/net/ks8851.c  33
-rw-r--r--  drivers/net/ksz884x.c  20
-rw-r--r--  drivers/net/lance.c  2
-rw-r--r--  drivers/net/lib82596.c  2
-rw-r--r--  drivers/net/lib8390.c  24
-rw-r--r--  drivers/net/ll_temac_main.c  2
-rw-r--r--  drivers/net/macvlan.c  113
-rw-r--r--  drivers/net/macvtap.c  3
-rw-r--r--  drivers/net/mv643xx_eth.c  9
-rw-r--r--  drivers/net/myri10ge/myri10ge.c  4
-rw-r--r--  drivers/net/ne-h8300.c  12
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c  16
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c  7
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c  9
-rw-r--r--  drivers/net/ni52.c  4
-rw-r--r--  drivers/net/ni65.c  4
-rw-r--r--  drivers/net/niu.c  4
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_ethtool.c  19
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c  18
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_param.c  8
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c  49
-rw-r--r--  drivers/net/pcmcia/nmclan_cs.c  2
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c  1
-rw-r--r--  drivers/net/phy/Kconfig  2
-rw-r--r--  drivers/net/phy/icplus.c  59
-rw-r--r--  drivers/net/phy/marvell.c  164
-rw-r--r--  drivers/net/phy/phy.c  4
-rw-r--r--  drivers/net/ppp_generic.c  55
-rw-r--r--  drivers/net/pppoe.c  2
-rw-r--r--  drivers/net/pptp.c  5
-rw-r--r--  drivers/net/pxa168_eth.c  9
-rw-r--r--  drivers/net/qla3xxx.c  8
-rw-r--r--  drivers/net/qlcnic/qlcnic.h  43
-rw-r--r--  drivers/net/qlcnic/qlcnic_ctx.c  28
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c  157
-rw-r--r--  drivers/net/qlcnic/qlcnic_hdr.h  27
-rw-r--r--  drivers/net/qlcnic/qlcnic_hw.c  91
-rw-r--r--  drivers/net/qlcnic/qlcnic_init.c  123
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c  90
-rw-r--r--  drivers/net/qlge/qlge.h  5
-rw-r--r--  drivers/net/qlge/qlge_dbg.c  21
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c  19
-rw-r--r--  drivers/net/qlge/qlge_main.c  28
-rw-r--r--  drivers/net/qlge/qlge_mpi.c  14
-rw-r--r--  drivers/net/r8169.c  42
-rw-r--r--  drivers/net/s2io.c  79
-rw-r--r--  drivers/net/s2io.h  9
-rw-r--r--  drivers/net/sc92031.c  3
-rw-r--r--  drivers/net/sfc/efx.c  81
-rw-r--r--  drivers/net/sfc/efx.h  7
-rw-r--r--  drivers/net/sfc/ethtool.c  168
-rw-r--r--  drivers/net/sfc/falcon.c  183
-rw-r--r--  drivers/net/sfc/falcon_boards.c  120
-rw-r--r--  drivers/net/sfc/falcon_xmac.c  14
-rw-r--r--  drivers/net/sfc/filter.c  255
-rw-r--r--  drivers/net/sfc/filter.h  149
-rw-r--r--  drivers/net/sfc/io.h  153
-rw-r--r--  drivers/net/sfc/mcdi.c  3
-rw-r--r--  drivers/net/sfc/mcdi_phy.c  1
-rw-r--r--  drivers/net/sfc/mdio_10g.c  1
-rw-r--r--  drivers/net/sfc/mtd.c  98
-rw-r--r--  drivers/net/sfc/net_driver.h  89
-rw-r--r--  drivers/net/sfc/nic.c  96
-rw-r--r--  drivers/net/sfc/nic.h  12
-rw-r--r--  drivers/net/sfc/qt202x_phy.c  6
-rw-r--r--  drivers/net/sfc/rx.c  30
-rw-r--r--  drivers/net/sfc/siena.c  10
-rw-r--r--  drivers/net/sfc/spi.h  5
-rw-r--r--  drivers/net/sfc/tenxpress.c  2
-rw-r--r--  drivers/net/sfc/tx.c  122
-rw-r--r--  drivers/net/sh_eth.c  245
-rw-r--r--  drivers/net/sh_eth.h  1
-rw-r--r--  drivers/net/sis190.c  3
-rw-r--r--  drivers/net/skfp/smt.c  4
-rw-r--r--  drivers/net/skge.c  9
-rw-r--r--  drivers/net/smc-ultra.c  8
-rw-r--r--  drivers/net/smsc911x.h  2
-rw-r--r--  drivers/net/stmmac/stmmac.h  40
-rw-r--r--  drivers/net/stmmac/stmmac_ethtool.c  16
-rw-r--r--  drivers/net/stmmac/stmmac_main.c  271
-rw-r--r--  drivers/net/stmmac/stmmac_mdio.c  8
-rw-r--r--  drivers/net/sundance.c  23
-rw-r--r--  drivers/net/sungem.c  14
-rw-r--r--  drivers/net/sunhme.c  2
-rw-r--r--  drivers/net/sunlance.c  10
-rw-r--r--  drivers/net/tehuti.c  4
-rw-r--r--  drivers/net/tg3.c  313
-rw-r--r--  drivers/net/tg3.h  42
-rw-r--r--  drivers/net/tokenring/ibmtr.c  5
-rw-r--r--  drivers/net/tulip/de2104x.c  19
-rw-r--r--  drivers/net/tulip/dmfe.c  6
-rw-r--r--  drivers/net/tulip/tulip_core.c  15
-rw-r--r--  drivers/net/tun.c  2
-rw-r--r--  drivers/net/typhoon.c  1
-rw-r--r--  drivers/net/ucc_geth.c  25
-rw-r--r--  drivers/net/ucc_geth.h  3
-rw-r--r--  drivers/net/usb/Kconfig  19
-rw-r--r--  drivers/net/usb/Makefile  1
-rw-r--r--  drivers/net/usb/asix.c  4
-rw-r--r--  drivers/net/usb/cdc_ncm.c  1213
-rw-r--r--  drivers/net/usb/hso.c  54
-rw-r--r--  drivers/net/usb/ipheth.c  2
-rw-r--r--  drivers/net/usb/mcs7830.c  12
-rw-r--r--  drivers/net/usb/pegasus.c  4
-rw-r--r--  drivers/net/usb/sierra_net.c  5
-rw-r--r--  drivers/net/usb/smsc95xx.c  7
-rw-r--r--  drivers/net/usb/usbnet.c  59
-rw-r--r--  drivers/net/veth.c  4
-rw-r--r--  drivers/net/via-rhine.c  326
-rw-r--r--  drivers/net/virtio_net.c  14
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c  965
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c  174
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h  73
-rw-r--r--  drivers/net/vxge/vxge-config.c  3604
-rw-r--r--  drivers/net/vxge/vxge-config.h  169
-rw-r--r--  drivers/net/vxge/vxge-ethtool.c  112
-rw-r--r--  drivers/net/vxge/vxge-main.c  1106
-rw-r--r--  drivers/net/vxge/vxge-main.h  86
-rw-r--r--  drivers/net/vxge/vxge-reg.h  33
-rw-r--r--  drivers/net/vxge/vxge-traffic.c  773
-rw-r--r--  drivers/net/vxge/vxge-traffic.h  49
-rw-r--r--  drivers/net/vxge/vxge-version.h  33
-rw-r--r--  drivers/net/wan/dscc4.c  6
-rw-r--r--  drivers/net/wan/hd64572.c  5
-rw-r--r--  drivers/net/wan/x25_asy.c  11
-rw-r--r--  drivers/net/wd.c  2
-rw-r--r--  drivers/net/wimax/i2400m/driver.c  96
-rw-r--r--  drivers/net/wimax/i2400m/i2400m.h  19
-rw-r--r--  drivers/net/wimax/i2400m/sdio.c  1
-rw-r--r--  drivers/net/wimax/i2400m/usb.c  1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c  1
-rw-r--r--  drivers/net/wireless/hostap/hostap_ap.c  3
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c  8
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig  3
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.c  1
-rw-r--r--  drivers/net/xilinx_emaclite.c  36
-rw-r--r--  drivers/net/znet.c  2
310 files changed, 18963 insertions, 10148 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 1776ab61b05f..9e1c03eb97ae 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -158,8 +158,8 @@ static int mem_start;
struct net_device * __init el1_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- static unsigned ports[] = { 0x280, 0x300, 0};
- unsigned *port;
+ static const unsigned ports[] = { 0x280, 0x300, 0};
+ const unsigned *port;
int err = 0;
if (!dev)
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 4777a1cbcd8d..d84f6e8903a5 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -392,8 +392,8 @@ el2_open(struct net_device *dev)
int retval;
if (dev->irq < 2) {
- int irqlist[] = {5, 9, 3, 4, 0};
- int *irqp = irqlist;
+ static const int irqlist[] = {5, 9, 3, 4, 0};
+ const int *irqp = irqlist;
outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
do {
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index ea9b7a098c9b..1e945551c144 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -201,7 +201,7 @@ struct net_local {
#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
#define RX_BUF_END (dev->mem_end - dev->mem_start)
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
/*
That's it: only 86 bytes to set up the beast, including every extra
@@ -311,8 +311,8 @@ static int mem_start;
struct net_device * __init el16_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- static unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
- unsigned *port;
+ static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
+ const unsigned *port;
int err = -ENODEV;
if (!dev)
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index cdf7226a7c43..d2bb4b254c57 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -98,7 +98,7 @@ static int rx_nocopy, rx_copy, queued_packet;
#define WAIT_TX_AVAIL 200
/* Operational parameter that usually are not changed. */
-#define TX_TIMEOUT 40 /* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((4*HZ)/10) /* Time in jiffies before concluding Tx hung */
/* The size here is somewhat misleading: the Corkscrew also uses the ISA
aliased registers at <base>+0x400.
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 013b7c396663..8c094bae8bf3 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -317,13 +317,13 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
u8 POS;
u32 base;
struct mc32_local *lp = netdev_priv(dev);
- static u16 mca_io_bases[]={
+ static const u16 mca_io_bases[] = {
0x7280,0x7290,
0x7680,0x7690,
0x7A80,0x7A90,
0x7E80,0x7E90
};
- static u32 mca_mem_bases[]={
+ static const u32 mca_mem_bases[] = {
0x00C0000,
0x00C4000,
0x00C8000,
@@ -333,7 +333,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
0x00D8000,
0x00DC000
};
- static char *failures[]={
+ static const char * const failures[] = {
"Processor instruction",
"Processor data bus",
"Processor data bus",
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e1da258bbfb7..0a92436f0538 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -699,7 +699,8 @@ DEFINE_WINDOW_IO(32)
#define DEVICE_PCI(dev) NULL
#endif
-#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
+#define VORTEX_PCI(vp) \
+ ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
@@ -707,7 +708,8 @@ DEFINE_WINDOW_IO(32)
#define DEVICE_EISA(dev) NULL
#endif
-#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)
+#define VORTEX_EISA(vp) \
+ ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
/* The action to take with a media selection timer tick.
Note that we deviate from the 3Com order by checking 10base2 before AUI.
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ac422cd332ea..dd16e83933a2 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
{
unsigned int protocol = (status >> 16) & 0x3;
- if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
+ if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
+ ((protocol == RxProtoUDP) && !(status & UDPFail)))
return 1;
- else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
- return 1;
- else if ((protocol == RxProtoIP) && (!(status & IPFail)))
- return 1;
- return 0;
+ else
+ return 0;
}
static int cp_rx_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f5166dccd8df..98517a373473 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1092,10 +1092,11 @@ err_out:
static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
+ struct rtl8139_private *tp = netdev_priv(dev);
assert (dev != NULL);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&tp->thread);
unregister_netdev (dev);
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index e2c9c5b949f9..be1f1970c842 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -191,7 +191,7 @@ enum commands {
#define RX_SUSPEND 0x0030
#define RX_ABORT 0x0040
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
struct i596_reg {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f6668cdaac85..a20693fcb321 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1533,7 +1533,7 @@ config E100
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
- to identify the adapter.
+ to identify the adapter.
For the latest Intel PRO/100 network driver for Linux, see:
@@ -1786,17 +1786,17 @@ config KS8842
tristate "Micrel KSZ8841/42 with generic bus interface"
depends on HAS_IOMEM && DMA_ENGINE
help
- This platform driver is for KSZ8841(1-port) / KS8842(2-port)
- ethernet switch chip (managed, VLAN, QoS) from Micrel or
- Timberdale(FPGA).
+ This platform driver is for KSZ8841(1-port) / KS8842(2-port)
+ ethernet switch chip (managed, VLAN, QoS) from Micrel or
+ Timberdale(FPGA).
config KS8851
- tristate "Micrel KS8851 SPI"
- depends on SPI
- select MII
+ tristate "Micrel KS8851 SPI"
+ depends on SPI
+ select MII
select CRC32
- help
- SPI driver for Micrel KS8851 SPI attached network chip.
+ help
+ SPI driver for Micrel KS8851 SPI attached network chip.
config KS8851_MLL
tristate "Micrel KS8851 MLL"
@@ -2133,25 +2133,25 @@ config IP1000
will be called ipg. This is recommended.
config IGB
- tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
- depends on PCI
- ---help---
- This driver supports Intel(R) 82575/82576 gigabit ethernet family of
- adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
+ tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) 82575/82576 gigabit ethernet family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
- For general information and support, go to the Intel support
- website at:
+ For general information and support, go to the Intel support
+ website at:
- <http://support.intel.com>
+ <http://support.intel.com>
- More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.txt>.
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
- To compile this driver as a module, choose M here. The module
- will be called igb.
+ To compile this driver as a module, choose M here. The module
+ will be called igb.
config IGB_DCA
bool "Direct Cache Access (DCA) Support"
@@ -2163,25 +2163,25 @@ config IGB_DCA
is used, with the intent of lessening the impact of cache misses.
config IGBVF
- tristate "Intel(R) 82576 Virtual Function Ethernet support"
- depends on PCI
- ---help---
- This driver supports Intel(R) 82576 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ tristate "Intel(R) 82576 Virtual Function Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) 82576 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
- For general information and support, go to the Intel support
- website at:
+ For general information and support, go to the Intel support
+ website at:
- <http://support.intel.com>
+ <http://support.intel.com>
- More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.txt>.
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
- To compile this driver as a module, choose M here. The module
- will be called igbvf.
+ To compile this driver as a module, choose M here. The module
+ will be called igbvf.
source "drivers/net/ixp2000/Kconfig"
@@ -2300,14 +2300,14 @@ config SKGE
will be called skge. This is recommended.
config SKGE_DEBUG
- bool "Debugging interface"
- depends on SKGE && DEBUG_FS
- help
- This option adds the ability to dump driver state for debugging.
- The file /sys/kernel/debug/skge/ethX displays the state of the internal
- transmit and receive rings.
+ bool "Debugging interface"
+ depends on SKGE && DEBUG_FS
+ help
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/skge/ethX displays the state of the internal
+ transmit and receive rings.
- If unsure, say N.
+ If unsure, say N.
config SKY2
tristate "SysKonnect Yukon2 support"
@@ -2326,14 +2326,14 @@ config SKY2
will be called sky2. This is recommended.
config SKY2_DEBUG
- bool "Debugging interface"
- depends on SKY2 && DEBUG_FS
- help
- This option adds the ability to dump driver state for debugging.
- The file /sys/kernel/debug/sky2/ethX displays the state of the internal
- transmit and receive rings.
+ bool "Debugging interface"
+ depends on SKY2 && DEBUG_FS
+ help
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+ transmit and receive rings.
- If unsure, say N.
+ If unsure, say N.
config VIA_VELOCITY
tristate "VIA Velocity support"
@@ -2389,12 +2389,12 @@ config SPIDER_NET
Cell Processor-Based Blades from IBM.
config TSI108_ETH
- tristate "Tundra TSI108 gigabit Ethernet support"
- depends on TSI108_BRIDGE
- help
- This driver supports Tundra TSI108 gigabit Ethernet ports.
- To compile this driver as a module, choose M here: the module
- will be called tsi108_eth.
+ tristate "Tundra TSI108 gigabit Ethernet support"
+ depends on TSI108_BRIDGE
+ help
+ This driver supports Tundra TSI108 gigabit Ethernet ports.
+ To compile this driver as a module, choose M here: the module
+ will be called tsi108_eth.
config GELIC_NET
tristate "PS3 Gigabit Ethernet driver"
@@ -2543,10 +2543,10 @@ config PCH_GBE
depends on PCI
select MII
---help---
- This is a gigabit ethernet driver for Topcliff PCH.
- Topcliff PCH is the platform controller hub that is used in Intel's
+ This is a gigabit ethernet driver for EG20T PCH.
+ EG20T PCH is the platform controller hub that is used in Intel's
general embedded platform.
- Topcliff PCH has Gigabit Ethernet interface.
+ EG20T PCH has Gigabit Ethernet interface.
Using this interface, it is able to access system devices connected
to Gigabit Ethernet.
This driver enables Gigabit Ethernet function.
@@ -2573,32 +2573,32 @@ config MDIO
tristate
config CHELSIO_T1
- tristate "Chelsio 10Gb Ethernet support"
- depends on PCI
+ tristate "Chelsio 10Gb Ethernet support"
+ depends on PCI
select CRC32
select MDIO
- help
- This driver supports Chelsio gigabit and 10-gigabit
- Ethernet cards. More information about adapter features and
+ help
+ This driver supports Chelsio gigabit and 10-gigabit
+ Ethernet cards. More information about adapter features and
performance tuning is in <file:Documentation/networking/cxgb.txt>.
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
- Please send feedback to <linux-bugs@chelsio.com>.
+ Please send feedback to <linux-bugs@chelsio.com>.
- To compile this driver as a module, choose M here: the module
- will be called cxgb.
+ To compile this driver as a module, choose M here: the module
+ will be called cxgb.
config CHELSIO_T1_1G
- bool "Chelsio gigabit Ethernet support"
- depends on CHELSIO_T1
- help
- Enables support for Chelsio's gigabit Ethernet PCI cards. If you
- are using only 10G cards say 'N' here.
+ bool "Chelsio gigabit Ethernet support"
+ depends on CHELSIO_T1
+ help
+ Enables support for Chelsio's gigabit Ethernet PCI cards. If you
+ are using only 10G cards say 'N' here.
config CHELSIO_T3_DEPENDS
tristate
@@ -2728,26 +2728,26 @@ config IXGBE_DCB
If unsure, say N.
config IXGBEVF
- tristate "Intel(R) 82599 Virtual Function Ethernet support"
- depends on PCI_MSI
- ---help---
- This driver supports Intel(R) 82599 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) 82599 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
- <http://support.intel.com/support/network/sb/CS-008441.htm>
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
- For general information and support, go to the Intel support
- website at:
+ For general information and support, go to the Intel support
+ website at:
- <http://support.intel.com>
+ <http://support.intel.com>
- More specific information on configuring the driver is in
- <file:Documentation/networking/ixgbevf.txt>.
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbevf.txt>.
- To compile this driver as a module, choose M here. The module
- will be called ixgbevf. MSI-X interrupt support is required
- for this driver to work correctly.
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbevf. MSI-X interrupt support is required
+ for this driver to work correctly.
config IXGB
tristate "Intel(R) PRO/10GbE support"
@@ -2772,29 +2772,38 @@ config IXGB
will be called ixgb.
config S2IO
- tristate "S2IO 10Gbe XFrame NIC"
+ tristate "Exar Xframe 10Gb Ethernet Adapter"
depends on PCI
---help---
- This driver supports the 10Gbe XFrame NIC of S2IO.
+ This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
More specific information on configuring the driver is in
<file:Documentation/networking/s2io.txt>.
+ To compile this driver as a module, choose M here. The module
+ will be called s2io.
+
config VXGE
- tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
+ tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
depends on PCI && INET
---help---
- This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
+ This driver supports Exar Corp's X3100 Series 10 GbE PCIe
I/O Virtualized Server Adapter.
+
More specific information on configuring the driver is in
<file:Documentation/networking/vxge.txt>.
+ To compile this driver as a module, choose M here. The module
+ will be called vxge.
+
config VXGE_DEBUG_TRACE_ALL
bool "Enabling All Debug trace statments in driver"
default n
depends on VXGE
---help---
Say Y here if you want to enabling all the debug trace statements in
- driver. By default only few debug trace statements are enabled.
+ the vxge driver. By default only few debug trace statements are
+ enabled.
config MYRI10GE
tristate "Myricom Myri-10G Ethernet support"
@@ -2906,18 +2915,18 @@ config QLGE
will be called qlge.
config BNA
- tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
- depends on PCI
- ---help---
- This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
- cards.
- To compile this driver as a module, choose M here: the module
- will be called bna.
+ tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+ depends on PCI
+ ---help---
+ This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
+ cards.
+ To compile this driver as a module, choose M here: the module
+ will be called bna.
- For general information and support, go to the Brocade support
- website at:
+ For general information and support, go to the Brocade support
+ website at:
- <http://support.brocade.com>
+ <http://support.brocade.com>
source "drivers/net/sfc/Kconfig"
@@ -3227,18 +3236,18 @@ config PPP_BSDCOMP
modules once you have said "make modules". If unsure, say N.
config PPP_MPPE
- tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
- depends on PPP && EXPERIMENTAL
- select CRYPTO
- select CRYPTO_SHA1
- select CRYPTO_ARC4
- select CRYPTO_ECB
- ---help---
- Support for the MPPE Encryption protocol, as employed by the
- Microsoft Point-to-Point Tunneling Protocol.
-
- See http://pptpclient.sourceforge.net/ for information on
- configuring PPTP clients and servers to utilize this method.
+ tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
+ depends on PPP && EXPERIMENTAL
+ select CRYPTO
+ select CRYPTO_SHA1
+ select CRYPTO_ARC4
+ select CRYPTO_ECB
+ ---help---
+ Support for the MPPE Encryption protocol, as employed by the
+ Microsoft Point-to-Point Tunneling Protocol.
+
+ See http://pptpclient.sourceforge.net/ for information on
+ configuring PPTP clients and servers to utilize this method.
config PPPOE
tristate "PPP over Ethernet (EXPERIMENTAL)"
@@ -3397,14 +3406,14 @@ config VIRTIO_NET
depends on EXPERIMENTAL && VIRTIO
---help---
This is the virtual network driver for virtio. It can be used with
- lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
+ lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
config VMXNET3
- tristate "VMware VMXNET3 ethernet driver"
- depends on PCI && INET
- help
- This driver supports VMware's vmxnet3 virtual ethernet NIC.
- To compile this driver as a module, choose M here: the
- module will be called vmxnet3.
+ tristate "VMware VMXNET3 ethernet driver"
+ depends on PCI && INET
+ help
+ This driver supports VMware's vmxnet3 virtual ethernet NIC.
+ To compile this driver as a module, choose M here: the
+ module will be called vmxnet3.
endif # NETDEVICES
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 62f21106efec..0c9217f48b72 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -340,14 +340,6 @@ am79c961_close(struct net_device *dev)
return 0;
}
-/*
- * Get the current statistics.
- */
-static struct net_device_stats *am79c961_getstats (struct net_device *dev)
-{
- return &dev->stats;
-}
-
static void am79c961_mc_hash(char *addr, unsigned short *hash)
{
if (addr[0] & 0x01) {
@@ -665,7 +657,6 @@ static const struct net_device_ops am79c961_netdev_ops = {
.ndo_open = am79c961_open,
.ndo_stop = am79c961_close,
.ndo_start_xmit = am79c961_sendpacket,
- .ndo_get_stats = am79c961_getstats,
.ndo_set_multicast_list = am79c961_setmulticastlist,
.ndo_tx_timeout = am79c961_timeout,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 4545d5a06c24..bfea499a3513 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -117,7 +117,7 @@
#define TX_DESC_SIZE 10
#define MAX_RBUFF_SZ 0x600
#define MAX_TBUFF_SZ 0x600
-#define TX_TIMEOUT 50
+#define TX_TIMEOUT (HZ/2)
#define DELAY 1000
#define CAM0 0x0
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 89876897a6fe..f4744fc89768 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -150,7 +150,7 @@ struct net_local {
#define PORT_OFFSET(o) (o)
-#define TX_TIMEOUT 10
+#define TX_TIMEOUT (HZ/10)
/* Index to functions, as function prototypes. */
@@ -270,9 +270,9 @@ static const struct net_device_ops at1700_netdev_ops = {
static int __init at1700_probe1(struct net_device *dev, int ioaddr)
{
- char fmv_irqmap[4] = {3, 7, 10, 15};
- char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
- char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
+ static const char fmv_irqmap[4] = {3, 7, 10, 15};
+ static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
+ static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
int slot, ret = -ENODEV;
struct net_local *lp = netdev_priv(dev);
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 8cb27cb7bca1..ce0091eb06f5 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -116,7 +116,7 @@ MODULE_LICENSE("GPL");
#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-#define TX_TIMEOUT 20
+#define TX_TIMEOUT (HZ/5)
/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 919080b2c3a5..1bf672009948 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
addr[0] = addr[1] = 0;
AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
if (atl1c_check_eeprom_exist(hw)) {
- if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
/* Enable OTP CLK */
if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
otp_ctrl_data |= OTP_CTRL_CLK_EN;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 09b099bfab2b..e48ea956c51f 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -2078,7 +2078,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
check_sum:
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
u8 css, cso;
- cso = skb_transport_offset(skb);
+ cso = skb_checksum_start_offset(skb);
if (unlikely(cso & 0x1)) {
if (netif_msg_tx_err(adapter))
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index ef6349bf3b33..e28f8baf394e 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1649,7 +1649,7 @@ check_sum:
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
u8 css, cso;
- cso = skb_transport_offset(skb);
+ cso = skb_checksum_start_offset(skb);
if (unlikely(cso & 0x1)) {
netdev_err(adapter->netdev,
"payload offset should not ant event number\n");
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 43579b3b24ac..def8df83359c 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2174,7 +2174,7 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
u8 css, cso;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- css = (u8) (skb->csum_start - skb_headroom(skb));
+ css = skb_checksum_start_offset(skb);
cso = css + (u8) skb->csum_offset;
if (unlikely(css & 0x1)) {
/* L1 hardware requires an even number here */
@@ -3043,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
atl1_pcie_patch(adapter);
/* assume we have no link for now */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
setup_timer(&adapter->phy_config_timer, atl1_phy_config,
(unsigned long)adapter);
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 35b14bec1207..4e6f4e95a5a0 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1504,8 +1504,8 @@ static void __devexit atl2_remove(struct pci_dev *pdev)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_config_timer);
-
- flush_scheduled_work();
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->link_chg_task);
unregister_netdev(netdev);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 43489f89c142..b9debcfb61a0 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -106,8 +106,6 @@ MODULE_VERSION(DRV_VERSION);
* complete immediately.
*/
-struct au1000_private *au_macs[NUM_ETH_INTERFACES];
-
/*
* board-specific configurations
*
@@ -155,10 +153,10 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
spin_lock_irqsave(&aup->lock, flags);
if (force_reset || (!aup->mac_enabled)) {
- writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
au_sync_delay(2);
writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
- | MAC_EN_CLOCK_ENABLE), &aup->enable);
+ | MAC_EN_CLOCK_ENABLE), aup->enable);
au_sync_delay(2);
aup->mac_enabled = 1;
@@ -503,9 +501,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
au1000_hard_stop(dev);
- writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
au_sync_delay(2);
- writel(0, &aup->enable);
+ writel(0, aup->enable);
au_sync_delay(2);
aup->tx_full = 0;
@@ -1119,7 +1117,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* set a random MAC now in case platform_data doesn't provide one */
random_ether_addr(dev->dev_addr);
- writel(0, &aup->enable);
+ writel(0, aup->enable);
aup->mac_enabled = 0;
pd = pdev->dev.platform_data;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b6da4cf3694b..4bebff3faeab 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -325,7 +325,7 @@ static void ax_block_output(struct net_device *dev, int count,
static void
ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
unsigned int memr;
@@ -364,7 +364,7 @@ ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
static unsigned int
ax_phy_ei_inbits(struct net_device *dev, int no)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
unsigned int memr;
unsigned int result = 0;
@@ -412,7 +412,7 @@ ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
static int
ax_phy_read(struct net_device *dev, int phy_addr, int reg)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
unsigned int result;
@@ -435,7 +435,7 @@ ax_phy_read(struct net_device *dev, int phy_addr, int reg)
static void
ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
{
- struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei = netdev_priv(dev);
struct ax_device *ax = to_ax_dev(dev);
unsigned long flags;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c6e86315b3f8..2e2b76258ab4 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -381,11 +381,11 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
__b44_set_flow_ctrl(bp, pause_enab);
}
-#ifdef SSB_DRIVER_MIPS
-extern char *nvram_get(char *name);
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
- const char *str;
+ char buf[20];
u32 val;
int err;
@@ -394,10 +394,9 @@ static void b44_wap54g10_workaround(struct b44 *bp)
* see https://dev.openwrt.org/ticket/146
* check and reset bit "isolate"
*/
- str = nvram_get("boardnum");
- if (!str)
+ if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
return;
- if (simple_strtoul(str, NULL, 0) == 2) {
+ if (simple_strtoul(buf, NULL, 0) == 2) {
err = __b44_readphy(bp, 0, MII_BMCR, &val);
if (err)
goto error;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index ecfef240a303..e94a966af418 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -1097,7 +1097,7 @@ static int bcm_enet_stop(struct net_device *dev)
enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
/* make sure no mib update is scheduled */
- flush_scheduled_work();
+ cancel_work_sync(&priv->mib_update_task);
/* disable dma & mac */
bcm_enet_disable_dma(priv, priv->tx_chan);
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 4594a28b1f66..add0b93350dd 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -38,14 +38,17 @@
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
-#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
+#define OC_NAME_BE OC_NAME "(be3)"
+#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
+#define EMULEX_VENDOR_ID 0x10df
#define BE_DEVICE_ID1 0x211
#define BE_DEVICE_ID2 0x221
-#define OC_DEVICE_ID1 0x700
-#define OC_DEVICE_ID2 0x710
+#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
+#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
+#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
static inline char *nic_name(struct pci_dev *pdev)
{
@@ -53,7 +56,9 @@ static inline char *nic_name(struct pci_dev *pdev)
case OC_DEVICE_ID1:
return OC_NAME;
case OC_DEVICE_ID2:
- return OC_NAME1;
+ return OC_NAME_BE;
+ case OC_DEVICE_ID3:
+ return OC_NAME_LANCER;
case BE_DEVICE_ID2:
return BE3_NAME;
default:
@@ -149,6 +154,7 @@ struct be_eq_obj {
u16 min_eqd; /* in usecs */
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
+ u8 msix_vec_idx;
struct napi_struct napi;
};
@@ -214,7 +220,9 @@ struct be_rx_obj {
struct be_rx_stats stats;
u8 rss_id;
bool rx_post_starved; /* Zero rx frags have been posted to BE */
- u32 cache_line_barrier[16];
+ u16 last_frag_index;
+ u16 rsvd;
+ u32 cache_line_barrier[15];
};
struct be_vf_cfg {
@@ -234,7 +242,7 @@ struct be_adapter {
u8 __iomem *db; /* Door Bell */
u8 __iomem *pcicfg; /* PCI config space */
- spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */
@@ -260,6 +268,8 @@ struct be_adapter {
u32 num_rx_qs;
u32 big_page_size; /* Compounded page size shared by rx wrbs */
+ u8 msix_vec_next_idx;
+
struct vlan_group *vlan_grp;
u16 vlans_added;
u16 max_vlans; /* Number of vlans supported */
@@ -299,8 +309,8 @@ struct be_adapter {
bool sriov_enabled;
struct be_vf_cfg vf_cfg[BE_MAX_VF];
- u8 base_eq_id;
u8 is_virtfn;
+ u32 sli_family;
};
#define be_physfn(adapter) (!adapter->is_virtfn)
@@ -309,6 +319,8 @@ struct be_adapter {
#define BE_GEN2 2
#define BE_GEN3 3
+#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
+
extern const struct ethtool_ops be_ethtool_ops;
#define tx_stats(adapter) (&adapter->tx_stats)
@@ -416,10 +428,17 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
{
u8 data;
-
- pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
- pci_read_config_byte(adapter->pdev, 0xFE, &data);
- adapter->is_virtfn = (data != 0xAA);
+ u32 sli_intf;
+
+ if (lancer_chip(adapter)) {
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
+ &sli_intf);
+ adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+ } else {
+ pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
+ pci_read_config_byte(adapter->pdev, 0xFE, &data);
+ adapter->is_virtfn = (data != 0xAA);
+ }
}
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 36eca1ce75d4..0c7811faf72c 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -323,7 +323,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
- u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+ u32 sem;
+
+ if (lancer_chip(adapter))
+ sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
+ else
+ sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@@ -462,7 +467,8 @@ int be_cmd_fw_init(struct be_adapter *adapter)
u8 *wrb;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = (u8 *)wrb_from_mbox(adapter);
*wrb++ = 0xFF;
@@ -476,7 +482,7 @@ int be_cmd_fw_init(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -491,7 +497,8 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
if (adapter->eeh_err)
return -EIO;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = (u8 *)wrb_from_mbox(adapter);
*wrb++ = 0xFF;
@@ -505,7 +512,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
int be_cmd_eq_create(struct be_adapter *adapter,
@@ -516,7 +523,8 @@ int be_cmd_eq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -546,7 +554,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
eq->created = true;
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -558,7 +566,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -583,7 +592,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -667,7 +676,8 @@ int be_cmd_cq_create(struct be_adapter *adapter,
void *ctxt;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -680,16 +690,36 @@ int be_cmd_cq_create(struct be_adapter *adapter,
OPCODE_COMMON_CQ_CREATE, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 1;
+ req->page_size = 1; /* 1 for 4K */
+ AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
+ coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
+ no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
+ __ilog2_u32(cq->len/256));
+ AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
+ ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
+ ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
+ coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
+ ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
+ __ilog2_u32(cq->len/256));
+ AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_be, solevent,
+ ctxt, sol_evts);
+ AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
+ }
- AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
- AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
- AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
- __ilog2_u32(cq->len/256));
- AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
- AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -701,7 +731,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
cq->created = true;
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -724,7 +754,8 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
void *ctxt;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -737,13 +768,27 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 1;
+ req->cq_id = cpu_to_le16(cq->id);
+
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
+ ctxt, cq->id);
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
+ ctxt, 1);
+
+ } else {
+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+ }
- AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
- AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
- req->async_event_bitmap[0] |= 0x00000022;
+ req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -754,7 +799,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
mccq->id = le16_to_cpu(resp->id);
mccq->created = true;
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -769,7 +814,8 @@ int be_cmd_txq_create(struct be_adapter *adapter,
void *ctxt;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -801,7 +847,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
txq->created = true;
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -816,7 +862,8 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -843,7 +890,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
*rss_id = resp->rss_id;
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -862,7 +909,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
if (adapter->eeh_err)
return -EIO;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -899,7 +947,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -915,7 +963,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
struct be_cmd_req_if_create *req;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -941,7 +990,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -955,7 +1004,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
if (adapter->eeh_err)
return -EIO;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -970,7 +1020,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -1060,7 +1110,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
struct be_cmd_req_get_fw_version *req;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -1077,7 +1128,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -1235,7 +1286,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
i = 0;
netdev_for_each_mc_addr(ha, netdev)
- memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
+ memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
} else {
req->promiscuous = 1;
}
@@ -1322,7 +1373,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
struct be_cmd_req_query_fw_cfg *req;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -1341,7 +1393,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
*caps = le32_to_cpu(resp->function_caps);
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -1352,7 +1404,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
struct be_cmd_req_hdr *req;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -1365,7 +1418,7 @@ int be_cmd_reset_function(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -1376,7 +1429,8 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
u32 myhash[10];
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -1396,7 +1450,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8469ff061f30..83d15c8a9fa3 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -309,7 +309,7 @@ struct be_cmd_req_pmac_del {
/******************** Create CQ ***************************/
/* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field */
-struct amap_cq_context {
+struct amap_cq_context_be {
u8 cidx[11]; /* dword 0*/
u8 rsvd0; /* dword 0*/
u8 coalescwm[2]; /* dword 0*/
@@ -332,14 +332,32 @@ struct amap_cq_context {
u8 rsvd5[32]; /* dword 3*/
} __packed;
+struct amap_cq_context_lancer {
+ u8 rsvd0[12]; /* dword 0*/
+ u8 coalescwm[2]; /* dword 0*/
+ u8 nodelay; /* dword 0*/
+ u8 rsvd1[12]; /* dword 0*/
+ u8 count[2]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 rsvd2; /* dword 0*/
+ u8 eventable; /* dword 0*/
+ u8 eqid[16]; /* dword 1*/
+ u8 rsvd3[15]; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd4[32]; /* dword 2*/
+ u8 rsvd5[32]; /* dword 3*/
+} __packed;
+
struct be_cmd_req_cq_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
- u16 rsvd0;
- u8 context[sizeof(struct amap_cq_context) / 8];
+ u8 page_size;
+ u8 rsvd0;
+ u8 context[sizeof(struct amap_cq_context_be) / 8];
struct phys_addr pages[8];
} __packed;
+
struct be_cmd_resp_cq_create {
struct be_cmd_resp_hdr hdr;
u16 cq_id;
@@ -349,7 +367,7 @@ struct be_cmd_resp_cq_create {
/******************** Create MCCQ ***************************/
/* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field */
-struct amap_mcc_context {
+struct amap_mcc_context_be {
u8 con_index[14];
u8 rsvd0[2];
u8 ring_size[4];
@@ -364,12 +382,23 @@ struct amap_mcc_context {
u8 rsvd2[32];
} __packed;
+struct amap_mcc_context_lancer {
+ u8 async_cq_id[16];
+ u8 ring_size[4];
+ u8 rsvd0[12];
+ u8 rsvd1[31];
+ u8 valid;
+ u8 async_cq_valid[1];
+ u8 rsvd2[31];
+ u8 rsvd3[32];
+} __packed;
+
struct be_cmd_req_mcc_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
- u16 rsvd0;
+ u16 cq_id;
u32 async_event_bitmap[1];
- u8 context[sizeof(struct amap_mcc_context) / 8];
+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
struct phys_addr pages[8];
} __packed;
@@ -605,6 +634,7 @@ struct be_hw_stats {
struct be_rxf_stats rxf;
u32 rsvd[48];
struct be_erx_stats erx;
+ u32 rsvd1[6];
};
struct be_cmd_req_get_stats {
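The amap structs above are the "pseudo amap" trick described in the comment: each u8 member stands for one bit of the real hardware context, so plain offsetof()/sizeof() arithmetic yields each field's dword, shift and width, and the real context occupies sizeof(amap)/8 bytes (hence the context[] arrays). A sketch of that arithmetic with hypothetical macro names (the driver's actual accessors are the AMAP_SET_BITS()/AMAP_GET_BITS() macros used in be_main.c):

#include <stddef.h>

/* one u8 member == one bit of the real context */
#define AMAP_BIT_OFF(s, f)	offsetof(struct s, f)		/* bit offset */
#define AMAP_BIT_WIDTH(s, f)	sizeof(((struct s *)0)->f)	/* width in bits */
#define AMAP_DWORD(s, f)	(AMAP_BIT_OFF(s, f) / 32)
#define AMAP_SHIFT(s, f)	(AMAP_BIT_OFF(s, f) % 32)

/* e.g. amap_cq_context_lancer.coalescwm: bit offset 12 -> dword 0,
 * shift 12, width 2; the whole context is 4 * 32 bits = 16 bytes,
 * i.e. sizeof(struct amap_cq_context_lancer) / 8. */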
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 0f46366ecc48..b4be0271efe0 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -549,7 +549,9 @@ be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
- u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
+ static const u64 pattern[2] = {
+ 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
+ };
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
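Making the DMA test pattern static const (the same treatment is applied to the flash tables and register boundaries later in this diff) moves the data into read-only storage emitted once at build time, instead of rebuilding the array on the kernel stack at every call. The idiom in isolation (a sketch; the fill helper and its name are illustrative):

/* built once into .rodata rather than copied to the stack per call */
static const u64 test_pattern[2] = {
	0x5a5a5a5a5a5a5a5aULL,
	0xa5a5a5a5a5a5a5a5ULL,
};

static void fill_test_buf(u64 *va, size_t len)
{
	size_t i;

	for (i = 0; i < len / sizeof(u64); i++)
		va[i] = test_pattern[i & 1];	/* alternate both words */
}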
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index a2ec5df0d733..4096d9778234 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -32,10 +32,12 @@
#define MPU_EP_CONTROL 0
/********** MPU semphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET 0xac
-#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK 0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+#define MPU_EP_SEMAPHORE_OFFSET 0xac
+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
+#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
+#define EP_SEMAPHORE_POST_ERR_MASK 0x1
+#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+
/* MPU semphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
@@ -66,6 +68,28 @@
#define PCICFG_UE_STATUS_LOW_MASK 0xA8
#define PCICFG_UE_STATUS_HI_MASK 0xAC
+/******** SLI_INTF ***********************/
+#define SLI_INTF_REG_OFFSET 0x58
+#define SLI_INTF_VALID_MASK 0xE0000000
+#define SLI_INTF_VALID 0xC0000000
+#define SLI_INTF_HINT2_MASK 0x1F000000
+#define SLI_INTF_HINT2_SHIFT 24
+#define SLI_INTF_HINT1_MASK 0x00FF0000
+#define SLI_INTF_HINT1_SHIFT 16
+#define SLI_INTF_FAMILY_MASK 0x00000F00
+#define SLI_INTF_FAMILY_SHIFT 8
+#define SLI_INTF_IF_TYPE_MASK 0x0000F000
+#define SLI_INTF_IF_TYPE_SHIFT 12
+#define SLI_INTF_REV_MASK 0x000000F0
+#define SLI_INTF_REV_SHIFT 4
+#define SLI_INTF_FT_MASK 0x00000001
+
+
+/* SLI family */
+#define BE_SLI_FAMILY 0x0
+#define LANCER_A0_SLI_FAMILY 0xA
+
+
/********* ISR0 Register offset **********/
#define CEV_ISR0_OFFSET 0xC18
#define CEV_ISR_SIZE 4
@@ -73,6 +97,9 @@
/********* Event Q door bell *************/
#define DB_EQ_OFFSET DB_CQ_OFFSET
#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
+#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
+#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
+
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
/* Must be 1 */
@@ -85,6 +112,10 @@
/********* Compl Q door bell *************/
#define DB_CQ_OFFSET 0x120
#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
+#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14 placing at 11-15 */
+
/* Number of event entries processed */
#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
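The new DB_*_RING_ID_EXT_MASK/_SHIFT pairs extend the queue id written to the event/completion doorbells beyond the original 9 (EQ) and 10 (CQ) bits: the extra qid bits are shifted up so they land in doorbell bits 11-15. A hypothetical helper showing the EQ encoding (be_main.c below performs the same arithmetic inline):

static inline u32 be_eq_db_encode(u16 qid)
{
	u32 val = qid & DB_EQ_RING_ID_MASK;	/* qid bits 0-8 */

	/* qid bits 9-13 (0x3e00), shifted by 2 into doorbell bits 11-15 */
	val |= (qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT;
	return val;
}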
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index c36cd2ffbadc..de40d3b7152f 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -41,6 +41,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -188,6 +189,8 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
+ DB_EQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_err)
return;
@@ -205,6 +208,8 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_CQ_RING_ID_MASK;
+ val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
+ DB_CQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_err)
return;
@@ -404,7 +409,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
}
/* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
+ bool *dummy)
{
int cnt = (skb->len > skb->data_len);
@@ -412,12 +418,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
/* to account for hdr wrb */
cnt++;
- if (cnt & 1) {
+ if (lancer_chip(adapter) || !(cnt & 1)) {
+ *dummy = false;
+ } else {
/* add a dummy to make it an even num */
cnt++;
*dummy = true;
- } else
- *dummy = false;
+ }
BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
return cnt;
}
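The reworked wrb_cnt_for_skb() keeps the even-count padding for BE chips only. A worked example under assumed skb shapes:

/* linear skb, no frags:  1 data WRB + 1 hdr WRB = 2, even -> no dummy
 * skb with one frag:     1 data + 1 frag + 1 hdr = 3, odd
 *                        -> BE2/BE3 append a dummy WRB (4 total),
 *                           Lancer posts the odd count as-is
 */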
@@ -443,8 +450,18 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
hdr, skb_shinfo(skb)->gso_size);
- if (skb_is_gso_v6(skb))
+ if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
+ if (lancer_chip(adapter) && adapter->sli_family ==
+ LANCER_A0_SLI_FAMILY) {
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+ if (is_tcp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+ tcpcs, hdr, 1);
+ else if (is_udp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+ udpcs, hdr, 1);
+ }
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (is_tcp_pkt(skb))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -566,7 +583,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
u32 start = txq->head;
bool dummy_wrb, stopped = false;
- wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
+ wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
if (copied) {
@@ -894,11 +911,17 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
- for (i = 0; i < num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxq_idx);
- put_page(page_info->page);
- memset(page_info, 0, sizeof(*page_info));
- index_inc(&rxq_idx, rxq->len);
+ /* Skip out-of-buffer compl (Lancer) or flush compl (BE) */
+ if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
+
+ rxo->last_frag_index = rxq_idx;
+
+ for (i = 0; i < num_rcvd; i++) {
+ page_info = get_rx_page_info(adapter, rxo, rxq_idx);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ index_inc(&rxq_idx, rxq->len);
+ }
}
}
@@ -999,9 +1022,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
u8 vtm;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
- /* Is it a flush compl that has no data */
- if (unlikely(num_rcvd == 0))
- return;
skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
if (unlikely(!skb)) {
@@ -1035,7 +1055,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
return;
}
vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
- vid = swab16(vid);
+ if (!lancer_chip(adapter))
+ vid = swab16(vid);
vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
} else {
netif_receive_skb(skb);
@@ -1057,10 +1078,6 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
u8 pkt_type;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
- /* Is it a flush compl that has no data */
- if (unlikely(num_rcvd == 0))
- return;
-
pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -1113,7 +1130,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
napi_gro_frags(&eq_obj->napi);
} else {
vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
- vid = swab16(vid);
+ if (!lancer_chip(adapter))
+ vid = swab16(vid);
if (!adapter->vlan_grp || adapter->vlans_added == 0)
return;
@@ -1330,7 +1348,7 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
be_rx_compl_discard(adapter, rxo, rxcp);
be_rx_compl_reset(rxcp);
- be_cq_notify(adapter, rx_cq->id, true, 1);
+ be_cq_notify(adapter, rx_cq->id, false, 1);
}
/* Then free posted rx buffer that were not used */
@@ -1381,7 +1399,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
sent_skb = sent_skbs[txq->tail];
end_idx = txq->tail;
index_adv(&end_idx,
- wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
+ wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
+ txq->len);
be_tx_compl_process(adapter, end_idx);
}
}
@@ -1476,7 +1495,9 @@ static int be_tx_queues_create(struct be_adapter *adapter)
/* Ask BE to create Tx Event queue */
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
- adapter->base_eq_id = adapter->tx_eq.q.id;
+
+ adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
/* Alloc TX eth compl queue */
cq = &adapter->tx_obj.cq;
@@ -1554,6 +1575,9 @@ static int be_rx_queues_create(struct be_adapter *adapter)
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
rxo->adapter = adapter;
+ /* Init last_frag_index so that the frag index in the first
+ * completion will never match it */
+ rxo->last_frag_index = 0xffff;
rxo->rx_eq.max_eqd = BE_MAX_EQD;
rxo->rx_eq.enable_aic = true;
@@ -1568,6 +1592,8 @@ static int be_rx_queues_create(struct be_adapter *adapter)
if (rc)
goto err;
+ rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
/* CQ */
cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
@@ -1578,7 +1604,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
if (rc)
goto err;
-
/* Rx Q */
q = &rxo->q;
rc = be_queue_alloc(adapter, q, RX_Q_LEN,
@@ -1611,29 +1636,45 @@ err:
return -1;
}
-/* There are 8 evt ids per func. Retruns the evt id's bit number */
-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
+static bool event_peek(struct be_eq_obj *eq_obj)
{
- return eq_id - adapter->base_eq_id;
+ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+ if (!eqe->evt)
+ return false;
+ else
+ return true;
}
static irqreturn_t be_intx(int irq, void *dev)
{
struct be_adapter *adapter = dev;
struct be_rx_obj *rxo;
- int isr, i;
+ int isr, i, tx = 0, rx = 0;
- isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
- (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
- if (!isr)
- return IRQ_NONE;
+ if (lancer_chip(adapter)) {
+ if (event_peek(&adapter->tx_eq))
+ tx = event_handle(adapter, &adapter->tx_eq);
+ for_all_rx_queues(adapter, rxo, i) {
+ if (event_peek(&rxo->rx_eq))
+ rx |= event_handle(adapter, &rxo->rx_eq);
+ }
- if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
- event_handle(adapter, &adapter->tx_eq);
+ if (!(tx || rx))
+ return IRQ_NONE;
- for_all_rx_queues(adapter, rxo, i) {
- if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
- event_handle(adapter, &rxo->rx_eq);
+ } else {
+ isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
+ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
+ if (!isr)
+ return IRQ_NONE;
+
+ if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+ event_handle(adapter, &adapter->tx_eq);
+
+ for_all_rx_queues(adapter, rxo, i) {
+ if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+ event_handle(adapter, &rxo->rx_eq);
+ }
}
return IRQ_HANDLED;
@@ -1658,10 +1699,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
return IRQ_HANDLED;
}
-static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
- struct be_eth_rx_compl *rxcp)
+static inline bool do_gro(struct be_rx_obj *rxo,
+ struct be_eth_rx_compl *rxcp, u8 err)
{
- int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
if (err)
@@ -1678,6 +1718,8 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
struct be_queue_info *rx_cq = &rxo->cq;
struct be_eth_rx_compl *rxcp;
u32 work_done;
+ u16 frag_index, num_rcvd;
+ u8 err;
rxo->stats.rx_polls++;
for (work_done = 0; work_done < budget; work_done++) {
@@ -1685,10 +1727,22 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
if (!rxcp)
break;
- if (do_gro(adapter, rxo, rxcp))
- be_rx_compl_process_gro(adapter, rxo, rxcp);
- else
- be_rx_compl_process(adapter, rxo, rxcp);
+ err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
+ frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
+ rxcp);
+ num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
+ rxcp);
+
+ /* Skip out-of-buffer compl (Lancer) or flush compl (BE) */
+ if (likely(frag_index != rxo->last_frag_index &&
+ num_rcvd != 0)) {
+ rxo->last_frag_index = frag_index;
+
+ if (do_gro(rxo, rxcp, err))
+ be_rx_compl_process_gro(adapter, rxo, rxcp);
+ else
+ be_rx_compl_process(adapter, rxo, rxcp);
+ }
be_rx_compl_reset(rxcp);
}
@@ -1830,8 +1884,7 @@ static void be_worker(struct work_struct *work)
be_post_rx_frags(rxo);
}
}
-
- if (!adapter->ue_detected)
+ if (!adapter->ue_detected && !lancer_chip(adapter))
be_detect_dump_ue(adapter);
reschedule:
@@ -1910,10 +1963,10 @@ static void be_sriov_disable(struct be_adapter *adapter)
#endif
}
-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
+static inline int be_msix_vec_get(struct be_adapter *adapter,
+ struct be_eq_obj *eq_obj)
{
- return adapter->msix_entries[
- be_evt_bit_get(adapter, eq_id)].vector;
+ return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
@@ -1924,14 +1977,14 @@ static int be_request_irq(struct be_adapter *adapter,
int vec;
sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
- vec = be_msix_vec_get(adapter, eq_obj->q.id);
+ vec = be_msix_vec_get(adapter, eq_obj);
return request_irq(vec, handler, 0, eq_obj->desc, context);
}
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
void *context)
{
- int vec = be_msix_vec_get(adapter, eq_obj->q.id);
+ int vec = be_msix_vec_get(adapter, eq_obj);
free_irq(vec, context);
}
@@ -2036,14 +2089,15 @@ static int be_close(struct net_device *netdev)
netif_carrier_off(netdev);
adapter->link_up = false;
- be_intr_set(adapter, false);
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, false);
if (adapter->msix_enabled) {
- vec = be_msix_vec_get(adapter, tx_eq->q.id);
+ vec = be_msix_vec_get(adapter, tx_eq);
synchronize_irq(vec);
for_all_rx_queues(adapter, rxo, i) {
- vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
+ vec = be_msix_vec_get(adapter, &rxo->rx_eq);
synchronize_irq(vec);
}
} else {
@@ -2082,7 +2136,8 @@ static int be_open(struct net_device *netdev)
be_irq_register(adapter);
- be_intr_set(adapter, true);
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, true);
/* The evt queues are created in unarmed state; arm them */
for_all_rx_queues(adapter, rxo, i) {
@@ -2343,10 +2398,10 @@ static int be_flash_data(struct be_adapter *adapter,
int num_bytes;
const u8 *p = fw->data;
struct be_cmd_write_flashrom *req = flash_cmd->va;
- struct flash_comp *pflashcomp;
+ const struct flash_comp *pflashcomp;
int num_comp;
- struct flash_comp gen3_flash_types[9] = {
+ static const struct flash_comp gen3_flash_types[9] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
FLASH_IMAGE_MAX_SIZE_g3},
{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
@@ -2366,7 +2421,7 @@ static int be_flash_data(struct be_adapter *adapter,
{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
FLASH_NCSI_IMAGE_MAX_SIZE_g3}
};
- struct flash_comp gen2_flash_types[8] = {
+ static const struct flash_comp gen2_flash_types[8] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
FLASH_IMAGE_MAX_SIZE_g2},
{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
@@ -2388,11 +2443,11 @@ static int be_flash_data(struct be_adapter *adapter,
if (adapter->generation == BE_GEN3) {
pflashcomp = gen3_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g3);
- num_comp = 9;
+ num_comp = ARRAY_SIZE(gen3_flash_types);
} else {
pflashcomp = gen2_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g2);
- num_comp = 8;
+ num_comp = ARRAY_SIZE(gen2_flash_types);
}
for (i = 0; i < num_comp; i++) {
if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
@@ -2458,6 +2513,12 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
int status, i = 0, num_imgs = 0;
const u8 *p;
+ if (!netif_running(adapter->netdev)) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware load not allowed (interface is down)\n");
+ return -EPERM;
+ }
+
strcpy(fw_file, func);
status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
@@ -2537,10 +2598,15 @@ static void be_netdev_init(struct net_device *netdev)
int i;
netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_GRO | NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ if (lancer_chip(adapter))
+ netdev->vlan_features |= NETIF_F_TSO6;
netdev->flags |= IFF_MULTICAST;
@@ -2581,6 +2647,15 @@ static int be_map_pci_bars(struct be_adapter *adapter)
u8 __iomem *addr;
int pcicfg_reg, db_reg;
+ if (lancer_chip(adapter)) {
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
+ pci_resource_len(adapter->pdev, 0));
+ if (addr == NULL)
+ return -ENOMEM;
+ adapter->db = addr;
+ return 0;
+ }
+
if (be_physfn(adapter)) {
addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
pci_resource_len(adapter->pdev, 2));
@@ -2671,7 +2746,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
}
memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
- spin_lock_init(&adapter->mbox_lock);
+ mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
@@ -2777,6 +2852,44 @@ static int be_get_config(struct be_adapter *adapter)
return 0;
}
+static int be_dev_family_check(struct be_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 sli_intf = 0, if_type;
+
+ switch (pdev->device) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ adapter->generation = BE_GEN2;
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID2:
+ adapter->generation = BE_GEN3;
+ break;
+ case OC_DEVICE_ID3:
+ pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+ SLI_INTF_IF_TYPE_SHIFT;
+
+ if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
+ if_type != 0x02) {
+ dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
+ return -EINVAL;
+ }
+ if (num_vfs > 0) {
+ dev_err(&pdev->dev, "VFs not supported\n");
+ return -EINVAL;
+ }
+ adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
+ SLI_INTF_FAMILY_SHIFT);
+ adapter->generation = BE_GEN3;
+ break;
+ default:
+ adapter->generation = 0;
+ }
+ return 0;
+}
+
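be_dev_family_check() keys entirely off the SLI_INTF register for the new OC_DEVICE_ID3 parts. A worked decode under an assumed register value:

/* hypothetical sli_intf = 0xC0002A40 read at SLI_INTF_REG_OFFSET:
 *   sli_intf & SLI_INTF_VALID_MASK           = 0xC0000000 == SLI_INTF_VALID
 *   (sli_intf & SLI_INTF_IF_TYPE_MASK) >> 12 = 0x2  (the required if_type)
 *   (sli_intf & SLI_INTF_FAMILY_MASK) >> 8   = 0xA  (LANCER_A0_SLI_FAMILY)
 * so the adapter is accepted and handled as a Lancer A0, BE_GEN3 device.
 */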
static int __devinit be_probe(struct pci_dev *pdev,
const struct pci_device_id *pdev_id)
{
@@ -2799,22 +2912,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto rel_reg;
}
adapter = netdev_priv(netdev);
-
- switch (pdev->device) {
- case BE_DEVICE_ID1:
- case OC_DEVICE_ID1:
- adapter->generation = BE_GEN2;
- break;
- case BE_DEVICE_ID2:
- case OC_DEVICE_ID2:
- adapter->generation = BE_GEN3;
- break;
- default:
- adapter->generation = 0;
- }
-
adapter->pdev = pdev;
pci_set_drvdata(pdev, adapter);
+
+ status = be_dev_family_check(adapter);
+ if (status)
+ goto free_netdev;
+
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -2889,7 +2993,7 @@ ctrl_clean:
be_ctrl_cleanup(adapter);
free_netdev:
be_sriov_disable(adapter);
- free_netdev(adapter->netdev);
+ free_netdev(netdev);
pci_set_drvdata(pdev, NULL);
rel_reg:
pci_release_regions(pdev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 062600be073b..6fa798468ad4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.0.18"
-#define DRV_MODULE_RELDATE "Oct 7, 2010"
+#define DRV_MODULE_VERSION "2.0.20"
+#define DRV_MODULE_RELDATE "Nov 24, 2010"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw"
@@ -766,13 +766,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
int j;
rxr->rx_buf_ring =
- vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+ vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
if (rxr->rx_buf_ring == NULL)
return -ENOMEM;
- memset(rxr->rx_buf_ring, 0,
- SW_RXBD_RING_SIZE * bp->rx_max_ring);
-
for (j = 0; j < bp->rx_max_ring; j++) {
rxr->rx_desc_ring[j] =
dma_alloc_coherent(&bp->pdev->dev,
@@ -785,13 +782,11 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
}
if (bp->rx_pg_ring_size) {
- rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
+ rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
bp->rx_max_pg_ring);
if (rxr->rx_pg_ring == NULL)
return -ENOMEM;
- memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
- bp->rx_max_pg_ring);
}
for (j = 0; j < bp->rx_max_pg_ring; j++) {
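The vmalloc()+memset() pairs above collapse into vzalloc(), the zeroing variant of vmalloc(). Roughly equivalent to this sketch (not the kernel's actual implementation, which passes __GFP_ZERO down instead):

void *my_vzalloc(unsigned long size)
{
	void *p = vmalloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}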
@@ -4645,13 +4640,28 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
/* Wait for the current PCI transaction to complete before
* issuing a reset. */
- REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
- BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
- BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
- BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
- BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
- val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
- udelay(5);
+ if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
+ (CHIP_NUM(bp) == CHIP_NUM_5708)) {
+ REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
+ BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
+ val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
+ udelay(5);
+ } else { /* 5709 */
+ val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+ val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
+ REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+ val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+
+ for (i = 0; i < 100; i++) {
+ msleep(1);
+ val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
+ if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
+ break;
+ }
+ }
/* Wait for the firmware to tell us it is ok to issue a reset. */
bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
@@ -4673,7 +4683,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
- pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
+ REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
} else {
val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -6801,28 +6811,30 @@ bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
u32 *p = _p, i, offset;
u8 *orig_p = _p;
struct bnx2 *bp = netdev_priv(dev);
- u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
- 0x0800, 0x0880, 0x0c00, 0x0c10,
- 0x0c30, 0x0d08, 0x1000, 0x101c,
- 0x1040, 0x1048, 0x1080, 0x10a4,
- 0x1400, 0x1490, 0x1498, 0x14f0,
- 0x1500, 0x155c, 0x1580, 0x15dc,
- 0x1600, 0x1658, 0x1680, 0x16d8,
- 0x1800, 0x1820, 0x1840, 0x1854,
- 0x1880, 0x1894, 0x1900, 0x1984,
- 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
- 0x1c80, 0x1c94, 0x1d00, 0x1d84,
- 0x2000, 0x2030, 0x23c0, 0x2400,
- 0x2800, 0x2820, 0x2830, 0x2850,
- 0x2b40, 0x2c10, 0x2fc0, 0x3058,
- 0x3c00, 0x3c94, 0x4000, 0x4010,
- 0x4080, 0x4090, 0x43c0, 0x4458,
- 0x4c00, 0x4c18, 0x4c40, 0x4c54,
- 0x4fc0, 0x5010, 0x53c0, 0x5444,
- 0x5c00, 0x5c18, 0x5c80, 0x5c90,
- 0x5fc0, 0x6000, 0x6400, 0x6428,
- 0x6800, 0x6848, 0x684c, 0x6860,
- 0x6888, 0x6910, 0x8000 };
+ static const u32 reg_boundaries[] = {
+ 0x0000, 0x0098, 0x0400, 0x045c,
+ 0x0800, 0x0880, 0x0c00, 0x0c10,
+ 0x0c30, 0x0d08, 0x1000, 0x101c,
+ 0x1040, 0x1048, 0x1080, 0x10a4,
+ 0x1400, 0x1490, 0x1498, 0x14f0,
+ 0x1500, 0x155c, 0x1580, 0x15dc,
+ 0x1600, 0x1658, 0x1680, 0x16d8,
+ 0x1800, 0x1820, 0x1840, 0x1854,
+ 0x1880, 0x1894, 0x1900, 0x1984,
+ 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
+ 0x1c80, 0x1c94, 0x1d00, 0x1d84,
+ 0x2000, 0x2030, 0x23c0, 0x2400,
+ 0x2800, 0x2820, 0x2830, 0x2850,
+ 0x2b40, 0x2c10, 0x2fc0, 0x3058,
+ 0x3c00, 0x3c94, 0x4000, 0x4010,
+ 0x4080, 0x4090, 0x43c0, 0x4458,
+ 0x4c00, 0x4c18, 0x4c40, 0x4c54,
+ 0x4fc0, 0x5010, 0x53c0, 0x5444,
+ 0x5c00, 0x5c18, 0x5c80, 0x5c90,
+ 0x5fc0, 0x6000, 0x6400, 0x6428,
+ 0x6800, 0x6848, 0x684c, 0x6860,
+ 0x6888, 0x6910, 0x8000
+ };
regs->version = 0;
@@ -7914,15 +7926,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
goto err_out_release;
}
+ bnx2_set_power_state(bp, PCI_D0);
+
/* Configure byte swap and enable write to the reg_window registers.
* Rely on CPU to do target byte swapping on big endian systems
* The chip's target access swapping will not swap all accesses
*/
- pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
- BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
- BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
-
- bnx2_set_power_state(bp, PCI_D0);
+ REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
+ BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+ BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
@@ -8383,8 +8395,6 @@ bnx2_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- flush_scheduled_work();
-
unregister_netdev(dev);
if (bp->mips_firmware)
@@ -8421,7 +8431,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
- flush_scheduled_work();
+ cancel_work_sync(&bp->reset_task);
bnx2_netif_stop(bp, true);
netif_device_detach(dev);
del_timer_sync(&bp->timer);
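Dropping flush_scheduled_work() in favor of cancel_work_sync(&bp->reset_task) waits on (or cancels) only this driver's own work item rather than draining the whole shared workqueue, which both narrows the wait and avoids deadlocking against unrelated work. The shape of the conversion (sketch):

/* before: flushes everyone's work on the shared queue */
flush_scheduled_work();

/* after: cancels/waits for just our item; safe even if it isn't queued */
cancel_work_sync(&bp->reset_task);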
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index bf4c3421067d..5488a2e82fe9 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -461,6 +461,8 @@ struct l2_fhdr {
#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090
#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094
+#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4
+#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16)
/*
* pci_reg definition
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index 084afce89ae9..bb83a2961273 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9eea225decaf..f14c6ed62bbb 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -13,6 +13,8 @@
#ifndef BNX2X_H
#define BNX2X_H
+#include <linux/netdevice.h>
+#include <linux/types.h>
/* compilation time flags */
@@ -20,8 +22,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.60.00-3"
-#define DRV_MODULE_RELDATE "2010/10/19"
+#define DRV_MODULE_VERSION "1.62.00-2"
+#define DRV_MODULE_RELDATE "2010/12/13"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
@@ -48,6 +50,7 @@
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
+#include "bnx2x_dcb.h"
#include "bnx2x_stats.h"
/* error/debug prints */
@@ -199,10 +202,25 @@ void bnx2x_panic_dump(struct bnx2x *bp);
/* EQ completions */
#define HC_SP_INDEX_EQ_CONS 7
+/* FCoE L2 connection completions */
+#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
+#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
/* iSCSI L2 */
#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+/* Special clients parameters */
+
+/* SB indices */
+/* FCoE L2 */
+#define BNX2X_FCOE_L2_RX_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
+
+#define BNX2X_FCOE_L2_TX_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
+
/**
* CIDs and CLIDs:
* CLIDs below is a CLID for func 0, then the CLID for other
@@ -215,12 +233,19 @@ void bnx2x_panic_dump(struct bnx2x *bp);
#define BNX2X_ISCSI_ETH_CL_ID 17
#define BNX2X_ISCSI_ETH_CID 17
+/* FCoE L2 */
+#define BNX2X_FCOE_ETH_CL_ID 18
+#define BNX2X_FCOE_ETH_CID 18
+
/** Additional rings budgeting */
#ifdef BCM_CNIC
#define CNIC_CONTEXT_USE 1
+#define FCOE_CONTEXT_USE 1
#else
#define CNIC_CONTEXT_USE 0
+#define FCOE_CONTEXT_USE 0
#endif /* BCM_CNIC */
+#define NONE_ETH_CONTEXT_USE (FCOE_CONTEXT_USE)
#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -401,6 +426,17 @@ struct bnx2x_fastpath {
};
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+#ifdef BCM_CNIC
+/* FCoE L2 `fastpath' is right after the eth entries */
+#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
+#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
+#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
+#else
+#define IS_FCOE_FP(fp) false
+#define IS_FCOE_IDX(idx) false
+#endif
/* MC hsi */
@@ -669,8 +705,14 @@ struct bnx2x_port {
enum {
CAM_ETH_LINE = 0,
CAM_ISCSI_ETH_LINE,
- CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
+ CAM_FIP_ETH_LINE,
+ CAM_FIP_MCAST_LINE,
+ CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE
};
+/* number of MACs per function in NIG memory - used for SI mode */
+#define NIG_LLH_FUNC_MEM_SIZE 16
+/* number of entries in NIG_REG_LLHX_FUNC_MEM */
+#define NIG_LLH_FUNC_MEM_MAX_OFFSET 8
#define BNX2X_VF_ID_INVALID 0xFF
@@ -710,6 +752,14 @@ enum {
*/
#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
+/*
+ * The number of FP-SB allocated by the driver == max number of regular L2
+ * queues + 1 for the CNIC which also consumes an FP-SB
+ */
+#define FP_SB_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE)
+#define NUM_IGU_SB_REQUIRED(cid_cnt) \
+ (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)
+
union cdu_context {
struct eth_context eth;
char pad[1024];
@@ -722,7 +772,8 @@ union cdu_context {
#ifdef BCM_CNIC
#define CNIC_ISCSI_CID_MAX 256
-#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX)
+#define CNIC_FCOE_CID_MAX 2048
+#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
#endif
@@ -770,6 +821,8 @@ struct bnx2x_slowpath {
u32 wb_comp;
u32 wb_data[4];
+ /* pfc configuration for DCBX ramrod */
+ struct flow_control_configuration pfc_config;
};
#define bnx2x_sp(bp, var) (&bp->slowpath->var)
@@ -918,6 +971,10 @@ struct bnx2x {
#define DISABLE_MSI_FLAG 0x200
#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
#define MF_FUNC_DIS 0x1000
+#define FCOE_MACS_SET 0x2000
+#define NO_FCOE_FLAG 0x4000
+
+#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
int pf_num; /* absolute PF number */
int pfid; /* per-path PF number */
@@ -967,6 +1024,8 @@ struct bnx2x {
u16 mf_ov;
u8 mf_mode;
#define IS_MF(bp) (bp->mf_mode != 0)
+#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
+#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
u8 wol;
@@ -1010,6 +1069,7 @@ struct bnx2x {
#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
#define BNX2X_ACCEPT_BROADCAST 0x0010
+#define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020
#define BNX2X_PROMISCUOUS_MODE 0x10000
u32 rx_mode;
@@ -1062,7 +1122,8 @@ struct bnx2x {
u16 cnic_kwq_pending;
u16 cnic_spq_pending;
struct mutex cnic_mutex;
- u8 iscsi_mac[6];
+ u8 iscsi_mac[ETH_ALEN];
+ u8 fip_mac[ETH_ALEN];
#endif
int dmae_ready;
@@ -1122,6 +1183,18 @@ struct bnx2x {
char fw_ver[32];
const struct firmware *firmware;
+ /* LLDP params */
+ struct bnx2x_config_lldp_params lldp_config_params;
+
+ /* DCBX params */
+ struct bnx2x_config_dcbx_params dcbx_config_params;
+
+ struct bnx2x_dcbx_port_params dcbx_port_params;
+ int dcb_version;
+
+ /* DCBX Negotiation results */
+ struct dcbx_features dcbx_local_feat;
+ u32 dcbx_error;
};
/**
@@ -1152,10 +1225,17 @@ struct bnx2x {
#define RSS_IPV6_TCP_CAP 0x0008
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE)
+
+/* ethtool statistics are displayed for all regular ethernet queues and the
+ * fcoe L2 queue if not disabled
+ */
+#define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \
+ (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE))
+
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
-#define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1)
#define RSS_IPV4_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@@ -1248,6 +1328,7 @@ struct bnx2x_client_ramrod_params {
u16 cl_id;
u32 cid;
u8 poll;
+#define CLIENT_IS_FCOE 0x01
#define CLIENT_IS_LEADING_RSS 0x02
u8 flags;
};
@@ -1280,11 +1361,54 @@ struct bnx2x_func_init_params {
u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
};
+#define for_each_eth_queue(bp, var) \
+ for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
+
+#define for_each_nondefault_eth_queue(bp, var) \
+ for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
+
+#define for_each_napi_queue(bp, var) \
+ for (var = 0; \
+ var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
#define for_each_queue(bp, var) \
- for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
+ for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_rx_queue(bp, var) \
+ for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_tx_queue(bp, var) \
+ for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
#define for_each_nondefault_queue(bp, var) \
- for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
+ for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+/* skip rx queue
+ * if FCoE L2 support is disabled and this is the FCoE L2 queue
+ */
+#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+/* skip tx queue
+ * if FCoE L2 support is disabled and this is the FCoE L2 queue
+ */
+#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
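The rewritten iterators splice a skip test into the for-statement itself; the trailing else is what the caller's loop body binds to, so the macros still compose with single-statement bodies. Expanded, for_each_rx_queue(bp, i) { body; } behaves like this open-coded loop (illustrative):

for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++)
	if (skip_rx_queue(bp, i))
		continue;	/* the FCoE L2 ring when FCoE is disabled */
	else {
		/* caller's body lands here, attached to the else */
	}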
@@ -1329,7 +1453,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_ILT_ZALLOC(x, y, size) \
do { \
- x = pci_alloc_consistent(bp->pdev, size, y); \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x) \
memset(x, 0, size); \
} while (0)
@@ -1337,7 +1461,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_ILT_FREE(x, y, size) \
do { \
if (x) { \
- pci_free_consistent(bp->pdev, size, x, y); \
+ dma_free_coherent(&bp->pdev->dev, size, x, y); \
x = NULL; \
y = 0; \
} \
@@ -1608,10 +1732,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
(T_ETH_MAC_COMMAND_INVALIDATE))
-#define CAM_INVALIDATE(x) \
- (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
-
-
/* Number of u32 elements in MC hash array */
#define MC_HASH_SIZE 8
#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 459614d2d7bc..710ce5d04c53 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -698,6 +698,29 @@ void bnx2x_release_phy_lock(struct bnx2x *bp)
mutex_unlock(&bp->port.phy_mutex);
}
+/* calculates MF speed according to the current line speed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+ u16 line_speed = bp->link_vars.line_speed;
+ if (IS_MF(bp)) {
+ u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ /* Calculate the current MAX line speed limit for the DCC
+ * capable devices
+ */
+ if (IS_MF_SD(bp)) {
+ u16 vn_max_rate = maxCfg * 100;
+
+ if (vn_max_rate < line_speed)
+ line_speed = vn_max_rate;
+ } else /* IS_MF_SI(bp) */
+ line_speed = (line_speed * maxCfg) / 100;
+ }
+
+ return line_speed;
+}
+
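bnx2x_get_mf_speed() reads maxCfg as a bandwidth value in units of 100 Mbps. A worked example with assumed numbers: on a 10000 Mbps link with maxCfg = 25, SD mode computes vn_max_rate = 25 * 100 = 2500 and reports min(10000, 2500) = 2500 Mbps, while SI mode scales proportionally to 10000 * 25 / 100 = 2500 Mbps. The modes diverge when the link slows: at 1000 Mbps, SD reports the full 1000 (the 2500 cap no longer binds) while SI still scales down to 250 Mbps.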
void bnx2x_link_report(struct bnx2x *bp)
{
if (bp->flags & MF_FUNC_DIS) {
@@ -713,17 +736,8 @@ void bnx2x_link_report(struct bnx2x *bp)
netif_carrier_on(bp->dev);
netdev_info(bp->dev, "NIC Link is Up, ");
- line_speed = bp->link_vars.line_speed;
- if (IS_MF(bp)) {
- u16 vn_max_rate;
+ line_speed = bnx2x_get_mf_speed(bp);
- vn_max_rate =
- ((bp->mf_config[BP_VN(bp)] &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
- if (vn_max_rate < line_speed)
- line_speed = vn_max_rate;
- }
pr_cont("%d Mbps ", line_speed);
if (bp->link_vars.duplex == DUPLEX_FULL)
@@ -813,7 +827,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
DP(NETIF_MSG_IFUP,
"mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
- for_each_queue(bp, j) {
+ for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
if (!fp->disable_tpa) {
@@ -866,7 +880,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
}
}
- for_each_queue(bp, j) {
+ for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
fp->rx_bd_cons = 0;
@@ -897,7 +911,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i) {
+ for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
u16 bd_cons = fp->tx_bd_cons;
@@ -915,7 +929,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
int i, j;
- for_each_queue(bp, j) {
+ for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
for (i = 0; i < NUM_RX_BD; i++) {
@@ -956,7 +970,7 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
#ifdef BCM_CNIC
offset++;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
"state %x\n", i, bp->msix_table[i + offset].vector,
bnx2x_fp(bp, i, state));
@@ -990,14 +1004,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
msix_vec++;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
bp->msix_table[msix_vec].entry = msix_vec;
DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
"(fastpath #%u)\n", msix_vec, msix_vec, i);
msix_vec++;
}
- req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
+ req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
@@ -1053,7 +1067,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
#ifdef BCM_CNIC
offset++;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
bp->dev->name, i);
@@ -1070,7 +1084,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
fp->state = BNX2X_FP_STATE_IRQ;
}
- i = BNX2X_NUM_QUEUES(bp);
+ i = BNX2X_NUM_ETH_QUEUES(bp);
offset = 1 + CNIC_CONTEXT_USE;
netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
" ... fp[%d] %d\n",
@@ -1117,7 +1131,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_napi_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
}
@@ -1125,7 +1139,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_napi_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
}
@@ -1153,6 +1167,35 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
netif_tx_disable(bp->dev);
}
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef BCM_CNIC
+ struct bnx2x *bp = netdev_priv(dev);
+ if (NO_FCOE(bp))
+ return skb_tx_hash(dev, skb);
+ else {
+ struct ethhdr *hdr = (struct ethhdr *)skb->data;
+ u16 ether_type = ntohs(hdr->h_proto);
+
+ /* Skip VLAN tag if present */
+ if (ether_type == ETH_P_8021Q) {
+ struct vlan_ethhdr *vhdr =
+ (struct vlan_ethhdr *)skb->data;
+
+ ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+ }
+
+ /* If ethertype is FCoE or FIP - use FCoE ring */
+ if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
+ return bnx2x_fcoe(bp, index);
+ }
+#endif
+ /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring */
+ return __skb_tx_hash(dev, skb,
+ dev->real_num_tx_queues - FCOE_CONTEXT_USE);
+}
+
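bnx2x_select_queue() pins FCoE and FIP ethertypes to the dedicated FCoE ring and hashes all other traffic over the remaining real queues; the matching declaration appears in bnx2x_cmn.h later in this diff. Presumably it is installed as the netdev's queue selector, along these lines (abbreviated sketch; only the relevant fields are shown):

static const struct net_device_ops bnx2x_netdev_ops = {
	/* remaining callbacks omitted from this sketch */
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
};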
void bnx2x_set_num_queues(struct bnx2x *bp)
{
switch (bp->multi_mode) {
@@ -1167,8 +1210,23 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues = 1;
break;
}
+
+ /* Add special queues */
+ bp->num_queues += NONE_ETH_CONTEXT_USE;
}
+#ifdef BCM_CNIC
+static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
+{
+ if (!NO_FCOE(bp)) {
+ if (!IS_MF_SD(bp))
+ bnx2x_set_fip_eth_mac_addr(bp, 1);
+ bnx2x_set_all_enode_macs(bp, 1);
+ bp->flags |= FCOE_MACS_SET;
+ }
+}
+#endif
+
static void bnx2x_release_firmware(struct bnx2x *bp)
{
kfree(bp->init_ops_offsets);
@@ -1177,6 +1235,20 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
release_firmware(bp->firmware);
}
+static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+{
+ int rc, num = bp->num_queues;
+
+#ifdef BCM_CNIC
+ if (NO_FCOE(bp))
+ num -= FCOE_CONTEXT_USE;
+
+#endif
+ netif_set_real_num_tx_queues(bp->dev, num);
+ rc = netif_set_real_num_rx_queues(bp->dev, num);
+ return rc;
+}
+
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
@@ -1203,10 +1275,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bnx2x_alloc_mem(bp))
return -ENOMEM;
- netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
- rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
+ rc = bnx2x_set_real_num_queues(bp);
if (rc) {
- BNX2X_ERR("Unable to update real_num_rx_queues\n");
+ BNX2X_ERR("Unable to set real_num_queues\n");
goto load_error0;
}
@@ -1214,6 +1285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_fp(bp, i, disable_tpa) =
((bp->flags & TPA_ENABLE_FLAG) == 0);
+#ifdef BCM_CNIC
+ /* We don't want TPA on FCoE L2 ring */
+ bnx2x_fcoe(bp, disable_tpa) = 1;
+#endif
bnx2x_napi_enable(bp);
/* Send LOAD_REQUEST command to MCP
@@ -1296,6 +1371,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
+ bnx2x_dcbx_init(bp);
+
bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
rc = bnx2x_func_start(bp);
@@ -1344,6 +1421,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
+#ifdef BCM_CNIC
+ bnx2x_set_fcoe_eth_macs(bp);
+#endif
+
bnx2x_set_eth_mac(bp, 1);
if (bp->port.pmf)
@@ -1402,7 +1483,7 @@ load_error3:
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
- for_each_queue(bp, i)
+ for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
/* Release IRQs */
@@ -1473,7 +1554,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
- for_each_queue(bp, i)
+ for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
bnx2x_free_mem(bp);
@@ -1577,6 +1658,17 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
/* Fall out from the NAPI loop if needed */
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+#ifdef BCM_CNIC
+ /* No need to update SB for FCoE L2 ring as long as
+ * it's connected to the default SB and the SB
+ * has been updated when NAPI was scheduled.
+ */
+ if (IS_FCOE_FP(fp)) {
+ napi_complete(napi);
+ break;
+ }
+#endif
+
bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block,
* thus we need to ensure that status block indices
@@ -1680,7 +1772,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
rc = XMIT_PLAIN;
else {
- if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
rc = XMIT_CSUM_V6;
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
@@ -1692,11 +1784,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
}
}
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
-
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+ if (skb_is_gso_v6(skb))
+ rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
+ else if (skb_is_gso(skb))
+ rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
return rc;
}
@@ -1782,15 +1873,15 @@ exit_lbl:
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
- struct eth_tx_parse_bd_e2 *pbd,
- u32 xmit_type)
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
- pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
- ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
+ *parsing_data |= (skb_shinfo(skb)->gso_size <<
+ ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+ ETH_TX_PARSE_BD_E2_LSO_MSS;
if ((xmit_type & XMIT_GSO_V6) &&
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
- pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+ *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
@@ -1835,15 +1926,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @return header len
*/
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e2 *pbd,
- u32 xmit_type)
+ u32 *parsing_data, u32 xmit_type)
{
- pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
+ *parsing_data |= ((tcp_hdrlen(skb)/4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
- skb->data) / 2) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
+ *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
}
@@ -1912,6 +2003,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, fp_index;
dma_addr_t mapping;
@@ -2033,8 +2125,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM)
- hlen = bnx2x_set_pbd_csum_e2(bp,
- skb, pbd_e2, xmit_type);
+ hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
} else {
pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
@@ -2076,10 +2169,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
hlen, bd_prod, ++nbd);
if (CHIP_IS_E2(bp))
- bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
+ bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
+ xmit_type);
else
bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
}
+
+ /* Set the PBD's parsing_data field if not zero
+ * (for the chips newer than 57711).
+ */
+ if (pbd_e2_parsing_data)
+ pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
/* Handle fragmented skb */
@@ -2232,7 +2333,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
bp->fp = fp;
/* msix table */
- tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
+ tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
GFP_KERNEL);
if (!tbl)
goto alloc_err;
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 6b28739c5302..03eb4d68e6bb 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -73,6 +73,16 @@ void bnx2x__link_status_update(struct bnx2x *bp);
void bnx2x_link_report(struct bnx2x *bp);
/**
+ * calculates MF speed according to the current line speed and MF
+ * configuration
+ *
+ * @param bp
+ *
+ * @return u16
+ */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp);
+
+/**
* MSI-X slowpath interrupt handler
*
* @param irq
@@ -232,6 +242,30 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
+#ifdef BCM_CNIC
+/**
+ * Set/Clear FIP MAC(s) at the next entries in the CAM after the ETH
+ * MAC(s). This function will wait until the ramrod completion
+ * returns.
+ *
+ * @param bp driver handle
+ * @param set set or clear the CAM entry
+ *
+ * @return 0 on success, -ENODEV if the ramrod completion never arrives.
+ */
+int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
+
+/**
+ * Set/Clear ALL_ENODE mcast MAC.
+ *
+ * @param bp
+ * @param set
+ *
+ * @return int
+ */
+int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
+#endif
+
/**
* Set MAC filtering configurations.
*
@@ -290,6 +324,13 @@ int bnx2x_func_start(struct bnx2x *bp);
void bnx2x_ilt_set_info(struct bnx2x *bp);
/**
+ * Initialize the dcbx protocol
+ *
+ * @param bp
+ */
+void bnx2x_dcbx_init(struct bnx2x *bp);
+
+/**
* Set power state to the requested value. Currently only D0 and
* D3hot are supported.
*
@@ -309,6 +350,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+/* select_queue callback */
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+
int bnx2x_change_mac_addr(struct net_device *dev, void *p);
/* NAPI poll Rx part */
@@ -685,7 +729,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
int i;
/* Add NAPI objects */
- for_each_queue(bp, i)
+ for_each_napi_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@@ -694,7 +738,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_napi_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
@@ -860,7 +904,7 @@ static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
int i, j;
- for_each_queue(bp, j) {
+ for_each_tx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
for (i = 1; i <= NUM_TX_RINGS; i++) {
@@ -939,7 +983,30 @@ static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
}
}
+#ifdef BCM_CNIC
+static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+ bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
+ BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
+ bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+ bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+ bnx2x_fcoe(bp, bp) = bp;
+ bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
+ bnx2x_fcoe(bp, index) = FCOE_IDX;
+ bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+ bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
+ /* qZone id equals the FW (per path) client id */
+ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
+ BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
+ ETH_MAX_RX_CLIENTS_E1H);
+ /* init shortcut */
+ bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
+ USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
+ USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
+}
+#endif
static inline void __storm_memset_struct(struct bnx2x *bp,
u32 addr, size_t size, u32 *data)
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
new file mode 100644
index 000000000000..0b86480379ff
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -0,0 +1,1491 @@
+/* bnx2x_dcb.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2010 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+
+
+/* forward declarations of dcbx related functions */
+static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
+static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+ u32 *set_configuration_ets_pg,
+ u32 *pri_pg_tbl);
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+ u32 *pg_pri_orginal_spread,
+ struct pg_help_data *help_data);
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread);
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ struct dcbx_ets_feature *ets);
+static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp);
+
+
+static void bnx2x_pfc_set(struct bnx2x *bp)
+{
+ struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
+ u32 pri_bit, val = 0;
+ u8 pri;
+
+ /* Tx COS configuration */
+ if (bp->dcbx_port_params.ets.cos_params[0].pauseable)
+ pfc_params.rx_cos0_priority_mask =
+ bp->dcbx_port_params.ets.cos_params[0].pri_bitmask;
+ if (bp->dcbx_port_params.ets.cos_params[1].pauseable)
+ pfc_params.rx_cos1_priority_mask =
+ bp->dcbx_port_params.ets.cos_params[1].pri_bitmask;
+
+
+ /*
+ * Rx COS configuration:
+ * change the PFC RX configuration.
+ * In RX, COS0 will always be configured as lossy and COS1 as lossless.
+ */
+ for (pri = 0; pri < MAX_PFC_PRIORITIES; pri++) {
+ pri_bit = 1 << pri;
+
+ if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
+ val |= 1 << (pri * 4);
+ }
+
+ pfc_params.pkt_priority_to_cos = val;
+
+ /* RX COS0 */
+ pfc_params.llfc_low_priority_classes = 0;
+ /* RX COS1 */
+ pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
+
+ /* BRB configuration */
+ pfc_params.cos0_pauseable = false;
+ pfc_params.cos1_pauseable = true;
+
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
+ bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_pfc_clear(struct bnx2x *bp)
+{
+ struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
+ nig_params.pause_enable = 1;
+#ifdef BNX2X_SAFC
+ if (bp->flags & SAFC_TX_FLAG) {
+ u32 high = 0, low = 0;
+ int i;
+
+ for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
+ if (bp->pri_map[i] == 1)
+ high |= (1 << i);
+ if (bp->pri_map[i] == 0)
+ low |= (1 << i);
+ }
+
+ nig_params.llfc_high_priority_classes = high;
+ nig_params.llfc_low_priority_classes = low;
+
+ nig_params.pause_enable = 0;
+ nig_params.llfc_enable = 1;
+ nig_params.llfc_out_en = 1;
+ }
+#endif /* BNX2X_SAFC */
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
+ bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
+ struct dcbx_features *features,
+ u32 error)
+{
+ u8 i = 0;
+ DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
+
+ /* PG */
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.enabled %x\n", features->ets.enabled);
+ for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
+ DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
+ DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
+
+ /* pfc */
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
+ features->pfc.pri_en_bitmap);
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
+ features->pfc.pfc_caps);
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
+ features->pfc.enabled);
+
+ DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
+ features->app.default_pri);
+ DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
+ features->app.tc_supported);
+ DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
+ features->app.enabled);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
+ i, features->app.app_pri_tbl[i].app_id);
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
+ i, features->app.app_pri_tbl[i].pri_bitmap);
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
+ i, features->app.app_pri_tbl[i].appBitfield);
+ }
+}
+
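+/* Record the highest priority found in pri_bitmap for the given
+ * traffic type, keeping the maximum over repeated app entries.
+ */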
+static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
+ u8 pri_bitmap,
+ u8 llfc_traf_type)
+{
+ u32 pri = MAX_PFC_PRIORITIES;
+ u32 index = MAX_PFC_PRIORITIES - 1;
+ u32 pri_mask;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ /* Choose the highest priority */
+ while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
+ pri_mask = 1 << index;
+ if (GET_FLAGS(pri_bitmap, pri_mask))
+			pri = index;
+ index--;
+ }
+
+ if (pri < MAX_PFC_PRIORITIES)
+ ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
+}
+
+static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
+ struct dcbx_app_priority_feature *app,
+				      u32 error)
+{
+ u8 index;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
+
+ if (app->enabled && !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) {
+
+ bp->dcbx_port_params.app.enabled = true;
+
+ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+ ttp[index] = 0;
+
+ if (app->default_pri < MAX_PFC_PRIORITIES)
+ ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
+
+ for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
+ struct dcbx_app_priority_entry *entry =
+ app->app_pri_tbl;
+
+ if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_ETH_TYPE) &&
+ ETH_TYPE_FCOE == entry[index].app_id)
+ bnx2x_dcbx_get_ap_priority(bp,
+ entry[index].pri_bitmap,
+ LLFC_TRAFFIC_TYPE_FCOE);
+
+ if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_PORT) &&
+ TCP_PORT_ISCSI == entry[index].app_id)
+ bnx2x_dcbx_get_ap_priority(bp,
+ entry[index].pri_bitmap,
+ LLFC_TRAFFIC_TYPE_ISCSI);
+ }
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
+ bp->dcbx_port_params.app.enabled = false;
+ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+ ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
+ }
+}
+
+static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
+ struct dcbx_ets_feature *ets,
+				       u32 error)
+{
+ int i = 0;
+ u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
+ struct pg_help_data pg_help_data;
+ struct bnx2x_dcbx_cos_params *cos_params =
+ bp->dcbx_port_params.ets.cos_params;
+
+ memset(&pg_help_data, 0, sizeof(struct pg_help_data));
+
+
+ if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
+
+
+ /* Clean up old settings of ets on COS */
+ for (i = 0; i < E2_NUM_OF_COS ; i++) {
+
+ cos_params[i].pauseable = false;
+ cos_params[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
+ cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
+ cos_params[i].pri_bitmask = DCBX_PFC_PRI_GET_NON_PAUSE(bp, 0);
+ }
+
+ if (bp->dcbx_port_params.app.enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
+ ets->enabled) {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
+ bp->dcbx_port_params.ets.enabled = true;
+
+ bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
+ pg_pri_orginal_spread,
+ ets->pri_pg_tbl);
+
+ bnx2x_dcbx_get_num_pg_traf_type(bp,
+ pg_pri_orginal_spread,
+ &pg_help_data);
+
+ bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
+ ets, pg_pri_orginal_spread);
+
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
+ bp->dcbx_port_params.ets.enabled = false;
+ ets->pri_pg_tbl[0] = 0;
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
+ DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
+ }
+}
+
+static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
+ struct dcbx_pfc_feature *pfc, u32 error)
+{
+
+ if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
+
+ if (bp->dcbx_port_params.app.enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR) &&
+ pfc->enabled) {
+ bp->dcbx_port_params.pfc.enabled = true;
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
+ ~(pfc->pri_en_bitmap);
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
+ bp->dcbx_port_params.pfc.enabled = false;
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
+ }
+}
+
+static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
+ struct dcbx_features *features,
+ u32 error)
+{
+ bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
+
+ bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
+
+ bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
+}
+
+#define DCBX_LOCAL_MIB_MAX_TRY_READ (100)
+static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
+ u32 *base_mib_addr,
+ u32 offset,
+ int read_mib_type)
+{
+ int max_try_read = 0, i;
+ u32 *buff, mib_size, prefix_seq_num, suffix_seq_num;
+	struct lldp_remote_mib *remote_mib;
+ struct lldp_local_mib *local_mib;
+
+
+ switch (read_mib_type) {
+ case DCBX_READ_LOCAL_MIB:
+ mib_size = sizeof(struct lldp_local_mib);
+ break;
+ case DCBX_READ_REMOTE_MIB:
+ mib_size = sizeof(struct lldp_remote_mib);
+ break;
+ default:
+ return 1; /*error*/
+ }
+
+ offset += BP_PORT(bp) * mib_size;
+
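+	/* the MIB is consistent only when the prefix and suffix sequence
+	 * numbers match; re-read (bounded) until they do
+	 */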
+ do {
+ buff = base_mib_addr;
+ for (i = 0; i < mib_size; i += 4, buff++)
+ *buff = REG_RD(bp, offset + i);
+
+ max_try_read++;
+
+ switch (read_mib_type) {
+ case DCBX_READ_LOCAL_MIB:
+ local_mib = (struct lldp_local_mib *) base_mib_addr;
+ prefix_seq_num = local_mib->prefix_seq_num;
+ suffix_seq_num = local_mib->suffix_seq_num;
+ break;
+ case DCBX_READ_REMOTE_MIB:
+ remote_mib = (struct lldp_remote_mib *) base_mib_addr;
+ prefix_seq_num = remote_mib->prefix_seq_num;
+ suffix_seq_num = remote_mib->suffix_seq_num;
+ break;
+ default:
+ return 1; /*error*/
+ }
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
+
+ if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
+ BNX2X_ERR("MIB could not be read\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp)) {
+ if (BP_PORT(bp)) {
+			BNX2X_ERR("4 port mode is not supported\n");
+ return;
+ }
+
+		/* bnx2x_pfc_set():
+		 * 1. fills up common PFC structures if required
+		 * 2. configures NIG, MAC and BRB via the elink;
+		 *    the elink must first check that the BMAC is not in
+		 *    reset and only then configure the BMAC
+		 *    (or, alternatively, the EMAC)
+		 */
+		if (bp->dcbx_port_params.pfc.enabled)
+			bnx2x_pfc_set(bp);
+		else
+			bnx2x_pfc_clear(bp);
+ }
+}
+
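+/* Tx traffic is stopped while the PFC/ETS hardware is reconfigured and
+ * resumed once the new pfc_config has been passed to the FW.
+ */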
+static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+{
+ DP(NETIF_MSG_LINK, "sending STOP TRAFFIC\n");
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
+ 0 /* connectionless */,
+ 0 /* dataHi is zero */,
+ 0 /* dataLo is zero */,
+ 1 /* common */);
+}
+
+static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+{
+ bnx2x_pfc_fw_struct_e2(bp);
+ DP(NETIF_MSG_LINK, "sending START TRAFFIC\n");
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC,
+ 0, /* connectionless */
+ U64_HI(bnx2x_sp_mapping(bp, pfc_config)),
+ U64_LO(bnx2x_sp_mapping(bp, pfc_config)),
+		      1 /* common */);
+}
+
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+ u8 status = 0;
+
+ bnx2x_ets_disabled(&bp->link_params);
+
+ if (!ets->enabled)
+ return;
+
+ if ((ets->num_of_cos == 0) || (ets->num_of_cos > E2_NUM_OF_COS)) {
+		BNX2X_ERR("illegal number of COSes = %x\n", ets->num_of_cos);
+ return;
+ }
+
+ /* valid COS entries */
+ if (ets->num_of_cos == 1) /* no ETS */
+ return;
+
+ /* sanity */
+ if (((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[0].strict) &&
+ (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
+ ((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[1].strict) &&
+ (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
+		BNX2X_ERR("all COS should have at least bw_limit or strict: "
+			  "ets->cos_params[0].strict=%x "
+			  "ets->cos_params[0].bw_tbl=%x "
+			  "ets->cos_params[1].strict=%x "
+			  "ets->cos_params[1].bw_tbl=%x\n",
+ ets->cos_params[0].strict,
+ ets->cos_params[0].bw_tbl,
+ ets->cos_params[1].strict,
+ ets->cos_params[1].bw_tbl);
+ return;
+ }
+	/* if both COS entries carry a valid bw_tbl, bw limiting rules
+	 * (strict settings are then ignored)
+	 */
+ if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
+ (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
+ u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
+ u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
+ /* Do not allow 0-100 configuration
+ * since PBF does not support it
+ * force 1-99 instead
+ */
+ if (bw_tbl_0 == 0) {
+ bw_tbl_0 = 1;
+ bw_tbl_1 = 99;
+ } else if (bw_tbl_1 == 0) {
+ bw_tbl_1 = 1;
+ bw_tbl_0 = 99;
+ }
+
+ bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
+ } else {
+ if (ets->cos_params[0].strict == BNX2X_DCBX_COS_HIGH_STRICT)
+ status = bnx2x_ets_strict(&bp->link_params, 0);
+ else if (ets->cos_params[1].strict
+ == BNX2X_DCBX_COS_HIGH_STRICT)
+ status = bnx2x_ets_strict(&bp->link_params, 1);
+
+ if (status)
+ BNX2X_ERR("update_ets_params failed\n");
+ }
+}
+
+static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
+{
+ struct lldp_local_mib local_mib = {0};
+ u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
+ int rc;
+
+ DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
+
+ if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
+ BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
+ return -EINVAL;
+ }
+ rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
+ DCBX_READ_LOCAL_MIB);
+
+ if (rc) {
+		BNX2X_ERR("Failed to read local mib from FW\n");
+ return rc;
+ }
+
+ /* save features and error */
+ bp->dcbx_local_feat = local_mib.features;
+ bp->dcbx_error = local_mib.error;
+ return 0;
+}
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
+{
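+	/* The negotiated DCBX settings are applied in three stages:
+	 * NEG_RECEIVED - read the results from shmem and stop Tx,
+	 * TX_PAUSED    - program PFC/ETS hardware and resume Tx,
+	 * TX_RELEASED  - ack the FW.
+	 * During initial load the stages fall through back-to-back.
+	 */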
+ switch (state) {
+ case BNX2X_DCBX_STATE_NEG_RECEIVED:
+ {
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
+
+ /* Read neg results if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_neg_results(bp))
+ return;
+
+ bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+
+ bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+
+ if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
+ bnx2x_dcbx_stop_hw_tx(bp);
+ return;
+ }
+ /* fall through */
+ }
+ case BNX2X_DCBX_STATE_TX_PAUSED:
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
+ bnx2x_pfc_set_pfc(bp);
+
+ bnx2x_dcbx_update_ets_params(bp);
+ if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
+ bnx2x_dcbx_resume_hw_tx(bp);
+ return;
+ }
+ /* fall through */
+ case BNX2X_DCBX_STATE_TX_RELEASED:
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
+ if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD)
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
+
+ return;
+ default:
+ BNX2X_ERR("Unknown DCBX_STATE\n");
+ }
+}
+
+
+#define LLDP_STATS_OFFSET(bp) (BP_PORT(bp)*\
+ sizeof(struct lldp_dcbx_stat))
+
+/* calculate struct offset in array according to chip information */
+#define LLDP_PARAMS_OFFSET(bp) (BP_PORT(bp)*sizeof(struct lldp_params))
+
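+/* the admin MIB array follows the per-port lldp_params array in shmem */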
+#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \
+ BP_PORT(bp)*sizeof(struct lldp_admin_mib))
+
+static void bnx2x_dcbx_lldp_updated_params(struct bnx2x *bp,
+ u32 dcbx_lldp_params_offset)
+{
+ struct lldp_params lldp_params = {0};
+ u32 i = 0, *buff = NULL;
+ u32 offset = dcbx_lldp_params_offset + LLDP_PARAMS_OFFSET(bp);
+
+ DP(NETIF_MSG_LINK, "lldp_offset 0x%x\n", offset);
+
+ if ((bp->lldp_config_params.overwrite_settings ==
+ BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE)) {
+ /* Read the data first */
+ buff = (u32 *)&lldp_params;
+ for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++)
+ *buff = REG_RD(bp, (offset + i));
+
+ lldp_params.msg_tx_hold =
+ (u8)bp->lldp_config_params.msg_tx_hold;
+ lldp_params.msg_fast_tx_interval =
+ (u8)bp->lldp_config_params.msg_fast_tx;
+ lldp_params.tx_crd_max =
+ (u8)bp->lldp_config_params.tx_credit_max;
+ lldp_params.msg_tx_interval =
+ (u8)bp->lldp_config_params.msg_tx_interval;
+ lldp_params.tx_fast =
+ (u8)bp->lldp_config_params.tx_fast;
+
+ /* Write the data.*/
+ buff = (u32 *)&lldp_params;
+ for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++)
+			REG_WR(bp, (offset + i), *buff);
+
+
+ } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
+ bp->lldp_config_params.overwrite_settings)
+ bp->lldp_config_params.overwrite_settings =
+ BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID;
+}
+
+static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
+ u32 dcbx_lldp_params_offset)
+{
+ struct lldp_admin_mib admin_mib;
+ u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
+ u32 *buff;
+ u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
+
+ /*shortcuts*/
+ struct dcbx_features *af = &admin_mib.features;
+ struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
+
+ memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
+ buff = (u32 *)&admin_mib;
+ /* Read the data first */
+ for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++)
+ *buff = REG_RD(bp, (offset + i));
+
+
+ if (BNX2X_DCBX_CONFIG_INV_VALUE != dp->admin_dcbx_enable) {
+ if (dp->admin_dcbx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+ }
+
+ if ((BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
+ dp->overwrite_settings)) {
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
+ admin_mib.ver_cfg_flags |=
+ (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
+ DCBX_CEE_VERSION_MASK;
+
+ af->ets.enabled = (u8)dp->admin_ets_enable;
+
+ af->pfc.enabled = (u8)dp->admin_pfc_enable;
+
+ /* FOR IEEE dp->admin_tc_supported_tx_enable */
+ if (dp->admin_ets_configuration_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_ETS_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_ETS_CONFIG_TX_ENABLED);
+ /* For IEEE admin_ets_recommendation_tx_enable */
+ if (dp->admin_pfc_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_PFC_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_PFC_CONFIG_TX_ENABLED);
+
+ if (dp->admin_application_priority_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_APP_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_APP_CONFIG_TX_ENABLED);
+
+ if (dp->admin_ets_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+ /* For IEEE admin_ets_reco_valid */
+ if (dp->admin_pfc_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+
+ if (dp->admin_app_priority_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+
+ for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
+ DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
+ (u8)dp->admin_configuration_bw_precentage[i]);
+
+ DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
+ i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
+ }
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+ DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
+ (u8)dp->admin_configuration_ets_pg[i]);
+
+ DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
+ i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
+ }
+
+		/* For IEEE admin_recommendation_bw_precentage
+		 * For IEEE admin_recommendation_ets_pg */
+ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
+ for (i = 0; i < 4; i++) {
+ if (dp->admin_priority_app_table[i].valid) {
+ struct bnx2x_admin_priority_app_table *table =
+ dp->admin_priority_app_table;
+ if ((ETH_TYPE_FCOE == table[i].app_id) &&
+ (TRAFFIC_TYPE_ETH == table[i].traffic_type))
+ traf_type = FCOE_APP_IDX;
+ else if ((TCP_PORT_ISCSI == table[i].app_id) &&
+ (TRAFFIC_TYPE_PORT == table[i].traffic_type))
+ traf_type = ISCSI_APP_IDX;
+ else
+ traf_type = other_traf_type++;
+
+ af->app.app_pri_tbl[traf_type].app_id =
+ table[i].app_id;
+
+ af->app.app_pri_tbl[traf_type].pri_bitmap =
+ (u8)(1 << table[i].priority);
+
+ af->app.app_pri_tbl[traf_type].appBitfield =
+ (DCBX_APP_ENTRY_VALID);
+
+ af->app.app_pri_tbl[traf_type].appBitfield |=
+ (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
+ DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
+ }
+ }
+
+ af->app.default_pri = (u8)dp->admin_default_priority;
+
+ } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
+ dp->overwrite_settings)
+ dp->overwrite_settings = BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID;
+
+ /* Write the data. */
+ buff = (u32 *)&admin_mib;
+ for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++)
+ REG_WR(bp, (offset + i), *buff);
+}
+
+/* default */
+void bnx2x_dcbx_init_params(struct bnx2x *bp)
+{
+ bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
+ bp->dcbx_config_params.dcb_enable = 1;
+ bp->dcbx_config_params.admin_dcbx_enable = 1;
+ bp->dcbx_config_params.admin_ets_willing = 1;
+ bp->dcbx_config_params.admin_pfc_willing = 1;
+ bp->dcbx_config_params.overwrite_settings = 1;
+ bp->dcbx_config_params.admin_ets_enable = 1;
+ bp->dcbx_config_params.admin_pfc_enable = 1;
+ bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+ bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+ bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+ bp->dcbx_config_params.admin_ets_reco_valid = 1;
+ bp->dcbx_config_params.admin_app_priority_willing = 1;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
+ bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
+ bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
+ bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
+ bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
+ bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
+ bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
+ bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
+ bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
+ bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
+ bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
+ bp->dcbx_config_params.admin_default_priority =
+ bp->dcbx_config_params.admin_priority_app_table[1].priority;
+}
+
+void bnx2x_dcbx_init(struct bnx2x *bp)
+{
+ u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+	/* validate:
+	 * the chip is suitable for this dcbx version,
+	 * dcb is requested,
+	 * this function is the PMF,
+	 * shmem2 contains the DCBX support fields
+	 */
+ DP(NETIF_MSG_LINK, "dcb_enable %d bp->port.pmf %d\n",
+ bp->dcbx_config_params.dcb_enable, bp->port.pmf);
+
+ if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp) &&
+ bp->dcbx_config_params.dcb_enable &&
+ bp->port.pmf &&
+ SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
+ dcbx_lldp_params_offset = SHMEM2_RD(bp,
+ dcbx_lldp_params_offset);
+ DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
+ dcbx_lldp_params_offset);
+ if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
+ bnx2x_dcbx_lldp_updated_params(bp,
+ dcbx_lldp_params_offset);
+
+ bnx2x_dcbx_admin_mib_updated_params(bp,
+ dcbx_lldp_params_offset);
+
+			/* apply the default configuration the BC has set */
+ bnx2x_dcbx_set_params(bp,
+ BNX2X_DCBX_STATE_NEG_RECEIVED);
+
+ bnx2x_fw_command(bp,
+ DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+ }
+ }
+}
+
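+/* Write a default per-port config to XSTORM internal memory with both
+ * LLFC and DCB disabled, and all traffic types unmapped.
+ */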
+void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp)
+{
+ struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES];
+ u32 i = 0, addr;
+ memset(pricos, 0, sizeof(pricos));
+ /* Default initialization */
+ for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++)
+ pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
+
+ /* Store per port struct to internal memory */
+ addr = BAR_XSTRORM_INTMEM +
+ XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
+ offsetof(struct cmng_struct_per_port,
+ traffic_type_to_priority_cos);
+ __storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos);
+
+
+ /* LLFC disabled.*/
+	REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
+ offsetof(struct cmng_struct_per_port, llfc_mode),
+ LLFC_MODE_NONE);
+
+ /* DCBX disabled.*/
+	REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
+ offsetof(struct cmng_struct_per_port, dcb_enabled),
+ DCB_DISABLED);
+}
+
+static void
+bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
+ struct flow_control_configuration *pfc_fw_cfg)
+{
+ u8 pri = 0;
+ u8 cos = 0;
+
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
+ DP(NETIF_MSG_LINK,
+ "pdev->params.dcbx_port_params.pfc."
+ "priority_non_pauseable_mask %x\n",
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
+
+ for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].pri_bitmask %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].bw_tbl %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].strict %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].strict);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].pauseable %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].pauseable);
+ }
+
+ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
+ "priority %x\n", pri,
+ pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
+
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
+ pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
+ }
+}
+
+/* fills help_data according to pg_info */
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+ u32 *pg_pri_orginal_spread,
+ struct pg_help_data *help_data)
+{
+ bool pg_found = false;
+ u32 i, traf_type, add_traf_type, add_pg;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	struct pg_entry_help_data *data = help_data->data; /* shortcut */
+
+ /* Set to invalid */
+ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+ data[i].pg = DCBX_ILLEGAL_PG;
+
+ for (add_traf_type = 0;
+ add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
+ pg_found = false;
+ if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
+ add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
+ for (traf_type = 0;
+ traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+ traf_type++) {
+ if (data[traf_type].pg == add_pg) {
+ if (!(data[traf_type].pg_priority &
+ (1 << ttp[add_traf_type])))
+ data[traf_type].
+ num_of_dif_pri++;
+ data[traf_type].pg_priority |=
+ (1 << ttp[add_traf_type]);
+ pg_found = true;
+ break;
+ }
+ }
+ if (false == pg_found) {
+ data[help_data->num_of_pg].pg = add_pg;
+ data[help_data->num_of_pg].pg_priority =
+ (1 << ttp[add_traf_type]);
+ data[help_data->num_of_pg].num_of_dif_pri = 1;
+ help_data->num_of_pg++;
+ }
+ }
+ DP(NETIF_MSG_LINK,
+ "add_traf_type %d pg_found %s num_of_pg %d\n",
+ add_traf_type, (false == pg_found) ? "NO" : "YES",
+ help_data->num_of_pg);
+ }
+}
+
+
+/*******************************************************************************
+ * Description: single COS entry data when ETS is disabled
+ *
+ * Return:
+ ******************************************************************************/
+static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask)
+{
+	/* only one priority, hence only one COS */
+ cos_data->data[0].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+ cos_data->data[0].pri_join_mask = pri_join_mask;
+ cos_data->data[0].cos_bw = 100;
+ cos_data->num_of_cos = 1;
+}
+
+/*******************************************************************************
+ * Description: updating the cos bw
+ *
+ * Return:
+ ******************************************************************************/
+static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
+ struct cos_entry_help_data *data,
+ u8 pg_bw)
+{
+ if (data->cos_bw == DCBX_INVALID_COS_BW)
+ data->cos_bw = pg_bw;
+ else
+ data->cos_bw += pg_bw;
+}
+
+/*******************************************************************************
+ * Description: separate pauseable from non-pauseable priorities
+ *
+ * Return:
+ ******************************************************************************/
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ struct dcbx_ets_feature *ets)
+{
+ u32 pri_tested = 0;
+ u8 i = 0;
+ u8 entry = 0;
+ u8 pg_entry = 0;
+ u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+ cos_data->data[0].pausable = true;
+ cos_data->data[1].pausable = false;
+ cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+
+ for (i = 0 ; i < num_of_pri ; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+
+ if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
+ cos_data->data[1].pri_join_mask |= pri_tested;
+ entry = 1;
+ } else {
+ cos_data->data[0].pri_join_mask |= pri_tested;
+ entry = 0;
+ }
+ pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
+ app.traffic_type_priority[i]];
+ /* There can be only one strict pg */
+ if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
+ bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
+ DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
+ else
+			/* if a strict PG joins the group, the whole
+			 * entry becomes strict priority */
+ cos_data->data[entry].strict =
+ BNX2X_DCBX_COS_HIGH_STRICT;
+ }
+ if ((0 == cos_data->data[0].pri_join_mask) &&
+ (0 == cos_data->data[1].pri_join_mask))
+ BNX2X_ERR("dcbx error: Both groups must have priorities\n");
+}
+
+
+#ifndef POWER_OF_2
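+/* true when x is a non-zero power of two */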
+#define POWER_OF_2(x)	((0 != (x)) && (0 == ((x) & ((x) - 1))))
+#endif
+
+static void bnx2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u32 pri_tested = 0;
+ u32 pri_mask_without_pri = 0;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ if (num_of_dif_pri == 1) {
+ bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
+ return;
+ }
+ /* single priority group */
+ if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and
+ * the non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable.*/
+ cos_data->data[1].pausable = false;
+
+ if (2 == num_of_dif_pri) {
+ cos_data->data[0].cos_bw = 50;
+ cos_data->data[1].cos_bw = 50;
+ }
+
+ if (3 == num_of_dif_pri) {
+ if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
+ pri_join_mask))) {
+ cos_data->data[0].cos_bw = 33;
+ cos_data->data[1].cos_bw = 67;
+ } else {
+ cos_data->data[0].cos_bw = 67;
+ cos_data->data[1].cos_bw = 33;
+ }
+ }
+
+ } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
+ /* If there are only pauseable priorities,
+ * then one/two priorities go to the first queue
+ * and one priority goes to the second queue.
+ */
+ if (2 == num_of_dif_pri) {
+ cos_data->data[0].cos_bw = 50;
+ cos_data->data[1].cos_bw = 50;
+ } else {
+ cos_data->data[0].cos_bw = 67;
+ cos_data->data[1].cos_bw = 33;
+ }
+ cos_data->data[1].pausable = true;
+ cos_data->data[0].pausable = true;
+ /* All priorities except FCOE */
+ cos_data->data[0].pri_join_mask = (pri_join_mask &
+ ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
+ /* Only FCOE priority.*/
+ cos_data->data[1].pri_join_mask =
+ (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
+ } else
+ /* If there are only non-pauseable priorities,
+ * they will all go to the same queue.
+ */
+ bnx2x_dcbx_ets_disabled_entry_data(bp,
+ cos_data, pri_join_mask);
+ } else {
+ /* priority group which is not BW limited (PG#15):*/
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ /* If there are both pauseable and non-pauseable
+ * priorities, the pauseable priorities go to the first
+ * queue and the non-pauseable priorities
+ * go to the second queue.
+ */
+ if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
+ cos_data->data[0].strict =
+ BNX2X_DCBX_COS_HIGH_STRICT;
+ cos_data->data[1].strict =
+ BNX2X_DCBX_COS_LOW_STRICT;
+ } else {
+ cos_data->data[0].strict =
+ BNX2X_DCBX_COS_LOW_STRICT;
+ cos_data->data[1].strict =
+ BNX2X_DCBX_COS_HIGH_STRICT;
+ }
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pause-able.*/
+ cos_data->data[1].pausable = false;
+ } else {
+			/* If there are only pauseable priorities or
+			 * only non-pauseable ones, the lower priorities go
+			 * to the first queue and the higher priorities go
+			 * to the second queue.
+ */
+ cos_data->data[0].pausable =
+ cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+ for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+ /* Remove priority tested */
+ pri_mask_without_pri =
+ (pri_join_mask & ((u8)(~pri_tested)));
+ if (pri_mask_without_pri < pri_tested)
+ break;
+ }
+
+ if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
+ BNX2X_ERR("Invalid value for pri_join_mask -"
+ " could not find a priority\n");
+
+ cos_data->data[0].pri_join_mask = pri_mask_without_pri;
+ cos_data->data[1].pri_join_mask = pri_tested;
+ /* Both queues are strict priority,
+ * and that with the highest priority
+ * gets the highest strict priority in the arbiter.
+ */
+ cos_data->data[0].strict = BNX2X_DCBX_COS_LOW_STRICT;
+ cos_data->data[1].strict = BNX2X_DCBX_COS_HIGH_STRICT;
+ }
+ }
+}
+
+static void bnx2x_dcbx_two_pg_to_cos_params(
+ struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u8 pg[E2_NUM_OF_COS] = {0};
+
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and
+ * the non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+ pg_help_data->data[0].pg_priority) ||
+ IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+ pg_help_data->data[1].pg_priority)) {
+ /* If one PG contains both pauseable and
+ * non-pauseable priorities then ETS is disabled.
+ */
+ bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
+ pg_pri_orginal_spread, ets);
+ bp->dcbx_port_params.ets.enabled = false;
+ return;
+ }
+
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable. */
+ cos_data->data[1].pausable = false;
+ if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
+ pg_help_data->data[0].pg_priority)) {
+ /* 0 is pauseable */
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[0] = pg_help_data->data[0].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[1] = pg_help_data->data[1].pg;
+ } else {/* 1 is pauseable */
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[0] = pg_help_data->data[1].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[1] = pg_help_data->data[0].pg;
+ }
+ } else {
+ /* If there are only pauseable priorities or
+ * only non-pauseable, each PG goes to a queue.
+ */
+ cos_data->data[0].pausable = cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[0] = pg_help_data->data[0].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[1] = pg_help_data->data[1].pg;
+ }
+
+ /* There can be only one strict pg */
+ for (i = 0 ; i < E2_NUM_OF_COS; i++) {
+ if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
+ cos_data->data[i].cos_bw =
+ DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
+ else
+ cos_data->data[i].strict = BNX2X_DCBX_COS_HIGH_STRICT;
+ }
+}
+
+/*******************************************************************************
+ * Description: three priority groups to COS parameters
+ *
+ * Return:
+ ******************************************************************************/
+static void bnx2x_dcbx_three_pg_to_cos_params(
+ struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u32 pri_tested = 0;
+ u8 entry = 0;
+ u8 pg_entry = 0;
+ bool b_found_strict = false;
+ u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+ cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and the
+ * non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
+ bnx2x_dcbx_separate_pauseable_from_non(bp,
+ cos_data, pg_pri_orginal_spread, ets);
+ else {
+ /* If two BW-limited PG-s were combined to one queue,
+ * the BW is their sum.
+ *
+ * If there are only pauseable priorities or only non-pauseable,
+ * and there are both BW-limited and non-BW-limited PG-s,
+ * the BW-limited PG/s go to one queue and the non-BW-limited
+ * PG/s go to the second queue.
+ *
+ * If there are only pauseable priorities or only non-pauseable
+ * and all are BW limited, then two priorities go to the first
+ * queue and one priority goes to the second queue.
+ *
+		 * These two cases are merged here:
+		 * if one PG is strict (not BW limited) it goes to the
+		 * second queue, otherwise the last priority goes there.
+ */
+
+ cos_data->data[0].pausable = cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+ for (i = 0 ; i < num_of_pri; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+ pg_entry = (u8)pg_pri_orginal_spread[bp->
+ dcbx_port_params.app.traffic_type_priority[i]];
+
+ if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ entry = 0;
+
+ if (i == (num_of_pri-1) &&
+ false == b_found_strict)
+				/* the last entry is handled separately:
+				 * if no priority is strict, the last
+				 * entry goes to the last queue */
+ entry = 1;
+ cos_data->data[entry].pri_join_mask |=
+ pri_tested;
+ bnx2x_dcbx_add_to_cos_bw(bp,
+ &cos_data->data[entry],
+ DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_entry));
+ } else {
+ b_found_strict = true;
+ cos_data->data[1].pri_join_mask |= pri_tested;
+				/* if a strict PG joins the group, the
+				 * whole entry becomes strict priority */
+ cos_data->data[1].strict =
+ BNX2X_DCBX_COS_HIGH_STRICT;
+ }
+ }
+ }
+}
+
+
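+/* Translate the PG spread into the two E2 COS entries: dispatch on the
+ * number of distinct PGs, then copy the result into dcbx_port_params.
+ */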
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread)
+{
+	struct cos_help_data cos_data;
+ u8 i = 0;
+ u32 pri_join_mask = 0;
+ u8 num_of_dif_pri = 0;
+
+ memset(&cos_data, 0, sizeof(cos_data));
+ /* Validate the pg value */
+ for (i = 0; i < help_data->num_of_pg ; i++) {
+ if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
+ DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
+ BNX2X_ERR("Invalid pg[%d] data %x\n", i,
+ help_data->data[i].pg);
+ pri_join_mask |= help_data->data[i].pg_priority;
+ num_of_dif_pri += help_data->data[i].num_of_dif_pri;
+ }
+
+ /* default settings */
+ cos_data.num_of_cos = 2;
+ for (i = 0; i < E2_NUM_OF_COS ; i++) {
+ cos_data.data[i].pri_join_mask = pri_join_mask;
+ cos_data.data[i].pausable = false;
+ cos_data.data[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
+ cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
+ }
+
+ switch (help_data->num_of_pg) {
+ case 1:
+
+		bnx2x_dcbx_single_pg_to_cos_params(
+ bp,
+ help_data,
+ &cos_data,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+ case 2:
+ bnx2x_dcbx_two_pg_to_cos_params(
+ bp,
+ help_data,
+ ets,
+ &cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+
+ case 3:
+ bnx2x_dcbx_three_pg_to_cos_params(
+ bp,
+ help_data,
+ ets,
+ &cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+
+ break;
+ default:
+ BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
+ bnx2x_dcbx_ets_disabled_entry_data(bp,
+ &cos_data, pri_join_mask);
+ }
+
+ for (i = 0; i < cos_data.num_of_cos ; i++) {
+ struct bnx2x_dcbx_cos_params *params =
+ &bp->dcbx_port_params.ets.cos_params[i];
+
+ params->pauseable = cos_data.data[i].pausable;
+ params->strict = cos_data.data[i].strict;
+ params->bw_tbl = cos_data.data[i].cos_bw;
+ if (params->pauseable) {
+ params->pri_bitmask =
+ DCBX_PFC_PRI_GET_PAUSE(bp,
+ cos_data.data[i].pri_join_mask);
+ DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
+ } else {
+ params->pri_bitmask =
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp,
+ cos_data.data[i].pri_join_mask);
+ DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
+ "0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
+ }
+ }
+
+	bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
+}
+
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+ u32 *set_configuration_ets_pg,
+ u32 *pri_pg_tbl)
+{
+ int i;
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+ set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
+
+ DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
+ i, set_configuration_ets_pg[i]);
+ }
+}
+
+/*******************************************************************************
+ * Description: Fill pfc_config struct that will be sent in DCBX start ramrod
+ *
+ * Return:
+ ******************************************************************************/
+static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
+{
+	struct flow_control_configuration *pfc_fw_cfg = NULL;
+ u16 pri_bit = 0;
+ u8 cos = 0, pri = 0;
+ struct priority_cos *tt2cos;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ pfc_fw_cfg = (struct flow_control_configuration *)
+ bnx2x_sp(bp, pfc_config);
+ memset(pfc_fw_cfg, 0, sizeof(struct flow_control_configuration));
+
+ /*shortcut*/
+ tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
+
+	/* FW version should be incremented on each update */
+ pfc_fw_cfg->dcb_version = ++bp->dcb_version;
+ pfc_fw_cfg->dcb_enabled = DCB_ENABLED;
+
+ /* Default initialization */
+ for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) {
+ tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
+ tt2cos[pri].cos = 0;
+ }
+
+ /* Fill priority parameters */
+ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+ tt2cos[pri].priority = ttp[pri];
+ pri_bit = 1 << tt2cos[pri].priority;
+
+		/* Fill COS parameters based on the calculated COS mapping,
+		 * kept generic for future use */
+ for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
+ if (bp->dcbx_port_params.ets.cos_params[cos].
+ pri_bitmask & pri_bit)
+ tt2cos[pri].cos = cos;
+ }
+ bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
+}
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
new file mode 100644
index 000000000000..8dea56b511f5
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -0,0 +1,193 @@
+/* bnx2x_dcb.h: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2010 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+#ifndef BNX2X_DCB_H
+#define BNX2X_DCB_H
+
+#include "bnx2x_hsi.h"
+
+#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
+struct bnx2x_dcbx_app_params {
+ u32 enabled;
+ u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+};
+
+#define E2_NUM_OF_COS 2
+#define BNX2X_DCBX_COS_NOT_STRICT 0
+#define BNX2X_DCBX_COS_LOW_STRICT 1
+#define BNX2X_DCBX_COS_HIGH_STRICT 2
+
+struct bnx2x_dcbx_cos_params {
+ u32 bw_tbl;
+ u32 pri_bitmask;
+ u8 strict;
+ u8 pauseable;
+};
+
+struct bnx2x_dcbx_pg_params {
+ u32 enabled;
+ u8 num_of_cos; /* valid COS entries */
+ struct bnx2x_dcbx_cos_params cos_params[E2_NUM_OF_COS];
+};
+
+struct bnx2x_dcbx_pfc_params {
+ u32 enabled;
+ u32 priority_non_pauseable_mask;
+};
+
+struct bnx2x_dcbx_port_params {
+ u32 dcbx_enabled;
+ struct bnx2x_dcbx_pfc_params pfc;
+ struct bnx2x_dcbx_pg_params ets;
+ struct bnx2x_dcbx_app_params app;
+};
+
+#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF)
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
+
+/*******************************************************************************
+ * LLDP protocol configuration parameters.
+ ******************************************************************************/
+struct bnx2x_config_lldp_params {
+ u32 overwrite_settings;
+ u32 msg_tx_hold;
+ u32 msg_fast_tx;
+ u32 tx_credit_max;
+ u32 msg_tx_interval;
+ u32 tx_fast;
+};
+
+struct bnx2x_admin_priority_app_table {
+ u32 valid;
+ u32 priority;
+#define INVALID_TRAFFIC_TYPE_PRIORITY (0xFFFFFFFF)
+ u32 traffic_type;
+#define TRAFFIC_TYPE_ETH 0
+#define TRAFFIC_TYPE_PORT 1
+ u32 app_id;
+};
+
+/*******************************************************************************
+ * DCBX protocol configuration parameters.
+ ******************************************************************************/
+struct bnx2x_config_dcbx_params {
+ u32 dcb_enable;
+ u32 admin_dcbx_enable;
+ u32 overwrite_settings;
+ u32 admin_dcbx_version;
+ u32 admin_ets_enable;
+ u32 admin_pfc_enable;
+ u32 admin_tc_supported_tx_enable;
+ u32 admin_ets_configuration_tx_enable;
+ u32 admin_ets_recommendation_tx_enable;
+ u32 admin_pfc_tx_enable;
+ u32 admin_application_priority_tx_enable;
+ u32 admin_ets_willing;
+ u32 admin_ets_reco_valid;
+ u32 admin_pfc_willing;
+ u32 admin_app_priority_willing;
+ u32 admin_configuration_bw_precentage[8];
+ u32 admin_configuration_ets_pg[8];
+ u32 admin_recommendation_bw_precentage[8];
+ u32 admin_recommendation_ets_pg[8];
+ u32 admin_pfc_bitmap;
+ struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+ u32 admin_default_priority;
+};
+
+#define GET_FLAGS(flags, bits) ((flags) & (bits))
+#define SET_FLAGS(flags, bits) ((flags) |= (bits))
+#define RESET_FLAGS(flags, bits) ((flags) &= ~(bits))
+
+enum {
+ DCBX_READ_LOCAL_MIB,
+ DCBX_READ_REMOTE_MIB
+};
+
+#define ETH_TYPE_FCOE (0x8906)
+#define TCP_PORT_ISCSI (0xCBC)
+
+#define PFC_VALUE_FRAME_SIZE (512)
+#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed) \
+ ((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
+
+#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
+#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
+
+
+
+struct cos_entry_help_data {
+ u32 pri_join_mask;
+ u32 cos_bw;
+ u8 strict;
+ bool pausable;
+};
+
+struct cos_help_data {
+ struct cos_entry_help_data data[E2_NUM_OF_COS];
+ u8 num_of_cos;
+};
+
+#define DCBX_ILLEGAL_PG (0xFF)
+#define DCBX_PFC_PRI_MASK (0xFF)
+#define DCBX_STRICT_PRIORITY (15)
+#define DCBX_INVALID_COS_BW (0xFFFFFFFF)
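+/* helpers to split a priority bitmap into its pauseable and
+ * non-pauseable parts according to the negotiated PFC mask
+ */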
+#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \
+ ((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
+#define DCBX_PFC_PRI_PAUSE_MASK(bp) \
+ ((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
+#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \
+ ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
+#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
+ (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
+#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
+				((pg_pri) == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
+ ((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \
+ (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
+ IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
+
+
+struct pg_entry_help_data {
+ u8 num_of_dif_pri;
+ u8 pg;
+ u32 pg_priority;
+};
+
+struct pg_help_data {
+ struct pg_entry_help_data data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+ u8 num_of_pg;
+};
+
+/* forward DCB/PFC related declarations */
+struct bnx2x;
+void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp);
+void bnx2x_dcbx_update(struct work_struct *work);
+void bnx2x_dcbx_init_params(struct bnx2x *bp);
+
+enum {
+ BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
+ BNX2X_DCBX_STATE_TX_PAUSED = 0x2,
+ BNX2X_DCBX_STATE_TX_RELEASED = 0x4
+};
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
+
+#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index d02ffbdc9f0e..99c672d894ca 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -25,6 +25,143 @@
#include "bnx2x_cmn.h"
#include "bnx2x_dump.h"
+/* Note: in the format strings below %s is replaced by the queue-name which is
+ * either its index or 'fcoe' for the fcoe queue. Make sure the format string
+ * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
+ */
+#define MAX_QUEUE_NAME_LEN 4
+static const struct {
+ long offset;
+ int size;
+ char string[ETH_GSTRING_LEN];
+} bnx2x_q_stats_arr[] = {
+/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+ { Q_STATS_OFFSET32(error_bytes_received_hi),
+ 8, "[%s]: rx_error_bytes" },
+ { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
+ 8, "[%s]: rx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
+ 8, "[%s]: rx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
+ 8, "[%s]: rx_bcast_packets" },
+ { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
+ { Q_STATS_OFFSET32(rx_err_discard_pkt),
+ 4, "[%s]: rx_phy_ip_err_discards"},
+ { Q_STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, "[%s]: rx_skb_alloc_discard" },
+ { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
+
+/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+ { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ 8, "[%s]: tx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, "[%s]: tx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, "[%s]: tx_bcast_packets" }
+};
+
+#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
+
+static const struct {
+ long offset;
+ int size;
+ u32 flags;
+#define STATS_FLAGS_PORT 1
+#define STATS_FLAGS_FUNC 2
+#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
+ char string[ETH_GSTRING_LEN];
+} bnx2x_stats_arr[] = {
+/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_bytes" },
+ { STATS_OFFSET32(error_bytes_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
+ { STATS_OFFSET32(total_unicast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
+ { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
+ 8, STATS_FLAGS_PORT, "rx_crc_errors" },
+ { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
+ 8, STATS_FLAGS_PORT, "rx_align_errors" },
+ { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
+ 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
+ { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
+ 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
+/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
+ 8, STATS_FLAGS_PORT, "rx_fragments" },
+ { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+ 8, STATS_FLAGS_PORT, "rx_jabbers" },
+ { STATS_OFFSET32(no_buff_discard_hi),
+ 8, STATS_FLAGS_BOTH, "rx_discards" },
+ { STATS_OFFSET32(mac_filter_discard),
+ 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+ { STATS_OFFSET32(xxoverflow_discard),
+ 4, STATS_FLAGS_PORT, "rx_fw_discards" },
+ { STATS_OFFSET32(brb_drop_hi),
+ 8, STATS_FLAGS_PORT, "rx_brb_discard" },
+ { STATS_OFFSET32(brb_truncate_hi),
+ 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
+ { STATS_OFFSET32(pause_frames_received_hi),
+ 8, STATS_FLAGS_PORT, "rx_pause_frames" },
+ { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
+ 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+ { STATS_OFFSET32(nig_timer_max),
+ 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
+/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
+ 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
+ { STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
+ { STATS_OFFSET32(hw_csum_err),
+ 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
+
+ { STATS_OFFSET32(total_bytes_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bytes" },
+ { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+ 8, STATS_FLAGS_PORT, "tx_error_bytes" },
+ { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
+ { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
+ 8, STATS_FLAGS_PORT, "tx_mac_errors" },
+ { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
+ 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+ 8, STATS_FLAGS_PORT, "tx_single_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
+ 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+ 8, STATS_FLAGS_PORT, "tx_deferred" },
+ { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_late_collisions" },
+ { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_total_collisions" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
+ { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
+ { STATS_OFFSET32(etherstatspktsover1522octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
+ { STATS_OFFSET32(pause_frames_sent_hi),
+ 8, STATS_FLAGS_PORT, "tx_pause_frames" }
+};
+
+#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
+
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -45,14 +182,9 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = bp->link_params.req_line_speed[cfg_idx];
cmd->duplex = bp->link_params.req_duplex[cfg_idx];
}
- if (IS_MF(bp)) {
- u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
- FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
- 100;
- if (vn_max_rate < cmd->speed)
- cmd->speed = vn_max_rate;
- }
+ if (IS_MF(bp))
+ cmd->speed = bnx2x_get_mf_speed(bp);
if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
cmd->port = PORT_TP;
@@ -87,18 +219,57 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
+ u32 speed;
- if (IS_MF(bp))
+ if (IS_MF_SD(bp))
return 0;
DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
- DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
- DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
- DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
+ " supported 0x%x advertising 0x%x speed %d speed_hi %d\n"
+ " duplex %d port %d phy_address %d transceiver %d\n"
+ " autoneg %d maxtxpkt %d maxrxpkt %d\n",
cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+ cmd->speed_hi,
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ speed = cmd->speed;
+ speed |= (cmd->speed_hi << 16);
+
+ if (IS_MF_SI(bp)) {
+ u32 param = 0;
+ u32 line_speed = bp->link_vars.line_speed;
+
+ /* use 10G if no link detected */
+ if (!line_speed)
+ line_speed = 10000;
+
+ if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
+ BNX2X_DEV_INFO("To set speed BC %X or higher "
+ "is required, please upgrade BC\n",
+ REQ_BC_VER_4_SET_MF_BW);
+ return -EINVAL;
+ }
+ if (line_speed < speed) {
+			BNX2X_DEV_INFO("New speed should be less than or "
+				       "equal to the actual line speed\n");
+ return -EINVAL;
+ }
+ /* load old values */
+ param = bp->mf_config[BP_VN(bp)];
+
+ /* leave only MIN value */
+ param &= FUNC_MF_CFG_MIN_BW_MASK;
+
+ /* set new MAX value */
+ param |= (((speed * 100) / line_speed)
+ << FUNC_MF_CFG_MAX_BW_SHIFT)
+ & FUNC_MF_CFG_MAX_BW_MASK;
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
+ return 0;
+ }
+
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
switch (cmd->port) {
@@ -168,8 +339,6 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
- u32 speed = cmd->speed;
- speed |= (cmd->speed_hi << 16);
switch (speed) {
case SPEED_10:
if (cmd->duplex == DUPLEX_FULL) {
@@ -1286,7 +1455,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
save_val = REG_RD(bp, offset);
- REG_WR(bp, offset, (wr_val & mask));
+ REG_WR(bp, offset, wr_val & mask);
val = REG_RD(bp, offset);
@@ -1499,8 +1668,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
* updates that have been performed while interrupts were
* disabled.
*/
- if (bp->common.int_block == INT_BLOCK_IGU)
+ if (bp->common.int_block == INT_BLOCK_IGU) {
+ /* Disable local BHs to prevent a deadlock between
+ * sch_direct_xmit() and bnx2x_run_loopback() (calling
+ * bnx2x_tx_int()), as both take netif_tx_lock().
+ */
+ local_bh_disable();
bnx2x_tx_int(fp_tx);
+ local_bh_enable();
+ }
rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
if (rx_idx != rx_start_idx + num_pkts)
@@ -1650,7 +1826,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
config->hdr.client_id = bp->fp->cl_id;
config->hdr.reserved1 = 0;
- bp->set_mac_pending++;
+ bp->set_mac_pending = 1;
smp_wmb();
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
@@ -1748,134 +1924,6 @@ static void bnx2x_self_test(struct net_device *dev,
#endif
}
-static const struct {
- long offset;
- int size;
- u8 string[ETH_GSTRING_LEN];
-} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
-/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
- { Q_STATS_OFFSET32(error_bytes_received_hi),
- 8, "[%d]: rx_error_bytes" },
- { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, "[%d]: rx_ucast_packets" },
- { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, "[%d]: rx_mcast_packets" },
- { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, "[%d]: rx_bcast_packets" },
- { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
- { Q_STATS_OFFSET32(rx_err_discard_pkt),
- 4, "[%d]: rx_phy_ip_err_discards"},
- { Q_STATS_OFFSET32(rx_skb_alloc_failed),
- 4, "[%d]: rx_skb_alloc_discard" },
- { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
-
-/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
- { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%d]: tx_ucast_packets" },
- { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
- 8, "[%d]: tx_mcast_packets" },
- { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
- 8, "[%d]: tx_bcast_packets" }
-};
-
-static const struct {
- long offset;
- int size;
- u32 flags;
-#define STATS_FLAGS_PORT 1
-#define STATS_FLAGS_FUNC 2
-#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
- u8 string[ETH_GSTRING_LEN];
-} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
-/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
- 8, STATS_FLAGS_BOTH, "rx_bytes" },
- { STATS_OFFSET32(error_bytes_received_hi),
- 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
- { STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
- { STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
- { STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
- { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
- 8, STATS_FLAGS_PORT, "rx_crc_errors" },
- { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
- 8, STATS_FLAGS_PORT, "rx_align_errors" },
- { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
- 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
- { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
- 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
-/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
- 8, STATS_FLAGS_PORT, "rx_fragments" },
- { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
- 8, STATS_FLAGS_PORT, "rx_jabbers" },
- { STATS_OFFSET32(no_buff_discard_hi),
- 8, STATS_FLAGS_BOTH, "rx_discards" },
- { STATS_OFFSET32(mac_filter_discard),
- 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
- { STATS_OFFSET32(xxoverflow_discard),
- 4, STATS_FLAGS_PORT, "rx_fw_discards" },
- { STATS_OFFSET32(brb_drop_hi),
- 8, STATS_FLAGS_PORT, "rx_brb_discard" },
- { STATS_OFFSET32(brb_truncate_hi),
- 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
- { STATS_OFFSET32(pause_frames_received_hi),
- 8, STATS_FLAGS_PORT, "rx_pause_frames" },
- { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
- 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
- { STATS_OFFSET32(nig_timer_max),
- 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
-/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
- 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
- { STATS_OFFSET32(rx_skb_alloc_failed),
- 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
- { STATS_OFFSET32(hw_csum_err),
- 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
-
- { STATS_OFFSET32(total_bytes_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_bytes" },
- { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
- 8, STATS_FLAGS_PORT, "tx_error_bytes" },
- { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
- { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
- { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
- { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
- 8, STATS_FLAGS_PORT, "tx_mac_errors" },
- { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
- 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
-/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
- 8, STATS_FLAGS_PORT, "tx_single_collisions" },
- { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
- 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
- { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
- 8, STATS_FLAGS_PORT, "tx_deferred" },
- { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
- 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
- { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
- 8, STATS_FLAGS_PORT, "tx_late_collisions" },
- { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
- 8, STATS_FLAGS_PORT, "tx_total_collisions" },
- { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
- 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
- { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
- 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
- { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
- 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
- { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
- 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
-/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
- 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
- { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
- 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
- { STATS_OFFSET32(etherstatspktsover1522octets_hi),
- 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
- { STATS_OFFSET32(pause_frames_sent_hi),
- 8, STATS_FLAGS_PORT, "tx_pause_frames" }
-};
-
#define IS_PORT_STAT(i) \
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
@@ -1890,7 +1938,8 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
- num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
+ num_stats = BNX2X_NUM_STAT_QUEUES(bp) *
+ BNX2X_NUM_Q_STATS;
if (!IS_MF_MODE_STAT(bp))
num_stats += BNX2X_NUM_STATS;
} else {
@@ -1916,15 +1965,25 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
int i, j, k;
+ char queue_name[MAX_QUEUE_NAME_LEN+1];
switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
k = 0;
- for_each_queue(bp, i) {
+ for_each_napi_queue(bp, i) {
+ memset(queue_name, 0, sizeof(queue_name));
+
+ if (IS_FCOE_IDX(i))
+ sprintf(queue_name, "fcoe");
+ else
+ sprintf(queue_name, "%d", i);
+
for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
- sprintf(buf + (k + j)*ETH_GSTRING_LEN,
- bnx2x_q_stats_arr[j].string, i);
+ snprintf(buf + (k + j)*ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ bnx2x_q_stats_arr[j].string,
+ queue_name);
k += BNX2X_NUM_Q_STATS;
}
if (IS_MF_MODE_STAT(bp))
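
The hunk above replaces sprintf() with a bounded snprintf() and formats a precomputed queue_name, so the FCoE ring gets the label "fcoe" instead of a number. A standalone sketch of the same loop, assuming (as the rest of this patch implies) that the per-queue format strings now take a string argument such as "[%s]: rx_bytes":

#include <stdio.h>

#define ETH_GSTRING_LEN 32

int main(void)
{
	/* two hypothetical per-queue stat formats, three queues */
	static const char * const fmt[] = {
		"[%s]: rx_bytes", "[%s]: tx_bytes"
	};
	char buf[3 * 2 * ETH_GSTRING_LEN];
	char queue_name[8];
	int i, j, k = 0;

	for (i = 0; i < 3; i++) {
		/* pretend queue 2 is the FCoE ring */
		if (i == 2)
			snprintf(queue_name, sizeof(queue_name), "fcoe");
		else
			snprintf(queue_name, sizeof(queue_name), "%d", i);

		/* bounded write into each ETH_GSTRING_LEN slot */
		for (j = 0; j < 2; j++)
			snprintf(buf + (k + j) * ETH_GSTRING_LEN,
				 ETH_GSTRING_LEN, fmt[j], queue_name);
		k += 2;
	}

	for (i = 0; i < k; i++)
		printf("%s\n", buf + i * ETH_GSTRING_LEN);
	return 0;
}
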
@@ -1958,7 +2017,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
if (is_multi(bp)) {
k = 0;
- for_each_queue(bp, i) {
+ for_each_napi_queue(bp, i) {
hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
if (bnx2x_q_stats_arr[j].size == 0) {
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23a0e82..6238d4f63989 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u16 xgxs_config_tx[4]; /* 0x1A0 */
- u32 Reserved1[57]; /* 0x1A8 */
+ u32 Reserved1[56]; /* 0x1A8 */
+ u32 default_cfg; /* 0x288 */
+ /* Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
@@ -427,7 +434,12 @@ struct shared_feat_cfg { /* NVRAM Offset */
#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000
#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002
-#define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
};
@@ -672,7 +684,7 @@ struct shm_dev_info { /* size */
#define E1VN_MAX 1
#define E1HVN_MAX 4
-
+#define E2_VF_MAX 64
/* This value (in milliseconds) determines the frequency of the driver
* issuing the PULSE message code. The firmware monitors this periodic
* pulse to determine when to switch to an OS-absent mode. */
@@ -808,6 +820,11 @@ struct drv_func_mb {
#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
+#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
+#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
+#define REQ_BC_VER_4_SET_MF_BW 0x00060202
+#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
@@ -881,6 +898,7 @@ struct drv_func_mb {
u32 drv_status;
#define DRV_STATUS_PMF 0x00000001
+#define DRV_STATUS_SET_MF_BW 0x00000004
#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
@@ -889,6 +907,8 @@ struct drv_func_mb {
#define DRV_STATUS_DCC_RESERVED1 0x00000800
#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
+#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
+#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
@@ -981,12 +1001,43 @@ struct func_mf_cfg {
};
+/* This structure is not applicable and should not be accessed on 57711 */
+struct func_ext_cfg {
+ u32 func_cfg;
+#define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
+#define MACP_FUNC_CFG_FLAGS_SHIFT 0
+#define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
+#define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
+#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
+#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
+
+ u32 iscsi_mac_addr_upper;
+ u32 iscsi_mac_addr_lower;
+
+ u32 fcoe_mac_addr_upper;
+ u32 fcoe_mac_addr_lower;
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 preserve_data;
+#define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
+#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
+#define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
+#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
+#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
+};
+
struct mf_cfg {
struct shared_mf_cfg shared_mf_config;
struct port_mf_cfg port_mf_config[PORT_MAX];
struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
+ struct func_ext_cfg func_ext_config[E1H_FUNC_MAX];
};
@@ -1042,6 +1093,251 @@ struct fw_flr_mb {
struct fw_flr_ack ack;
};
+/**** SUPPORT FOR SHMEM ARRAYS ***
+ * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
+ * define arrays with storage types smaller than unsigned dwords.
+ * The macros below add generic support for SHMEM arrays with numeric elements
+ * that can span 2, 4, 8 or 16 bits. The underlying type is a 32 bit dword
+ * array with individual bit-field elements accessed using shifts and masks.
+ */
+
+/* eb is the bitwidth of a single element */
+#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
+#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
+
+/* the bit-position macro allows the user to flip the order of the array's
+ * elements on a per byte or word boundary.
+ *
+ * example: an array with 8 entries, each 4 bits wide. This array fits into
+ * a single dword. The diagrams below show the array order of the nibbles.
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
+ *
+ * | | | |
+ * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
+ *
+ * | | | |
+ * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
+ *
+ * | | | |
+ * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
+ * | | | |
+ */
+#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
+ ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
+ (((i)%((fb)/(eb))) * (eb)))
+
+#define SHMEM_ARRAY_GET(a, i, eb, fb) \
+ ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
+ SHMEM_ARRAY_MASK(eb))
+
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
+do { \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+} while (0)
+
+
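
The bit layout claimed in the diagrams above is easy to verify in isolation. The following self-contained program packs the 8-entry, 4-bit example array with each of the three flip orderings and prints the resulting dword; the expected outputs are 0x01234567 (fb=4), 0x10325476 (fb=8) and 0x32107654 (fb=16), matching the nibble diagrams. The DCBX_PRI_PG_* and DCBX_PG_BW_* accessors below are just these macros with fixed eb/fb parameters.

#include <stdio.h>
#include <stdint.h>

#define SHMEM_ARRAY_MASK(eb)		((1 << (eb)) - 1)
#define SHMEM_ARRAY_ENTRY(i, eb)	((i) / (32 / (eb)))
#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
	((((32 / (fb)) - 1 - ((i) / ((fb) / (eb))) % (32 / (fb))) * (fb)) + \
	 (((i) % ((fb) / (eb))) * (eb)))
#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
do { \
	a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
					 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
	a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
					SHMEM_ARRAY_BITPOS(i, eb, fb)); \
} while (0)

int main(void)
{
	int fb, i;

	for (fb = 4; fb <= 16; fb *= 2) {
		uint32_t a[1] = { 0 };

		/* store element index i as the element value */
		for (i = 0; i < 8; i++)
			SHMEM_ARRAY_SET(a, i, 4, fb, i);
		printf("fb=%2d -> 0x%08x\n", fb, a[0]);
	}
	return 0;
}
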
+/****START OF DCBX STRUCTURES DECLARATIONS****/
+#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
+#define DCBX_PRI_PG_BITWIDTH 4
+#define DCBX_PRI_PG_FBITS 8
+#define DCBX_PRI_PG_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
+#define DCBX_PRI_PG_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
+#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
+#define DCBX_BW_PG_BITWIDTH 8
+#define DCBX_PG_BW_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
+#define DCBX_PG_BW_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
+#define DCBX_STRICT_PRI_PG 15
+#define DCBX_MAX_APP_PROTOCOL 16
+#define FCOE_APP_IDX 0
+#define ISCSI_APP_IDX 1
+#define PREDEFINED_APP_IDX_MAX 2
+
+struct dcbx_ets_feature {
+ u32 enabled;
+ u32 pg_bw_tbl[2];
+ u32 pri_pg_tbl[1];
+};
+
+struct dcbx_pfc_feature {
+#ifdef __BIG_ENDIAN
+ u8 pri_en_bitmap;
+#define DCBX_PFC_PRI_0 0x01
+#define DCBX_PFC_PRI_1 0x02
+#define DCBX_PFC_PRI_2 0x04
+#define DCBX_PFC_PRI_3 0x08
+#define DCBX_PFC_PRI_4 0x10
+#define DCBX_PFC_PRI_5 0x20
+#define DCBX_PFC_PRI_6 0x40
+#define DCBX_PFC_PRI_7 0x80
+ u8 pfc_caps;
+ u8 reserved;
+ u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+ u8 enabled;
+ u8 reserved;
+ u8 pfc_caps;
+ u8 pri_en_bitmap;
+#define DCBX_PFC_PRI_0 0x01
+#define DCBX_PFC_PRI_1 0x02
+#define DCBX_PFC_PRI_2 0x04
+#define DCBX_PFC_PRI_3 0x08
+#define DCBX_PFC_PRI_4 0x10
+#define DCBX_PFC_PRI_5 0x20
+#define DCBX_PFC_PRI_6 0x40
+#define DCBX_PFC_PRI_7 0x80
+#endif
+};
+
+struct dcbx_app_priority_entry {
+#ifdef __BIG_ENDIAN
+ u16 app_id;
+ u8 pri_bitmap;
+ u8 appBitfield;
+#define DCBX_APP_ENTRY_VALID 0x01
+#define DCBX_APP_ENTRY_SF_MASK 0x30
+#define DCBX_APP_ENTRY_SF_SHIFT 4
+#define DCBX_APP_SF_ETH_TYPE 0x10
+#define DCBX_APP_SF_PORT 0x20
+#elif defined(__LITTLE_ENDIAN)
+ u8 appBitfield;
+#define DCBX_APP_ENTRY_VALID 0x01
+#define DCBX_APP_ENTRY_SF_MASK 0x30
+#define DCBX_APP_ENTRY_SF_SHIFT 4
+#define DCBX_APP_SF_ETH_TYPE 0x10
+#define DCBX_APP_SF_PORT 0x20
+ u8 pri_bitmap;
+ u16 app_id;
+#endif
+};
+
+struct dcbx_app_priority_feature {
+#ifdef __BIG_ENDIAN
+ u8 reserved;
+ u8 default_pri;
+ u8 tc_supported;
+ u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+ u8 enabled;
+ u8 tc_supported;
+ u8 default_pri;
+ u8 reserved;
+#endif
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+struct dcbx_features {
+ struct dcbx_ets_feature ets;
+ struct dcbx_pfc_feature pfc;
+ struct dcbx_app_priority_feature app;
+};
+
+struct lldp_params {
+#ifdef __BIG_ENDIAN
+ u8 msg_fast_tx_interval;
+ u8 msg_tx_hold;
+ u8 msg_tx_interval;
+ u8 admin_status;
+#define LLDP_TX_ONLY 0x01
+#define LLDP_RX_ONLY 0x02
+#define LLDP_TX_RX 0x03
+#define LLDP_DISABLED 0x04
+ u8 reserved1;
+ u8 tx_fast;
+ u8 tx_crd_max;
+ u8 tx_crd;
+#elif defined(__LITTLE_ENDIAN)
+ u8 admin_status;
+#define LLDP_TX_ONLY 0x01
+#define LLDP_RX_ONLY 0x02
+#define LLDP_TX_RX 0x03
+#define LLDP_DISABLED 0x04
+ u8 msg_tx_interval;
+ u8 msg_tx_hold;
+ u8 msg_fast_tx_interval;
+ u8 tx_crd;
+ u8 tx_crd_max;
+ u8 tx_fast;
+ u8 reserved1;
+#endif
+#define REM_CHASSIS_ID_STAT_LEN 4
+#define REM_PORT_ID_STAT_LEN 4
+ u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[REM_PORT_ID_STAT_LEN];
+};
+
+struct lldp_dcbx_stat {
+#define LOCAL_CHASSIS_ID_STAT_LEN 2
+#define LOCAL_PORT_ID_STAT_LEN 2
+ u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
+ u32 num_tx_dcbx_pkts;
+ u32 num_rx_dcbx_pkts;
+};
+
+struct lldp_admin_mib {
+ u32 ver_cfg_flags;
+#define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
+#define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
+#define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
+#define DCBX_ETS_RECO_TX_ENABLED 0x00000008
+#define DCBX_ETS_RECO_VALID 0x00000010
+#define DCBX_ETS_WILLING 0x00000020
+#define DCBX_PFC_WILLING 0x00000040
+#define DCBX_APP_WILLING 0x00000080
+#define DCBX_VERSION_CEE 0x00000100
+#define DCBX_VERSION_IEEE 0x00000200
+#define DCBX_DCBX_ENABLED 0x00000400
+#define DCBX_CEE_VERSION_MASK 0x0000f000
+#define DCBX_CEE_VERSION_SHIFT 12
+#define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
+#define DCBX_CEE_MAX_VERSION_SHIFT 16
+ struct dcbx_features features;
+};
+
+struct lldp_remote_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+#define DCBX_ETS_TLV_RX 0x00000001
+#define DCBX_PFC_TLV_RX 0x00000002
+#define DCBX_APP_TLV_RX 0x00000004
+#define DCBX_ETS_RX_ERROR 0x00000010
+#define DCBX_PFC_RX_ERROR 0x00000020
+#define DCBX_APP_RX_ERROR 0x00000040
+#define DCBX_ETS_REM_WILLING 0x00000100
+#define DCBX_PFC_REM_WILLING 0x00000200
+#define DCBX_APP_REM_WILLING 0x00000400
+#define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_local_mib {
+ u32 prefix_seq_num;
+ u32 error;
+#define DCBX_LOCAL_ETS_ERROR 0x00000001
+#define DCBX_LOCAL_PFC_ERROR 0x00000002
+#define DCBX_LOCAL_APP_ERROR 0x00000004
+#define DCBX_LOCAL_PFC_MISMATCH 0x00000010
+#define DCBX_LOCAL_APP_MISMATCH 0x00000020
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+/***END OF DCBX STRUCTURES DECLARATIONS***/
struct shmem2_region {
@@ -1065,7 +1361,12 @@ struct shmem2_region {
#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
struct fw_flr_mb flr_mb;
- u32 reserved[3];
+ u32 dcbx_lldp_params_offset;
+#define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
+ u32 dcbx_neg_res_offset;
+#define SHMEM_DCBX_NEG_RES_NONE 0x00000000
+ u32 dcbx_remote_mib_offset;
+#define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
/*
* The other shmemX_base_addr holds the other path's shmem address
* required for example in case of common phy init, or for path1 to know
@@ -1074,6 +1375,10 @@ struct shmem2_region {
*/
u32 other_shmem_base_addr;
u32 other_shmem2_base_addr;
+ u32 reserved1[E2_VF_MAX / 32];
+ u32 reserved2[E2_FUNC_MAX][E2_VF_MAX / 32];
+ u32 dcbx_lldp_dcbx_stat_offset;
+#define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
};
@@ -1527,8 +1832,8 @@ struct host_func_stats {
#define BCM_5710_FW_MAJOR_VERSION 6
-#define BCM_5710_FW_MINOR_VERSION 0
-#define BCM_5710_FW_REVISION_VERSION 34
+#define BCM_5710_FW_MINOR_VERSION 2
+#define BCM_5710_FW_REVISION_VERSION 5
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -2976,6 +3281,25 @@ struct fairness_vars_per_vn {
/*
+ * The data for flow control configuration
+ */
+struct flow_control_configuration {
+ struct priority_cos
+ traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 dcb_version;
+ u8 dcb_enabled;
+#elif defined(__LITTLE_ENDIAN)
+ u8 dcb_enabled;
+ u8 dcb_version;
+ u16 reserved1;
+#endif
+ u32 reserved2;
+};
+
+
+/*
* FW version stored in the Xstorm RAM
*/
struct fw_version {
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index a306b0e46b61..66df29fcf751 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -838,7 +838,7 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
/****************************************************************************
* SRC initializations
****************************************************************************/
-
+#ifdef BCM_CNIC
/* called during init func stage */
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
@@ -862,5 +862,5 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
U64_HI((u64)t2_mapping +
(src_cid_count-1) * sizeof(struct src_ent)));
}
-
+#endif
#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 2326774df843..43b0de24f391 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -164,7 +164,8 @@
#define EDC_MODE_PASSIVE_DAC 0x0055
-
+#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
+#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
/**********************************************************/
/* INTERFACE */
/**********************************************************/
@@ -205,6 +206,270 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
return val;
}
+/******************************************************************/
+/* ETS section */
+/******************************************************************/
+void bnx2x_ets_disabled(struct link_params *params)
+{
+ /* ETS disabled configuration */
+ struct bnx2x *bp = params->bp;
+
+ DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
+
+ /**
+ * mapping from entry priority to client number (0,1,2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
+ * 3-bit client numbers.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
+ */
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
+ /**
+ * Bitmap of 5 bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+ /**
+ * For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /**
+ * mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
+ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
+ /* ETS mode disable */
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+ /**
+ * If ETS mode is enabled (there is no strict priority) defines a WFQ
+ * weight for COS0/COS1.
+ */
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+}
+
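
The constants written to NIG_REG_P0_TX_ARB_PRIORITY_CLIENT (0x4688 above, and 0x2318/0x22E0 in bnx2x_ets_strict() further down) are five 3-bit client numbers packed PRI0-first. A quick self-contained check, assuming the client numbering from the comments (MCP=0, dbg0=1, dbg1=2, COS0=3, COS1=4):

#include <stdio.h>
#include <stdint.h>

/* Pack five 3-bit client numbers, PRI0 in the lowest bits. */
static uint32_t pack_pri_client(const int client[5])
{
	uint32_t v = 0;
	int pri;

	for (pri = 0; pri < 5; pri++)
		v |= (uint32_t)(client[pri] & 0x7) << (3 * pri);
	return v;
}

int main(void)
{
	/* PRI0..PRI4: MCP, dbg0, dbg1, COS0, COS1 */
	const int ets_disabled[5] = { 0, 1, 2, 3, 4 };
	/* PRI0..PRI4: MCP, COS0, COS1, dbg0, dbg1 (strict_cos == 0) */
	const int strict_cos0[5] = { 0, 3, 4, 1, 2 };
	/* PRI0..PRI4: MCP, COS1, COS0, dbg0, dbg1 (strict_cos == 1) */
	const int strict_cos1[5] = { 0, 4, 3, 1, 2 };

	/* expect 0x4688 0x2318 0x22e0 */
	printf("0x%04x 0x%04x 0x%04x\n",
	       pack_pri_client(ets_disabled),
	       pack_pri_client(strict_cos0),
	       pack_pri_client(strict_cos1));
	return 0;
}
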
+void bnx2x_ets_bw_limit_common(const struct link_params *params)
+{
+ /* ETS enabled BW limit configuration */
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+ /**
+ * defines which entries (clients) are subjected to WFQ arbitration
+ * COS0 0x8
+ * COS1 0x10
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
+ /**
+ * mapping between the ARB_CREDIT_WEIGHT registers and actual
+ * client numbers (WEIGHT_0 does not actually have to represent
+ * client 0)
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+
+ /* ETS mode enabled*/
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+ /**
+ * Bitmap of 5 bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
+ * entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+}
+
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+ const u32 cos1_bw)
+{
+ /* ETS enabled BW limit configuration */
+ struct bnx2x *bp = params->bp;
+ const u32 total_bw = cos0_bw + cos1_bw;
+ u32 cos0_credit_weight = 0;
+ u32 cos1_credit_weight = 0;
+
+ DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+
+ if ((0 == total_bw) ||
+ (0 == cos0_bw) ||
+ (0 == cos1_bw)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_bw_limit: Total BW can't be zero\n");
+ return;
+ }
+
+ cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+ total_bw;
+ cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+ total_bw;
+
+ bnx2x_ets_bw_limit_common(params);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
+
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
+}
+
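
bnx2x_ets_bw_limit() splits the fixed ETS_BW_LIMIT_CREDIT_WEIGHT budget (0x5000) between the two COS queues in proportion to the requested bandwidths. A minimal sketch of that proportional split, including the non-zero guard from the function above:

#include <stdio.h>
#include <stdint.h>

#define ETS_BW_LIMIT_CREDIT_WEIGHT 0x5000

int main(void)
{
	uint32_t cos0_bw = 30, cos1_bw = 70;	/* e.g. a 30%/70% split */
	uint32_t total = cos0_bw + cos1_bw;

	if (!total || !cos0_bw || !cos1_bw) {
		fprintf(stderr, "COS0 and COS1 BW must both be non-zero\n");
		return 1;
	}
	/* expect cos0 weight=0x1800 cos1 weight=0x3800 */
	printf("cos0 weight=0x%x cos1 weight=0x%x\n",
	       cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT / total,
	       cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT / total);
	return 0;
}
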
+u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
+{
+ /* ETS enabled strict configuration */
+ struct bnx2x *bp = params->bp;
+ u32 val = 0;
+
+ DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
+ /**
+ * Bitmap of 5 bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries,
+ * 3 - COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
+ /**
+ * For strict priority entries defines the number of consecutive slots
+ * for the highest priority.
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* ETS mode disable */
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
+
+ /**
+ * mapping from entry priority to client number (0,1,2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
+ * 3-bit client numbers.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * strict_cos 0: dbg1-010 dbg0-001 cos1-100 cos0-011 MCP-000
+ * strict_cos 1: dbg1-010 dbg0-001 cos0-011 cos1-100 MCP-000
+ */
+ val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
+
+ return 0;
+}
+/******************************************************************/
+/* ETS section */
+/******************************************************************/
+
+static void bnx2x_bmac2_get_pfc_stat(struct link_params *params,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+
+ DP(NETIF_MSG_LINK, "pfc statistic read from BMAC\n");
+
+ REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_STAT_GTPP,
+ pfc_frames_sent, 2);
+
+ REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_STAT_GRPP,
+ pfc_frames_received, 2);
+}
+
+static void bnx2x_emac_get_pfc_stat(struct link_params *params,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x *bp = params->bp;
+ u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u32 val_xon = 0;
+ u32 val_xoff = 0;
+
+ DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
+
+ /* PFC received frames */
+ val_xoff = REG_RD(bp, emac_base +
+ EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
+ val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
+ val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
+ val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
+
+ pfc_frames_received[0] = val_xon + val_xoff;
+
+ /* PFC sent frames */
+ val_xoff = REG_RD(bp, emac_base +
+ EMAC_REG_RX_PFC_STATS_XOFF_SENT);
+ val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
+ val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
+ val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
+
+ pfc_frames_sent[0] = val_xon + val_xoff;
+}
+
+void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x *bp = params->bp;
+ u32 val = 0;
+ DP(NETIF_MSG_LINK, "pfc statistic\n");
+
+ if (!vars->link_up)
+ return;
+
+ val = REG_RD(bp, MISC_REG_RESET_REG_2);
+ if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+ == 0) {
+ DP(NETIF_MSG_LINK, "About to read stats from EMAC\n");
+ bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
+ pfc_frames_received);
+ } else {
+ DP(NETIF_MSG_LINK, "About to read stats from BMAC\n");
+ bnx2x_bmac2_get_pfc_stat(params, pfc_frames_sent,
+ pfc_frames_received);
+ }
+}
+/******************************************************************/
+/* MAC/PBF section */
+/******************************************************************/
static void bnx2x_emac_init(struct link_params *params,
struct link_vars *vars)
{
@@ -315,24 +580,55 @@ static u8 bnx2x_emac_enable(struct link_params *params,
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
- bnx2x_bits_en(bp, emac_base +
- EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
- bnx2x_bits_en(bp, emac_base +
- EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
}
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
+
+ /**
+ * Setting this bit causes MAC control frames (except for pause
+ * frames) to be passed on for processing. This setting has no
+ * effect on the operation of the pause frames. This bit affects
+ * all packets regardless of RX Parser packet sorting logic.
+ * Turn the PFC off to make sure we are in Xon state before
+ * enabling it.
+ */
+ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+ DP(NETIF_MSG_LINK, "PFC is enabled\n");
+ /* Enable PFC again */
+ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
+ EMAC_REG_RX_PFC_MODE_RX_EN |
+ EMAC_REG_RX_PFC_MODE_TX_EN |
+ EMAC_REG_RX_PFC_MODE_PRIORITIES);
+
+ EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
+ ((0x0101 <<
+ EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
+ (0x00ff <<
+ EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
+ val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
+ }
EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
/* Set Loopback */
@@ -362,7 +658,9 @@ static u8 bnx2x_emac_enable(struct link_params *params,
/* enable the NIG in/out to the emac */
REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
val = 0;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val = 1;
REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
@@ -383,9 +681,38 @@ static u8 bnx2x_emac_enable(struct link_params *params,
return 0;
}
-static void bnx2x_update_bmac2(struct link_params *params,
- struct link_vars *vars,
- u8 is_lb)
+static void bnx2x_update_pfc_bmac1(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 wb_data[2];
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+
+ u32 val = 0x14;
+ if ((!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ /* Enable BigMAC to react on received Pause packets */
+ val |= (1<<5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
+
+ /* tx control */
+ val = 0xc0;
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= 0x800000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
+}
+
+static void bnx2x_update_pfc_bmac2(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
{
/*
* Set rx control: Strip CRC and enable BigMAC to relay
@@ -397,7 +724,9 @@ static void bnx2x_update_bmac2(struct link_params *params,
NIG_REG_INGRESS_BMAC0_MEM;
u32 val = 0x14;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ if ((!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
/* Enable BigMAC to react on received Pause packets */
val |= (1<<5);
wb_data[0] = val;
@@ -408,14 +737,47 @@ static void bnx2x_update_bmac2(struct link_params *params,
/* Tx control */
val = 0xc0;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val |= 0x800000;
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
+
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+ DP(NETIF_MSG_LINK, "PFC is enabled\n");
+ /* Enable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x0;
+ wb_data[0] |= (1<<0); /* RX */
+ wb_data[0] |= (1<<1); /* TX */
+ wb_data[0] |= (1<<2); /* Force initial Xon */
+ wb_data[0] |= (1<<3); /* 8 cos */
+ wb_data[0] |= (1<<5); /* STATS */
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
+ wb_data, 2);
+ /* Clear the force Xon */
+ wb_data[0] &= ~(1<<2);
+ } else {
+ DP(NETIF_MSG_LINK, "PFC is disabled\n");
+ /* disable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x8;
+ wb_data[1] = 0;
+ }
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
+
+ /**
+ * Set the time (base unit is 512 bit times) between automatic
+ * re-sending of PP packets and enable automatic re-send of
+ * Per-Priority Packets as long as pp_gen is asserted and
+ * pp_disable is low.
+ */
val = 0x8000;
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ val |= (1<<16); /* enable automatic re-send */
+
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
@@ -427,6 +789,9 @@ static void bnx2x_update_bmac2(struct link_params *params,
val |= 0x4; /* Local loopback */
DP(NETIF_MSG_LINK, "enable bmac loopback\n");
}
+ /* When PFC enabled, Pass pause frames towards the NIG. */
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ val |= ((1<<6)|(1<<5));
wb_data[0] = val;
wb_data[1] = 0;
@@ -434,6 +799,239 @@ static void bnx2x_update_bmac2(struct link_params *params,
wb_data, 2);
}
+static void bnx2x_update_pfc_brb(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+{
+ struct bnx2x *bp = params->bp;
+ int set_pfc = params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED;
+
+ /* default - pause configuration */
+ u32 pause_xoff_th = PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
+ u32 pause_xon_th = PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
+ u32 full_xoff_th = PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
+ u32 full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
+
+ if (set_pfc && pfc_params)
+ /* First COS */
+ if (!pfc_params->cos0_pauseable) {
+ pause_xoff_th =
+ PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
+ pause_xon_th =
+ PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
+ full_xoff_th =
+ PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
+ full_xon_th =
+ PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
+ }
+ /* The number of free blocks below which the pause signal to class 0
+ of MAC #n is asserted. n=0,1 */
+ REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
+ /* The number of free blocks above which the pause signal to class 0
+ of MAC #n is de-asserted. n=0,1 */
+ REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
+ /* The number of free blocks below which the full signal to class 0
+ of MAC #n is asserted. n=0,1 */
+ REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
+ /* The number of free blocks above which the full signal to class 0
+ of MAC #n is de-asserted. n=0,1 */
+ REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
+
+ if (set_pfc && pfc_params) {
+ /* Second COS */
+ if (pfc_params->cos1_pauseable) {
+ pause_xoff_th =
+ PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
+ pause_xon_th =
+ PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
+ full_xoff_th =
+ PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
+ full_xon_th =
+ PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
+ } else {
+ pause_xoff_th =
+ PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
+ pause_xon_th =
+ PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
+ full_xoff_th =
+ PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
+ full_xon_th =
+ PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
+ }
+ /**
+ * The number of free blocks below which the pause signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ **/
+ REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
+ /**
+ * The number of free blocks above which the pause signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ **/
+ REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
+ /**
+ * The number of free blocks below which the full signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ **/
+ REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
+ /**
+ * The number of free blocks above which the full signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ **/
+ REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
+ }
+}
+
+static void bnx2x_update_pfc_nig(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *nig_params)
+{
+ u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
+ u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+ u32 pkt_priority_to_cos = 0;
+ u32 val;
+ struct bnx2x *bp = params->bp;
+ int port = params->port;
+ int set_pfc = params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED;
+ DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
+
+ /**
+ * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+ * MAC control frames (that are not pause packets)
+ * will be forwarded to the XCM.
+ */
+ xcm_mask = REG_RD(bp,
+ port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK);
+ /**
+ * NIG params will override non-PFC params, since it's possible
+ * to do a transition from PFC to SAFC
+ */
+ if (set_pfc) {
+ pause_enable = 0;
+ llfc_out_en = 0;
+ llfc_enable = 0;
+ ppp_enable = 1;
+ xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm0_out_en = 0;
+ p0_hwpfc_enable = 1;
+ } else {
+ if (nig_params) {
+ llfc_out_en = nig_params->llfc_out_en;
+ llfc_enable = nig_params->llfc_enable;
+ pause_enable = nig_params->pause_enable;
+ } else /* default non-PFC mode - PAUSE */
+ pause_enable = 1;
+
+ xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm0_out_en = 1;
+ }
+
+ REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
+ NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
+ REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
+ NIG_REG_LLFC_ENABLE_0, llfc_enable);
+ REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
+ NIG_REG_PAUSE_ENABLE_0, pause_enable);
+
+ REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
+ NIG_REG_PPP_ENABLE_0, ppp_enable);
+
+ REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK, xcm_mask);
+
+ REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+
+ /* output enable for RX_XCM # IF */
+ REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+
+ /* HW PFC TX enable */
+ REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+
+ /* 1 = EMAC, 0 = BMAC */
+ switch (vars->mac_type) {
+ case MAC_TYPE_EMAC:
+ val = 1;
+ break;
+ case MAC_TYPE_BMAC:
+ val = 0;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val);
+
+ if (nig_params) {
+ pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
+
+ REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS0_PRIORITY_MASK,
+ nig_params->rx_cos0_priority_mask);
+
+ REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS1_PRIORITY_MASK,
+ nig_params->rx_cos1_priority_mask);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
+ nig_params->llfc_high_priority_classes);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
+ nig_params->llfc_low_priority_classes);
+ }
+ REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
+ NIG_REG_P0_PKT_PRIORITY_TO_COS,
+ pkt_priority_to_cos);
+}
+
+
+void bnx2x_update_pfc(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+{
+ /**
+ * PFC and pause are orthogonal to one another: when PFC is
+ * enabled, pause is disabled, and when PFC is disabled, pause
+ * is set according to the pause result.
+ */
+ u32 val;
+ struct bnx2x *bp = params->bp;
+
+ /* update NIG params */
+ bnx2x_update_pfc_nig(params, vars, pfc_params);
+
+ /* update BRB params */
+ bnx2x_update_pfc_brb(params, vars, pfc_params);
+
+ if (!vars->link_up)
+ return;
+
+ val = REG_RD(bp, MISC_REG_RESET_REG_2);
+ if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+ == 0) {
+ DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
+ bnx2x_emac_enable(params, vars, 0);
+ return;
+ }
+
+ DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
+ if (CHIP_IS_E2(bp))
+ bnx2x_update_pfc_bmac2(params, vars, 0);
+ else
+ bnx2x_update_pfc_bmac1(params, vars);
+
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+}
static u8 bnx2x_bmac1_enable(struct link_params *params,
struct link_vars *vars,
@@ -465,15 +1063,6 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
wb_data, 2);
- /* tx control */
- val = 0xc0;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
- val |= 0x800000;
- wb_data[0] = val;
- wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL,
- wb_data, 2);
-
/* mac control */
val = 0x3;
if (is_lb) {
@@ -491,14 +1080,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
wb_data, 2);
- /* rx control set to don't strip crc */
- val = 0x14;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
- val |= 0x20;
- wb_data[0] = val;
- wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL,
- wb_data, 2);
+ bnx2x_update_pfc_bmac1(params, vars);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
@@ -595,7 +1177,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
wb_data, 2);
udelay(30);
- bnx2x_update_bmac2(params, vars, is_lb);
+ bnx2x_update_pfc_bmac2(params, vars, is_lb);
return 0;
}
@@ -610,7 +1192,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- udelay(10);
+ msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -627,7 +1209,9 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
val = 0;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
@@ -3525,13 +4109,19 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
/* Enable CL37 BAM */
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, &val);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, val | 1);
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
if (params->loopback_mode == LOOPBACK_EXT) {
bnx2x_807x_force_10G(bp, phy);
DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -3898,7 +4488,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
- return 0;;
+ return 0;
msleep(1);
}
return -EINVAL;
@@ -3982,7 +4572,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
- return 0;;
+ return 0;
msleep(1);
}
@@ -5302,7 +5892,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
- bnx2x_wait_reset_complete(bp, phy);
+
bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
1 << NIG_LATCH_BC_ENABLE_MI_INT);
@@ -5431,6 +6021,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5441,7 +6032,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port, initialize = 1;
+ u8 port, initialize = 1;
u16 val;
u16 temp;
u32 actual_phy_selection;
@@ -5450,11 +6041,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
msleep(1);
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(200); /* 100 is not enough */
-
+ bnx2x_wait_reset_complete(bp, phy);
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
/* BCM84823 requires that XGXS links up first @ 10G for normal
behavior */
temp = vars->line_speed;
@@ -5625,7 +6221,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port;
+ u8 port;
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
@@ -6928,7 +7528,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
- u8 phy_index, port = params->port;
+ u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
/* disable attentions */
vars->link_status = 0;
@@ -6966,9 +7566,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
params->phy[phy_index].link_reset(
&params->phy[phy_index],
params);
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL)
+ clear_latch_ind = 1;
}
}
+ if (clear_latch_ind) {
+ /* Clear latching indication */
+ bnx2x_rearm_latch_signal(bp, port, 0);
+ bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+ }
if (params->phy[INT_PHY].link_reset)
params->phy[INT_PHY].link_reset(
&params->phy[INT_PHY], params);
@@ -6999,6 +7608,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
s8 port;
s8 port_of_path = 0;
+ bnx2x_ext_phy_hw_reset(bp, 0);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
@@ -7021,7 +7631,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
return -EINVAL;
}
/* disable attentions */
- bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
@@ -7132,7 +7743,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
- bnx2x_ext_phy_hw_reset(bp, 1);
+ bnx2x_ext_phy_hw_reset(bp, 0);
msleep(5);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 171abf8097ee..bedab1a942c4 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -65,6 +65,22 @@
#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
+
+#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE 170
+#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE 0
+
+#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE 250
+#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE 0
+
+#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE 10
+#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE 90
+
+#define PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE 50
+#define PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE 250
+
+#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
+#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
+
/***********************************************************/
/* Structs */
/***********************************************************/
@@ -216,6 +232,7 @@ struct link_params {
u32 feature_config_flags;
#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
/* Will be populated during common init */
@@ -332,4 +349,43 @@ u8 bnx2x_phy_probe(struct link_params *params);
u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 port);
+/* PFC port configuration params */
+struct bnx2x_nig_brb_pfc_port_params {
+ /* NIG */
+ u32 pause_enable;
+ u32 llfc_out_en;
+ u32 llfc_enable;
+ u32 pkt_priority_to_cos;
+ u32 rx_cos0_priority_mask;
+ u32 rx_cos1_priority_mask;
+ u32 llfc_high_priority_classes;
+ u32 llfc_low_priority_classes;
+ /* BRB */
+ u32 cos0_pauseable;
+ u32 cos1_pauseable;
+};
+
+/**
+ * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+ * when link is already up
+ */
+void bnx2x_update_pfc(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params);
+
+
+/* Used to disable ETS */
+void bnx2x_ets_disabled(struct link_params *params);
+
+/* Used to configure ETS bandwidth limiting */
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+ const u32 cos1_bw);
+
+/* Used to configure ETS strict priority */
+u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
+
+/* Read PFC statistics */
+void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2]);
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index e9ad16f00b56..cf54427a8d80 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -55,6 +55,7 @@
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
@@ -121,6 +122,10 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
static struct workqueue_struct *bnx2x_wq;
+#ifdef BCM_CNIC
+static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
+#endif
+
enum bnx2x_board_type {
BCM57710 = 0,
BCM57711 = 1,
@@ -921,7 +926,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
sp_sb_data.p_func.vf_valid);
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
int loop;
struct hc_status_block_data_e2 sb_data_e2;
@@ -961,6 +966,10 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* host sb data */
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ continue;
+#endif
BNX2X_ERR(" run indexes (");
for (j = 0; j < HC_SB_MAX_SM; j++)
pr_cont("0x%x%s",
@@ -1029,7 +1038,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
#ifdef BNX2X_STOP_ON_ERROR
/* Rings */
/* Rx */
- for_each_queue(bp, i) {
+ for_each_rx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -1063,7 +1072,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
}
/* Tx */
- for_each_queue(bp, i) {
+ for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@ -1298,7 +1307,7 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
#ifdef BCM_CNIC
offset++;
#endif
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
synchronize_irq(bp->msix_table[i + offset].vector);
} else
synchronize_irq(bp->pdev->irq);
@@ -1420,7 +1429,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
return IRQ_HANDLED;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
@@ -2026,13 +2035,28 @@ static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
- int vn;
+ int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
if (BP_NOMCP(bp))
return; /* what should be the default value in this case */
+ /* For 2 port configuration the absolute function number formula
+ * is:
+ * abs_func = 2 * vn + BP_PORT + BP_PATH
+ *
+ * and there are 4 functions per port
+ *
+ * For 4 port configuration it is
+ * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
+ *
+ * and there are 2 functions per port
+ */
for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- int /*abs*/func = 2*vn + BP_PORT(bp);
+ int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
+
+ if (func >= E1H_FUNC_MAX)
+ break;
+
bp->mf_config[vn] =
MF_CFG_RD(bp, func_mf_config[func].config);
}
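/*
 * Worked example of the formula above (hypothetical values, assuming
 * E1H_FUNC_MAX == 8):
 * 2-port chip (n == 1), port 0, path 1: vn 0..3 -> func 1, 3, 5, 7
 * 4-port chip (n == 2), port 1, path 0: vn 0..1 -> func 2, 6;
 *   vn == 2 would give func 10 >= E1H_FUNC_MAX, so the loop breaks
 *   early.
 */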
@@ -2238,6 +2262,15 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
return rc;
}
+static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
+{
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp) && IS_MF(bp))
+ return false;
+#endif
+ return true;
+}
+
/* must be called under rtnl_lock */
static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
@@ -2248,10 +2281,21 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
u8 unmatched_unicast = 0;
+ if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
+ unmatched_unicast = 1;
+
if (filters & BNX2X_PROMISCUOUS_MODE) {
/* promiscuous - accept all, drop none */
drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
+ if (IS_MF_SI(bp)) {
+ /*
+ * SI mode: promiscuous mode accepts
+ * only unmatched packets
+ */
+ unmatched_unicast = 1;
+ accp_all_ucast = 0;
+ }
}
if (filters & BNX2X_ACCEPT_UNICAST) {
/* accept matched ucast */
@@ -2260,6 +2304,11 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
if (filters & BNX2X_ACCEPT_MULTICAST) {
/* accept matched mcast */
drop_all_mcast = 0;
+ if (IS_MF_SI(bp))
+ /* since mcast addresses won't arrive with ovlan,
+ * fw needs to accept all of them in
+ * switch-independent mode */
+ accp_all_mcast = 1;
}
if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
/* accept all mcast */
@@ -2372,7 +2421,7 @@ static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
/* calculate queue flags */
flags |= QUEUE_FLG_CACHE_ALIGN;
flags |= QUEUE_FLG_HC;
- flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
+ flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
flags |= QUEUE_FLG_VLAN;
DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -2380,7 +2429,8 @@ static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
if (!fp->disable_tpa)
flags |= QUEUE_FLG_TPA;
- flags |= QUEUE_FLG_STATS;
+ flags = stat_counter_valid(bp, fp) ?
+ (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
return flags;
}
@@ -2440,7 +2490,10 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
rxq_init->fw_sb_id = fp->fw_sb_id;
- rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
+ if (IS_FCOE_FP(fp))
+ rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
+ else
+ rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
rxq_init->cid = HW_CID(bp, fp->cid);
@@ -2460,6 +2513,12 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
+
+ if (IS_FCOE_FP(fp)) {
+ txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
+ }
+
txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}
@@ -2573,6 +2632,26 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
*/
}
+/* called due to MCP event (on pmf):
+ * reread new bandwidth configuration
+ * configure FW
+ * notify other functions about the change
+ */
+static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
+{
+ if (bp->link_vars.link_up) {
+ bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+ bnx2x_link_sync_notify(bp);
+ }
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+}
+
+static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
+{
+ bnx2x_config_mf_bw(bp);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+}
+
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -2598,10 +2677,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
}
if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
-
- bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
- bnx2x_link_sync_notify(bp);
- storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ bnx2x_config_mf_bw(bp);
dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
}
@@ -3022,10 +3098,19 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_DCC_EVENT_MASK)
bnx2x_dcc_event(bp,
(val & DRV_STATUS_DCC_EVENT_MASK));
+
+ if (val & DRV_STATUS_SET_MF_BW)
+ bnx2x_set_mf_bw(bp);
+
bnx2x__link_status_update(bp);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
+ if (bp->port.pmf &&
+ (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS))
+ /* start dcbx state machine */
+ bnx2x_dcbx_set_params(bp,
+ BNX2X_DCBX_STATE_NEG_RECEIVED);
} else if (attn & BNX2X_MC_ASSERT_BITS) {
BNX2X_ERR("MC assert!\n");
@@ -3637,11 +3722,23 @@ static void bnx2x_eq_int(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
goto next_spqe;
+ if (cid == BNX2X_FCOE_ETH_CID)
+ bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
+ else
#endif
- bnx2x_fp(bp, cid, state) =
+ bnx2x_fp(bp, cid, state) =
BNX2X_FP_STATE_CLOSED;
goto next_spqe;
+
+ case EVENT_RING_OPCODE_STOP_TRAFFIC:
+ DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
+ goto next_spqe;
+ case EVENT_RING_OPCODE_START_TRAFFIC:
+ DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
+ goto next_spqe;
}
switch (opcode | bp->state) {
@@ -3714,7 +3811,13 @@ static void bnx2x_sp_task(struct work_struct *work)
/* SP events: STAT_QUERY and others */
if (status & BNX2X_DEF_SB_IDX) {
+#ifdef BCM_CNIC
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ if ((!NO_FCOE(bp)) &&
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+#endif
/* Handle EQ completions */
bnx2x_eq_int(bp);
@@ -4097,7 +4200,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
bp->rx_ticks, bp->tx_ticks);
}
@@ -4145,13 +4248,16 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
REG_WR8(bp, BAR_TSTRORM_INTMEM +
TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
- bp->fp->cl_id + (i % bp->num_queues));
+ bp->fp->cl_id + (i % (bp->num_queues -
+ NONE_ETH_CONTEXT_USE)));
}
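/*
 * Example (assuming NONE_ETH_CONTEXT_USE == 1 for the FCoE client under
 * BCM_CNIC): with num_queues == 9 the table cycles over the 8 ETH
 * clients only, e.g. a hypothetical base cl_id of 16 fills entries
 * 16,17,...,23,16,... so RSS never steers traffic to the FCoE queue.
 */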
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
int mode = bp->rx_mode;
+ int port = BP_PORT(bp);
u16 cl_id;
+ u32 def_q_filters = 0;
/* All but management unicast packets should pass to the host as well */
u32 llh_mask =
@@ -4162,30 +4268,42 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
switch (mode) {
case BNX2X_RX_MODE_NONE: /* no Rx */
- cl_id = BP_L_ID(bp);
- bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+ def_q_filters = BNX2X_ACCEPT_NONE;
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+ }
+#endif
break;
case BNX2X_RX_MODE_NORMAL:
- cl_id = BP_L_ID(bp);
- bnx2x_rxq_set_mac_filters(bp, cl_id,
- BNX2X_ACCEPT_UNICAST |
- BNX2X_ACCEPT_BROADCAST |
- BNX2X_ACCEPT_MULTICAST);
+ def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
+ BNX2X_ACCEPT_MULTICAST;
+#ifdef BCM_CNIC
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
+ BNX2X_ACCEPT_MULTICAST);
+#endif
break;
case BNX2X_RX_MODE_ALLMULTI:
- cl_id = BP_L_ID(bp);
- bnx2x_rxq_set_mac_filters(bp, cl_id,
- BNX2X_ACCEPT_UNICAST |
- BNX2X_ACCEPT_BROADCAST |
- BNX2X_ACCEPT_ALL_MULTICAST);
+ def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
+ BNX2X_ACCEPT_ALL_MULTICAST;
+#ifdef BCM_CNIC
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
+ BNX2X_ACCEPT_MULTICAST);
+#endif
break;
case BNX2X_RX_MODE_PROMISC:
- cl_id = BP_L_ID(bp);
- bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
-
+ def_q_filters |= BNX2X_PROMISCUOUS_MODE;
+#ifdef BCM_CNIC
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
+ BNX2X_ACCEPT_MULTICAST);
+#endif
/* pass management unicast packets as well */
llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
break;
@@ -4195,20 +4313,24 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
break;
}
+ cl_id = BP_L_ID(bp);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
+
REG_WR(bp,
- BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
- NIG_REG_LLH0_BRB1_DRV_MASK,
- llh_mask);
+ (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
+ NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
DP(NETIF_MSG_IFUP, "rx mode %d\n"
"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
- "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
+ "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
+ "unmatched_ucast 0x%x\n", mode,
bp->mac_filters.ucast_drop_all,
bp->mac_filters.mcast_drop_all,
bp->mac_filters.bcast_drop_all,
bp->mac_filters.ucast_accept_all,
bp->mac_filters.mcast_accept_all,
- bp->mac_filters.bcast_accept_all
+ bp->mac_filters.bcast_accept_all,
+ bp->mac_filters.unmatched_unicast
);
storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
@@ -4232,6 +4354,15 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
bp->mf_mode);
}
+ if (IS_MF_SI(bp))
+ /*
+ * In switch independent mode, the TSTORM needs to accept
+ * packets that failed classification, since approximate match
+ * mac addresses aren't written to NIG LLH
+ */
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
+
/* Zero this manually as its initialization is
currently missing in the initTool */
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -4247,6 +4378,7 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
/* port */
+ bnx2x_dcb_init_intmem_pfc(bp);
}
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
@@ -4308,9 +4440,11 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
int i;
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
+ if (!NO_FCOE(bp))
+ bnx2x_init_fcoe_fp(bp);
bnx2x_init_sb(bp, bp->cnic_sb_mapping,
BNX2X_VF_ID_INVALID, false,
@@ -5048,12 +5182,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
if (!CHIP_IS_E1(bp))
- REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
+ REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
if (CHIP_IS_E2(bp)) {
/* Bit-map indicating which L2 hdrs may appear after the
basic Ethernet header */
- int has_ovlan = IS_MF(bp);
+ int has_ovlan = IS_MF_SD(bp);
REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
}
@@ -5087,7 +5221,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
if (CHIP_IS_E2(bp)) {
- int has_ovlan = IS_MF(bp);
+ int has_ovlan = IS_MF_SD(bp);
REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
}
@@ -5164,12 +5298,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
if (!CHIP_IS_E1(bp)) {
REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
- REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
+ REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
}
if (CHIP_IS_E2(bp)) {
/* Bit-map indicating which L2 hdrs may appear after the
basic Ethernet header */
- REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
+ REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
}
if (CHIP_REV_IS_SLOW(bp))
@@ -5370,8 +5504,10 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
* - SF mode: bits 3-7 are masked. only bits 0-2 are in use
* - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
* bits 4-7 are used for "per vn group attention" */
- REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
- (IS_MF(bp) ? 0xF7 : 0x7));
+ val = IS_MF(bp) ? 0xF7 : 0x7;
+ /* Enable DCBX attention for all but E1 */
+ val |= CHIP_IS_E1(bp) ? 0 : 0x10;
+ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
@@ -5386,7 +5522,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
if (!CHIP_IS_E1(bp)) {
/* 0x2 disable mf_ov, 0x1 enable */
REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
- (IS_MF(bp) ? 0x1 : 0x2));
+ (IS_MF_SD(bp) ? 0x1 : 0x2));
if (CHIP_IS_E2(bp)) {
val = 0;
@@ -5816,6 +5952,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
/* fastpath */
/* Common */
for_each_queue(bp, i) {
+#ifdef BCM_CNIC
+ /* FCoE client uses default status block */
+ if (IS_FCOE_IDX(i)) {
+ union host_hc_status_block *sb =
+ &bnx2x_fp(bp, i, status_blk);
+ memset(sb, 0, sizeof(union host_hc_status_block));
+ bnx2x_fp(bp, i, status_blk_mapping) = 0;
+ } else {
+#endif
/* status blocks */
if (CHIP_IS_E2(bp))
BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
@@ -5825,9 +5970,12 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
bnx2x_fp(bp, i, status_blk_mapping),
sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
}
/* Rx */
- for_each_queue(bp, i) {
+ for_each_rx_queue(bp, i) {
/* fastpath rx rings: rx_buf rx_desc rx_comp */
BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
@@ -5847,7 +5995,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
}
/* Tx */
- for_each_queue(bp, i) {
+ for_each_tx_queue(bp, i) {
/* fastpath tx rings: tx_buf tx_desc */
BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
@@ -5931,15 +6079,20 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
bnx2x_fp(bp, i, bp) = bp;
/* status blocks */
- if (CHIP_IS_E2(bp))
- BNX2X_PCI_ALLOC(sb->e2_sb,
- &bnx2x_fp(bp, i, status_blk_mapping),
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(sb->e1x_sb,
- &bnx2x_fp(bp, i, status_blk_mapping),
- sizeof(struct host_hc_status_block_e1x));
-
+#ifdef BCM_CNIC
+ if (!IS_FCOE_IDX(i)) {
+#endif
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_ALLOC(sb->e2_sb,
+ &bnx2x_fp(bp, i, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(sb->e1x_sb,
+ &bnx2x_fp(bp, i, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
set_sb_shortcuts(bp, i);
}
/* Rx */
@@ -6055,7 +6208,7 @@ static int bnx2x_func_stop(struct bnx2x *bp)
* @param cam_offset offset in a CAM to use
* @param is_bcast is the set MAC a broadcast address (for E1 only)
*/
-static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
+static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
u32 cl_bit_vec, u8 cam_offset,
u8 is_bcast)
{
@@ -6170,6 +6323,70 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
return BP_VN(bp) * 32 + rel_offset;
}
+/**
+ * LLH CAM line allocations: currently only iSCSI and ETH macs are
+ * relevant. In addition, current implementation is tuned for a
+ * single ETH MAC.
+ *
+ * When a PF configuration with multiple unicast ETH MACs in
+ * switch-independent mode is required (NetQ, multiple netdev
+ * MACs, etc.), consider making better use of the 16 per-function
+ * MAC entries in the LLH memory.
+ */
+enum {
+ LLH_CAM_ISCSI_ETH_LINE = 0,
+ LLH_CAM_ETH_LINE,
+ LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
+};
+
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+ int set,
+ unsigned char *dev_addr,
+ int index)
+{
+ u32 wb_data[2];
+ u32 mem_offset, ena_offset, mem_index;
+ /**
+ * index mapping:
+ * 0..7 - goes to MEM
+ * 8..15 - goes to MEM2
+ */
+
+ if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+ return;
+
+ /* calculate memory start offset according to the mapping
+ * and index in the memory */
+ if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
+ mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
+ NIG_REG_LLH0_FUNC_MEM;
+ ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+ NIG_REG_LLH0_FUNC_MEM_ENABLE;
+ mem_index = index;
+ } else {
+ mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
+ NIG_REG_P0_LLH_FUNC_MEM2;
+ ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
+ NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
+ mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
+ }
+
+ if (set) {
+ /* LLH_FUNC_MEM is a u64 WB register */
+ mem_offset += 8*mem_index;
+
+ wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+ (dev_addr[4] << 8) | dev_addr[5]);
+ wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
+
+ REG_WR_DMAE(bp, mem_offset, wb_data, 2);
+ }
+
+ /* enable/disable the entry */
+ REG_WR(bp, ena_offset + 4*mem_index, set);
+
+}
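/*
 * Example of the wb_data packing above: for dev_addr 00:11:22:33:44:55
 * (hypothetical MAC),
 *   wb_data[0] = 0x22334455   (bytes 2..5)
 *   wb_data[1] = 0x00000011   (bytes 0..1)
 * i.e. the 64-bit wide-bus value the LLH_FUNC_MEM entry expects.
 */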
+
void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
{
u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
@@ -6179,9 +6396,13 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
(1 << bp->fp->cl_id), cam_offset , 0);
+ bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
+
if (CHIP_IS_E1(bp)) {
/* broadcast MAC */
- u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ static const u8 bcast[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
}
}
@@ -6283,12 +6504,59 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
- u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
+ u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
+ BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
/* Send a SET_MAC ramrod */
bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
cam_offset, 0);
+
+ bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
+
+ return 0;
+}
+
+/**
+ * Set FCoE L2 MAC(s) at the next entries in the CAM after the
+ * ETH MAC(s). This function will wait until the ramrod
+ * completion returns.
+ *
+ * @param bp driver handle
+ * @param set set or clear the CAM entry
+ *
+ * @return 0 on success, -ENODEV if the ramrod doesn't return.
+ */
+int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
+{
+ u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
+ /**
+ * CAM allocation for E1H
+ * eth unicasts: by func number
+ * iscsi: by func number
+ * fip unicast: by func number
+ * fip multicast: by func number
+ */
+ bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
+ cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
+
+ return 0;
+}
+
+int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
+{
+ u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
+
+ /**
+ * CAM allocation for E1H
+ * eth unicasts: by func number
+ * iscsi: by func number
+ * fip unicast: by func number
+ * fip multicast: by func number
+ */
+ bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
+ bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
+
return 0;
}
#endif
@@ -6306,6 +6574,8 @@ static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
data->general.statistics_counter_id = params->rxq_params.stat_id;
data->general.statistics_en_flg =
(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
+ data->general.is_fcoe_flg =
+ (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
data->general.activate_flg = activate;
data->general.sp_client_id = params->rxq_params.spcl_id;
@@ -6374,7 +6644,9 @@ static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
data->fc.safc_group_num = params->txq_params.cos;
data->fc.safc_group_en_flg =
(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
- data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ data->fc.traffic_type =
+ (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
+ LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}
static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
@@ -6473,7 +6745,7 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
bnx2x_enable_msi(bp);
/* falling through... */
case INT_MODE_INTx:
- bp->num_queues = 1;
+ bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
break;
default:
@@ -6496,8 +6768,8 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
"enable MSI-X (%d), "
"set number of queues to %d\n",
bp->num_queues,
- 1);
- bp->num_queues = 1;
+ 1 + NONE_ETH_CONTEXT_USE);
+ bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
if (!(bp->flags & DISABLE_MSI_FLAG))
bnx2x_enable_msi(bp);
@@ -6618,7 +6890,9 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_client_init_params params = { {0} };
int rc;
- bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
+ /* reset IGU state; skip for the FCoE L2 queue */
+ if (!IS_FCOE_FP(fp))
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
IGU_INT_ENABLE, 0);
params.ramrod_params.pstate = &fp->state;
@@ -6626,6 +6900,12 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
params.ramrod_params.index = fp->index;
params.ramrod_params.cid = fp->cid;
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ params.ramrod_params.flags |= CLIENT_IS_FCOE;
+
+#endif
+
if (is_leading)
params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
@@ -6710,7 +6990,7 @@ static void bnx2x_reset_func(struct bnx2x *bp)
REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
/* FP SBs */
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
REG_WR8(bp,
BAR_CSTRORM_INTMEM +
@@ -6830,6 +7110,20 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
}
}
+#ifdef BCM_CNIC
+static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
+{
+ if (bp->flags & FCOE_MACS_SET) {
+ if (!IS_MF_SD(bp))
+ bnx2x_set_fip_eth_mac_addr(bp, 0);
+
+ bnx2x_set_all_enode_macs(bp, 0);
+
+ bp->flags &= ~FCOE_MACS_SET;
+ }
+}
+#endif
+
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
int port = BP_PORT(bp);
@@ -6837,7 +7131,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
int i, cnt, rc;
/* Wait until tx fastpath tasks complete */
- for_each_queue(bp, i) {
+ for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
cnt = 1000;
@@ -6877,13 +7171,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
}
#ifdef BCM_CNIC
- /* Clear iSCSI L2 MAC */
- mutex_lock(&bp->cnic_mutex);
- if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
- bnx2x_set_iscsi_eth_mac_addr(bp, 0);
- bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
- }
- mutex_unlock(&bp->cnic_mutex);
+ bnx2x_del_fcoe_eth_macs(bp);
#endif
if (unload_mode == UNLOAD_NORMAL)
@@ -7736,7 +8024,7 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
bp->igu_sb_cnt = 0;
if (CHIP_INT_MODE_IS_BC(bp)) {
bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
- bp->l2_cid_count);
+ NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
FP_SB_MAX_E1x;
@@ -7767,7 +8055,8 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
}
}
}
- bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
+ NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
if (bp->igu_sb_cnt == 0)
BNX2X_ERR("CAM configuration error\n");
}
@@ -8076,9 +8365,8 @@ static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
int port = BP_PORT(bp);
- u32 val, val2;
u32 config;
- u32 ext_phy_type, ext_phy_config;;
+ u32 ext_phy_type, ext_phy_config;
bp->link_params.bp = bp;
bp->link_params.port = port;
@@ -8135,25 +8423,73 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
(ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
bp->mdio.prtad =
XGXS_EXT_PHY_ADDR(ext_phy_config);
+}
+
+static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2;
+ int func = BP_ABS_FUNC(bp);
+ int port = BP_PORT(bp);
+
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERROR("warning: random MAC workaround active\n");
+ random_ether_addr(bp->dev->dev_addr);
+ } else if (IS_MF(bp)) {
+ val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+ if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+ /* iSCSI NPAR MAC */
+ if (IS_MF_SI(bp)) {
+ u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+ if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_lower);
+ bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+ }
+ }
+#endif
+ } else {
+ /* in SF read MACs from port configuration */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_lower);
+ bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+#endif
+ }
- val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
- val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
#ifdef BCM_CNIC
- val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
- val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
- bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+ /* Inform the upper layers about FCoE MAC */
+ if (!CHIP_IS_E1x(bp)) {
+ if (IS_MF_SD(bp))
+ memcpy(bp->fip_mac, bp->dev->dev_addr,
+ sizeof(bp->fip_mac));
+ else
+ memcpy(bp->fip_mac, bp->iscsi_mac,
+ sizeof(bp->fip_mac));
+ }
#endif
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
- int func = BP_ABS_FUNC(bp);
- int vn;
- u32 val, val2;
+ int /*abs*/func = BP_ABS_FUNC(bp);
+ int vn, port;
+ u32 val = 0;
int rc = 0;
bnx2x_get_common_hwinfo(bp);
@@ -8163,7 +8499,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->igu_dsb_id = DEF_SB_IGU_ID;
bp->igu_base_sb = 0;
- bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
+ bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
+ NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
} else {
bp->common.int_block = INT_BLOCK_IGU;
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
@@ -8186,44 +8523,99 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = 0;
bp->mf_mode = 0;
vn = BP_E1HVN(bp);
+ port = BP_PORT(bp);
+
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+ DP(NETIF_MSG_PROBE,
+ "shmem2base 0x%x, size %d, mfcfg offset %d\n",
+ bp->common.shmem2_base, SHMEM2_RD(bp, size),
+ (u32)offsetof(struct shmem2_region, mf_cfg_addr));
if (SHMEM2_HAS(bp, mf_cfg_addr))
bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
else
bp->common.mf_cfg_base = bp->common.shmem_base +
offsetof(struct shmem_region, func_mb) +
E1H_FUNC_MAX * sizeof(struct drv_func_mb);
- bp->mf_config[vn] =
- MF_CFG_RD(bp, func_mf_config[func].config);
+ /*
+ * get mf configuration:
+ * 1. existence of MF configuration
+ * 2. MAC address must be legal (check only upper bytes)
+ * for Switch-Independent mode;
+ * OVLAN must be legal for Switch-Dependent mode
+ * 3. SF_MODE configures specific MF mode
+ */
+ if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ /* get mf configuration */
+ val = SHMEM_RD(bp,
+ dev_info.shared_feature_config.config);
+ val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
+
+ switch (val) {
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+ val = MF_CFG_RD(bp, func_mf_config[func].
+ mac_upper);
+ /* check for legal mac (upper bytes) */
+ if (val != 0xffff) {
+ bp->mf_mode = MULTI_FUNCTION_SI;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ DP(NETIF_MSG_PROBE, "illegal MAC "
+ "address for SI\n");
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+ /* get OV configuration */
+ val = MF_CFG_RD(bp,
+ func_mf_config[FUNC_0].e1hov_tag);
+ val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ DP(NETIF_MSG_PROBE, "illegal OV for "
+ "SD\n");
+ break;
+ default:
+ /* Unknown configuration: reset mf_config */
+ bp->mf_config[vn] = 0;
+ DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
+ val);
+ }
+ }
- val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
- FUNC_MF_CFG_E1HOV_TAG_MASK);
- if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
- bp->mf_mode = 1;
BNX2X_DEV_INFO("%s function mode\n",
IS_MF(bp) ? "multi" : "single");
- if (IS_MF(bp)) {
- val = (MF_CFG_RD(bp, func_mf_config[func].
- e1hov_tag) &
- FUNC_MF_CFG_E1HOV_TAG_MASK);
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK;
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
bp->mf_ov = val;
- BNX2X_DEV_INFO("MF OV for func %d is %d "
- "(0x%04x)\n",
- func, bp->mf_ov, bp->mf_ov);
+ BNX2X_DEV_INFO("MF OV for func %d is %d"
+ " (0x%04x)\n", func,
+ bp->mf_ov, bp->mf_ov);
} else {
- BNX2X_ERROR("No valid MF OV for func %d,"
- " aborting\n", func);
+ BNX2X_ERR("No valid MF OV for func %d,"
+ " aborting\n", func);
rc = -EPERM;
}
- } else {
- if (BP_VN(bp)) {
- BNX2X_ERROR("VN %d in single function mode,"
- " aborting\n", BP_E1HVN(bp));
+ break;
+ case MULTI_FUNCTION_SI:
+ BNX2X_DEV_INFO("func %d is in MF "
+ "switch-independent mode\n", func);
+ break;
+ default:
+ if (vn) {
+ BNX2X_ERR("VN %d in single function mode,"
+ " aborting\n", vn);
rc = -EPERM;
}
+ break;
}
+
}
/* adjust igu_sb_cnt to MF for E1x */
@@ -8248,32 +8640,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
}
- if (IS_MF(bp)) {
- val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
- val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
- if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
- (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
- bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
- bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
- bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
- bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
- bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
- bp->dev->dev_addr[5] = (u8)(val & 0xff);
- memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
- ETH_ALEN);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
- ETH_ALEN);
- }
-
- return rc;
- }
-
- if (BP_NOMCP(bp)) {
- /* only supposed to happen on emulation/FPGA */
- BNX2X_ERROR("warning: random MAC workaround active\n");
- random_ether_addr(bp->dev->dev_addr);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
- }
+ /* Get MAC addresses */
+ bnx2x_get_mac_hwinfo(bp);
return rc;
}
@@ -8427,6 +8795,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->timer.data = (unsigned long) bp;
bp->timer.function = bnx2x_timer;
+ bnx2x_dcbx_init_params(bp);
+
return rc;
}
@@ -8629,6 +8999,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
.ndo_start_xmit = bnx2x_start_xmit,
+ .ndo_select_queue = bnx2x_select_queue,
.ndo_set_multicast_list = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -8761,7 +9132,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
dev->netdev_ops = &bnx2x_netdev_ops;
bnx2x_set_ethtool_ops(dev);
dev->features |= NETIF_F_SG;
- dev->features |= NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (bp->flags & USING_DAC_FLAG)
dev->features |= NETIF_F_HIGHDMA;
dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -8769,7 +9140,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
dev->vlan_features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_HW_CSUM;
+ dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (bp->flags & USING_DAC_FLAG)
dev->vlan_features |= NETIF_F_HIGHDMA;
dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -9064,10 +9435,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
default:
pr_err("Unknown board_type (%ld), aborting\n",
ent->driver_data);
- return ENODEV;
+ return -ENODEV;
}
- cid_count += CNIC_CONTEXT_USE;
+ cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
@@ -9096,11 +9467,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
- rc = register_netdev(dev);
- if (rc) {
- dev_err(&pdev->dev, "Cannot register net device\n");
- goto init_one_exit;
- }
+#ifdef BCM_CNIC
+ /* disable FCOE L2 queue for E1x*/
+ if (CHIP_IS_E1x(bp))
+ bp->flags |= NO_FCOE_FLAG;
+
+#endif
/* Configure interrupt mode: try to enable MSI-X/MSI if
* needed, set bp->num_queues appropriately.
@@ -9110,6 +9482,21 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* Add all NAPI objects */
bnx2x_add_all_napi(bp);
+ rc = register_netdev(dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto init_one_exit;
+ }
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ /* Add storage MAC address */
+ rtnl_lock();
+ dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+#endif
+
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
@@ -9153,6 +9540,15 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
}
bp = netdev_priv(dev);
+#ifdef BCM_CNIC
+ /* Delete storage MAC address */
+ if (!NO_FCOE(bp)) {
+ rtnl_lock();
+ dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+#endif
+
unregister_netdev(dev);
/* Delete all NAPI objects */
@@ -9202,7 +9598,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
- for_each_queue(bp, i)
+ for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
bnx2x_free_mem(bp);
@@ -9429,7 +9825,8 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
break;
else
atomic_dec(&bp->spq_left);
- } else if (type == ISCSI_CONNECTION_TYPE) {
+ } else if ((type == ISCSI_CONNECTION_TYPE) ||
+ (type == FCOE_CONNECTION_TYPE)) {
if (bp->cnic_spq_pending >=
bp->cnic_eth_dev.max_kwqe_pending)
break;
@@ -9576,6 +9973,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
case DRV_CTL_START_L2_CMD: {
u32 cli = ctl->data.ring.client_id;
+ /* Clear FCoE FIP and ALL ENODE MAC addresses first */
+ bnx2x_del_fcoe_eth_macs(bp);
+
/* Set iSCSI MAC address */
bnx2x_set_iscsi_eth_mac_addr(bp, 1);
@@ -9697,10 +10097,6 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
mutex_lock(&bp->cnic_mutex);
- if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
- bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
- bnx2x_set_iscsi_eth_mac_addr(bp, 0);
- }
cp->drv_state = 0;
rcu_assign_pointer(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
@@ -9731,7 +10127,9 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->drv_ctl = bnx2x_drv_ctl;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
- cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+ cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
+ BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 1cefe489a955..bfd875b72906 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1615,6 +1615,8 @@
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
@@ -1744,12 +1746,16 @@
~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
port */
#define NIG_REG_LLFC_ENABLE_0 0x16208
+#define NIG_REG_LLFC_ENABLE_1 0x1620c
/* [RW 16] classes are high-priority for port0 */
#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
/* [RW 16] classes are low-priority for port0 */
#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
#define NIG_REG_LLFC_OUT_EN_0 0x160c8
+#define NIG_REG_LLFC_OUT_EN_1 0x160cc
#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
@@ -1774,6 +1780,8 @@
/* [RW 8] event id for llh0 */
#define NIG_REG_LLH0_EVENT_ID 0x10084
#define NIG_REG_LLH0_FUNC_EN 0x160fc
+#define NIG_REG_LLH0_FUNC_MEM 0x16180
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
/* [RW 1] Determine the IP version to look for in
~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
@@ -1797,6 +1805,9 @@
#define NIG_REG_LLH1_ERROR_MASK 0x10090
/* [RW 8] event id for llh1 */
#define NIG_REG_LLH1_EVENT_ID 0x10088
+#define NIG_REG_LLH1_FUNC_MEM 0x161c0
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
+#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
/* [RW 8] init credit counter for port1 in LLH */
#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
#define NIG_REG_LLH1_XCM_MASK 0x10134
@@ -1907,11 +1918,17 @@
~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
port */
#define NIG_REG_PAUSE_ENABLE_0 0x160c0
+#define NIG_REG_PAUSE_ENABLE_1 0x160c4
/* [RW 1] Input enable for RX PBF LP IF */
#define NIG_REG_PBF_LB_IN_EN 0x100b4
/* [RW 1] Value of this register will be transmitted to port swap when
~nig_registers_strap_override.strap_override =1 */
#define NIG_REG_PORT_SWAP 0x10394
+/* [RW 1] PPP enable for port0. This register may get 1 only when
+ * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable =0 for the
+ * same port */
+#define NIG_REG_PPP_ENABLE_0 0x160b0
+#define NIG_REG_PPP_ENABLE_1 0x160b4
/* [RW 1] output enable for RX parser descriptor IF */
#define NIG_REG_PRS_EOP_OUT_EN 0x10104
/* [RW 1] Input enable for RX parser request IF */
@@ -1978,6 +1995,14 @@
#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
+/* [RW 31] The weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT 0x15c054
+/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_UPPER_BOUND 0x15c060
+/* [RW 31] The weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT 0x15c058
/* [RW 1] Disable processing further tasks from port 0 (after ending the
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
@@ -1988,9 +2013,16 @@
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
#define PBF_REG_DISABLE_PF 0x1402e8
+/* [RW 1] Indicates that ETS is performed between the COSes in the command
+ * arbiter. If reset, strict priority w/ anti-starvation will be performed
+ * w/o WFQ. */
+#define PBF_REG_ETS_ENABLED 0x15c050
/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
* Ethernet header. */
#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
+/* [RW 1] Indicates which COS is connected to the highest priority in the
+ * command arbiter. */
+#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
#define PBF_REG_IF_ENABLE_REG 0x140044
/* [RW 1] Init bit. When set the initial credits are copied to the credit
registers (except the port credits). Should be set and then reset after
@@ -2016,6 +2048,10 @@
#define PBF_REG_MAC_LB_ENABLE 0x140040
/* [RW 6] Bit-map indicating which headers must appear in the packet */
#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
+/* [RW 16] The number of strict priority arbitration slots between 2 RR
+ * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
+ * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
+#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
not supported. */
#define PBF_REG_P0_ARB_THRSH 0x1400e4
@@ -4970,7 +5006,23 @@
#define EMAC_REG_EMAC_TX_MODE 0xbc
#define EMAC_REG_EMAC_TX_STAT_AC 0x280
#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
+#define EMAC_REG_RX_PFC_MODE 0x320
+#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
+#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
+#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
+#define EMAC_REG_RX_PFC_PARAM 0x324
+#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
+#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
+#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
#define EMAC_RX_MODE_FLOW_EN (1L<<2)
+#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
#define EMAC_RX_MODE_RESET (1L<<0)
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 4733c835dad9..6e4d9b144cc4 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -160,7 +160,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
ramrod_data.drv_counter = bp->stats_counter++;
ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
@@ -766,7 +766,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
estats->no_buff_discard_hi = 0;
estats->no_buff_discard_lo = 0;
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
int cl_id = fp->cl_id;
struct tstorm_per_client_stats *tclient =
@@ -996,7 +996,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
tmp = estats->mac_discard;
- for_each_queue(bp, i)
+ for_each_rx_queue(bp, i)
tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
nstats->rx_dropped = tmp;
@@ -1087,7 +1087,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bp->dev->name,
estats->brb_drop_lo, estats->brb_truncate_lo);
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
@@ -1101,7 +1101,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
fp->rx_calls, fp->rx_pkt);
}
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
struct netdev_queue *txq =
@@ -1381,7 +1381,8 @@ void bnx2x_stats_init(struct bnx2x *bp)
memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
}
- for_each_queue(bp, i) {
+ /* FW stats are currently collected for ETH clients only */
+ for_each_eth_queue(bp, i) {
/* Set initial stats counter in the stats ramrod data to -1 */
int cl_id = bp->fp[i].cl_id;
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index afd15efa429a..596798c47452 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -53,7 +53,6 @@ struct bnx2x_eth_q_stats {
u32 hw_csum_err;
};
-#define BNX2X_NUM_Q_STATS 13
#define Q_STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
@@ -225,7 +224,6 @@ struct bnx2x_eth_stats {
u32 nig_timer_max;
};
-#define BNX2X_NUM_STATS 43
#define STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 6f9c6faef24c..0e2737eac8b7 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_BONDING) += bonding.o
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
bonding-objs += $(ipv6-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 881914bc4e9c..48cf24ff4e6f 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2474,8 +2474,7 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
goto out;
read_lock(&bond->lock);
- slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
- orig_dev);
+ slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
if (!slave)
goto out_unlock;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 26bb118c4533..f4e638c65129 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -44,42 +44,6 @@
#include "bond_alb.h"
-#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */
-#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing.
- * Used for division - never set
- * to zero !!!
- */
-#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of
- * learning packets to the switch
- */
-
-#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
- * ALB_TIMER_TICKS_PER_SEC)
-
-#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
- * ALB_TIMER_TICKS_PER_SEC)
-
-#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
- * Note that this value MUST NOT be smaller
- * because the key hash table is BYTE wide !
- */
-
-
-#define TLB_NULL_INDEX 0xffffffff
-#define MAX_LP_BURST 3
-
-/* rlb defs */
-#define RLB_HASH_TABLE_SIZE 256
-#define RLB_NULL_INDEX 0xffffffff
-#define RLB_UPDATE_DELAY 2*ALB_TIMER_TICKS_PER_SEC /* 2 seconds */
-#define RLB_ARP_BURST_SIZE 2
-#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb
- * rebalance interval (5 min).
- */
-/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is
- * promiscuous after failover
- */
-#define RLB_PROMISC_TIMEOUT 10*ALB_TIMER_TICKS_PER_SEC
#ifndef __long_aligned
#define __long_aligned __attribute__((aligned((sizeof(long)))))
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 50968f8196cf..118c28aa471e 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -31,6 +31,44 @@ struct slave;
#define BOND_ALB_INFO(bond) ((bond)->alb_info)
#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info)
+#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */
+#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing.
+ * Used for division - never set
+ * to zero !!!
+ */
+#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of
+ * learning packets to the switch
+ */
+
+#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
+ * ALB_TIMER_TICKS_PER_SEC)
+
+#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
+ * ALB_TIMER_TICKS_PER_SEC)
+
+#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
+ * Note that this value MUST NOT be smaller
+ * because the key hash table is BYTE wide !
+ */
+
+
+#define TLB_NULL_INDEX 0xffffffff
+#define MAX_LP_BURST 3
+
+/* rlb defs */
+#define RLB_HASH_TABLE_SIZE 256
+#define RLB_NULL_INDEX 0xffffffff
+#define RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC) /* 2 seconds */
+#define RLB_ARP_BURST_SIZE 2
+#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb
+ * rebalance interval (5 min).
+ */
+/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is
+ * promiscuous after failover
+ */
+#define RLB_PROMISC_TIMEOUT (10*ALB_TIMER_TICKS_PER_SEC)
+
+
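/*
 * With ALB_TIMER_TICKS_PER_SEC == 10, the derived intervals work out to:
 *   BOND_TLB_REBALANCE_TICKS = 10 * 10 = 100 ticks (10 s)
 *   BOND_ALB_LP_TICKS        =  1 * 10 =  10 ticks  (1 s)
 *   RLB_UPDATE_DELAY         =  2 * 10 =  20 ticks  (2 s)
 *   RLB_PROMISC_TIMEOUT      = 10 * 10 = 100 ticks (10 s)
 */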
struct tlb_client_info {
struct slave *tx_slave; /* A pointer to slave used for transmitting
* packets to a Client that the Hash function
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
new file mode 100644
index 000000000000..3680aa251dea
--- /dev/null
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -0,0 +1,146 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+
+#include "bonding.h"
+#include "bond_alb.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static struct dentry *bonding_debug_root;
+
+/*
+ * Show RLB hash table
+ */
+static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
+{
+ struct bonding *bond = m->private;
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct rlb_client_info *client_info;
+ u32 hash_index;
+
+ if (bond->params.mode != BOND_MODE_ALB)
+ return 0;
+
+ seq_printf(m, "SourceIP DestinationIP "
+ "Destination MAC DEV\n");
+
+ spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+
+ hash_index = bond_info->rx_hashtbl_head;
+ for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ client_info = &(bond_info->rx_hashtbl[hash_index]);
+ seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n",
+ &client_info->ip_src,
+ &client_info->ip_dst,
+ &client_info->mac_dst,
+ client_info->slave->dev->name);
+ }
+
+ spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+
+ return 0;
+}
+
+static int bond_debug_rlb_hash_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bond_debug_rlb_hash_show, inode->i_private);
+}
+
+static const struct file_operations bond_debug_rlb_hash_fops = {
+ .owner = THIS_MODULE,
+ .open = bond_debug_rlb_hash_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
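/*
 * Usage sketch (assumes debugfs is mounted at /sys/kernel/debug; the
 * row shown is made up):
 *   # cat /sys/kernel/debug/bonding/bond0/rlb_hash_table
 *   SourceIP        DestinationIP   Destination MAC   DEV
 *   10.0.0.1        10.0.0.2        00:11:22:33:44:55 eth0
 * The table is only populated when the bond runs in balance-alb mode.
 */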
+
+void bond_debug_register(struct bonding *bond)
+{
+ if (!bonding_debug_root)
+ return;
+
+ bond->debug_dir =
+ debugfs_create_dir(bond->dev->name, bonding_debug_root);
+
+ if (!bond->debug_dir) {
+ pr_warning("%s: Warning: failed to register to debugfs\n",
+ bond->dev->name);
+ return;
+ }
+
+ debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir,
+ bond, &bond_debug_rlb_hash_fops);
+}
+
+void bond_debug_unregister(struct bonding *bond)
+{
+ if (!bonding_debug_root)
+ return;
+
+ debugfs_remove_recursive(bond->debug_dir);
+}
+
+void bond_debug_reregister(struct bonding *bond)
+{
+ struct dentry *d;
+
+ if (!bonding_debug_root)
+ return;
+
+ d = debugfs_rename(bonding_debug_root, bond->debug_dir,
+ bonding_debug_root, bond->dev->name);
+ if (d) {
+ bond->debug_dir = d;
+ } else {
+ pr_warning("%s: Warning: failed to reregister, "
+ "so just unregister old one\n",
+ bond->dev->name);
+ bond_debug_unregister(bond);
+ }
+}
+
+void bond_create_debugfs(void)
+{
+ bonding_debug_root = debugfs_create_dir("bonding", NULL);
+
+ if (!bonding_debug_root) {
+ pr_warning("Warning: Cannot create bonding directory"
+ " in debugfs\n");
+ }
+}
+
+void bond_destroy_debugfs(void)
+{
+ debugfs_remove_recursive(bonding_debug_root);
+ bonding_debug_root = NULL;
+}
+
+
+#else /* !CONFIG_DEBUG_FS */
+
+void bond_debug_register(struct bonding *bond)
+{
+}
+
+void bond_debug_unregister(struct bonding *bond)
+{
+}
+
+void bond_debug_reregister(struct bonding *bond)
+{
+}
+
+void bond_create_debugfs(void)
+{
+}
+
+void bond_destroy_debugfs(void)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 121b073a6c3f..84fbd4ebd778 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -88,7 +88,12 @@ static void bond_na_send(struct net_device *slave_dev,
}
if (vlan_id) {
- skb = vlan_put_tag(skb, vlan_id);
+ /* The Ethernet header is not present yet, so it is
+ * too early to insert a VLAN tag. Force use of an
+ * out-of-line tag here and let dev_hard_start_xmit()
+ * insert it if the slave hardware can't.
+ */
+ skb = __vlan_hwaccel_put_tag(skb, vlan_id);
if (!skb) {
pr_err("failed to insert VLAN tag\n");
return;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bdb68a600382..b1025b85acf1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -171,7 +171,7 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link
/*----------------------------- Global variables ----------------------------*/
#ifdef CONFIG_NET_POLL_CONTROLLER
-cpumask_var_t netpoll_block_tx;
+atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif
static const char * const version =
@@ -418,36 +418,11 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
* @bond: bond device that got this skb for tx.
* @skb: hw accel VLAN tagged skb to transmit
* @slave_dev: slave that is supposed to xmit this skbuff
- *
- * When the bond gets an skb to transmit that is
- * already hardware accelerated VLAN tagged, and it
- * needs to relay this skb to a slave that is not
- * hw accel capable, the skb needs to be "unaccelerated",
- * i.e. strip the hwaccel tag and re-insert it as part
- * of the payload.
*/
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *slave_dev)
{
- unsigned short uninitialized_var(vlan_id);
-
- /* Test vlan_list not vlgrp to catch and handle 802.1p tags */
- if (!list_empty(&bond->vlan_list) &&
- !(slave_dev->features & NETIF_F_HW_VLAN_TX) &&
- vlan_get_tag(skb, &vlan_id) == 0) {
- skb->dev = slave_dev;
- skb = vlan_put_tag(skb, vlan_id);
- if (!skb) {
- /* vlan_put_tag() frees the skb in case of error,
- * so return success here so the calling functions
- * won't attempt to free is again.
- */
- return 0;
- }
- } else {
- skb->dev = slave_dev;
- }
-
+ skb->dev = slave_dev;
skb->priority = 1;
#ifdef CONFIG_NET_POLL_CONTROLLER
if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
@@ -873,15 +848,11 @@ static void bond_mc_del(struct bonding *bond, void *addr)
static void __bond_resend_igmp_join_requests(struct net_device *dev)
{
struct in_device *in_dev;
- struct ip_mc_list *im;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
- if (in_dev) {
- for (im = in_dev->mc_list; im; im = im->next)
- ip_mc_rejoin_group(im);
- }
-
+ if (in_dev)
+ ip_mc_rejoin_groups(in_dev);
rcu_read_unlock();
}
@@ -1201,11 +1172,13 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_do_fail_over_mac(bond, new_active,
old_active);
- bond->send_grat_arp = bond->params.num_grat_arp;
- bond_send_gratuitous_arp(bond);
+ if (netif_running(bond->dev)) {
+ bond->send_grat_arp = bond->params.num_grat_arp;
+ bond_send_gratuitous_arp(bond);
- bond->send_unsol_na = bond->params.num_unsol_na;
- bond_send_unsolicited_na(bond);
+ bond->send_unsol_na = bond->params.num_unsol_na;
+ bond_send_unsolicited_na(bond);
+ }
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
@@ -1219,8 +1192,9 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
/* resend IGMP joins since active slave has changed or
* all were sent on curr_active_slave */
- if ((USES_PRIMARY(bond->params.mode) && new_active) ||
- bond->params.mode == BOND_MODE_ROUNDROBIN) {
+ if (((USES_PRIMARY(bond->params.mode) && new_active) ||
+ bond->params.mode == BOND_MODE_ROUNDROBIN) &&
+ netif_running(bond->dev)) {
bond->igmp_retrans = bond->params.resend_igmp;
queue_delayed_work(bond->wq, &bond->mcast_work, 0);
}
@@ -1574,7 +1548,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (bond->slave_cnt == 0)
+ if (is_zero_ether_addr(bond->dev->dev_addr))
memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
slave_dev->addr_len);
@@ -3209,7 +3183,7 @@ out:
#ifdef CONFIG_PROC_FS
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(&dev_base_lock)
+ __acquires(RCU)
__acquires(&bond->lock)
{
struct bonding *bond = seq->private;
@@ -3218,7 +3192,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
int i;
/* make sure the bond won't be taken away */
- read_lock(&dev_base_lock);
+ rcu_read_lock();
read_lock(&bond->lock);
if (*pos == 0)
@@ -3248,12 +3222,12 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void bond_info_seq_stop(struct seq_file *seq, void *v)
__releases(&bond->lock)
- __releases(&dev_base_lock)
+ __releases(RCU)
{
struct bonding *bond = seq->private;
read_unlock(&bond->lock);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static void bond_info_show_master(struct seq_file *seq)
@@ -3507,6 +3481,8 @@ static int bond_event_changename(struct bonding *bond)
bond_remove_proc_entry(bond);
bond_create_proc_entry(bond);
+ bond_debug_reregister(bond);
+
return NOTIFY_DONE;
}
@@ -4789,6 +4765,8 @@ static void bond_uninit(struct net_device *bond_dev)
bond_remove_proc_entry(bond);
+ bond_debug_unregister(bond);
+
__hw_addr_flush(&bond->mc_list);
list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
@@ -5191,6 +5169,8 @@ static int bond_init(struct net_device *bond_dev)
bond_prepare_sysfs_group(bond);
+ bond_debug_register(bond);
+
__hw_addr_init(&bond->mc_list);
return 0;
}
@@ -5297,13 +5277,6 @@ static int __init bonding_init(void)
if (res)
goto out;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (!alloc_cpumask_var(&netpoll_block_tx, GFP_KERNEL)) {
- res = -ENOMEM;
- goto out;
- }
-#endif
-
res = register_pernet_subsys(&bond_net_ops);
if (res)
goto out;
@@ -5312,6 +5285,8 @@ static int __init bonding_init(void)
if (res)
goto err_link;
+ bond_create_debugfs();
+
for (i = 0; i < max_bonds; i++) {
res = bond_create(&init_net, NULL);
if (res)
@@ -5322,7 +5297,6 @@ static int __init bonding_init(void)
if (res)
goto err;
-
register_netdevice_notifier(&bond_netdev_notifier);
register_inetaddr_notifier(&bond_inetaddr_notifier);
bond_register_ipv6_notifier();
@@ -5332,9 +5306,6 @@ err:
rtnl_link_unregister(&bond_link_ops);
err_link:
unregister_pernet_subsys(&bond_net_ops);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- free_cpumask_var(netpoll_block_tx);
-#endif
goto out;
}
@@ -5346,12 +5317,16 @@ static void __exit bonding_exit(void)
bond_unregister_ipv6_notifier();
bond_destroy_sysfs();
+ bond_destroy_debugfs();
rtnl_link_unregister(&bond_link_ops);
unregister_pernet_subsys(&bond_net_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
- free_cpumask_var(netpoll_block_tx);
+ /*
+ * Make sure we don't have an imbalance on our netpoll blocking
+ */
+ WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}
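The WARN_ON above pairs with the header change below: the per-CPU bitmask becomes a plain counter, so block/unblock may nest and no longer needs preempt_disable(). A userspace analog of the new scheme (illustrative only, using C11 atomics in place of the kernel's atomic_t):

	#include <assert.h>
	#include <stdatomic.h>

	static atomic_int netpoll_block_tx;

	static void block_netpoll_tx(void)   { atomic_fetch_add(&netpoll_block_tx, 1); }
	static void unblock_netpoll_tx(void) { atomic_fetch_sub(&netpoll_block_tx, 1); }

	int main(void)
	{
		block_netpoll_tx();
		block_netpoll_tx();          /* nesting is fine with a counter */
		unblock_netpoll_tx();
		unblock_netpoll_tx();
		/* balanced pairs keep the WARN_ON in bonding_exit() quiet */
		assert(atomic_load(&netpoll_block_tx) == 0);
		return 0;
	}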
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4eedb12df6ca..4da384cc7603 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -119,26 +119,22 @@
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern cpumask_var_t netpoll_block_tx;
+extern atomic_t netpoll_block_tx;
static inline void block_netpoll_tx(void)
{
- preempt_disable();
- BUG_ON(cpumask_test_and_set_cpu(smp_processor_id(),
- netpoll_block_tx));
+ atomic_inc(&netpoll_block_tx);
}
static inline void unblock_netpoll_tx(void)
{
- BUG_ON(!cpumask_test_and_clear_cpu(smp_processor_id(),
- netpoll_block_tx));
- preempt_enable();
+ atomic_dec(&netpoll_block_tx);
}
static inline int is_netpoll_tx_blocked(struct net_device *dev)
{
if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
- return cpumask_test_cpu(smp_processor_id(), netpoll_block_tx);
+ return atomic_read(&netpoll_block_tx);
return 0;
}
#else
@@ -259,6 +255,10 @@ struct bonding {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr master_ipv6;
#endif
+#ifdef CONFIG_DEBUG_FS
+ /* debugging support via debugfs */
+ struct dentry *debug_dir;
+#endif /* CONFIG_DEBUG_FS */
};
/**
@@ -273,11 +273,11 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
bond_for_each_slave(bond, slave, i) {
if (slave->dev == slave_dev) {
- break;
+ return slave;
}
}
- return slave;
+ return NULL;
}
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -286,7 +286,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
return NULL;
}
- return (struct bonding *)netdev_priv(slave->dev->master);
+ return netdev_priv(slave->dev->master);
}
static inline bool bond_is_lb(const struct bonding *bond)
@@ -380,6 +380,11 @@ void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_register_arp(struct bonding *);
void bond_unregister_arp(struct bonding *);
+void bond_create_debugfs(void);
+void bond_destroy_debugfs(void);
+void bond_debug_register(struct bonding *bond);
+void bond_debug_unregister(struct bonding *bond);
+void bond_debug_reregister(struct bonding *bond);
struct bond_net {
struct net * net; /* Associated network namespace */
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
index 1cd90da86f13..32b1c6fb2de1 100644
--- a/drivers/net/caif/caif_shm_u5500.c
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -5,7 +5,7 @@
* License terms: GNU General Public License (GPL) version 2
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/version.h>
#include <linux/init.h>
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 19f9c0656667..80511167f35b 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -6,7 +6,7 @@
* License terms: GNU General Public License (GPL) version 2
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/spinlock.h>
#include <linux/sched.h>
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533fe313..20da1996d354 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF SPI driver");
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
+
static int spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
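The PAD_POW2 macro introduced above computes how many bytes are needed to reach the next multiple of a power-of-two alignment. A small standalone check of its arithmetic:

	#include <assert.h>

	#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))

	int main(void)
	{
		assert(PAD_POW2(5, 4) == 3);   /* 5 + 3 = 8, the next multiple of 4 */
		assert(PAD_POW2(8, 4) == 0);   /* already aligned: no padding */
		assert(PAD_POW2(7, 2) == 1);   /* 7 + 1 = 8 */
		return 0;
	}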
@@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
module_param(spi_frm_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
+ */
module_param(spi_up_head_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
@@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
static const struct file_operations dbgfs_state_fops = {
.open = dbgfs_open,
.read = dbgfs_state,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static const struct file_operations dbgfs_frame_fops = {
.open = dbgfs_open,
.read = dbgfs_frame,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
u8 *dst = buf;
caif_assert(buf);
+ if (cfspi->slave && !cfspi->slave_talked)
+ cfspi->slave_talked = true;
+
do {
struct sk_buff *skb;
struct caif_payload_info *info;
@@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align) {
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1) {
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
*dst = (u8)(spad - 1);
dst += spad;
}
@@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
dst += epad;
dev_kfree_skb(skb);
@@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi)
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align)
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1)
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
/*
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
} else {
/* Put back packet. */
skb_queue_head(&cfspi->qhead, skb);
+ break;
}
} while (pkts <= CAIF_MAX_SPI_PKTS);
@@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
{
struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+ /*
+ * The slave device is the master on the link. Interrupts before the
+ * slave has transmitted are considered spurious.
+ */
+ if (cfspi->slave && !cfspi->slave_talked) {
+ printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+ return;
+ }
+
if (!in_interrupt())
spin_lock(&cfspi->lock);
if (assert) {
@@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
spin_unlock(&cfspi->lock);
/* Wake up the xfer thread. */
- wake_up_interruptible(&cfspi->wait);
+ if (assert)
+ wake_up_interruptible(&cfspi->wait);
}
static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute head offset i.e. number of bytes added to
* get the start of the payload aligned.
*/
- if (spi_down_head_align) {
+ if (spi_down_head_align > 1) {
spad = 1 + *src;
src += spad;
}
@@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute tail offset i.e. number of bytes added to
* get the complete CAIF frame aligned.
*/
- epad = (pkt_len + spad) & spi_down_tail_align;
+ epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
src += epad;
} while ((src - buf) < len);
@@ -617,19 +635,28 @@ int cfspi_spi_probe(struct platform_device *pdev)
ndev = alloc_netdev(sizeof(struct cfspi),
"cfspi%d", cfspi_setup);
- if (!dev)
- return -ENODEV;
+ if (!ndev)
+ return -ENOMEM;
cfspi = netdev_priv(ndev);
netif_stop_queue(ndev);
cfspi->ndev = ndev;
cfspi->pdev = pdev;
- /* Set flow info */
+ /* Set flow info. */
cfspi->flow_off_sent = 0;
cfspi->qd_low_mark = LOW_WATER_MARK;
cfspi->qd_high_mark = HIGH_WATER_MARK;
+ /* Set slave info. */
+ if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+ cfspi->slave = true;
+ cfspi->slave_talked = false;
+ } else {
+ cfspi->slave = false;
+ cfspi->slave_talked = false;
+ }
+
/* Assign the SPI device. */
cfspi->dev = dev;
/* Assign the device ifc to this SPI interface. */
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbfea6fe..1b9943a4edab 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi)
#endif
int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
+ */
+int spi_up_head_align = 1 << 1;
+int spi_up_tail_align = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
#ifdef CONFIG_DEBUG_FS
static inline void debugfs_store_prev(struct cfspi *cfspi)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 080574b0fff0..d5a9db60ade9 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -12,6 +12,27 @@ config CAN_VCAN
This driver can also be built as a module. If so, the module
will be called vcan.
+config CAN_SLCAN
+ tristate "Serial / USB serial CAN Adaptors (slcan)"
+ depends on CAN
+ default n
+ ---help---
+ CAN driver for several 'low cost' CAN interfaces that are attached
+ via serial lines or via USB-to-serial adapters using the LAWICEL
+ ASCII protocol. The driver implements the tty line discipline N_SLCAN.
+
+ As only the sending and receiving of CAN frames is implemented, this
+ driver should work with the (serial/USB) CAN hardware from:
+ www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
+
+ Userspace tools to attach the SLCAN line discipline (slcan_attach,
+ slcand) can be found in the can-utils at the SocketCAN SVN, see
+ http://developer.berlios.de/projects/socketcan for details.
+
+ The slcan driver supports up to 10 CAN netdevices by default which
+ can be changed by the 'maxdev=xx' module option. This driver can
+ also be built as a module. If so, the module will be called slcan.
+
config CAN_DEV
tristate "Platform CAN drivers with Netlink support"
depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 90af15a4f106..07ca159ba3f9 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_CAN_VCAN) += vcan.o
+obj-$(CONFIG_CAN_SLCAN) += slcan.o
obj-$(CONFIG_CAN_DEV) += can-dev.o
can-dev-y := dev.o
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 6e533dcc36c0..b9a6d7a5a739 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1114,11 +1114,6 @@ static bool ican3_txok(struct ican3_dev *mod)
/*
* Receive one CAN frame from the hardware
*
- * This works like the core of a NAPI function, but is intended to be called
- * from workqueue context instead. This driver already needs a workqueue to
- * process control messages, so we use the workqueue instead of using NAPI.
- * This was done to simplify locking.
- *
* CONTEXT: must be called from user context
*/
static int ican3_recv_skb(struct ican3_dev *mod)
@@ -1251,7 +1246,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id)
* Reset an ICAN module to its power-on state
*
* CONTEXT: no network device registered
- * LOCKING: work function disabled
*/
static int ican3_reset_module(struct ican3_dev *mod)
{
@@ -1262,9 +1256,6 @@ static int ican3_reset_module(struct ican3_dev *mod)
/* disable interrupts so no more work is scheduled */
iowrite8(1 << mod->num, &mod->ctrl->int_disable);
- /* flush any pending work */
- flush_scheduled_work();
-
/* the first unallocated page in the DPM is #9 */
mod->free_page = DPM_FREE_START;
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 64c378cd0c34..74cd880c7e06 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -182,7 +182,7 @@ static int mscan_restart(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
- "bus-off state expected");
+ "bus-off state expected\n");
out_8(&regs->canmisc, MSCAN_BOHOLD);
/* Re-enable receive interrupts. */
out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 672718261c68..8d45fdd0180d 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,106 +32,115 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#define MAX_MSG_OBJ 32
-#define MSG_OBJ_RX 0 /* The receive message object flag. */
-#define MSG_OBJ_TX 1 /* The transmit message object flag. */
-
-#define ENABLE 1 /* The enable flag */
-#define DISABLE 0 /* The disable flag */
-#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */
-#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */
-#define CAN_CTRL_IE_SIE_EIE 0x000e
-#define CAN_CTRL_CCE 0x0040
-#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */
-#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */
-#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */
-#define CAN_CMASK_RX_TX_SET 0x00f3
-#define CAN_CMASK_RX_TX_GET 0x0073
-#define CAN_CMASK_ALL 0xff
-#define CAN_CMASK_RDWR 0x80
-#define CAN_CMASK_ARB 0x20
-#define CAN_CMASK_CTRL 0x10
-#define CAN_CMASK_MASK 0x40
-#define CAN_CMASK_NEWDAT 0x04
-#define CAN_CMASK_CLRINTPND 0x08
-
-#define CAN_IF_MCONT_NEWDAT 0x8000
-#define CAN_IF_MCONT_INTPND 0x2000
-#define CAN_IF_MCONT_UMASK 0x1000
-#define CAN_IF_MCONT_TXIE 0x0800
-#define CAN_IF_MCONT_RXIE 0x0400
-#define CAN_IF_MCONT_RMTEN 0x0200
-#define CAN_IF_MCONT_TXRQXT 0x0100
-#define CAN_IF_MCONT_EOB 0x0080
-#define CAN_IF_MCONT_DLC 0x000f
-#define CAN_IF_MCONT_MSGLOST 0x4000
-#define CAN_MASK2_MDIR_MXTD 0xc000
-#define CAN_ID2_DIR 0x2000
-#define CAN_ID_MSGVAL 0x8000
-
-#define CAN_STATUS_INT 0x8000
-#define CAN_IF_CREQ_BUSY 0x8000
-#define CAN_ID2_XTD 0x4000
-
-#define CAN_REC 0x00007f00
-#define CAN_TEC 0x000000ff
-
-#define PCH_RX_OK 0x00000010
-#define PCH_TX_OK 0x00000008
-#define PCH_BUS_OFF 0x00000080
-#define PCH_EWARN 0x00000040
-#define PCH_EPASSIV 0x00000020
-#define PCH_LEC0 0x00000001
-#define PCH_LEC1 0x00000002
-#define PCH_LEC2 0x00000004
-#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
-#define PCH_STUF_ERR PCH_LEC0
-#define PCH_FORM_ERR PCH_LEC1
-#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
-#define PCH_BIT1_ERR PCH_LEC2
-#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
-#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
+#define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */
+#define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */
+#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))
+#define PCH_CTRL_CCE BIT(6)
+#define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */
+#define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */
+#define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */
+
+#define PCH_CMASK_RX_TX_SET 0x00f3
+#define PCH_CMASK_RX_TX_GET 0x0073
+#define PCH_CMASK_ALL 0xff
+#define PCH_CMASK_NEWDAT BIT(2)
+#define PCH_CMASK_CLRINTPND BIT(3)
+#define PCH_CMASK_CTRL BIT(4)
+#define PCH_CMASK_ARB BIT(5)
+#define PCH_CMASK_MASK BIT(6)
+#define PCH_CMASK_RDWR BIT(7)
+#define PCH_IF_MCONT_NEWDAT BIT(15)
+#define PCH_IF_MCONT_MSGLOST BIT(14)
+#define PCH_IF_MCONT_INTPND BIT(13)
+#define PCH_IF_MCONT_UMASK BIT(12)
+#define PCH_IF_MCONT_TXIE BIT(11)
+#define PCH_IF_MCONT_RXIE BIT(10)
+#define PCH_IF_MCONT_RMTEN BIT(9)
+#define PCH_IF_MCONT_TXRQXT BIT(8)
+#define PCH_IF_MCONT_EOB BIT(7)
+#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
+#define PCH_ID2_DIR BIT(13)
+#define PCH_ID2_XTD BIT(14)
+#define PCH_ID_MSGVAL BIT(15)
+#define PCH_IF_CREQ_BUSY BIT(15)
+
+#define PCH_STATUS_INT 0x8000
+#define PCH_REC 0x00007f00
+#define PCH_TEC 0x000000ff
+
+#define PCH_TX_OK BIT(3)
+#define PCH_RX_OK BIT(4)
+#define PCH_EPASSIV BIT(5)
+#define PCH_EWARN BIT(6)
+#define PCH_BUS_OFF BIT(7)
/* bit position of certain controller bits. */
-#define BIT_BITT_BRP 0
-#define BIT_BITT_SJW 6
-#define BIT_BITT_TSEG1 8
-#define BIT_BITT_TSEG2 12
-#define BIT_IF1_MCONT_RXIE 10
-#define BIT_IF2_MCONT_TXIE 11
-#define BIT_BRPE_BRPE 6
-#define BIT_ES_TXERRCNT 0
-#define BIT_ES_RXERRCNT 8
-#define MSK_BITT_BRP 0x3f
-#define MSK_BITT_SJW 0xc0
-#define MSK_BITT_TSEG1 0xf00
-#define MSK_BITT_TSEG2 0x7000
-#define MSK_BRPE_BRPE 0x3c0
-#define MSK_BRPE_GET 0x0f
-#define MSK_CTRL_IE_SIE_EIE 0x07
-#define MSK_MCONT_TXIE 0x08
-#define MSK_MCONT_RXIE 0x10
-#define PCH_CAN_NO_TX_BUFF 1
-#define COUNTER_LIMIT 10
+#define PCH_BIT_BRP_SHIFT 0
+#define PCH_BIT_SJW_SHIFT 6
+#define PCH_BIT_TSEG1_SHIFT 8
+#define PCH_BIT_TSEG2_SHIFT 12
+#define PCH_BIT_BRPE_BRPE_SHIFT 6
+
+#define PCH_MSK_BITT_BRP 0x3f
+#define PCH_MSK_BRPE_BRPE 0x3c0
+#define PCH_MSK_CTRL_IE_SIE_EIE 0x07
+#define PCH_COUNTER_LIMIT 10
#define PCH_CAN_CLK 50000000 /* 50MHz */
-/* Define the number of message object.
+/*
+ * Define the number of message objects.
* PCH CAN communications are done via Message RAM.
- * The Message RAM consists of 32 message objects. */
-#define PCH_RX_OBJ_NUM 26 /* 1~ PCH_RX_OBJ_NUM is Rx*/
-#define PCH_TX_OBJ_NUM 6 /* PCH_RX_OBJ_NUM is RX ~ Tx*/
-#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
+ * The Message RAM consists of 32 message objects.
+ */
+#define PCH_RX_OBJ_NUM 26
+#define PCH_TX_OBJ_NUM 6
+#define PCH_RX_OBJ_START 1
+#define PCH_RX_OBJ_END PCH_RX_OBJ_NUM
+#define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1)
+#define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
#define PCH_FIFO_THRESH 16
+/* TxRqst2 shows the status of MsgObjNo. 17~32 */
+#define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) <<\
+ (PCH_RX_OBJ_END - 16))
+
+enum pch_ifreg {
+ PCH_RX_IFREG,
+ PCH_TX_IFREG,
+};
+
+enum pch_can_err {
+ PCH_STUF_ERR = 1,
+ PCH_FORM_ERR,
+ PCH_ACK_ERR,
+ PCH_BIT1_ERR,
+ PCH_BIT0_ERR,
+ PCH_CRC_ERR,
+ PCH_LEC_ALL,
+};
+
enum pch_can_mode {
PCH_CAN_ENABLE,
PCH_CAN_DISABLE,
PCH_CAN_ALL,
PCH_CAN_NONE,
PCH_CAN_STOP,
- PCH_CAN_RUN
+ PCH_CAN_RUN,
+};
+
+struct pch_can_if_regs {
+ u32 creq;
+ u32 cmask;
+ u32 mask1;
+ u32 mask2;
+ u32 id1;
+ u32 id2;
+ u32 mcont;
+ u32 data[4];
+ u32 rsv[13];
};
struct pch_can_regs {
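The BIT() rewrite above is purely mechanical. A standalone check that the new forms equal the magic numbers they replace (assuming BIT(n) is 1UL << n, as in the kernel headers):

	#define BIT(n) (1UL << (n))

	_Static_assert((BIT(3) | BIT(2) | BIT(1)) == 0x000e, "CTRL_IE_SIE_EIE");
	_Static_assert((BIT(0) | BIT(1) | BIT(2) | BIT(3)) == 0x000f, "IF_MCONT_DLC");
	_Static_assert((BIT(14) | BIT(15)) == 0xc000, "MASK2_MDIR_MXTD");
	/* TxRqst2 covers objects 17..32; tx objects 27..32 map to bits 10..15 */
	_Static_assert((((1UL << 6) - 1) << (26 - 16)) == 0xfc00, "TREQ2_TX_MASK");

	int main(void) { return 0; }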
@@ -142,57 +151,36 @@ struct pch_can_regs {
u32 intr;
u32 opt;
u32 brpe;
- u32 reserve1;
- u32 if1_creq;
- u32 if1_cmask;
- u32 if1_mask1;
- u32 if1_mask2;
- u32 if1_id1;
- u32 if1_id2;
- u32 if1_mcont;
- u32 if1_dataa1;
- u32 if1_dataa2;
- u32 if1_datab1;
- u32 if1_datab2;
- u32 reserve2;
- u32 reserve3[12];
- u32 if2_creq;
- u32 if2_cmask;
- u32 if2_mask1;
- u32 if2_mask2;
- u32 if2_id1;
- u32 if2_id2;
- u32 if2_mcont;
- u32 if2_dataa1;
- u32 if2_dataa2;
- u32 if2_datab1;
- u32 if2_datab2;
- u32 reserve4;
- u32 reserve5[20];
+ u32 reserve;
+ struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */
+ u32 reserve1[8];
u32 treq1;
u32 treq2;
- u32 reserve6[2];
- u32 reserve7[56];
- u32 reserve8[3];
+ u32 reserve2[6];
+ u32 data1;
+ u32 data2;
+ u32 reserve3[6];
+ u32 canipend1;
+ u32 canipend2;
+ u32 reserve4[6];
+ u32 canmval1;
+ u32 canmval2;
+ u32 reserve5[37];
u32 srst;
};
struct pch_can_priv {
struct can_priv can;
- unsigned int can_num;
struct pci_dev *dev;
- unsigned int tx_enable[MAX_MSG_OBJ];
- unsigned int rx_enable[MAX_MSG_OBJ];
- unsigned int rx_link[MAX_MSG_OBJ];
- unsigned int int_enables;
- unsigned int int_stat;
+ u32 tx_enable[PCH_TX_OBJ_END];
+ u32 rx_enable[PCH_TX_OBJ_END];
+ u32 rx_link[PCH_TX_OBJ_END];
+ u32 int_enables;
struct net_device *ndev;
- spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
- unsigned int msg_obj[MAX_MSG_OBJ];
struct pch_can_regs __iomem *regs;
struct napi_struct napi;
- unsigned int tx_obj; /* Point next Tx Obj index */
- unsigned int use_msi;
+ int tx_obj; /* Point next Tx Obj index */
+ int use_msi;
};
static struct can_bittiming_const pch_can_bittiming_const = {
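Folding the duplicated if1_/if2_ register fields into a two-element array relies on both interface blocks having identical layout. A quick userspace size check under that assumption (u32 stands in for the kernel type):

	#include <assert.h>
	#include <stdint.h>

	typedef uint32_t u32;

	struct pch_can_if_regs {
		u32 creq;
		u32 cmask;
		u32 mask1;
		u32 mask2;
		u32 id1;
		u32 id2;
		u32 mcont;
		u32 data[4];
		u32 rsv[13];
	};

	int main(void)
	{
		/* 7 + 4 + 13 = 24 words per block, so ifregs[1] starts 0x60 bytes
		 * after ifregs[0], exactly where the old if2_creq field sat. */
		assert(sizeof(struct pch_can_if_regs) == 24 * sizeof(u32));
		return 0;
	}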
@@ -228,15 +216,15 @@ static void pch_can_set_run_mode(struct pch_can_priv *priv,
{
switch (mode) {
case PCH_CAN_RUN:
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
break;
case PCH_CAN_STOP:
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
break;
default:
- dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__);
+ netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__);
break;
}
}
@@ -246,357 +234,184 @@ static void pch_can_set_optmode(struct pch_can_priv *priv)
u32 reg_val = ioread32(&priv->regs->opt);
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- reg_val |= CAN_OPT_SILENT;
+ reg_val |= PCH_OPT_SILENT;
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
- reg_val |= CAN_OPT_LBACK;
+ reg_val |= PCH_OPT_LBACK;
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
iowrite32(reg_val, &priv->regs->opt);
}
-static void pch_can_set_int_custom(struct pch_can_priv *priv)
+static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num)
{
- /* Clearing the IE, SIE and EIE bits of Can control register. */
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
-
- /* Appropriately setting them. */
- pch_can_bit_set(&priv->regs->cont,
- ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
-}
+ int counter = PCH_COUNTER_LIMIT;
+ u32 ifx_creq;
-/* This function retrieves interrupt enabled for the CAN device. */
-static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
-{
- /* Obtaining the status of IE, SIE and EIE interrupt bits. */
- *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+ iowrite32(num, creq_addr);
+ while (counter) {
+ ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
+ if (!ifx_creq)
+ break;
+ counter--;
+ udelay(1);
+ }
+ if (!counter)
+ pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
}
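pch_can_rw_msg_obj() above turns the old check_if_busy() helper into a bounded poll: write the object number, then spin up to PCH_COUNTER_LIMIT microseconds for the BUSY flag to clear. A userspace sketch of the same bounded-poll pattern (the read_busy() stand-in for ioread32() of CREQ is hypothetical):

	#include <stdio.h>

	#define PCH_COUNTER_LIMIT 10

	/* Hypothetical stand-in for ioread32(creq_addr) & PCH_IF_CREQ_BUSY. */
	static int read_busy(void)
	{
		static int remaining = 3;   /* pretend hardware clears after 3 polls */
		return remaining-- > 0;
	}

	int main(void)
	{
		int counter = PCH_COUNTER_LIMIT;

		while (counter) {
			if (!read_busy())
				break;
			counter--;
			/* the driver udelay(1)s here before retrying */
		}
		if (!counter)
			fprintf(stderr, "BUSY flag never cleared\n");
		return 0;
	}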
static void pch_can_set_int_enables(struct pch_can_priv *priv,
enum pch_can_mode interrupt_no)
{
switch (interrupt_no) {
- case PCH_CAN_ENABLE:
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
- break;
-
case PCH_CAN_DISABLE:
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
break;
case PCH_CAN_ALL:
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
break;
case PCH_CAN_NONE:
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
break;
default:
- dev_err(&priv->ndev->dev, "Invalid interrupt number.\n");
+ netdev_err(priv->ndev, "Invalid interrupt number.\n");
break;
}
}
-static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
-{
- u32 counter = COUNTER_LIMIT;
- u32 ifx_creq;
-
- iowrite32(num, creq_addr);
- while (counter) {
- ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
- if (!ifx_creq)
- break;
- counter--;
- udelay(1);
- }
- if (!counter)
- pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
-}
-
-static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
- u32 set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- /* Reading the receive buffer data from RAM to Interface1 registers */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
-
- /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if1_cmask);
-
- if (set == ENABLE) {
- /* Setting the MsgVal and RxIE bits */
- pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
- pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
-
- } else if (set == DISABLE) {
- /* Resetting the MsgVal and RxIE bits */
- pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
- pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
- }
-
- pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
-
-static void pch_can_rx_enable_all(struct pch_can_priv *priv)
+static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
+ int set, enum pch_ifreg dir)
{
- int i;
+ u32 ie;
- /* Traversing to obtain the object configured as receivers. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX)
- pch_can_set_rx_enable(priv, i + 1, ENABLE);
- }
-}
+ if (dir)
+ ie = PCH_IF_MCONT_TXIE;
+ else
+ ie = PCH_IF_MCONT_RXIE;
-static void pch_can_rx_disable_all(struct pch_can_priv *priv)
-{
- int i;
+ /* Reading the Msg buffer from Message RAM to IF1/2 registers. */
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
- /* Traversing to obtain the object configured as receivers. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX)
- pch_can_set_rx_enable(priv, i + 1, DISABLE);
- }
-}
+ /* Setting the IF1/2MASK1 register to access MsgVal and RxIE bits */
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
+ &priv->regs->ifregs[dir].cmask);
-static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
- u32 set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- /* Reading the Msg buffer from Message RAM to Interface2 registers. */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
-
- /* Setting the IF2CMASK register for accessing the
- MsgVal and TxIE bits */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if2_cmask);
-
- if (set == ENABLE) {
- /* Setting the MsgVal and TxIE bits */
- pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
- pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
- } else if (set == DISABLE) {
- /* Resetting the MsgVal and TxIE bits. */
- pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
- pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ if (set) {
+ /* Setting the MsgVal and RxIE/TxIE bits */
+ pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
+ pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
+ } else {
+ /* Clearing the MsgVal and RxIE/TxIE bits */
+ pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
+ pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
}
- pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
}
-static void pch_can_tx_enable_all(struct pch_can_priv *priv)
+static void pch_can_set_rx_all(struct pch_can_priv *priv, int set)
{
int i;
- /* Traversing to obtain the object configured as transmit object. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX)
- pch_can_set_tx_enable(priv, i + 1, ENABLE);
- }
+ /* Traversing to obtain the object configured as receivers. */
+ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
+ pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
}
-static void pch_can_tx_disable_all(struct pch_can_priv *priv)
+static void pch_can_set_tx_all(struct pch_can_priv *priv, int set)
{
int i;
/* Traversing to obtain the object configured as transmit object. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX)
- pch_can_set_tx_enable(priv, i + 1, DISABLE);
- }
-}
-
-static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
- u32 *enable)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
-
- if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
- ((ioread32(&priv->regs->if1_mcont)) &
- CAN_IF_MCONT_RXIE))
- *enable = ENABLE;
- else
- *enable = DISABLE;
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
-
-static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
- u32 *enable)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
-
- if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
- ((ioread32(&priv->regs->if2_mcont)) &
- CAN_IF_MCONT_TXIE)) {
- *enable = ENABLE;
- } else {
- *enable = DISABLE;
- }
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
+ pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
}
-static int pch_can_int_pending(struct pch_can_priv *priv)
+static u32 pch_can_int_pending(struct pch_can_priv *priv)
{
return ioread32(&priv->regs->intr) & 0xffff;
}
-static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num, u32 set)
+static void pch_can_clear_if_buffers(struct pch_can_priv *priv)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
- if (set == ENABLE)
- pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
- else
- pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
-
- pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
-
-static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num, u32 *link)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
-
- if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
- *link = DISABLE;
- else
- *link = ENABLE;
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
-
-static void pch_can_clear_buffers(struct pch_can_priv *priv)
-{
- int i;
-
- for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
- iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
- iowrite32(0xffff, &priv->regs->if1_mask1);
- iowrite32(0xffff, &priv->regs->if1_mask2);
- iowrite32(0x0, &priv->regs->if1_id1);
- iowrite32(0x0, &priv->regs->if1_id2);
- iowrite32(0x0, &priv->regs->if1_mcont);
- iowrite32(0x0, &priv->regs->if1_dataa1);
- iowrite32(0x0, &priv->regs->if1_dataa2);
- iowrite32(0x0, &priv->regs->if1_datab1);
- iowrite32(0x0, &priv->regs->if1_datab2);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
- }
-
- for (i = i; i < PCH_OBJ_NUM; i++) {
- iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
- iowrite32(0xffff, &priv->regs->if2_mask1);
- iowrite32(0xffff, &priv->regs->if2_mask2);
- iowrite32(0x0, &priv->regs->if2_id1);
- iowrite32(0x0, &priv->regs->if2_id2);
- iowrite32(0x0, &priv->regs->if2_mcont);
- iowrite32(0x0, &priv->regs->if2_dataa1);
- iowrite32(0x0, &priv->regs->if2_dataa2);
- iowrite32(0x0, &priv->regs->if2_datab1);
- iowrite32(0x0, &priv->regs->if2_datab2);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ int i; /* Msg Obj ID (1~32) */
+
+ for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
+ iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
+ iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
+ iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
+ iowrite32(0x0, &priv->regs->ifregs[0].id1);
+ iowrite32(0x0, &priv->regs->ifregs[0].id2);
+ iowrite32(0x0, &priv->regs->ifregs[0].mcont);
+ iowrite32(0x0, &priv->regs->ifregs[0].data[0]);
+ iowrite32(0x0, &priv->regs->ifregs[0].data[1]);
+ iowrite32(0x0, &priv->regs->ifregs[0].data[2]);
+ iowrite32(0x0, &priv->regs->ifregs[0].data[3]);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+ PCH_CMASK_ARB | PCH_CMASK_CTRL,
+ &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
}
}
static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
{
int i;
- unsigned long flags;
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX) {
- iowrite32(CAN_CMASK_RX_TX_GET,
- &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+ iowrite32(0x0, &priv->regs->ifregs[0].id1);
+ iowrite32(0x0, &priv->regs->ifregs[0].id2);
- iowrite32(0x0, &priv->regs->if1_id1);
- iowrite32(0x0, &priv->regs->if1_id2);
+ pch_can_bit_set(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_UMASK);
- pch_can_bit_set(&priv->regs->if1_mcont,
- CAN_IF_MCONT_UMASK);
+ /* In FIFO mode, EoB of the last Rx Obj must be 1 */
+ if (i == PCH_RX_OBJ_END)
+ pch_can_bit_set(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_EOB);
+ else
+ pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_EOB);
- /* Set FIFO mode set to 0 except last Rx Obj*/
- pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_EOB);
- /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
- if (i == (PCH_RX_OBJ_NUM - 1))
- pch_can_bit_set(&priv->regs->if1_mcont,
- CAN_IF_MCONT_EOB);
+ iowrite32(0, &priv->regs->ifregs[0].mask1);
+ pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
+ 0x1fff | PCH_MASK2_MDIR_MXTD);
- iowrite32(0, &priv->regs->if1_mask1);
- pch_can_bit_clear(&priv->regs->if1_mask2,
- 0x1fff | CAN_MASK2_MDIR_MXTD);
+ /* Setting CMASK for writing */
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
+ PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask);
- /* Setting CMASK for writing */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if1_cmask);
-
- pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
- } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
- iowrite32(CAN_CMASK_RX_TX_GET,
- &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
+ }
- /* Resetting DIR bit for reception */
- iowrite32(0x0, &priv->regs->if2_id1);
- iowrite32(0x0, &priv->regs->if2_id2);
- pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
+ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
- /* Setting EOB bit for transmitter */
- iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
+ /* Resetting DIR bit for reception */
+ iowrite32(0x0, &priv->regs->ifregs[1].id1);
+ iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);
- pch_can_bit_set(&priv->regs->if2_mcont,
- CAN_IF_MCONT_UMASK);
+ /* Setting EOB bit for transmitter */
+ iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
+ &priv->regs->ifregs[1].mcont);
- iowrite32(0, &priv->regs->if2_mask1);
- pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
+ iowrite32(0, &priv->regs->ifregs[1].mask1);
+ pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);
- /* Setting CMASK for writing */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if2_cmask);
+ /* Setting CMASK for writing */
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
+ PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
- }
+ pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
}
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
}
static void pch_can_init(struct pch_can_priv *priv)
@@ -605,7 +420,7 @@ static void pch_can_init(struct pch_can_priv *priv)
pch_can_set_run_mode(priv, PCH_CAN_STOP);
/* Clearing all the message object buffers. */
- pch_can_clear_buffers(priv);
+ pch_can_clear_if_buffers(priv);
/* Configuring the respective message object as either rx/tx object. */
pch_can_config_rx_tx_buffers(priv);
@@ -623,57 +438,47 @@ static void pch_can_release(struct pch_can_priv *priv)
pch_can_set_int_enables(priv, PCH_CAN_NONE);
/* Disabling all the receive object. */
- pch_can_rx_disable_all(priv);
+ pch_can_set_rx_all(priv, 0);
/* Disabling all the transmit object. */
- pch_can_tx_disable_all(priv);
+ pch_can_set_tx_all(priv, 0);
}
/* This function clears interrupt(s) from the CAN device. */
static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
{
- if (mask == CAN_STATUS_INT) {
- ioread32(&priv->regs->stat);
- return;
- }
-
/* Clear interrupt for transmit object */
- if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
- /* Setting CMASK for clearing interrupts for
- frame transmission. */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
- &priv->regs->if2_cmask);
-
- /* Resetting the ID registers. */
- pch_can_bit_set(&priv->regs->if2_id2,
- CAN_ID2_DIR | (0x7ff << 2));
- iowrite32(0x0, &priv->regs->if2_id1);
-
- /* Claring NewDat, TxRqst & IntPnd */
- pch_can_bit_clear(&priv->regs->if2_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
- CAN_IF_MCONT_TXRQXT);
- pch_can_check_if_busy(&priv->regs->if2_creq, mask);
- } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+ if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
/* Setting CMASK for clearing the reception interrupts. */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
- &priv->regs->if1_cmask);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
+ &priv->regs->ifregs[0].cmask);
/* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+ pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
/* Clearing NewDat & IntPnd */
- pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+ pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
- pch_can_check_if_busy(&priv->regs->if1_creq, mask);
- }
-}
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
+ } else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
+ /*
+ * Setting CMASK for clearing interrupts for frame transmission.
+ */
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
+ &priv->regs->ifregs[1].cmask);
-static int pch_can_get_buffer_status(struct pch_can_priv *priv)
-{
- return (ioread32(&priv->regs->treq1) & 0xffff) |
- ((ioread32(&priv->regs->treq2) & 0xffff) << 16);
+ /* Resetting the ID registers. */
+ pch_can_bit_set(&priv->regs->ifregs[1].id2,
+ PCH_ID2_DIR | (0x7ff << 2));
+ iowrite32(0x0, &priv->regs->ifregs[1].id1);
+
+ /* Clearing NewDat, TxRqst & IntPnd */
+ pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
+ PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
+ PCH_IF_MCONT_TXRQXT);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
+ }
}
static void pch_can_reset(struct pch_can_priv *priv)
@@ -688,7 +493,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
struct sk_buff *skb;
struct pch_can_priv *priv = netdev_priv(ndev);
struct can_frame *cf;
- u32 errc;
+ u32 errc, lec;
struct net_device_stats *stats = &(priv->ndev->stats);
enum can_state state = priv->can.state;
@@ -697,26 +502,24 @@ static void pch_can_error(struct net_device *ndev, u32 status)
return;
if (status & PCH_BUS_OFF) {
- pch_can_tx_disable_all(priv);
- pch_can_rx_disable_all(priv);
+ pch_can_set_tx_all(priv, 0);
+ pch_can_set_rx_all(priv, 0);
state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
can_bus_off(ndev);
- pch_can_set_run_mode(priv, PCH_CAN_RUN);
- dev_err(&ndev->dev, "%s -> Bus Off occurres.\n", __func__);
}
+ errc = ioread32(&priv->regs->errc);
/* Warning interrupt. */
if (status & PCH_EWARN) {
state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
cf->can_id |= CAN_ERR_CRTL;
- errc = ioread32(&priv->regs->errc);
- if (((errc & CAN_REC) >> 8) > 96)
+ if (((errc & PCH_REC) >> 8) > 96)
cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
- if ((errc & CAN_TEC) > 96)
+ if ((errc & PCH_TEC) > 96)
cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
- dev_warn(&ndev->dev,
+ netdev_dbg(ndev,
"%s -> Error Counter is more than 96.\n", __func__);
}
/* Error passive interrupt. */
@@ -724,46 +527,52 @@ static void pch_can_error(struct net_device *ndev, u32 status)
priv->can.can_stats.error_passive++;
state = CAN_STATE_ERROR_PASSIVE;
cf->can_id |= CAN_ERR_CRTL;
- errc = ioread32(&priv->regs->errc);
- if (((errc & CAN_REC) >> 8) > 127)
+ if (((errc & PCH_REC) >> 8) > 127)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
- if ((errc & CAN_TEC) > 127)
+ if ((errc & PCH_TEC) > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
- dev_err(&ndev->dev,
+ netdev_dbg(ndev,
"%s -> CAN controller is ERROR PASSIVE .\n", __func__);
}
- if (status & PCH_LEC_ALL) {
+ lec = status & PCH_LEC_ALL;
+ switch (lec) {
+ case PCH_STUF_ERR:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
priv->can.can_stats.bus_error++;
stats->rx_errors++;
- switch (status & PCH_LEC_ALL) {
- case PCH_STUF_ERR:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- case PCH_FORM_ERR:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case PCH_ACK_ERR:
- cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
- CAN_ERR_PROT_LOC_ACK_DEL;
- break;
- case PCH_BIT1_ERR:
- case PCH_BIT0_ERR:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case PCH_CRC_ERR:
- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
- CAN_ERR_PROT_LOC_CRC_DEL;
- break;
- default:
- iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
- break;
- }
-
+ break;
+ case PCH_FORM_ERR:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ break;
+ case PCH_ACK_ERR:
+ cf->can_id |= CAN_ERR_ACK;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ break;
+ case PCH_BIT1_ERR:
+ case PCH_BIT0_ERR:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ break;
+ case PCH_CRC_ERR:
+ cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ break;
+ case PCH_LEC_ALL: /* Written by CPU. No error status */
+ break;
}
+ cf->data[6] = errc & PCH_TEC;
+ cf->data[7] = (errc & PCH_REC) >> 8;
+
priv->can.state = state;
- netif_rx(skb);
+ netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
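The new code latches errc once and reports both counters in the error frame. A worked example of the PCH_REC/PCH_TEC extraction (the sample register value is assumed):

	#include <assert.h>

	#define PCH_REC 0x00007f00
	#define PCH_TEC 0x000000ff

	int main(void)
	{
		unsigned int errc = 0x2345;                 /* sample register value */

		assert((errc & PCH_TEC) == 0x45);           /* data[6]: tx error count */
		assert(((errc & PCH_REC) >> 8) == 0x23);    /* data[7]: rx error count */
		return 0;
	}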
@@ -774,204 +583,202 @@ static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct pch_can_priv *priv = netdev_priv(ndev);
- pch_can_set_int_enables(priv, PCH_CAN_NONE);
+ if (!pch_can_int_pending(priv))
+ return IRQ_NONE;
+ pch_can_set_int_enables(priv, PCH_CAN_NONE);
napi_schedule(&priv->napi);
-
return IRQ_HANDLED;
}
-static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
+static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
+{
+ if (obj_id < PCH_FIFO_THRESH) {
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
+ PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);
+
+ /* Clearing the Dir bit. */
+ pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
+
+ /* Clearing NewDat & IntPnd */
+ pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_INTPND);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
+ } else if (obj_id > PCH_FIFO_THRESH) {
+ pch_can_int_clr(priv, obj_id);
+ } else if (obj_id == PCH_FIFO_THRESH) {
+ int cnt;
+ for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
+ pch_can_int_clr(priv, cnt + 1);
+ }
+}
+
+static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &(priv->ndev->stats);
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
+ pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_MSGLOST);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
+ &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ netif_receive_skb(skb);
+}
+
+static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
{
u32 reg;
canid_t id;
- u32 ide;
- u32 rtr;
- int i, j, k;
int rcv_pkts = 0;
struct sk_buff *skb;
struct can_frame *cf;
struct pch_can_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &(priv->ndev->stats);
+ int i;
+ u32 id2;
+ u16 data_reg;
+
+ do {
+ /* Reading the message object from the Message RAM */
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);
- /* Reading the messsage object from the Message RAM */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
+ /* Reading the MCONT register. */
+ reg = ioread32(&priv->regs->ifregs[0].mcont);
- /* Reading the MCONT register. */
- reg = ioread32(&priv->regs->if1_mcont);
- reg &= 0xffff;
+ if (reg & PCH_IF_MCONT_EOB)
+ break;
- for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
/* If MsgLost bit set. */
- if (reg & CAN_IF_MCONT_MSGLOST) {
- dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
- pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_MSGLOST);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
- &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, k);
-
- skb = alloc_can_err_skb(ndev, &cf);
- if (!skb)
- return -ENOMEM;
-
- priv->can.can_stats.error_passive++;
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
-
- netif_receive_skb(skb);
+ if (reg & PCH_IF_MCONT_MSGLOST) {
+ pch_can_rx_msg_lost(ndev, obj_num);
rcv_pkts++;
- goto RX_NEXT;
+ quota--;
+ obj_num++;
+ continue;
+ } else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
+ obj_num++;
+ continue;
}
- if (!(reg & CAN_IF_MCONT_NEWDAT))
- goto RX_NEXT;
skb = alloc_can_skb(priv->ndev, &cf);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ netdev_err(ndev, "alloc_can_skb Failed\n");
+ return rcv_pkts;
+ }
/* Get Received data */
- ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
- if (ide) {
- id = (ioread32(&priv->regs->if1_id1) & 0xffff);
- id |= (((ioread32(&priv->regs->if1_id2)) &
- 0x1fff) << 16);
- cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ id2 = ioread32(&priv->regs->ifregs[0].id2);
+ if (id2 & PCH_ID2_XTD) {
+ id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
+ id |= (((id2) & 0x1fff) << 16);
+ cf->can_id = id | CAN_EFF_FLAG;
} else {
- id = (((ioread32(&priv->regs->if1_id2)) &
- (CAN_SFF_MASK << 2)) >> 2);
- cf->can_id = (id & CAN_SFF_MASK);
+ id = (id2 >> 2) & CAN_SFF_MASK;
+ cf->can_id = id;
}
- rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR);
- if (rtr) {
- cf->can_dlc = 0;
+ if (id2 & PCH_ID2_DIR)
cf->can_id |= CAN_RTR_FLAG;
- } else {
- cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
- 0x0f);
- }
- for (i = 0, j = 0; i < cf->can_dlc; j++) {
- reg = ioread32(&priv->regs->if1_dataa1 + j*4);
- cf->data[i++] = cpu_to_le32(reg & 0xff);
- if (i == cf->can_dlc)
- break;
- cf->data[i++] = cpu_to_le32((reg >> 8) & 0xff);
+ cf->can_dlc = get_can_dlc((ioread32(&priv->regs->
+ ifregs[0].mcont)) & 0xF);
+
+ for (i = 0; i < cf->can_dlc; i += 2) {
+ data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
+ cf->data[i] = data_reg;
+ cf->data[i + 1] = data_reg >> 8;
}
netif_receive_skb(skb);
rcv_pkts++;
stats->rx_packets++;
+ quota--;
stats->rx_bytes += cf->can_dlc;
- if (k < PCH_FIFO_THRESH) {
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
- CAN_CMASK_ARB, &priv->regs->if1_cmask);
-
- /* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
-
- /* Clearing NewDat & IntPnd */
- pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_INTPND);
- pch_can_check_if_busy(&priv->regs->if1_creq, k);
- } else if (k > PCH_FIFO_THRESH) {
- pch_can_int_clr(priv, k);
- } else if (k == PCH_FIFO_THRESH) {
- int cnt;
- for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
- pch_can_int_clr(priv, cnt+1);
- }
-RX_NEXT:
- /* Reading the messsage object from the Message RAM */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
- reg = ioread32(&priv->regs->if1_mcont);
- }
+ pch_fifo_thresh(priv, obj_num);
+ obj_num++;
+ } while (quota > 0);
return rcv_pkts;
}
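The rewritten RX loop reads the payload two bytes at a time through 16-bit accesses. A userspace analog of the unpacking (the register value is assumed):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t data_reg = 0xbbaa;   /* as returned by ioread16() */
		uint8_t data[2];

		data[0] = data_reg & 0xff;    /* 0xaa: first byte on the wire */
		data[1] = data_reg >> 8;      /* 0xbb: second byte */
		assert(data[0] == 0xaa && data[1] == 0xbb);
		return 0;
	}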
-static int pch_can_rx_poll(struct napi_struct *napi, int quota)
+
+static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
{
- struct net_device *ndev = napi->dev;
struct pch_can_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &(priv->ndev->stats);
u32 dlc;
+
+ can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
+ iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
+ &priv->regs->ifregs[1].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
+ dlc = get_can_dlc(ioread32(&priv->regs->ifregs[1].mcont) &
+ PCH_IF_MCONT_DLC);
+ stats->tx_bytes += dlc;
+ stats->tx_packets++;
+ if (int_stat == PCH_TX_OBJ_END)
+ netif_wake_queue(ndev);
+}
+
+static int pch_can_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct pch_can_priv *priv = netdev_priv(ndev);
u32 int_stat;
- int rcv_pkts = 0;
u32 reg_stat;
- unsigned long flags;
+ int quota_save = quota;
int_stat = pch_can_int_pending(priv);
if (!int_stat)
- return 0;
+ goto end;
-INT_STAT:
- if (int_stat == CAN_STATUS_INT) {
+ if (int_stat == PCH_STATUS_INT) {
reg_stat = ioread32(&priv->regs->stat);
- if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
- if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
- pch_can_error(ndev, reg_stat);
- }
- if (reg_stat & PCH_TX_OK) {
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq,
- ioread32(&priv->regs->intr));
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
- pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
+ if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
+ ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) {
+ pch_can_error(ndev, reg_stat);
+ quota--;
}
- if (reg_stat & PCH_RX_OK)
- pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
+ if (reg_stat & (PCH_TX_OK | PCH_RX_OK))
+ pch_can_bit_clear(&priv->regs->stat,
+ reg_stat & (PCH_TX_OK | PCH_RX_OK));
int_stat = pch_can_int_pending(priv);
- if (int_stat == CAN_STATUS_INT)
- goto INT_STAT;
}
-MSG_OBJ:
- if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- rcv_pkts = pch_can_rx_normal(ndev, int_stat);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
- if (rcv_pkts < 0)
- return 0;
- } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
- if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
- /* Handle transmission interrupt */
- can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
- &priv->regs->if2_cmask);
- dlc = ioread32(&priv->regs->if2_mcont) &
- CAN_IF_MCONT_DLC;
- pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
- if (dlc > 8)
- dlc = 8;
- stats->tx_bytes += dlc;
- stats->tx_packets++;
- }
- }
+ if (quota == 0)
+ goto end;
- int_stat = pch_can_int_pending(priv);
- if (int_stat == CAN_STATUS_INT)
- goto INT_STAT;
- else if (int_stat >= 1 && int_stat <= 32)
- goto MSG_OBJ;
+ if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
+ quota -= pch_can_rx_normal(ndev, int_stat, quota);
+ } else if ((int_stat >= PCH_TX_OBJ_START) &&
+ (int_stat <= PCH_TX_OBJ_END)) {
+ /* Handle transmission interrupt */
+ pch_can_tx_complete(ndev, int_stat);
+ }
+end:
napi_complete(napi);
pch_can_set_int_enables(priv, PCH_CAN_ALL);
- return rcv_pkts;
+ return quota_save - quota;
}
static int pch_set_bittiming(struct net_device *ndev)
@@ -980,20 +787,18 @@ static int pch_set_bittiming(struct net_device *ndev)
const struct can_bittiming *bt = &priv->can.bittiming;
u32 canbit;
u32 bepe;
- u32 brp;
/* Setting the CCE bit for accessing the Can Timing register. */
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
-
- brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
- canbit = brp & MSK_BITT_BRP;
- canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
- canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
- canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
- bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
+
+ canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP;
+ canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT;
+ canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT;
+ canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT;
+ bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;
iowrite32(canbit, &priv->regs->bitt);
iowrite32(bepe, &priv->regs->brpe);
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
return 0;
}
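The simplified bit-timing path now uses bt->brp directly instead of recomputing it from tq. A worked example of the register packing (the timing values are assumed, not taken from the patch):

	#include <assert.h>

	int main(void)
	{
		/* Assumed example timing: brp=10, sjw=1, prop_seg+phase_seg1=13,
		 * phase_seg2=2. */
		unsigned int canbit;

		canbit  = (10 - 1) & 0x3f;     /* PCH_MSK_BITT_BRP */
		canbit |= (1 - 1) << 6;        /* PCH_BIT_SJW_SHIFT */
		canbit |= (13 - 1) << 8;       /* PCH_BIT_TSEG1_SHIFT */
		canbit |= (2 - 1) << 12;       /* PCH_BIT_TSEG2_SHIFT */
		assert(canbit == 0x1c09);
		return 0;
	}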
@@ -1008,8 +813,8 @@ static void pch_can_start(struct net_device *ndev)
pch_set_bittiming(ndev);
pch_can_set_optmode(priv);
- pch_can_tx_enable_all(priv);
- pch_can_rx_enable_all(priv);
+ pch_can_set_tx_all(priv, 1);
+ pch_can_set_rx_all(priv, 1);
/* Setting the CAN to run mode. */
pch_can_set_run_mode(priv, PCH_CAN_RUN);
@@ -1041,27 +846,18 @@ static int pch_can_open(struct net_device *ndev)
struct pch_can_priv *priv = netdev_priv(ndev);
int retval;
- retval = pci_enable_msi(priv->dev);
- if (retval) {
- dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
- priv->use_msi = 0;
- } else {
- dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
- priv->use_msi = 1;
- }
-
- /* Regsitering the interrupt. */
+ /* Regstering the interrupt. */
retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
ndev->name, ndev);
if (retval) {
- dev_err(&ndev->dev, "request_irq failed.\n");
+ netdev_err(ndev, "request_irq failed.\n");
goto req_irq_err;
}
/* Open common can device */
retval = open_candev(ndev);
if (retval) {
- dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval);
+ netdev_err(ndev, "open_candev() failed %d\n", retval);
goto err_open_candev;
}
@@ -1075,9 +871,6 @@ static int pch_can_open(struct net_device *ndev)
err_open_candev:
free_irq(priv->dev->irq, ndev);
req_irq_err:
- if (priv->use_msi)
- pci_disable_msi(priv->dev);
-
pch_can_release(priv);
return retval;
@@ -1091,102 +884,65 @@ static int pch_close(struct net_device *ndev)
napi_disable(&priv->napi);
pch_can_release(priv);
free_irq(priv->dev->irq, ndev);
- if (priv->use_msi)
- pci_disable_msi(priv->dev);
close_candev(ndev);
priv->can.state = CAN_STATE_STOPPED;
return 0;
}
-static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
-{
- u32 buffer_status = 0;
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- /* Getting the message object status. */
- buffer_status = (u32) pch_can_get_buffer_status(priv);
-
- return buffer_status & obj_id;
-}
-
-
static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- int i, j;
- unsigned long flags;
struct pch_can_priv *priv = netdev_priv(ndev);
struct can_frame *cf = (struct can_frame *)skb->data;
- int tx_buffer_avail = 0;
+ int tx_obj_no;
+ int i;
+ u32 id2;
if (can_dropped_invalid_skb(ndev, skb))
return NETDEV_TX_OK;
- if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */
- while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
- PCH_RX_OBJ_NUM)))
- udelay(500);
+ tx_obj_no = priv->tx_obj;
+ if (priv->tx_obj == PCH_TX_OBJ_END) {
+ if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK)
+ netif_stop_queue(ndev);
- priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */
- tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
+ priv->tx_obj = PCH_TX_OBJ_START;
} else {
- tx_buffer_avail = priv->tx_obj;
+ priv->tx_obj++;
}
- priv->tx_obj++;
-
- /* Attaining the lock. */
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-
- /* Reading the Msg Obj from the Msg RAM to the Interface register. */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
/* Setting the CMASK register. */
- pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+ pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);
/* If ID extended is set. */
- pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
- pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
if (cf->can_id & CAN_EFF_FLAG) {
- pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
- pch_can_bit_set(&priv->regs->if2_id2,
- ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+ iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1);
+ id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
} else {
- pch_can_bit_set(&priv->regs->if2_id1, 0);
- pch_can_bit_set(&priv->regs->if2_id2,
- (cf->can_id & CAN_SFF_MASK) << 2);
+ iowrite32(0, &priv->regs->ifregs[1].id1);
+ id2 = (cf->can_id & CAN_SFF_MASK) << 2;
}
- /* If remote frame has to be transmitted.. */
- if (cf->can_id & CAN_RTR_FLAG)
- pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+ id2 |= PCH_ID_MSGVAL;
- for (i = 0, j = 0; i < cf->can_dlc; j++) {
- iowrite32(le32_to_cpu(cf->data[i++]),
- (&priv->regs->if2_dataa1) + j*4);
- if (i == cf->can_dlc)
- break;
- iowrite32(le32_to_cpu(cf->data[i++] << 8),
- (&priv->regs->if2_dataa1) + j*4);
- }
-
- can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
+ /* For remote frames, leave the direction (DIR) bit clear. */
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ id2 |= PCH_ID2_DIR;
- /* Updating the size of the data. */
- pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
- pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
+ iowrite32(id2, &priv->regs->ifregs[1].id2);
- /* Clearing IntPend, NewDat & TxRqst */
- pch_can_bit_clear(&priv->regs->if2_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
- CAN_IF_MCONT_TXRQXT);
+ /* Copy the payload into the interface data registers, two bytes per 16-bit write */
+ for (i = 0; i < cf->can_dlc; i += 2) {
+ iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
+ &priv->regs->ifregs[1].data[i / 2]);
+ }
- /* Setting NewDat, TxRqst bits */
- pch_can_bit_set(&priv->regs->if2_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+ can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);
- pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+ /* Set the size of the data. Update if2_mcont */
+ iowrite32(cf->can_dlc | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
+ PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);
return NETDEV_TX_OK;
}
@@ -1203,21 +959,98 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
struct pch_can_priv *priv = netdev_priv(ndev);
unregister_candev(priv->ndev);
- free_candev(priv->ndev);
pci_iounmap(pdev, priv->regs);
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
pch_can_reset(priv);
+ free_candev(priv->ndev);
}
#ifdef CONFIG_PM
+static void pch_can_set_int_custom(struct pch_can_priv *priv)
+{
+ /* Clearing the IE, SIE and EIE bits of the CAN control register. */
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
+
+ /* Appropriately setting them. */
+ pch_can_bit_set(&priv->regs->cont,
+ ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
+}
+
+/* This function retrieves the interrupt enable settings of the CAN device. */
+static u32 pch_can_get_int_enables(struct pch_can_priv *priv)
+{
+ /* Obtaining the status of IE, SIE and EIE interrupt bits. */
+ return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
+}
+
+static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
+ enum pch_ifreg dir)
+{
+ u32 ie, enable;
+
+ if (dir)
+ ie = PCH_IF_MCONT_RXIE;
+ else
+ ie = PCH_IF_MCONT_TXIE;
+
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
+
+ if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
+ ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie))
+ enable = 1;
+ else
+ enable = 0;
+
+ return enable;
+}
+
+static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, int set)
+{
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
+ &priv->regs->ifregs[0].cmask);
+ if (set)
+ pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_EOB);
+ else
+ pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);
+
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
+}
+
+static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
+{
+ u32 link;
+
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
+
+ if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
+ link = 0;
+ else
+ link = 1;
+ return link;
+}
+
+static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+{
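+ /* Combine the two 16-bit transmission request registers into a
+ * single 32-bit bitmap of message objects with pending requests. */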
+ return (ioread32(&priv->regs->treq1) & 0xffff) |
+ (ioread32(&priv->regs->treq2) << 16);
+}
+
static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
{
- int i; /* Counter variable. */
- int retval; /* Return value. */
+ int i;
+ int retval;
u32 buf_stat; /* Variable for reading the transmit buffer status. */
- u32 counter = 0xFFFFFF;
+ int counter = PCH_COUNTER_LIMIT;
struct net_device *dev = pci_get_drvdata(pdev);
struct pch_can_priv *priv = netdev_priv(dev);
@@ -1226,7 +1059,7 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
pch_can_set_run_mode(priv, PCH_CAN_STOP);
/* Indicate that we are about to/in suspend */
- priv->can.state = CAN_STATE_SLEEPING;
+ priv->can.state = CAN_STATE_STOPPED;
/* Waiting for all transmission to complete. */
while (counter) {
@@ -1240,31 +1073,24 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
/* Save interrupt configuration and then disable them */
- pch_can_get_int_enables(priv, &(priv->int_enables));
+ priv->int_enables = pch_can_get_int_enables(priv);
pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
/* Save Tx buffer enable state */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX)
- pch_can_get_tx_enable(priv, i + 1,
- &(priv->tx_enable[i]));
- }
+ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
+ priv->tx_enable[i] = pch_can_get_rxtx_ir(priv, i, PCH_TX_IFREG);
/* Disable all Transmit buffers */
- pch_can_tx_disable_all(priv);
+ pch_can_set_tx_all(priv, 0);
/* Save Rx buffer enable state */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX) {
- pch_can_get_rx_enable(priv, i + 1,
- &(priv->rx_enable[i]));
- pch_can_get_rx_buffer_link(priv, i + 1,
- &(priv->rx_link[i]));
- }
+ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
+ priv->rx_enable[i] = pch_can_get_rxtx_ir(priv, i, PCH_RX_IFREG);
+ priv->rx_link[i] = pch_can_get_rx_buffer_link(priv, i);
}
/* Disable all Receive buffers */
- pch_can_rx_disable_all(priv);
+ pch_can_set_rx_all(priv, 0);
retval = pci_save_state(pdev);
if (retval) {
dev_err(&pdev->dev, "pci_save_state failed.\n");
@@ -1279,8 +1105,8 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
static int pch_can_resume(struct pci_dev *pdev)
{
- int i; /* Counter variable. */
- int retval; /* Return variable. */
+ int i;
+ int retval;
struct net_device *dev = pci_get_drvdata(pdev);
struct pch_can_priv *priv = netdev_priv(dev);
@@ -1312,23 +1138,16 @@ static int pch_can_resume(struct pci_dev *pdev)
pch_can_set_optmode(priv);
/* Enabling the transmit buffer. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX) {
- pch_can_set_tx_enable(priv, i + 1,
- priv->tx_enable[i]);
- }
- }
+ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
+ pch_can_set_rxtx(priv, i, priv->tx_enable[i], PCH_TX_IFREG);
/* Configuring the receive buffer and enabling them. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX) {
- /* Restore buffer link */
- pch_can_set_rx_buffer_link(priv, i + 1,
- priv->rx_link[i]);
-
- /* Restore buffer enables */
- pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
- }
+ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
+ /* Restore buffer link */
+ pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i]);
+
+ /* Restore buffer enables */
+ pch_can_set_rxtx(priv, i, priv->rx_enable[i], PCH_RX_IFREG);
}
/* Enable CAN Interrupts */
@@ -1348,9 +1167,10 @@ static int pch_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
struct pch_can_priv *priv = netdev_priv(dev);
+ u32 errc = ioread32(&priv->regs->errc);
- bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
- bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+ bec->txerr = errc & PCH_TEC;
+ bec->rxerr = (errc & PCH_REC) >> 8;
return 0;
}
@@ -1361,7 +1181,6 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
struct net_device *ndev;
struct pch_can_priv *priv;
int rc;
- int index;
void __iomem *addr;
rc = pci_enable_device(pdev);
@@ -1383,7 +1202,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
goto probe_exit_ipmap;
}
- ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
+ ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
if (!ndev) {
rc = -ENOMEM;
dev_err(&pdev->dev, "Failed alloc_candev\n");
@@ -1399,7 +1218,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
priv->can.do_get_berr_counter = pch_can_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_LOOPBACK;
- priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */
+ priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */
ndev->irq = pdev->irq;
ndev->flags |= IFF_ECHO;
@@ -1407,15 +1226,18 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &pch_can_netdev_ops;
-
priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
- for (index = 0; index < PCH_RX_OBJ_NUM;)
- priv->msg_obj[index++] = MSG_OBJ_RX;
- for (index = index; index < PCH_OBJ_NUM;)
- priv->msg_obj[index++] = MSG_OBJ_TX;
+ netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
- netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
+ rc = pci_enable_msi(priv->dev);
+ if (rc) {
+ netdev_err(ndev, "PCH CAN opened without MSI\n");
+ priv->use_msi = 0;
+ } else {
+ netdev_err(ndev, "PCH CAN opened with MSI\n");
+ priv->use_msi = 1;
+ }
rc = register_candev(ndev);
if (rc) {
@@ -1426,6 +1248,8 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
return 0;
probe_exit_reg_candev:
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
free_candev(ndev);
probe_exit_alloc_candev:
pci_iounmap(pdev, addr);
@@ -1458,6 +1282,6 @@ static void __exit pch_can_pci_exit(void)
}
module_exit(pch_can_pci_exit);
-MODULE_DESCRIPTION("Controller Area Network Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.94");
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 437b5c716a24..231385b8e08f 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -383,7 +383,7 @@ static void plx_pci_reset_marathon(struct pci_dev *pdev)
{
void __iomem *reset_addr;
int i;
- int reset_bar[2] = {3, 5};
+ static const int reset_bar[2] = {3, 5};
plx_pci_reset_common(pdev);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 5bfccfdf3bbb..09c3e9db9316 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -107,17 +107,13 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
res_size = resource_size(&res);
if (!request_mem_region(res.start, res_size, DRV_NAME)) {
- dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n",
- (unsigned long long)res.start,
- (unsigned long long)res.end);
+ dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
return -EBUSY;
}
base = ioremap_nocache(res.start, res_size);
if (!base) {
- dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n",
- (unsigned long long)res.start,
- (unsigned long long)res.end);
+ dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
err = -ENOMEM;
goto exit_release_mem;
}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
new file mode 100644
index 000000000000..b423965a78d1
--- /dev/null
+++ b/drivers/net/can/slcan.c
@@ -0,0 +1,756 @@
+/*
+ * slcan.c - serial line CAN interface driver (using tty line discipline)
+ *
+ * This file is derived from linux/drivers/net/slip.c
+ *
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
+ * at http://www.gnu.org/licenses/gpl.html
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <asm/system.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/can.h>
+
+static __initdata const char banner[] =
+ KERN_INFO "slcan: serial line CAN interface driver\n";
+
+MODULE_ALIAS_LDISC(N_SLCAN);
+MODULE_DESCRIPTION("serial line CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+
+#define SLCAN_MAGIC 0x53CA
+
+static int maxdev = 10; /* MAX number of SLCAN channels;
+ This can be overridden with
+ insmod slcan.ko maxdev=nnn */
+module_param(maxdev, int, 0);
+MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
+
+/* maximum rx buffer len: extended CAN frame with timestamp */
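+/* worst case: 'T' + 8 ID digits + 1 DLC digit + 16 data nibbles +
+ * 4 timestamp digits + '\r'; the sizeof() adds the trailing NUL */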
+#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
+
+struct slcan {
+ int magic;
+
+ /* Various fields. */
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+ spinlock_t lock;
+
+ /* These are pointers to the malloc()ed frame buffers. */
+ unsigned char rbuff[SLC_MTU]; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
+ unsigned char *xhead; /* pointer to next XMIT byte */
+ int xleft; /* bytes left in XMIT queue */
+
+ unsigned long flags; /* Flag values/ mode etc */
+#define SLF_INUSE 0 /* Channel in use */
+#define SLF_ERROR 1 /* Parity, etc. error */
+
+ unsigned char leased;
+ dev_t line;
+ pid_t pid;
+};
+
+static struct net_device **slcan_devs;
+
+ /************************************************************************
+ * SLCAN ENCAPSULATION FORMAT *
+ ************************************************************************/
+
+/*
+ * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
+ * frame format), a data length code (can_dlc) which can be from 0 to 8
+ * and up to <can_dlc> data bytes as payload.
+ * Additionally a CAN frame may become a remote transmission frame if the
+ * RTR-bit is set. This causes another ECU to send a CAN frame with the
+ * given can_id.
+ *
+ * The SLCAN ASCII representation of these different frame types is:
+ * <type> <id> <dlc> <data>*
+ *
+ * Extended frames (29 bit) are defined by capital characters in the type.
+ * RTR frames are defined as 'r' types - normal frames have 't' type:
+ * t => 11 bit data frame
+ * r => 11 bit RTR frame
+ * T => 29 bit data frame
+ * R => 29 bit RTR frame
+ *
+ * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base 16).
+ * The <dlc> is a one byte ASCII number ('0' - '8')
+ * The <data> section has as many ASCII Hex bytes as defined by the <dlc>
+ *
+ * Examples:
+ *
+ * t1230 : can_id 0x123, can_dlc 0, no data
+ * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33
+ * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55
+ * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request
+ *
+ */
+
+ /************************************************************************
+ * STANDARD SLCAN DECAPSULATION *
+ ************************************************************************/
+
+static int asc2nibble(char c)
+{
+ if ((c >= '0') && (c <= '9'))
+ return c - '0';
+
+ if ((c >= 'A') && (c <= 'F'))
+ return c - 'A' + 10;
+
+ if ((c >= 'a') && (c <= 'f'))
+ return c - 'a' + 10;
+
+ return 16; /* error */
+}
+
+/* Send one completely decapsulated can_frame to the network layer */
+static void slc_bump(struct slcan *sl)
+{
+ struct sk_buff *skb;
+ struct can_frame cf;
+ int i, dlc_pos, tmp;
+ unsigned long ultmp;
+ char cmd = sl->rbuff[0];
+
+ if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R'))
+ return;
+
+ if (cmd & 0x20) /* lowercase chars 'r', 't' => standard frame format */
+ dlc_pos = 4; /* dlc position tiiid */
+ else
+ dlc_pos = 9; /* dlc position Tiiiiiiiid */
+
+ if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
+ return;
+
+ cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */
+
+ sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
+
+ if (strict_strtoul(sl->rbuff+1, 16, &ultmp))
+ return;
+
+ cf.can_id = ultmp;
+
+ if (!(cmd & 0x20)) /* NO lowercase chars => extended frame format */
+ cf.can_id |= CAN_EFF_FLAG;
+
+ if ((cmd | 0x20) == 'r') /* RTR frame */
+ cf.can_id |= CAN_RTR_FLAG;
+
+ *(u64 *) (&cf.data) = 0; /* clear payload */
+
+ for (i = 0, dlc_pos++; i < cf.can_dlc; i++) {
+
+ tmp = asc2nibble(sl->rbuff[dlc_pos++]);
+ if (tmp > 0x0F)
+ return;
+ cf.data[i] = (tmp << 4);
+ tmp = asc2nibble(sl->rbuff[dlc_pos++]);
+ if (tmp > 0x0F)
+ return;
+ cf.data[i] |= tmp;
+ }
+
+ skb = dev_alloc_skb(sizeof(struct can_frame));
+ if (!skb)
+ return;
+
+ skb->dev = sl->dev;
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ memcpy(skb_put(skb, sizeof(struct can_frame)),
+ &cf, sizeof(struct can_frame));
+ netif_rx(skb);
+
+ sl->dev->stats.rx_packets++;
+ sl->dev->stats.rx_bytes += cf.can_dlc;
+}
+
+/* parse tty input stream */
+static void slcan_unesc(struct slcan *sl, unsigned char s)
+{
+ if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+ (sl->rcount > 4)) {
+ slc_bump(sl);
+ }
+ sl->rcount = 0;
+ } else {
+ if (!test_bit(SLF_ERROR, &sl->flags)) {
+ if (sl->rcount < SLC_MTU) {
+ sl->rbuff[sl->rcount++] = s;
+ return;
+ } else {
+ sl->dev->stats.rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+ }
+ }
+}
+
+ /************************************************************************
+ * STANDARD SLCAN ENCAPSULATION *
+ ************************************************************************/
+
+/* Encapsulate one can_frame and stuff into a TTY queue. */
+static void slc_encaps(struct slcan *sl, struct can_frame *cf)
+{
+ int actual, idx, i;
+ char cmd;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ cmd = 'R'; /* becomes 'r' in standard frame format */
+ else
+ cmd = 'T'; /* becomes 't' in standard frame format */
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ sprintf(sl->xbuff, "%c%08X%d", cmd,
+ cf->can_id & CAN_EFF_MASK, cf->can_dlc);
+ else
+ sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20,
+ cf->can_id & CAN_SFF_MASK, cf->can_dlc);
+
+ idx = strlen(sl->xbuff);
+
+ for (i = 0; i < cf->can_dlc; i++)
+ sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]);
+
+ strcat(sl->xbuff, "\r"); /* add terminating character */
+
+ /* Order of next two lines is *very* important.
+ * When we are sending a little amount of data,
+ * the transfer may be completed inside the ops->write()
+ * routine, because it's running with interrupts enabled.
+ * In this case we *never* got WRITE_WAKEUP event,
+ * if we did not request it before write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff));
+ sl->xleft = strlen(sl->xbuff) - actual;
+ sl->xhead = sl->xbuff + actual;
+ sl->dev->stats.tx_bytes += cf->can_dlc;
+}
+
+/*
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+ int actual;
+ struct slcan *sl = (struct slcan *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+ return;
+
+ if (sl->xleft <= 0) {
+ /* Now serial buffer is almost free & we can start
+ * transmission of another packet */
+ sl->dev->stats.tx_packets++;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ netif_wake_queue(sl->dev);
+ return;
+ }
+
+ actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+}
+
+/* Send a can_frame to a TTY queue. */
+static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+
+ if (skb->len != sizeof(struct can_frame))
+ goto out;
+
+ spin_lock(&sl->lock);
+ if (!netif_running(dev)) {
+ spin_unlock(&sl->lock);
+ printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
+ goto out;
+ }
+ if (sl->tty == NULL) {
+ spin_unlock(&sl->lock);
+ goto out;
+ }
+
+ netif_stop_queue(sl->dev);
+ slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
+ spin_unlock(&sl->lock);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+
+/******************************************
+ * Routines looking at netdevice side.
+ ******************************************/
+
+/* Netdevice UP -> DOWN routine */
+static int slc_close(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+
+ spin_lock_bh(&sl->lock);
+ if (sl->tty) {
+ /* TTY discipline is running. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ }
+ netif_stop_queue(dev);
+ sl->rcount = 0;
+ sl->xleft = 0;
+ spin_unlock_bh(&sl->lock);
+
+ return 0;
+}
+
+/* Netdevice DOWN -> UP routine */
+static int slc_open(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+
+ if (sl->tty == NULL)
+ return -ENODEV;
+
+ sl->flags &= (1 << SLF_INUSE);
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* Hook the destructor so we can free slcan devs at the right point in time */
+static void slc_free_netdev(struct net_device *dev)
+{
+ int i = dev->base_addr;
+ free_netdev(dev);
+ slcan_devs[i] = NULL;
+}
+
+static const struct net_device_ops slc_netdev_ops = {
+ .ndo_open = slc_open,
+ .ndo_stop = slc_close,
+ .ndo_start_xmit = slc_xmit,
+};
+
+static void slc_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &slc_netdev_ops;
+ dev->destructor = slc_free_netdev;
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+
+ dev->mtu = sizeof(struct can_frame);
+ dev->type = ARPHRD_CAN;
+
+ /* New-style flags. */
+ dev->flags = IFF_NOARP;
+ dev->features = NETIF_F_NO_CSUM;
+}
+
+/******************************************
+ Routines looking at TTY side.
+ ******************************************/
+
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of SLCAN data has been received, which can now be decapsulated
+ * and sent on to the CAN networking layer for further processing. This
+ * will not be re-entered while running, but other ldisc functions may be
+ * called in parallel.
+ */
+
+static void slcan_receive_buf(struct tty_struct *tty,
+ const unsigned char *cp, char *fp, int count)
+{
+ struct slcan *sl = (struct slcan *) tty->disc_data;
+
+ if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+ return;
+
+ /* Read the characters out of the buffer */
+ while (count--) {
+ if (fp && *fp++) {
+ if (!test_and_set_bit(SLF_ERROR, &sl->flags))
+ sl->dev->stats.rx_errors++;
+ cp++;
+ continue;
+ }
+ slcan_unesc(sl, *cp++);
+ }
+}
+
+/************************************
+ * slcan_open helper routines.
+ ************************************/
+
+/* Collect hanged up channels */
+static void slc_sync(void)
+{
+ int i;
+ struct net_device *dev;
+ struct slcan *sl;
+
+ for (i = 0; i < maxdev; i++) {
+ dev = slcan_devs[i];
+ if (dev == NULL)
+ break;
+
+ sl = netdev_priv(dev);
+ if (sl->tty || sl->leased)
+ continue;
+ if (dev->flags & IFF_UP)
+ dev_close(dev);
+ }
+}
+
+/* Find a free SLCAN channel, and link in this `tty' line. */
+static struct slcan *slc_alloc(dev_t line)
+{
+ int i;
+ struct net_device *dev = NULL;
+ struct slcan *sl;
+
+ if (slcan_devs == NULL)
+ return NULL; /* Master array missing ! */
+
+ for (i = 0; i < maxdev; i++) {
+ dev = slcan_devs[i];
+ if (dev == NULL)
+ break;
+ }
+
+ /* Sorry, too many, all slots in use */
+ if (i >= maxdev)
+ return NULL;
+
+ if (dev) {
+ sl = netdev_priv(dev);
+ if (test_bit(SLF_INUSE, &sl->flags)) {
+ unregister_netdevice(dev);
+ dev = NULL;
+ slcan_devs[i] = NULL;
+ }
+ }
+
+ if (!dev) {
+ char name[IFNAMSIZ];
+ sprintf(name, "slcan%d", i);
+
+ dev = alloc_netdev(sizeof(*sl), name, slc_setup);
+ if (!dev)
+ return NULL;
+ dev->base_addr = i;
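+ /* base_addr records this slot in slcan_devs[]; slc_free_netdev()
+ * uses it to clear the entry when the device is destroyed. */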
+ }
+
+ sl = netdev_priv(dev);
+
+ /* Initialize channel control data */
+ sl->magic = SLCAN_MAGIC;
+ sl->dev = dev;
+ spin_lock_init(&sl->lock);
+ slcan_devs[i] = dev;
+
+ return sl;
+}
+
+/*
+ * Open the high-level part of the SLCAN channel.
+ * This function is called by the TTY module when the
+ * SLCAN line discipline is called for. Because we are
+ * sure the tty line exists, we only have to link it to
+ * a free SLCAN channel...
+ *
+ * Called in process context serialized from other ldisc calls.
+ */
+
+static int slcan_open(struct tty_struct *tty)
+{
+ struct slcan *sl;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (tty->ops->write == NULL)
+ return -EOPNOTSUPP;
+
+ /* RTnetlink lock is misused here to serialize concurrent
+ opens of slcan channels. There are better ways, but it is
+ the simplest one.
+ */
+ rtnl_lock();
+
+ /* Collect hanged up channels. */
+ slc_sync();
+
+ sl = tty->disc_data;
+
+ err = -EEXIST;
+ /* First make sure we're not already connected. */
+ if (sl && sl->magic == SLCAN_MAGIC)
+ goto err_exit;
+
+ /* OK. Find a free SLCAN channel to use. */
+ err = -ENFILE;
+ sl = slc_alloc(tty_devnum(tty));
+ if (sl == NULL)
+ goto err_exit;
+
+ sl->tty = tty;
+ tty->disc_data = sl;
+ sl->line = tty_devnum(tty);
+ sl->pid = current->pid;
+
+ if (!test_bit(SLF_INUSE, &sl->flags)) {
+ /* Perform the low-level SLCAN initialization. */
+ sl->rcount = 0;
+ sl->xleft = 0;
+
+ set_bit(SLF_INUSE, &sl->flags);
+
+ err = register_netdevice(sl->dev);
+ if (err)
+ goto err_free_chan;
+ }
+
+ /* Done. We have linked the TTY line to a channel. */
+ rtnl_unlock();
+ tty->receive_room = 65536; /* We don't flow control */
+ return sl->dev->base_addr;
+
+err_free_chan:
+ sl->tty = NULL;
+ tty->disc_data = NULL;
+ clear_bit(SLF_INUSE, &sl->flags);
+
+err_exit:
+ rtnl_unlock();
+
+ /* Count references from TTY module */
+ return err;
+}
+
+/*
+ * Close down a SLCAN channel.
+ * This means flushing out any pending queues, and then returning. This
+ * call is serialized against other ldisc functions.
+ *
+ * We also use this method for a hangup event.
+ */
+
+static void slcan_close(struct tty_struct *tty)
+{
+ struct slcan *sl = (struct slcan *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
+ return;
+
+ tty->disc_data = NULL;
+ sl->tty = NULL;
+ if (!sl->leased)
+ sl->line = 0;
+
+ /* Flush network side */
+ unregister_netdev(sl->dev);
+ /* This will complete via sl_free_netdev */
+}
+
+static int slcan_hangup(struct tty_struct *tty)
+{
+ slcan_close(tty);
+ return 0;
+}
+
+/* Perform I/O control on an active SLCAN channel. */
+static int slcan_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct slcan *sl = (struct slcan *) tty->disc_data;
+ unsigned int tmp;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != SLCAN_MAGIC)
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strlen(sl->dev->name) + 1;
+ if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, file, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops slc_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "slcan",
+ .open = slcan_open,
+ .close = slcan_close,
+ .hangup = slcan_hangup,
+ .ioctl = slcan_ioctl,
+ .receive_buf = slcan_receive_buf,
+ .write_wakeup = slcan_write_wakeup,
+};
+
+static int __init slcan_init(void)
+{
+ int status;
+
+ if (maxdev < 4)
+ maxdev = 4; /* Sanity */
+
+ printk(banner);
+ printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
+
+ slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
+ if (!slcan_devs) {
+ printk(KERN_ERR "slcan: can't allocate slcan device array!\n");
+ return -ENOMEM;
+ }
+
+ /* Fill in our line protocol discipline, and register it */
+ status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
+ if (status) {
+ printk(KERN_ERR "slcan: can't register line discipline\n");
+ kfree(slcan_devs);
+ }
+ return status;
+}
+
+static void __exit slcan_exit(void)
+{
+ int i;
+ struct net_device *dev;
+ struct slcan *sl;
+ unsigned long timeout = jiffies + HZ;
+ int busy = 0;
+
+ if (slcan_devs == NULL)
+ return;
+
+ /* First of all: check for active disciplines and hang them up. */
+ do {
+ if (busy)
+ msleep_interruptible(100);
+
+ busy = 0;
+ for (i = 0; i < maxdev; i++) {
+ dev = slcan_devs[i];
+ if (!dev)
+ continue;
+ sl = netdev_priv(dev);
+ spin_lock_bh(&sl->lock);
+ if (sl->tty) {
+ busy++;
+ tty_hangup(sl->tty);
+ }
+ spin_unlock_bh(&sl->lock);
+ }
+ } while (busy && time_before(jiffies, timeout));
+
+ /* FIXME: hangup is async so we should wait when doing this second
+ phase */
+
+ for (i = 0; i < maxdev; i++) {
+ dev = slcan_devs[i];
+ if (!dev)
+ continue;
+ slcan_devs[i] = NULL;
+
+ sl = netdev_priv(dev);
+ if (sl->tty) {
+ printk(KERN_ERR "%s: tty discipline still running\n",
+ dev->name);
+ /* Intentionally leak the control block. */
+ dev->destructor = NULL;
+ }
+
+ unregister_netdev(dev);
+ }
+
+ kfree(slcan_devs);
+ slcan_devs = NULL;
+
+ i = tty_unregister_ldisc(N_SLCAN);
+ if (i)
+ printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i);
+}
+
+module_init(slcan_init);
+module_exit(slcan_exit);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index d6b6d6aa565a..73502fef8769 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2788,7 +2788,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
ctrl = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const u64 csum_start_off = skb_transport_offset(skb);
+ const u64 csum_start_off = skb_checksum_start_offset(skb);
const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = TX_DESC_CSUM_EN |
@@ -3880,7 +3880,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
schedule_work(&cp->reset_task);
#endif
- flush_scheduled_work();
+ flush_work_sync(&cp->reset_task);
return 0;
}
@@ -5177,7 +5177,7 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
vfree(cp->fw_data);
mutex_lock(&cp->pm_mutex);
- flush_scheduled_work();
+ cancel_work_sync(&cp->reset_task);
if (cp->hw_running)
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 70221ca32683..f778b15ad3fd 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -273,6 +273,10 @@ struct sge {
struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};
+static const u8 ch_mac_addr[ETH_ALEN] = {
+ 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
+};
+
/*
* stop tasklet and free all pending skb's
*/
@@ -2012,10 +2016,6 @@ static void espibug_workaround_t204(unsigned long data)
continue;
if (!skb->cb[0]) {
- u8 ch_mac_addr[ETH_ALEN] = {
- 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
- };
-
skb_copy_to_linear_data_offset(skb,
sizeof(struct cpl_tx_pkt),
ch_mac_addr,
@@ -2048,8 +2048,6 @@ static void espibug_workaround(unsigned long data)
if ((seop & 0xfff0fff) == 0xfff && skb) {
if (!skb->cb[0]) {
- u8 ch_mac_addr[ETH_ALEN] =
- {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
skb_copy_to_linear_data_offset(skb,
sizeof(struct cpl_tx_pkt),
ch_mac_addr,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 92bac19ad60a..594ca9c2c10a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1695,7 +1695,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
*work = num;
return -EINVAL;
}
- *work = 2 + req2->num_additional_wqes;;
+ *work = 2 + req2->num_additional_wqes;
l5_cid = req1->iscsi_conn_id;
if (l5_cid >= MAX_ISCSI_TBL_SZ)
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 81475cc80e1c..80c2feeefec5 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -59,7 +59,6 @@ static struct sockaddr default_mac = {
/* Information that need to be kept for each board. */
struct net_local {
- struct net_device_stats stats;
struct mii_if_info mii_if;
/* Tx control lock. This protects the transmit buffer ring
@@ -1059,7 +1058,7 @@ e100_tx_timeout(struct net_device *dev)
/* remember we got an error */
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
/* reset the TX DMA in case it has hung on something */
@@ -1157,7 +1156,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
* allocate a new buffer to put a packet in.
*/
e100_rx(dev);
- np->stats.rx_packets++;
+ dev->stats.rx_packets++;
/* restart/continue on the channel, for safety */
*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
/* clear dma channel 1 eop/descr irq bits */
@@ -1173,8 +1172,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
/* Report any packets that have been sent */
while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
(netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
- np->stats.tx_bytes += myFirstTxDesc->skb->len;
- np->stats.tx_packets++;
+ dev->stats.tx_bytes += myFirstTxDesc->skb->len;
+ dev->stats.tx_packets++;
/* dma is ready with the transmission of the data in tx_skb, so now
we can release the skb memory */
@@ -1197,7 +1196,6 @@ static irqreturn_t
e100nw_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct net_local *np = netdev_priv(dev);
unsigned long irqbits = *R_IRQ_MASK0_RD;
/* check for underrun irq */
@@ -1205,13 +1203,13 @@ e100nw_interrupt(int irq, void *dev_id)
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
D(printk("ethernet receiver underrun!\n"));
}
/* check for overrun irq */
if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
- update_rx_stats(&np->stats); /* this will ack the irq */
+ update_rx_stats(&dev->stats); /* this will ack the irq */
D(printk("ethernet receiver overrun!\n"));
}
/* check for excessive collision irq */
@@ -1219,7 +1217,7 @@ e100nw_interrupt(int irq, void *dev_id)
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
D(printk("ethernet excessive collisions!\n"));
}
return IRQ_HANDLED;
@@ -1250,7 +1248,7 @@ e100_rx(struct net_device *dev)
spin_unlock(&np->led_lock);
length = myNextRxDesc->descr.hw_len - 4;
- np->stats.rx_bytes += length;
+ dev->stats.rx_bytes += length;
#ifdef ETHDEBUG
printk("Got a packet of length %d:\n", length);
@@ -1268,7 +1266,7 @@ e100_rx(struct net_device *dev)
/* Small packet, copy data */
skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
if (!skb) {
- np->stats.rx_errors++;
+ dev->stats.rx_errors++;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
goto update_nextrxdesc;
}
@@ -1294,7 +1292,7 @@ e100_rx(struct net_device *dev)
int align;
struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
if (!new_skb) {
- np->stats.rx_errors++;
+ dev->stats.rx_errors++;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
goto update_nextrxdesc;
}
@@ -1333,8 +1331,6 @@ e100_rx(struct net_device *dev)
static int
e100_close(struct net_device *dev)
{
- struct net_local *np = netdev_priv(dev);
-
printk(KERN_INFO "Closing %s.\n", dev->name);
netif_stop_queue(dev);
@@ -1366,8 +1362,8 @@ e100_close(struct net_device *dev)
/* Update the statistics here. */
- update_rx_stats(&np->stats);
- update_tx_stats(&np->stats);
+ update_rx_stats(&dev->stats);
+ update_tx_stats(&dev->stats);
/* Stop speed/duplex timers */
del_timer(&speed_timer);
@@ -1545,11 +1541,11 @@ e100_get_stats(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
- update_rx_stats(&lp->stats);
- update_tx_stats(&lp->stats);
+ update_rx_stats(&dev->stats);
+ update_tx_stats(&dev->stats);
spin_unlock_irqrestore(&lp->lock, flags);
- return &lp->stats;
+ return &dev->stats;
}
/*
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index 35cd36729155..2028da95afa1 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -292,7 +292,7 @@ unknown:
*/
static int ael2005_setup_sr_edc(struct cphy *phy)
{
- static struct reg_val regs[] = {
+ static const struct reg_val regs[] = {
{ MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 },
{ MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a },
{ MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 },
@@ -324,11 +324,11 @@ static int ael2005_setup_sr_edc(struct cphy *phy)
static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
{
- static struct reg_val regs[] = {
+ static const struct reg_val regs[] = {
{ MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 },
{ 0, 0, 0, 0 }
};
- static struct reg_val preemphasis[] = {
+ static const struct reg_val preemphasis[] = {
{ MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 },
{ MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 },
{ 0, 0, 0, 0 }
@@ -393,7 +393,7 @@ static int ael2005_intr_clear(struct cphy *phy)
static int ael2005_reset(struct cphy *phy, int wait)
{
- static struct reg_val regs0[] = {
+ static const struct reg_val regs0[] = {
{ MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 },
{ MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 },
{ MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 },
@@ -403,7 +403,7 @@ static int ael2005_reset(struct cphy *phy, int wait)
{ MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 },
{ 0, 0, 0, 0 }
};
- static struct reg_val regs1[] = {
+ static const struct reg_val regs1[] = {
{ MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 },
{ MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 },
{ 0, 0, 0, 0 }
@@ -522,7 +522,7 @@ int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
*/
static int ael2020_setup_sr_edc(struct cphy *phy)
{
- static struct reg_val regs[] = {
+ static const struct reg_val regs[] = {
/* set CDR offset to 10 */
{ MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
@@ -551,20 +551,20 @@ static int ael2020_setup_sr_edc(struct cphy *phy)
static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
{
/* set uC to 40MHz */
- static struct reg_val uCclock40MHz[] = {
+ static const struct reg_val uCclock40MHz[] = {
{ MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
{ MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
{ 0, 0, 0, 0 }
};
/* activate uC clock */
- static struct reg_val uCclockActivate[] = {
+ static const struct reg_val uCclockActivate[] = {
{ MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
{ 0, 0, 0, 0 }
};
/* set PC to start of SRAM and activate uC */
- static struct reg_val uCactivate[] = {
+ static const struct reg_val uCactivate[] = {
{ MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
{ MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
{ 0, 0, 0, 0 }
@@ -624,7 +624,7 @@ static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
*/
static int ael2020_intr_enable(struct cphy *phy)
{
- struct reg_val regs[] = {
+ static const struct reg_val regs[] = {
/* output Module's Loss Of Signal (LOS) to LED */
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
0xffff, 0x4 },
@@ -664,7 +664,7 @@ static int ael2020_intr_enable(struct cphy *phy)
*/
static int ael2020_intr_disable(struct cphy *phy)
{
- struct reg_val regs[] = {
+ static const struct reg_val regs[] = {
/* reset "link status" LED to "off" */
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) },
@@ -701,7 +701,7 @@ static int ael2020_intr_clear(struct cphy *phy)
return err ? err : t3_phy_lasi_intr_clear(phy);
}
-static struct reg_val ael2020_reset_regs[] = {
+static const struct reg_val ael2020_reset_regs[] = {
/* Erratum #2: CDRLOL asserted, causing PMA link down status */
{ MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 407d4e272075..4d538a4e9d55 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1359,6 +1359,7 @@ out:
static int offload_close(struct t3cdev *tdev)
{
struct adapter *adapter = tdev2adap(tdev);
+ struct t3c_data *td = T3C_DATA(tdev);
if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
return 0;
@@ -1369,7 +1370,7 @@ static int offload_close(struct t3cdev *tdev)
sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
/* Flush work scheduled while releasing TIDs */
- flush_scheduled_work();
+ flush_work_sync(&td->tid_release_task);
tdev->lldev = NULL;
cxgb3_set_dummy_ops(tdev);
@@ -3006,12 +3007,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct adapter *adapter = pci_get_drvdata(pdev);
- int ret;
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- ret = t3_adapter_error(adapter, 0, 0);
+ t3_adapter_error(adapter, 0, 0);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -3341,7 +3341,6 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->name = adapter->port[i]->name;
__set_bit(i, &adapter->registered_device_map);
- netif_tx_stop_all_queues(adapter->port[i]);
}
}
if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index bcf07532953d..ef02aa68c926 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1164,12 +1164,10 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
*/
void *cxgb_alloc_mem(unsigned long size)
{
- void *p = kmalloc(size, GFP_KERNEL);
+ void *p = kzalloc(size, GFP_KERNEL);
if (!p)
- p = vmalloc(size);
- if (p)
- memset(p, 0, size);
+ p = vzalloc(size);
return p;
}
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 3a6adf0b3e9d..ec8579a0a808 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1562,7 +1562,7 @@ static void tp_intr_handler(struct adapter *adapter)
{0}
};
- static struct intr_info tp_intr_info_t3c[] = {
+ static const struct intr_info tp_intr_info_t3c[] = {
{0x1fffffff, "TP parity error", -1, 1},
{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3d4253d311eb..01d49eaa44d2 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -482,11 +482,9 @@ struct adapter {
void __iomem *regs;
struct pci_dev *pdev;
struct device *pdev_dev;
- unsigned long registered_device_map;
unsigned int fn;
unsigned int flags;
- const char *name;
int msg_enable;
struct adapter_params params;
@@ -497,7 +495,7 @@ struct adapter {
struct {
unsigned short vec;
- char desc[14];
+ char desc[IFNAMSIZ + 10];
} msix_info[MAX_INGQ + 1];
struct sge sge;
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f17703f410b3..059c1eec8c3f 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -522,39 +522,33 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
*/
static void name_msix_vecs(struct adapter *adap)
{
- int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;
+ int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
/* non-data interrupts */
- snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
- adap->msix_info[0].desc[n] = 0;
+ snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
/* FW events */
- snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
- adap->msix_info[1].desc[n] = 0;
+ snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
+ adap->port[0]->name);
/* Ethernet queues */
for_each_port(adap, j) {
struct net_device *d = adap->port[j];
const struct port_info *pi = netdev_priv(d);
- for (i = 0; i < pi->nqsets; i++, msi_idx++) {
+ for (i = 0; i < pi->nqsets; i++, msi_idx++)
snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
d->name, i);
- adap->msix_info[msi_idx].desc[n] = 0;
- }
}
/* offload queues */
- for_each_ofldrxq(&adap->sge, i) {
- snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
- adap->name, i);
- adap->msix_info[msi_idx++].desc[n] = 0;
- }
- for_each_rdmarxq(&adap->sge, i) {
- snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
- adap->name, i);
- adap->msix_info[msi_idx++].desc[n] = 0;
- }
+ for_each_ofldrxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
+ adap->port[0]->name, i);
+
+ for_each_rdmarxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
+ adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
@@ -868,12 +862,10 @@ out: release_firmware(fw);
*/
void *t4_alloc_mem(size_t size)
{
- void *p = kmalloc(size, GFP_KERNEL);
+ void *p = kzalloc(size, GFP_KERNEL);
if (!p)
- p = vmalloc(size);
- if (p)
- memset(p, 0, size);
+ p = vzalloc(size);
return p;
}
@@ -1377,7 +1369,12 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
} else if (type == FW_PORT_TYPE_KR)
v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
else if (type == FW_PORT_TYPE_BP_AP)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+ else if (type == FW_PORT_TYPE_BP4_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKX4_Full;
else if (type == FW_PORT_TYPE_FIBER_XFI ||
type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
v |= SUPPORTED_FIBRE;
@@ -2668,7 +2665,7 @@ static int cxgb_up(struct adapter *adap)
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
(adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
- adap->name, adap);
+ adap->port[0]->name, adap);
if (err)
goto irq_err;
}
@@ -2719,10 +2716,6 @@ static int cxgb_open(struct net_device *dev)
return err;
}
- netif_set_real_num_tx_queues(dev, pi->nqsets);
- err = netif_set_real_num_rx_queues(dev, pi->nqsets);
- if (err)
- return err;
err = link_start(dev);
if (!err)
netif_tx_start_all_queues(dev);
@@ -3491,49 +3484,53 @@ static int __devinit init_rss(struct adapter *adap)
return 0;
}
-static void __devinit print_port_info(struct adapter *adap)
+static void __devinit print_port_info(const struct net_device *dev)
{
static const char *base[] = {
"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
- "KX", "KR", "KR SFP+", "KR FEC"
+ "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
};
- int i;
char buf[80];
+ char *bufp = buf;
const char *spd = "";
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adap = pi->adapter;
if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
spd = " 2.5 GT/s";
else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
spd = " 5 GT/s";
- for_each_port(adap, i) {
- struct net_device *dev = adap->port[i];
- const struct port_info *pi = netdev_priv(dev);
- char *bufp = buf;
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+ bufp += sprintf(bufp, "100/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+ bufp += sprintf(bufp, "1000/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+ bufp += sprintf(bufp, "10G/");
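+ /* Back up over the trailing '/' so the media type replaces it. */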
+ if (bufp != buf)
+ --bufp;
+ sprintf(bufp, "BASE-%s", base[pi->port_type]);
- if (!test_bit(i, &adap->registered_device_map))
- continue;
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
+ adap->params.vpd.id, adap->params.rev, buf,
+ is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
+ netdev_info(dev, "S/N: %s, E/C: %s\n",
+ adap->params.vpd.sn, adap->params.vpd.ec);
+}
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
- bufp += sprintf(bufp, "100/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
- bufp += sprintf(bufp, "1000/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
- bufp += sprintf(bufp, "10G/");
- if (bufp != buf)
- --bufp;
- sprintf(bufp, "BASE-%s", base[pi->port_type]);
+static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
+{
+ u16 v;
+ int pos;
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
- adap->params.vpd.id, adap->params.rev,
- buf, is_offload(adap) ? "R" : "",
- adap->params.pci.width, spd,
- (adap->flags & USING_MSIX) ? " MSI-X" :
- (adap->flags & USING_MSI) ? " MSI" : "");
- if (adap->name == dev->name)
- netdev_info(dev, "S/N: %s, E/C: %s\n",
- adap->params.vpd.sn, adap->params.vpd.ec);
+ pos = pci_pcie_cap(dev);
+ if (pos > 0) {
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
+ v |= PCI_EXP_DEVCTL_RELAX_EN;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
}
}
@@ -3611,6 +3608,7 @@ static int __devinit init_one(struct pci_dev *pdev,
}
pci_enable_pcie_error_reporting(pdev);
+ enable_pcie_relaxed_ordering(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
@@ -3630,7 +3628,6 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->fn = func;
- adapter->name = pci_name(pdev);
adapter->msg_enable = dflt_msg_enable;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -3721,28 +3718,24 @@ static int __devinit init_one(struct pci_dev *pdev,
* register at least one net device.
*/
for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
+ netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
+
err = register_netdev(adapter->port[i]);
if (err)
- dev_warn(&pdev->dev,
- "cannot register net device %s, skipping\n",
- adapter->port[i]->name);
- else {
- /*
- * Change the name we use for messages to the name of
- * the first successfully registered interface.
- */
- if (!adapter->registered_device_map)
- adapter->name = adapter->port[i]->name;
-
- __set_bit(i, &adapter->registered_device_map);
- adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
- netif_tx_stop_all_queues(adapter->port[i]);
- }
+ break;
+ adapter->chan_map[pi->tx_chan] = i;
+ print_port_info(adapter->port[i]);
}
- if (!adapter->registered_device_map) {
+ if (i == 0) {
dev_err(&pdev->dev, "could not register any net devices\n");
goto out_free_dev;
}
+ if (err) {
+ dev_warn(&pdev->dev, "only %d net devices registered\n", i);
+ err = 0;
+ }
if (cxgb4_debugfs_root) {
adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
@@ -3753,8 +3746,6 @@ static int __devinit init_one(struct pci_dev *pdev,
if (is_offload(adapter))
attach_ulds(adapter);
- print_port_info(adapter);
-
sriov:
#ifdef CONFIG_PCI_IOV
if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
@@ -3793,7 +3784,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
detach_ulds(adapter);
for_each_port(adapter, i)
- if (test_bit(i, &adapter->registered_device_map))
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);
if (adapter->debugfs_root)
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 17022258ed68..311471b439a8 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -579,6 +579,7 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
* @phys: the physical address of the allocated ring
* @metadata: address of the array holding the SW state for the ring
* @stat_size: extra space in HW ring for status information
+ * @node: preferred node for memory allocations
*
* Allocates resources for an SGE descriptor ring, such as Tx queues,
* free buffer lists, or response queues. Each SGE ring requires
@@ -590,7 +591,7 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
*/
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
size_t sw_size, dma_addr_t *phys, void *metadata,
- size_t stat_size)
+ size_t stat_size, int node)
{
size_t len = nelem * elem_size + stat_size;
void *s = NULL;
@@ -599,7 +600,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
if (!p)
return NULL;
if (sw_size) {
- s = kcalloc(nelem, sw_size, GFP_KERNEL);
+ s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
if (!s) {
dma_free_coherent(dev, len, p, *phys);
@@ -1982,7 +1983,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size = roundup(iq->size, 16);
iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
- &iq->phys_addr, NULL, 0);
+ &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
if (!iq->desc)
return -ENOMEM;
@@ -2008,12 +2009,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = roundup(fl->size, 8);
fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
sizeof(struct rx_sw_desc), &fl->addr,
- &fl->sdesc, STAT_LEN);
+ &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
if (!fl->desc)
goto fl_nomem;
flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
+ FW_IQ_CMD_FL0FETCHRO(1) |
+ FW_IQ_CMD_FL0DATARO(1) |
FW_IQ_CMD_FL0PADEN);
c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
FW_IQ_CMD_FL0FBMAX(3));
@@ -2093,7 +2096,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+ netdev_queue_numa_node_read(netdevq));
if (!txq->q.desc)
return -ENOMEM;
@@ -2106,6 +2110,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO(1) |
FW_EQ_ETH_CMD_IQID(iqid));
c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
FW_EQ_ETH_CMD_FBMAX(3) |
@@ -2144,7 +2149,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
sizeof(struct tx_desc), 0, &txq->q.phys_addr,
- NULL, 0);
+ NULL, 0, NUMA_NO_NODE);
if (!txq->q.desc)
return -ENOMEM;
@@ -2158,6 +2163,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
c.physeqid_pkd = htonl(0);
c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO |
FW_EQ_CTRL_CMD_IQID(iqid));
c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
FW_EQ_CTRL_CMD_FBMAX(3) |
@@ -2194,7 +2200,8 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+ NUMA_NO_NODE);
if (!txq->q.desc)
return -ENOMEM;
@@ -2207,6 +2214,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO(1) |
FW_EQ_OFLD_CMD_IQID(iqid));
c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
FW_EQ_OFLD_CMD_FBMAX(3) |
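
alloc_ring() now takes a NUMA node so that per-descriptor software state can be placed on the node servicing the queue; the Ethernet TX path feeds it netdev_queue_numa_node_read(netdevq), everything else passes NUMA_NO_NODE. A minimal sketch of the allocation side, with an invented stand-in struct:

        #include <linux/slab.h>
        #include <linux/numa.h>

        struct sketch_sw_desc {                 /* stand-in for rx/tx_sw_desc */
                void *buf;
        };

        /* Zeroed per-descriptor state, allocated near @node when possible;
         * NUMA_NO_NODE falls back to the default allocation policy. */
        static struct sketch_sw_desc *sketch_alloc_sw_state(size_t nelem, int node)
        {
                return kzalloc_node(nelem * sizeof(struct sketch_sw_desc),
                                    GFP_KERNEL, node);
        }

Note that unlike the kcalloc() it replaces, the open-coded nelem * sw_size multiplication has no overflow check; the SGE ring sizes involved are small enough for that to be safe.
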
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index bb813d94aea8..b9fd8a6f2cc4 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -183,7 +183,7 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
- static int delay[] = {
+ static const int delay[] = {
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
};
@@ -330,18 +330,6 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
return 0;
}
-/*
- * Partial EEPROM Vital Product Data structure. Includes only the ID and
- * VPD-R header.
- */
-struct t4_vpd_hdr {
- u8 id_tag;
- u8 id_len[2];
- u8 id_data[ID_LEN];
- u8 vpdr_tag;
- u8 vpdr_len[2];
-};
-
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0
#define VPD_LEN 512
@@ -370,25 +358,38 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
int i, ret;
- int ec, sn, v2;
+ int ec, sn;
u8 vpd[VPD_LEN], csum;
- unsigned int vpdr_len;
- const struct t4_vpd_hdr *v;
+ unsigned int vpdr_len, kw_offset, id_len;
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
if (ret < 0)
return ret;
- v = (const struct t4_vpd_hdr *)vpd;
- vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
- if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
+ if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
+ dev_err(adapter->pdev_dev, "missing VPD ID string\n");
+ return -EINVAL;
+ }
+
+ id_len = pci_vpd_lrdt_size(vpd);
+ if (id_len > ID_LEN)
+ id_len = ID_LEN;
+
+ i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0) {
+ dev_err(adapter->pdev_dev, "missing VPD-R section\n");
+ return -EINVAL;
+ }
+
+ vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
+ kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
+ if (vpdr_len + kw_offset > VPD_LEN) {
dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
return -EINVAL;
}
#define FIND_VPD_KW(var, name) do { \
- var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
- vpdr_len, name); \
+ var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
if (var < 0) { \
dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
return -EINVAL; \
@@ -408,11 +409,9 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
- FIND_VPD_KW(v2, "V2");
#undef FIND_VPD_KW
- p->cclk = simple_strtoul(vpd + v2, NULL, 10);
- memcpy(p->id, v->id_data, ID_LEN);
+ memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
strim(p->id);
memcpy(p->ec, vpd + ec, EC_LEN);
strim(p->ec);
@@ -919,7 +918,7 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
*/
static void pcie_intr_handler(struct adapter *adapter)
{
- static struct intr_info sysbus_intr_info[] = {
+ static const struct intr_info sysbus_intr_info[] = {
{ RNPP, "RXNP array parity error", -1, 1 },
{ RPCP, "RXPC array parity error", -1, 1 },
{ RCIP, "RXCIF array parity error", -1, 1 },
@@ -927,7 +926,7 @@ static void pcie_intr_handler(struct adapter *adapter)
{ RFTP, "RXFT array parity error", -1, 1 },
{ 0 }
};
- static struct intr_info pcie_port_intr_info[] = {
+ static const struct intr_info pcie_port_intr_info[] = {
{ TPCP, "TXPC array parity error", -1, 1 },
{ TNPP, "TXNP array parity error", -1, 1 },
{ TFTP, "TXFT array parity error", -1, 1 },
@@ -939,7 +938,7 @@ static void pcie_intr_handler(struct adapter *adapter)
{ TDUE, "Tx uncorrectable data error", -1, 1 },
{ 0 }
};
- static struct intr_info pcie_intr_info[] = {
+ static const struct intr_info pcie_intr_info[] = {
{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
{ MSIDATAPERR, "MSI data parity error", -1, 1 },
@@ -991,7 +990,7 @@ static void pcie_intr_handler(struct adapter *adapter)
*/
static void tp_intr_handler(struct adapter *adapter)
{
- static struct intr_info tp_intr_info[] = {
+ static const struct intr_info tp_intr_info[] = {
{ 0x3fffffff, "TP parity error", -1, 1 },
{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
{ 0 }
@@ -1008,7 +1007,7 @@ static void sge_intr_handler(struct adapter *adapter)
{
u64 v;
- static struct intr_info sge_intr_info[] = {
+ static const struct intr_info sge_intr_info[] = {
{ ERR_CPL_EXCEED_IQE_SIZE,
"SGE received CPL exceeding IQE size", -1, 1 },
{ ERR_INVALID_CIDX_INC,
@@ -1053,7 +1052,7 @@ static void sge_intr_handler(struct adapter *adapter)
*/
static void cim_intr_handler(struct adapter *adapter)
{
- static struct intr_info cim_intr_info[] = {
+ static const struct intr_info cim_intr_info[] = {
{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
@@ -1063,7 +1062,7 @@ static void cim_intr_handler(struct adapter *adapter)
{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
{ 0 }
};
- static struct intr_info cim_upintr_info[] = {
+ static const struct intr_info cim_upintr_info[] = {
{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
{ ILLWRINT, "CIM illegal write", -1, 1 },
@@ -1110,7 +1109,7 @@ static void cim_intr_handler(struct adapter *adapter)
*/
static void ulprx_intr_handler(struct adapter *adapter)
{
- static struct intr_info ulprx_intr_info[] = {
+ static const struct intr_info ulprx_intr_info[] = {
{ 0x1800000, "ULPRX context error", -1, 1 },
{ 0x7fffff, "ULPRX parity error", -1, 1 },
{ 0 }
@@ -1125,7 +1124,7 @@ static void ulprx_intr_handler(struct adapter *adapter)
*/
static void ulptx_intr_handler(struct adapter *adapter)
{
- static struct intr_info ulptx_intr_info[] = {
+ static const struct intr_info ulptx_intr_info[] = {
{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
0 },
{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
@@ -1147,7 +1146,7 @@ static void ulptx_intr_handler(struct adapter *adapter)
*/
static void pmtx_intr_handler(struct adapter *adapter)
{
- static struct intr_info pmtx_intr_info[] = {
+ static const struct intr_info pmtx_intr_info[] = {
{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
@@ -1169,7 +1168,7 @@ static void pmtx_intr_handler(struct adapter *adapter)
*/
static void pmrx_intr_handler(struct adapter *adapter)
{
- static struct intr_info pmrx_intr_info[] = {
+ static const struct intr_info pmrx_intr_info[] = {
{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
@@ -1188,7 +1187,7 @@ static void pmrx_intr_handler(struct adapter *adapter)
*/
static void cplsw_intr_handler(struct adapter *adapter)
{
- static struct intr_info cplsw_intr_info[] = {
+ static const struct intr_info cplsw_intr_info[] = {
{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
@@ -1207,7 +1206,7 @@ static void cplsw_intr_handler(struct adapter *adapter)
*/
static void le_intr_handler(struct adapter *adap)
{
- static struct intr_info le_intr_info[] = {
+ static const struct intr_info le_intr_info[] = {
{ LIPMISS, "LE LIP miss", -1, 0 },
{ LIP0, "LE 0 LIP error", -1, 0 },
{ PARITYERR, "LE parity error", -1, 1 },
@@ -1225,11 +1224,11 @@ static void le_intr_handler(struct adapter *adap)
*/
static void mps_intr_handler(struct adapter *adapter)
{
- static struct intr_info mps_rx_intr_info[] = {
+ static const struct intr_info mps_rx_intr_info[] = {
{ 0xffffff, "MPS Rx parity error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_tx_intr_info[] = {
+ static const struct intr_info mps_tx_intr_info[] = {
{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
@@ -1239,25 +1238,25 @@ static void mps_intr_handler(struct adapter *adapter)
{ FRMERR, "MPS Tx framing error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_trc_intr_info[] = {
+ static const struct intr_info mps_trc_intr_info[] = {
{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_stat_sram_intr_info[] = {
+ static const struct intr_info mps_stat_sram_intr_info[] = {
{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_stat_tx_intr_info[] = {
+ static const struct intr_info mps_stat_tx_intr_info[] = {
{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_stat_rx_intr_info[] = {
+ static const struct intr_info mps_stat_rx_intr_info[] = {
{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_cls_intr_info[] = {
+ static const struct intr_info mps_cls_intr_info[] = {
{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
@@ -1356,7 +1355,7 @@ static void ma_intr_handler(struct adapter *adap)
*/
static void smb_intr_handler(struct adapter *adap)
{
- static struct intr_info smb_intr_info[] = {
+ static const struct intr_info smb_intr_info[] = {
{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
@@ -1372,7 +1371,7 @@ static void smb_intr_handler(struct adapter *adap)
*/
static void ncsi_intr_handler(struct adapter *adap)
{
- static struct intr_info ncsi_intr_info[] = {
+ static const struct intr_info ncsi_intr_info[] = {
{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
@@ -1410,7 +1409,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
*/
static void pl_intr_handler(struct adapter *adap)
{
- static struct intr_info pl_intr_info[] = {
+ static const struct intr_info pl_intr_info[] = {
{ FATALPERR, "T4 fatal parity error", -1, 1 },
{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
{ 0 }
@@ -2408,7 +2407,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
if (index < NEXACT_MAC)
ret++;
else if (hash)
- *hash |= (1 << hash_mac_addr(addr[i]));
+ *hash |= (1ULL << hash_mac_addr(addr[i]));
}
return ret;
}
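
The get_vpd_params() rework above drops the hand-rolled t4_vpd_hdr in favour of the generic pci_vpd_* helpers, walking the VPD image tag by tag. A sketch of that lookup flow, assuming vpd[] was already filled by pci_read_vpd() and using "SN" as an example keyword (error handling trimmed):

        #include <linux/pci.h>
        #include <linux/errno.h>

        /* Return the offset of an information keyword ("SN" here) inside
         * the VPD-R section of @vpd, or a negative value if absent. */
        static int sketch_find_vpd_kw(const u8 *vpd, unsigned int len)
        {
                unsigned int ro_len, kw_off;
                int i;

                i = pci_vpd_find_tag(vpd, 0, len, PCI_VPD_LRDT_RO_DATA);
                if (i < 0)
                        return i;               /* no VPD-R section */

                ro_len = pci_vpd_lrdt_size(&vpd[i]);
                kw_off = i + PCI_VPD_LRDT_TAG_SIZE;
                if (ro_len + kw_off > len)
                        return -EINVAL;         /* truncated image */

                /* offset of the "SN" information field within vpd[] */
                return pci_vpd_find_info_keyword(vpd, kw_off, ro_len, "SN");
        }
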
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 940584a8a640..edcfd7ec7802 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -1239,6 +1239,7 @@ enum fw_port_type {
FW_PORT_TYPE_KR,
FW_PORT_TYPE_SFP,
FW_PORT_TYPE_BP_AP,
+ FW_PORT_TYPE_BP4_AP,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
};
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 8ea01962e045..4766b4116b41 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -60,7 +60,7 @@ enum {
* MSI-X interrupt index usage.
*/
MSIX_FW = 0, /* MSI-X index for firmware Q */
- MSIX_NIQFLINT = 1, /* MSI-X index base for Ingress Qs */
+ MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */
MSIX_EXTRAS = 1,
MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5a2e93..3c403f895750 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -280,9 +280,7 @@ static void name_msix_vecs(struct adapter *adapter)
const struct port_info *pi = netdev_priv(dev);
int qs, msi;
- for (qs = 0, msi = MSIX_NIQFLINT;
- qs < pi->nqsets;
- qs++, msi++) {
+ for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
snprintf(adapter->msix_info[msi].desc, namelen,
"%s-%d", dev->name, qs);
adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@ static int request_msix_queue_irqs(struct adapter *adapter)
/*
* Ethernet queues.
*/
- msi = MSIX_NIQFLINT;
+ msi = MSIX_IQFLINT;
for_each_ethrxq(s, rxq) {
err = request_irq(adapter->msix_info[msi].vec,
t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@ static void free_msix_queue_irqs(struct adapter *adapter)
int rxq, msi;
free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
- msi = MSIX_NIQFLINT;
+ msi = MSIX_IQFLINT;
for_each_ethrxq(s, rxq)
free_irq(adapter->msix_info[msi++].vec,
&s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@ static int setup_sge_queues(struct adapter *adapter)
* brought up at which point lots of things get nailed down
* permanently ...
*/
- msix = MSIX_NIQFLINT;
+ msix = MSIX_IQFLINT;
for_each_port(adapter, pidx) {
struct net_device *dev = adapter->port[pidx];
struct port_info *pi = netdev_priv(dev);
@@ -753,7 +751,9 @@ static int cxgb4vf_open(struct net_device *dev)
if (err)
return err;
set_bit(pi->port_id, &adapter->open_device_map);
- link_start(dev);
+ err = link_start(dev);
+ if (err)
+ return err;
netif_tx_start_all_queues(dev);
return 0;
}
@@ -814,40 +814,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
}
/*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
*/
-static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int maxaddrs)
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int offset,
+ unsigned int maxaddrs)
{
+ unsigned int index = 0;
unsigned int naddr = 0;
const struct netdev_hw_addr *ha;
- for_each_dev_addr(dev, ha) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
+ for_each_dev_addr(dev, ha)
+ if (index++ >= offset) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
return naddr;
}
/*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
*/
-static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int maxaddrs)
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int offset,
+ unsigned int maxaddrs)
{
+ unsigned int index = 0;
unsigned int naddr = 0;
const struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(ha, dev) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
+ netdev_for_each_mc_addr(ha, dev)
+ if (index++ >= offset) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
return naddr;
}
@@ -860,16 +868,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
u64 mhash = 0;
u64 uhash = 0;
bool free = true;
- u16 filt_idx[7];
+ unsigned int offset, naddr;
const u8 *addr[7];
- int ret, naddr = 0;
+ int ret;
const struct port_info *pi = netdev_priv(dev);
/* first do the secondary unicast addresses */
- naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
- if (naddr > 0) {
+ for (offset = 0; ; offset += naddr) {
+ naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+ ARRAY_SIZE(addr));
+ if (naddr == 0)
+ break;
+
ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
- naddr, addr, filt_idx, &uhash, sleep);
+ naddr, addr, NULL, &uhash, sleep);
if (ret < 0)
return ret;
@@ -877,12 +889,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
}
/* next set up the multicast addresses */
- naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
- if (naddr > 0) {
+ for (offset = 0; ; offset += naddr) {
+ naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+ ARRAY_SIZE(addr));
+ if (naddr == 0)
+ break;
+
ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
- naddr, addr, filt_idx, &mhash, sleep);
+ naddr, addr, NULL, &mhash, sleep);
if (ret < 0)
return ret;
+ free = false;
}
return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
@@ -1103,18 +1120,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
return 0;
}
-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- /*
- * XXX For now just use the default hash but we probably want to
- * XXX look at other possibilities ...
- */
- return skb_tx_hash(dev, skb);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Poll all of our receive queues. This is called outside of normal interrupt
@@ -1358,6 +1363,8 @@ struct queue_port_stats {
u64 rx_csum;
u64 vlan_ex;
u64 vlan_ins;
+ u64 lro_pkts;
+ u64 lro_merged;
};
/*
@@ -1395,6 +1402,8 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"RxCsumGood ",
"VLANextractions ",
"VLANinsertions ",
+ "GROPackets ",
+ "GROMerged ",
};
/*
@@ -1444,6 +1453,8 @@ static void collect_sge_port_stats(const struct adapter *adapter,
stats->rx_csum += rxq->stats.rx_cso;
stats->vlan_ex += rxq->stats.vlan_ex;
stats->vlan_ins += txq->vlan_ins;
+ stats->lro_pkts += rxq->stats.lro_pkts;
+ stats->lro_merged += rxq->stats.lro_merged;
}
}
@@ -1540,14 +1551,19 @@ static void cxgb4vf_get_wol(struct net_device *dev,
}
/*
+ * TCP Segmentation Offload flags which we support.
+ */
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+/*
* Set TCP Segmentation Offloading feature capabilities.
*/
static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
{
if (tso)
- dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ dev->features |= TSO_FLAGS;
else
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ dev->features &= ~TSO_FLAGS;
return 0;
}
@@ -2038,7 +2054,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
* Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
* it to our caller to tear down the directory (debugfs_root).
*/
-static void __devexit cleanup_debugfs(struct adapter *adapter)
+static void cleanup_debugfs(struct adapter *adapter)
{
BUG_ON(adapter->debugfs_root == NULL);
@@ -2056,7 +2072,7 @@ static void __devexit cleanup_debugfs(struct adapter *adapter)
* adapter parameters we're going to be using and initialize basic adapter
* hardware support.
*/
-static int adap_init0(struct adapter *adapter)
+static int __devinit adap_init0(struct adapter *adapter)
{
struct vf_resources *vfres = &adapter->params.vfres;
struct sge_params *sge_params = &adapter->params.sge;
@@ -2075,6 +2091,22 @@ static int adap_init0(struct adapter *adapter)
}
/*
+ * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+ * 2.6.31 and later we can't call pci_reset_function() in order to
+ * issue an FLR because of a self-deadlock on the device semaphore.
+ * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+ * cases where they're needed -- for instance, some versions of KVM
+ * fail to reset "Assigned Devices" when the VM reboots. Therefore we
+ * use the firmware based reset in order to reset any per function
+ * state.
+ */
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
* Grab basic operational parameters. These will predominantly have
* been set up by the Physical Function Driver or will be hard coded
* into the adapter. We just have to live with them ... Note that
@@ -2246,6 +2278,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int q10g, n10g, qidx, pidx, qs;
+ size_t iqe_size;
/*
* We should not be called till we know how many Queue Sets we can
@@ -2290,6 +2323,13 @@ static void __devinit cfg_queues(struct adapter *adapter)
s->ethqsets = qidx;
/*
+ * The Ingress Queue Entry Size for our various Response Queues needs
+ * to be big enough to accommodate the largest message we can receive
+ * from the chip/firmware, which is 64 bytes ...
+ */
+ iqe_size = 64;
+
+ /*
* Set up default Queue Set parameters ... Start off with the
* shortest interrupt holdoff timer.
*/
@@ -2297,7 +2337,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
struct sge_eth_rxq *rxq = &s->ethrxq[qs];
struct sge_eth_txq *txq = &s->ethtxq[qs];
- init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
+ init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
rxq->fl.size = 72;
txq->q.size = 1024;
}
@@ -2306,8 +2346,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
* The firmware event queue is used for link state changes and
* notifications of TX DMA completions.
*/
- init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
- L1_CACHE_BYTES);
+ init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
/*
* The forwarded interrupt queue is used when we're in MSI interrupt
@@ -2323,7 +2362,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
* any time ...
*/
init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
- L1_CACHE_BYTES);
+ iqe_size);
}
/*
@@ -2417,7 +2456,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
.ndo_get_stats = cxgb4vf_get_stats,
.ndo_set_rx_mode = cxgb4vf_set_rxmode,
.ndo_set_mac_address = cxgb4vf_set_mac_addr,
- .ndo_select_queue = cxgb4vf_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb4vf_do_ioctl,
.ndo_change_mtu = cxgb4vf_change_mtu,
@@ -2465,7 +2503,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
version_printed = 1;
}
-
/*
* Initialize generic PCI device state.
*/
@@ -2600,10 +2637,9 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
pi->xact_addr_filt = -1;
pi->rx_offload = RX_CSO;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
- netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ netdev->features = (NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_GRO);
@@ -2625,7 +2661,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->do_ioctl = cxgb4vf_do_ioctl;
netdev->change_mtu = cxgb4vf_change_mtu;
netdev->set_mac_address = cxgb4vf_set_mac_addr;
- netdev->select_queue = cxgb4vf_select_queue;
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = cxgb4vf_poll_controller;
#endif
@@ -2844,6 +2879,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x4800, 0), /* T440-dbg */
CH_DEVICE(0x4801, 0), /* T420-cr */
CH_DEVICE(0x4802, 0), /* T422-cr */
+ CH_DEVICE(0x4803, 0), /* T440-cr */
+ CH_DEVICE(0x4804, 0), /* T420-bch */
+ CH_DEVICE(0x4805, 0), /* T440-bch */
+ CH_DEVICE(0x4806, 0), /* T460-ch */
+ CH_DEVICE(0x4807, 0), /* T420-so */
+ CH_DEVICE(0x4808, 0), /* T420-cx */
+ CH_DEVICE(0x4809, 0), /* T420-bt */
+ CH_DEVICE(0x480a, 0), /* T404-bt */
{ 0, }
};
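
The reworked collect_netdev_*_list_addrs() helpers above let set_addr_filters() stream an arbitrarily long address list through a fixed seven-entry on-stack array, one batch per firmware call. The chunking pattern itself, sketched over a plain array with invented names:

        #include <linux/types.h>

        /* Copy up to @maxaddrs pointers from @list (of @total entries)
         * into @out, starting at @offset; 0 means the list is exhausted. */
        static unsigned int sketch_collect(const u8 *const *list,
                                           unsigned int total,
                                           const u8 **out,
                                           unsigned int offset,
                                           unsigned int maxaddrs)
        {
                unsigned int naddr = 0;

                while (offset + naddr < total && naddr < maxaddrs) {
                        out[naddr] = list[offset + naddr];
                        naddr++;
                }
                return naddr;
        }

        /* Caller loop, mirroring set_addr_filters():
         *
         *      for (offset = 0; ; offset += naddr) {
         *              naddr = sketch_collect(list, total, addr, offset,
         *                                     ARRAY_SIZE(addr));
         *              if (naddr == 0)
         *                      break;
         *              ... program this batch of naddr filters ...
         *      }
         */
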
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafbe..e0b3d1bc2fdf 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
*/
RX_COPY_THRES = 256,
RX_PULL_LEN = 128,
-};
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+ /*
+ * Main body length for sk_buffs used for RX Ethernet packets with
+ * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
+ * pskb_may_pull() some room.
+ */
+ RX_SKB_LEN = 512,
+};
/*
* Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
}
/**
+ * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list. Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len)
+{
+ struct sk_buff *skb;
+ struct skb_shared_info *ssi;
+
+ /*
+ * If the ingress packet is small enough, allocate an skb large enough
+ * for all of the data and copy it inline. Otherwise, allocate an skb
+ * with enough room to pull in the header and reference the rest of
+ * the data via the skb fragment list.
+ *
+ * Below we rely on RX_COPY_THRES being less than the smallest Rx
+ * buff! size, which is expected since buffers are at least
+ * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
+ * fragment.
+ */
+ if (gl->tot_len <= RX_COPY_THRES) {
+ /* small packets have only one fragment */
+ skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ ssi = skb_shinfo(skb);
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+ ssi->frags[0].size = gl->frags[0].size - pull_len;
+ if (gl->nfrags > 1)
+ memcpy(&ssi->frags[1], &gl->frags[1],
+ (gl->nfrags-1) * sizeof(skb_frag_t));
+ ssi->nr_frags = gl->nfrags;
+
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+
+ /* Get a reference for the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+ }
+
+out:
+ return skb;
+}
+
+/**
* t4vf_pktgl_free - free a packet gather list
* @gl: the gather list
*
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
struct sk_buff *skb;
struct port_info *pi;
- struct skb_shared_info *ssi;
const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
bool csum_ok = pkt->csum_calc && !pkt->err_vec;
- unsigned int len = be16_to_cpu(pkt->len);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
}
/*
- * If the ingress packet is small enough, allocate an skb large enough
- * for all of the data and copy it inline. Otherwise, allocate an skb
- * with enough room to pull in the header and reference the rest of
- * the data via the skb fragment list.
+ * Convert the Packet Gather List into an skb.
*/
- if (len <= RX_COPY_THRES) {
- /* small packets have only one fragment */
- skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
- if (!skb)
- goto nomem;
- __skb_put(skb, gl->frags[0].size);
- skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
- } else {
- skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
- if (!skb)
- goto nomem;
- __skb_put(skb, RX_PKT_PULL_LEN);
- skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
- ssi = skb_shinfo(skb);
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = (gl->frags[0].page_offset +
- RX_PKT_PULL_LEN);
- ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
- if (gl->nfrags > 1)
- memcpy(&ssi->frags[1], &gl->frags[1],
- (gl->nfrags-1) * sizeof(skb_frag_t));
- ssi->nr_frags = gl->nfrags;
- skb->len = len + PKTSHIFT;
- skb->data_len = skb->len - RX_PKT_PULL_LEN;
- skb->truesize += skb->data_len;
-
- /* Get a reference for the last page, we don't own it */
- get_page(gl->frags[gl->nfrags - 1].page);
+ skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return 0;
}
-
__skb_pull(skb, PKTSHIFT);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
@@ -1536,6 +1568,9 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
} else
skb_checksum_none_assert(skb);
+ /*
+ * Deliver the packet to the stack.
+ */
if (unlikely(pkt->vlan_ex)) {
struct vlan_group *grp = pi->vlan_grp;
@@ -1549,11 +1584,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
netif_receive_skb(skb);
return 0;
-
-nomem:
- t4vf_pktgl_free(gl);
- rxq->stats.rx_drops++;
- return 0;
}
/**
@@ -1679,6 +1709,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
}
len = RSPD_LEN(len);
}
+ gl.tot_len = len;
/*
* Gather packet fragments.
@@ -2115,7 +2146,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
/*
* Calculate the size of the hardware free list ring plus
- * status page (which the SGE will place at the end of the
+ * Status Page (which the SGE will place after the end of the
* free list ring) in Egress Queue Units.
*/
flsz = (fl->size / FL_PER_EQ_UNIT +
@@ -2212,8 +2243,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
struct port_info *pi = netdev_priv(dev);
/*
- * Calculate the size of the hardware TX Queue (including the
- * status age on the end) in units of TX Descriptors.
+ * Calculate the size of the hardware TX Queue (including the Status
+ * Page on the end of the TX Queue) in units of TX Descriptors.
*/
nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
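
t4vf_pktgl_to_skb() centralizes the copy-small/reference-large decision that the old inline code in t4vf_ethrx_handler() duplicated: packets at or below RX_COPY_THRES are copied into a linear skb, larger ones get only pull_len bytes copied and keep the rest as page fragments. The shape of that decision as a sketch (fragment attachment elided; names invented):

        #include <linux/skbuff.h>

        static struct sk_buff *sketch_gl_to_skb(const void *va,
                                                unsigned int tot_len,
                                                unsigned int copy_thres,
                                                unsigned int skb_len,
                                                unsigned int pull_len)
        {
                struct sk_buff *skb;

                if (tot_len <= copy_thres) {
                        /* small: one linear skb, everything copied inline */
                        skb = alloc_skb(tot_len, GFP_ATOMIC);
                        if (!skb)
                                return NULL;
                        __skb_put(skb, tot_len);
                        skb_copy_to_linear_data(skb, va, tot_len);
                } else {
                        /* large: copy only the headers ... */
                        skb = alloc_skb(skb_len, GFP_ATOMIC);
                        if (!skb)
                                return NULL;
                        __skb_put(skb, pull_len);
                        skb_copy_to_linear_data(skb, va, pull_len);
                        /* ... and hang the remaining pages off
                         * skb_shinfo(skb), adjusting len/data_len/truesize
                         * as the real t4vf_pktgl_to_skb() does */
                }
                return skb;
        }
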
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d86c57..a65c80aed1f2 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
int __devinit t4vf_wait_dev_ready(struct adapter *);
int __devinit t4vf_port_init(struct adapter *, int);
+int t4vf_fw_reset(struct adapter *);
int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb4..e4bec78c8e3f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -116,7 +116,7 @@ static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
- static int delay[] = {
+ static const int delay[] = {
1, 1, 3, 5, 10, 10, 20, 50, 100
};
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
}
/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware resetting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+ FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
* t4vf_query_params - query FW or device parameters
* @adapter: the adapter
* @nparams: the number of parameters
@@ -995,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned int naddr, const u8 **addr, u16 *idx,
u64 *hash, bool sleep_ok)
{
- int i, ret;
+ int offset, ret = 0;
+ unsigned int nfilters = 0;
+ unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
- struct fw_vi_mac_exact *p;
- size_t len16;
- if (naddr > ARRAY_SIZE(cmd.u.exact))
+ if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
return -EINVAL;
- len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
- u.exact[naddr]), 16);
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- (free ? FW_CMD_EXEC : 0) |
- FW_VI_MAC_CMD_VIID(viid));
- cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
- FW_CMD_LEN16(len16));
+ for (offset = 0; offset < naddr; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+ ? rem
+ : ARRAY_SIZE(cmd.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ (free ? FW_CMD_EXEC : 0) |
+ FW_VI_MAC_CMD_VIID(viid));
+ cmd.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+ FW_CMD_LEN16(len16));
+
+ for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+ p->valid_to_idx = cpu_to_be16(
+ FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+ }
- for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
- p->valid_to_idx =
- cpu_to_be16(FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
- memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
- }
- ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
- if (ret)
- return ret;
-
- for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
-
- if (idx)
- idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
- ? 0xffff
- : index);
- if (index < FW_CLS_TCAM_NUM_ENTRIES)
- ret++;
- else if (hash)
- *hash |= (1 << hash_mac_addr(addr[i]));
+ ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+ sleep_ok);
+ if (ret && ret != -ENOMEM)
+ break;
+
+ for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_GET(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (idx)
+ idx[offset+i] =
+ (index >= FW_CLS_TCAM_NUM_ENTRIES
+ ? 0xffff
+ : index);
+ if (index < FW_CLS_TCAM_NUM_ENTRIES)
+ nfilters++;
+ else if (hash)
+ *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+ }
+
+ free = false;
+ offset += fw_naddr;
+ rem -= fw_naddr;
}
+
+ /*
+ * If there were no errors or we merely ran out of room in our MAC
+ * address arena, return the number of filters actually written.
+ */
+ if (ret == 0 || ret == -ENOMEM)
+ ret = nfilters;
return ret;
}
@@ -1257,7 +1300,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
*/
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
- struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
+ const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
switch (opcode) {
@@ -1265,7 +1308,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
/*
* Link/module state change message.
*/
- const struct fw_port_cmd *port_cmd = (void *)rpl;
+ const struct fw_port_cmd *port_cmd =
+ (const struct fw_port_cmd *)rpl;
u32 word;
int action, port_id, link_ok, speed, fc, pidx;
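
Both hash updates in this patch (here and in the cxgb4 PF driver's t4_alloc_mac_filt) switch from 1 << n to 1ULL << n. That is not cosmetic: hash_mac_addr() yields a 6-bit index, so n can reach 63, and shifting a plain int by 32 or more is undefined, meaning bits 32-63 of the u64 hash bitmap were silently lost. A minimal illustration:

        #include <linux/types.h>

        static void sketch_hash_set(u64 *hash, unsigned int bit) /* 0..63 */
        {
                /* BAD:  *hash |= (1 << bit);
                 * "1" is an int, so for bit >= 32 the shift is undefined
                 * and the upper half of the bitmap is never set. */

                /* GOOD: force a 64-bit shift before the OR. */
                *hash |= (1ULL << bit);
        }
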
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 9f6aeefa06bf..2d4c4fc1d900 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1675,7 +1675,7 @@ dm9000_drv_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
- dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev));
+ dm9000_release_board(pdev, netdev_priv(ndev));
free_netdev(ndev); /* free device structure */
dev_dbg(&pdev->dev, "released and freed device\n");
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index c7e242b69a18..77d08e697b74 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -4892,11 +4892,11 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
} else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
u16 cur_agc_value;
u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
- u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
- { IGP01E1000_PHY_AGC_A,
- IGP01E1000_PHY_AGC_B,
- IGP01E1000_PHY_AGC_C,
- IGP01E1000_PHY_AGC_D
+ static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D
};
/* Read the AGC registers for all channels */
for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5071,11 +5071,11 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
{
s32 ret_val;
u16 phy_data, phy_saved_data, speed, duplex, i;
- u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
- { IGP01E1000_PHY_AGC_PARAM_A,
- IGP01E1000_PHY_AGC_PARAM_B,
- IGP01E1000_PHY_AGC_PARAM_C,
- IGP01E1000_PHY_AGC_PARAM_D
+ static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D
};
u16 min_length, max_length;
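
These lookup tables become static const, as do the intr_info and delay tables earlier in this patch; that moves them out of the function's stack frame into .rodata, so the array is no longer rebuilt at every call and the compiler rejects accidental writes. The same idea in isolation (register values invented):

        #include <linux/types.h>

        static u16 sketch_agc_reg(int channel)
        {
                /* One read-only copy in .rodata, shared by every call,
                 * instead of an array rebuilt on the stack. */
                static const u16 agc_regs[4] = {
                        0x1172, 0x1272, 0x1372, 0x1472
                };

                return agc_regs[channel & 3];
        }
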
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4686c3983fc3..340e12d2e4a9 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k6-NAPI"
+#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
- /* signal that we're down so the interrupt handler does not
- * reschedule our watchdog timer */
- set_bit(__E1000_DOWN, &adapter->flags);
/* disable receives in the hardware */
rctl = er32(RCTL);
@@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
+ /*
+ * Setting DOWN must be after irq_disable to prevent
+ * a screaming interrupt. Setting DOWN also prevents
+ * timers and tasks from rescheduling.
+ */
+ set_bit(__E1000_DOWN, &adapter->flags);
+
del_timer_sync(&adapter->tx_fifo_stall_timer);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
@@ -967,11 +971,13 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
pci_using_dac = 1;
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
} else {
- pr_err("No usable DMA config, aborting\n");
- goto err_dma;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ pr_err("No usable DMA config, aborting\n");
+ goto err_dma;
+ }
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
netdev->netdev_ops = &e1000_netdev_ops;
@@ -1425,13 +1431,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
int size;
size = sizeof(struct e1000_buffer) * txdr->count;
- txdr->buffer_info = vmalloc(size);
+ txdr->buffer_info = vzalloc(size);
if (!txdr->buffer_info) {
e_err(probe, "Unable to allocate memory for the Tx descriptor "
"ring\n");
return -ENOMEM;
}
- memset(txdr->buffer_info, 0, size);
/* round up to nearest 4K */
@@ -1621,13 +1626,12 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
int size, desc_len;
size = sizeof(struct e1000_buffer) * rxdr->count;
- rxdr->buffer_info = vmalloc(size);
+ rxdr->buffer_info = vzalloc(size);
if (!rxdr->buffer_info) {
e_err(probe, "Unable to allocate memory for the Rx descriptor "
"ring\n");
return -ENOMEM;
}
- memset(rxdr->buffer_info, 0, size);
desc_len = sizeof(struct e1000_rx_desc);
@@ -2722,7 +2726,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
break;
}
- css = skb_transport_offset(skb);
+ css = skb_checksum_start_offset(skb);
i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
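
The buffer_info conversions above are a direct vmalloc()+memset() to vzalloc() substitution, since vzalloc() returns already-zeroed, virtually contiguous memory. A before/after sketch, assuming a kernel that provides vzalloc() (2.6.37+):

        #include <linux/vmalloc.h>

        static void *sketch_alloc_table(size_t size)
        {
                /* Before:
                 *      void *p = vmalloc(size);
                 *      if (p)
                 *              memset(p, 0, size);
                 *      return p;
                 */
                return vzalloc(size);   /* one call, memory arrives zeroed */
        }
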
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 10d8d98bb797..1301eba8b57a 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -352,12 +352,13 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
}
{ /* Flow Control */
- struct e1000_opt_list fc_list[] =
- {{ E1000_FC_NONE, "Flow Control Disabled" },
- { E1000_FC_RX_PAUSE,"Flow Control Receive Only" },
- { E1000_FC_TX_PAUSE,"Flow Control Transmit Only" },
- { E1000_FC_FULL, "Flow Control Enabled" },
- { E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
+ static const struct e1000_opt_list fc_list[] = {
+ { E1000_FC_NONE, "Flow Control Disabled" },
+ { E1000_FC_RX_PAUSE, "Flow Control Receive Only" },
+ { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" },
+ { E1000_FC_FULL, "Flow Control Enabled" },
+ { E1000_FC_DEFAULT, "Flow Control Hardware Default" }
+ };
opt = (struct e1000_option) {
.type = list_option,
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 7236f1a53ba0..e57e4097ef1b 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -52,6 +52,7 @@
(ID_LED_DEF1_DEF2))
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
#define E1000_BASE1000T_STATUS 10
#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
#define E1000_RECEIVE_ERROR_COUNTER 21
@@ -74,6 +75,9 @@ static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
static s32 e1000_led_on_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
/**
* e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -107,6 +111,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
phy->type = e1000_phy_bm;
+ phy->ops.acquire = e1000_get_hw_semaphore_82574;
+ phy->ops.release = e1000_put_hw_semaphore_82574;
break;
default:
return -E1000_ERR_PHY;
@@ -200,6 +206,17 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
break;
}
+ /* Function Pointers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+ nvm->ops.release = e1000_put_hw_semaphore_82574;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
@@ -542,6 +559,94 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
ew32(SWSM, swsm);
}
+/**
+ * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+ s32 ret_val = 0;
+ s32 i = 0;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ do {
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ break;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+ msleep(2);
+ i++;
+ } while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+ if (i == MDIO_OWNERSHIP_TIMEOUT) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82573(hw);
+ e_dbg("Driver can't access the PHY\n");
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ mutex_lock(&swflag_mutex);
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ if (ret_val)
+ mutex_unlock(&swflag_mutex);
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ e1000_put_hw_semaphore_82573(hw);
+ mutex_unlock(&swflag_mutex);
+}
/**
* e1000_acquire_nvm_82571 - Request for access to the EEPROM
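
The new 82574/82583 acquire/release pair above wraps the EXTCNF_CTRL MDIO-ownership handshake in a driver-global mutex, so PHY and NVM users serialize in software before touching the hardware semaphore. The pattern, reduced to a sketch with the hardware handshake stubbed out:

        #include <linux/mutex.h>

        static DEFINE_MUTEX(sketch_swflag_mutex);

        static int sketch_hw_handshake(void) /* stands in for the 82573 helper */
        {
                return 0;                    /* pretend ownership was granted */
        }

        static int sketch_acquire(void)
        {
                int ret;

                mutex_lock(&sketch_swflag_mutex);
                ret = sketch_hw_handshake();
                if (ret)                /* never hold the mutex on failure */
                        mutex_unlock(&sketch_swflag_mutex);
                return ret;
        }

        static void sketch_release(void)
        {
                /* the real code drops hardware ownership first,
                 * then lets the next software user in */
                mutex_unlock(&sketch_swflag_mutex);
        }
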
@@ -562,8 +667,6 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82573:
- case e1000_82574:
- case e1000_82583:
break;
default:
ret_val = e1000e_acquire_nvm(hw);
@@ -853,9 +956,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
- u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
+ u32 ctrl, ctrl_ext, icr;
- s32 ret_val;
+ s32 ret_val = 0;
- u16 i = 0;
/*
* Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -880,33 +982,33 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_82573:
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ break;
case e1000_82574:
case e1000_82583:
- extcnf_ctrl = er32(EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
- do {
- ew32(EXTCNF_CTRL, extcnf_ctrl);
- extcnf_ctrl = er32(EXTCNF_CTRL);
-
- if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
- break;
-
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
- msleep(2);
- i++;
- } while (i < MDIO_OWNERSHIP_TIMEOUT);
+ ret_val = e1000_get_hw_semaphore_82574(hw);
break;
default:
break;
}
+ if (ret_val)
+ e_dbg("Cannot acquire MDIO ownership\n");
ctrl = er32(CTRL);
e_dbg("Issuing a global reset to MAC\n");
ew32(CTRL, ctrl | E1000_CTRL_RST);
+ /* Must release MDIO ownership and mutex after MAC reset. */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ e1000_put_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
if (hw->nvm.type == e1000_nvm_flash_hw) {
udelay(10);
ctrl_ext = er32(CTRL_EXT);
@@ -1402,6 +1504,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
u32 rxcw;
u32 ctrl;
u32 status;
+ u32 txcw;
+ u32 i;
s32 ret_val = 0;
ctrl = er32(CTRL);
@@ -1422,8 +1526,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("AN_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
}
- break;
+ break;
case e1000_serdes_link_forced_up:
/*
@@ -1431,8 +1537,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
+ * If the partner code word is null, stop forcing
+ * and restart auto negotiation.
*/
- if (rxcw & E1000_RXCW_C) {
+ if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -1440,6 +1548,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("FORCED_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
}
break;
@@ -1495,6 +1605,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
e_dbg("DOWN -> AN_PROG\n");
break;
}
@@ -1505,16 +1616,32 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e_dbg("ANYSTATE -> DOWN\n");
} else {
/*
- * We have sync, and can tolerate one invalid (IV)
- * codeword before declaring link down, so reread
- * to look again.
+ * Check several times, if Sync and Config
+ * both are consistently 1 then simply ignore
+ * the Invalid bit and restart Autoneg
*/
- udelay(10);
- rxcw = er32(RXCW);
- if (rxcw & E1000_RXCW_IV) {
- mac->serdes_link_state = e1000_serdes_link_down;
+ for (i = 0; i < AN_RETRY_COUNT; i++) {
+ udelay(10);
+ rxcw = er32(RXCW);
+ if ((rxcw & E1000_RXCW_IV) &&
+ !((rxcw & E1000_RXCW_SYNCH) &&
+ (rxcw & E1000_RXCW_C))) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ e_dbg("ANYSTATE -> DOWN\n");
+ break;
+ }
+ }
+
+ if (i == AN_RETRY_COUNT) {
+ txcw = er32(TXCW);
+ txcw |= E1000_TXCW_ANE;
+ ew32(TXCW, txcw);
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
- e_dbg("ANYSTATE -> DOWN\n");
+ e_dbg("ANYSTATE -> AN_PROG\n");
}
}
}
@@ -1897,7 +2024,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_CHECK_PHY_HANG,
- .pba = 36,
+ .pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
@@ -1914,7 +2041,7 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 36,
+ .pba = 32,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
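
The serdes rework above replaces "tolerate one invalid codeword" with a bounded re-sample loop: RXCW is re-read up to AN_RETRY_COUNT times, breaking early on a genuinely bad word, and the counter reaching the limit (i == AN_RETRY_COUNT) means every sample looked sane, so autonegotiation is simply restarted. The loop-ran-to-completion idiom it relies on, in isolation:

        #include <linux/delay.h>
        #include <linux/types.h>

        /* Re-sample up to @n times, breaking early on the first bad
         * reading; i == n afterwards means every sample passed. */
        static bool sketch_consistently_good(int n, bool (*bad)(void))
        {
                int i;

                for (i = 0; i < n; i++) {
                        udelay(10);
                        if (bad())
                                break;
                }
                return i == n;
        }
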
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index d3f7a9c3f973..7245dc2e0b7c 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -488,6 +488,9 @@
#define E1000_BLK_PHY_RESET 12
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
/* Loop limit on how long we wait for auto-negotiation to complete */
#define FIBER_LINK_UP_LIMIT 50
@@ -516,6 +519,7 @@
#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
#define E1000_RXCW_C 0x20000000 /* Receive config */
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
@@ -649,13 +653,16 @@
/* Mask bits for fields in Word 0x03 of the EEPROM */
#define NVM_COMPAT_LOM 0x0800
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH 11
+
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
-
+#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* NVM Commands - SPI */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index fdc67fead4ea..2c913b8e9116 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -482,6 +482,7 @@ extern const char e1000e_driver_version[];
extern void e1000e_check_options(struct e1000_adapter *adapter);
extern void e1000e_set_ethtool_ops(struct net_device *netdev);
+extern void e1000e_led_blink_task(struct work_struct *work);
extern int e1000e_up(struct e1000_adapter *adapter);
extern void e1000e_down(struct e1000_adapter *adapter);
@@ -513,7 +514,8 @@ extern struct e1000_info e1000_pch_info;
extern struct e1000_info e1000_pch2_info;
extern struct e1000_info e1000_es2_info;
-extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
+extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
extern s32 e1000e_commit_phy(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 8984d165a39b..39349d6dcd0b 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -194,20 +194,6 @@ static int e1000_get_settings(struct net_device *netdev,
return 0;
}
-static u32 e1000_get_link(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- /*
- * Avoid touching hardware registers when possible, otherwise
- * link negotiation can get messed up when user-level scripts
- * are rapidly polling the driver to see if link is up.
- */
- return netif_running(netdev) ? netif_carrier_ok(netdev) :
- !!(er32(STATUS) & E1000_STATUS_LU);
-}
-
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
struct e1000_mac_info *mac = &adapter->hw.mac;
@@ -1263,6 +1249,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
u32 ctrl_reg = 0;
u32 stat_reg = 0;
u16 phy_reg = 0;
+ s32 ret_val = 0;
hw->mac.autoneg = 0;
@@ -1322,7 +1309,13 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
case e1000_phy_82577:
case e1000_phy_82578:
/* Workaround: K1 must be disabled for stable 1Gbps operation */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_err("Cannot setup 1Gbps loopback.\n");
+ return ret_val;
+ }
e1000_configure_k1_ich8lan(hw, false);
+ hw->phy.ops.release(hw);
break;
case e1000_phy_82579:
/* Disable PHY energy detect power down */
@@ -1860,7 +1853,7 @@ static int e1000_set_wol(struct net_device *netdev,
/* bit defines for adapter->led_status */
#define E1000_LED_ON 0
-static void e1000e_led_blink_task(struct work_struct *work)
+void e1000e_led_blink_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, led_blink_task);
@@ -1892,7 +1885,6 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
(hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_82583) ||
(hw->mac.type == e1000_82574)) {
- INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
if (!adapter->blink_timer.function) {
init_timer(&adapter->blink_timer);
adapter->blink_timer.function =
@@ -1986,6 +1978,9 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
p = (char *) adapter +
e1000_gstrings_stats[i].stat_offset;
break;
+ default:
+ data[i] = 0;
+ continue;
}
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
@@ -2024,7 +2019,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_msglevel = e1000_get_msglevel,
.set_msglevel = e1000_set_msglevel,
.nway_reset = e1000_nway_reset,
- .get_link = e1000_get_link,
+ .get_link = ethtool_op_get_link,
.get_eeprom_len = e1000_get_eeprom_len,
.get_eeprom = e1000_get_eeprom,
.set_eeprom = e1000_set_eeprom,
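
Switching .get_link to ethtool_op_get_link is safe here because the driver keeps the carrier flag current from its watchdog; the generic helper amounts to no more than (paraphrasing its upstream definition):

        u32 ethtool_op_get_link(struct net_device *dev)
        {
                return netif_carrier_ok(dev) ? 1 : 0;
        }
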
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index e3374d9a2472..5080372b0fd7 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,12 +338,17 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
}
phy->id = e1000_phy_unknown;
- ret_val = e1000e_get_phy_id(hw);
- if (ret_val)
- goto out;
- if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+ switch (hw->mac.type) {
+ default:
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+ break;
+ /* fall-through */
+ case e1000_pch2lan:
/*
- * In case the PHY needs to be in mdio slow mode (eg. 82577),
+ * In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
ret_val = e1000_set_mdio_slow_mode_hv(hw);
@@ -352,6 +357,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
goto out;
+ break;
}
phy->type = e1000e_get_phy_type_from_id(phy->id);
@@ -3591,7 +3597,7 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
ew32(PHY_CTRL, phy_ctrl);
if (hw->mac.type >= e1000_pchlan) {
- e1000_oem_bits_config_ich8lan(hw, true);
+ e1000_oem_bits_config_ich8lan(hw, false);
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return;
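The reworked PHY-id probe is a try-then-retry ladder: most MAC types attempt a normal-speed read and only drop into MDIO slow mode when the id reads back as 0 or all ones, while 82579 (e1000_pch2lan) falls through and always retries in slow mode. A condensed sketch of the control flow, with helper names simplified from the hunk:

    switch (hw->mac.type) {
    default:
        if (phy_id_valid(hw))        /* sketch: normal-speed attempt */
            break;
        /* fall-through: id came back 0 or all ones */
    case e1000_pch2lan:
        set_mdio_slow_mode(hw);      /* stands in for e1000_set_mdio_slow_mode_hv() */
        get_phy_id(hw);              /* second attempt */
        break;
    }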
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 0fd4eb5ac5fb..8377523c054a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -2139,6 +2139,119 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
}
/**
+ * e1000_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ if (pba_num == NULL) {
+ e_dbg("PBA string buffer was null\n");
+ ret_val = E1000_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ /*
+ * If nvm_data is not the pointer guard, the PBA is stored in the
+ * legacy format: pba_ptr is then actually our second data word for
+ * the PBA number, and the pair can be decoded into an ASCII string.
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ e_dbg("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ e_dbg("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ goto out;
+ }
+
+ ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ e_dbg("NVM PBA number section invalid length\n");
+ ret_val = E1000_ERR_NVM_PBA_SECTION;
+ goto out;
+ }
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ e_dbg("PBA string buffer too small\n");
+ ret_val = E1000_ERR_NO_SPACE;
+ goto out;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_read_mac_addr_generic - Read device MAC address
* @hw: pointer to the HW structure
*
@@ -2579,25 +2692,3 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
out:
return ret_val;
}
-
-s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
-{
- s32 ret_val;
- u16 nvm_data;
-
- ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- *pba_num = (u32)(nvm_data << 16);
-
- ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- *pba_num |= nvm_data;
-
- return 0;
-}
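The removed u32 reader could only hand back the two raw NVM words; the string variant above also copes with the newer pointer-guard layout. Its legacy branch packs ten hex nibbles plus a dash into the 11-byte "XXXXXX-XXX" form, and index 7 is deliberately written as numeric 0 so the hex-char pass turns it into '0'. A standalone user-space re-creation of that decode (the input words are invented for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned short nvm_data = 0x1234, pba_ptr = 0x56A9;
        char pba_num[11];
        int offset;

        pba_num[0] = (nvm_data >> 12) & 0xF;
        pba_num[1] = (nvm_data >> 8) & 0xF;
        pba_num[2] = (nvm_data >> 4) & 0xF;
        pba_num[3] = nvm_data & 0xF;
        pba_num[4] = (pba_ptr >> 12) & 0xF;
        pba_num[5] = (pba_ptr >> 8) & 0xF;
        pba_num[6] = '-';
        pba_num[7] = 0;                 /* becomes '0' below */
        pba_num[8] = (pba_ptr >> 4) & 0xF;
        pba_num[9] = pba_ptr & 0xF;
        pba_num[10] = '\0';

        /* same nibble-to-hex-char pass as the driver */
        for (offset = 0; offset < 10; offset++) {
            if (pba_num[offset] < 0xA)
                pba_num[offset] += '0';
            else if (pba_num[offset] < 0x10)
                pba_num[offset] += 'A' - 0xA;
        }

        printf("%s\n", pba_num);        /* prints "123456-0A9" */
        return 0;
    }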
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c4ca1629f532..a45dafdf343a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -54,7 +54,7 @@
#define DRV_EXTRAVERSION "-k2"
-#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
+#define DRV_VERSION "1.2.20" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -2059,10 +2059,9 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
int err = -ENOMEM, size;
size = sizeof(struct e1000_buffer) * tx_ring->count;
- tx_ring->buffer_info = vmalloc(size);
+ tx_ring->buffer_info = vzalloc(size);
if (!tx_ring->buffer_info)
goto err;
- memset(tx_ring->buffer_info, 0, size);
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
@@ -2095,10 +2094,9 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
int i, size, desc_len, err = -ENOMEM;
size = sizeof(struct e1000_buffer) * rx_ring->count;
- rx_ring->buffer_info = vmalloc(size);
+ rx_ring->buffer_info = vzalloc(size);
if (!rx_ring->buffer_info)
goto err;
- memset(rx_ring->buffer_info, 0, size);
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
@@ -2132,7 +2130,7 @@ err_pages:
}
err:
vfree(rx_ring->buffer_info);
- e_err("Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the receive descriptor ring\n");
return err;
}
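vzalloc() appeared in the kernel shortly before this series as vmalloc() plus guaranteed zeroing, so each vmalloc()+memset() pair collapses into one call with no behavioral change. The user-space analogue of the same simplification:

    buf = calloc(1, size);  /* instead of: buf = malloc(size); memset(buf, 0, size); */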
@@ -4475,7 +4473,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
break;
}
- css = skb_transport_offset(skb);
+ css = skb_checksum_start_offset(skb);
i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
@@ -4595,7 +4593,7 @@ dma_error:
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(adapter, buffer_info);;
+ e1000_put_txbuf(adapter, buffer_info);
}
return 0;
@@ -4631,7 +4629,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
i = tx_ring->next_to_use;
- while (count--) {
+ do {
buffer_info = &tx_ring->buffer_info[i];
tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4642,7 +4640,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
i++;
if (i == tx_ring->count)
i = 0;
- }
+ } while (--count > 0);
tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
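The loop conversion is subtle: while (count--) and do { ... } while (--count > 0) behave identically for count >= 1, which this transmit path guarantees, but the do/while form makes it explicit that the body runs at least once, so tx_desc provably points at the last written descriptor when the command bits are OR-ed in on the next line. A compilable miniature of the index walk:

    #include <stdio.h>

    int main(void)
    {
        int count = 3, i = 6, ring_size = 8, last = -1;

        do {
            last = i;   /* stands in for tx_desc = E1000_TX_DESC(*tx_ring, i) */
            i++;
            if (i == ring_size)
                i = 0;
        } while (--count > 0);

        /* prints "next_to_use=1 last=0": the walk wrapped the ring */
        printf("next_to_use=%d last=%d\n", i, last);
        return 0;
    }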
@@ -5465,6 +5463,36 @@ static void e1000_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
+
+static irqreturn_t e1000_intr_msix(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int vector, msix_irq;
+
+ if (adapter->msix_entries) {
+ vector = 0;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_rx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_tx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_msix_other(msix_irq, netdev);
+ enable_irq(msix_irq);
+ }
+
+ return IRQ_HANDLED;
+}
+
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
@@ -5474,10 +5502,21 @@ static void e1000_netpoll(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- disable_irq(adapter->pdev->irq);
- e1000_intr(adapter->pdev->irq, netdev);
-
- enable_irq(adapter->pdev->irq);
+ switch (adapter->int_mode) {
+ case E1000E_INT_MODE_MSIX:
+ e1000_intr_msix(adapter->pdev->irq, netdev);
+ break;
+ case E1000E_INT_MODE_MSI:
+ disable_irq(adapter->pdev->irq);
+ e1000_intr_msi(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ default: /* E1000E_INT_MODE_LEGACY */
+ disable_irq(adapter->pdev->irq);
+ e1000_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ }
}
#endif
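The three unrolled vector calls above could equally be written as a table walk; a hedged sketch (the handler table is hypothetical, the disable/enable bracketing around each call is the point):

    static irqreturn_t e1000_intr_msix(int irq, void *data)
    {
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev_priv(netdev);
        /* hypothetical table; order matches the vectors requested at setup */
        irqreturn_t (*const handlers[])(int, void *) = {
            e1000_intr_msix_rx, e1000_intr_msix_tx, e1000_msix_other,
        };
        int v;

        if (adapter->msix_entries) {
            for (v = 0; v < 3; v++) {
                int msix_irq = adapter->msix_entries[v].vector;

                disable_irq(msix_irq);  /* keep the real handler out */
                handlers[v](msix_irq, netdev);
                enable_irq(msix_irq);
            }
        }
        return IRQ_HANDLED;
    }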
@@ -5587,7 +5626,8 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- u32 pba_num;
+ u32 ret_val;
+ u8 pba_str[E1000_PBANUM_LENGTH];
/* print bus type/speed/width info */
e_info("(PCI Express:2.5GB/s:%s) %pM\n",
@@ -5598,9 +5638,12 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
netdev->dev_addr);
e_info("Intel(R) PRO/%s Network Connection\n",
(hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
- e1000e_read_pba_num(hw, &pba_num);
- e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
- hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
+ ret_val = e1000_read_pba_string_generic(hw, pba_str,
+ E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strcpy(pba_str, "Unknown");
+ e_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, pba_str);
}
static void e1000_eeprom_checks(struct e1000_adapter *adapter)
@@ -5864,6 +5907,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
+ INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
/* Initialize link parameters. User can change them with ethtool */
adapter->hw.mac.autoneg = 1;
@@ -5984,8 +6028,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
bool down = test_bit(__E1000_DOWN, &adapter->state);
/*
- * flush_scheduled work may reschedule our watchdog task, so
- * explicitly disable watchdog tasks from being rescheduled
+ * The timers may reschedule themselves, so explicitly prevent
+ * that before cancelling the work items below.
*/
if (!down)
set_bit(__E1000_DOWN, &adapter->state);
@@ -5996,8 +6040,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
+ cancel_work_sync(&adapter->led_blink_task);
cancel_work_sync(&adapter->print_hang_task);
- flush_scheduled_work();
if (!(netdev->flags & IFF_UP))
e1000_power_down_phy(adapter);
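Swapping flush_scheduled_work() for per-item cancel_work_sync() calls is a pattern that recurs in the ehea hunks further down: flushing the shared workqueue waits on everyone else's work and can deadlock if the caller holds a lock that some queued work also wants, whereas cancelling each known item is bounded and explicit. In miniature, using the work items named above:

    /* teardown sketch: cancel exactly what this driver queued */
    cancel_work_sync(&adapter->reset_task);
    cancel_work_sync(&adapter->watchdog_task);
    cancel_work_sync(&adapter->led_blink_task);
    /* no flush_scheduled_work(): none of our work can still be pending */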
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 3d36911f77f3..a9612b0e4bca 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -421,7 +421,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
- .err = "defaulting to enabled",
+ .err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 3d3dc0c82355..95da38693b77 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -226,6 +226,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
}
*data = (u16) mdic;
+ /*
+ * Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
return 0;
}
@@ -279,6 +286,13 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
return -E1000_ERR_PHY;
}
+ /*
+ * Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
return 0;
}
@@ -1840,11 +1854,12 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
u16 phy_data, i, agc_value = 0;
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
- u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
- {IGP02E1000_PHY_AGC_A,
- IGP02E1000_PHY_AGC_B,
- IGP02E1000_PHY_AGC_C,
- IGP02E1000_PHY_AGC_D};
+ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
+ };
/* Read the AGC registers for all channels */
for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
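Making the register table static const (a conversion repeated in e2100.c, eepro.c and eexpress.c just below) turns a per-call stack construction into read-only data that is emitted once. A minimal illustration with placeholder register names:

    /* before: the array is rebuilt on the stack at every call */
    u16 agc_regs[] = { REG_A, REG_B, REG_C, REG_D };

    /* after: placed in .rodata once and shared by every call */
    static const u16 agc_regs[] = { REG_A, REG_B, REG_C, REG_D };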
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index 06e72fbef862..94ec973b2bdc 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -216,7 +216,7 @@ static int __init e21_probe1(struct net_device *dev, int ioaddr)
printk(" %02X", station_addr[i]);
if (dev->irq < 2) {
- int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4};
+ static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4};
for (i = 0; i < ARRAY_SIZE(irqlist); i++)
if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
dev->irq = irqlist[i];
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 7c826319ee5a..4fa8d2a4aef3 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -302,7 +302,7 @@ struct eepro_local {
#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
#define ee_id_eepro10p1 0x31
-#define TX_TIMEOUT 40
+#define TX_TIMEOUT ((4*HZ)/10)
/* Index to functions, as function prototypes. */
@@ -891,12 +891,13 @@ err:
there is non-reboot way to recover if something goes wrong.
*/
-static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
-static char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
+static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
+static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
static int eepro_grab_irq(struct net_device *dev)
{
- int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
- int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;
+ static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
+ const int *irqp = irqlist;
+ int temp_reg, ioaddr = dev->base_addr;
eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 12c37d264108..48ee51bb9e50 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1103,7 +1103,7 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i];
{
- static char irqmap[]={0, 9, 3, 4, 5, 10, 11, 0};
+ static const char irqmap[] = { 0, 9, 3, 4, 5, 10, 11, 0 };
unsigned short setupval = eexp_hw_readeeprom(ioaddr,0);
/* Use the IRQ from EEPROM if none was given */
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 8e745e74828d..a724a2d14506 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -130,19 +130,6 @@
/* utility functions */
-#define ehea_info(fmt, args...) \
- printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)
-
-#define ehea_error(fmt, args...) \
- printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)
-
-#ifdef DEBUG
-#define ehea_debug(fmt, args...) \
- printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
-#else
-#define ehea_debug(fmt, args...) do {} while (0)
-#endif
-
void ehea_dump(void *adr, int len, char *msg);
#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
@@ -515,6 +502,4 @@ void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
-extern struct work_struct ehea_rereg_mr_task;
-
#endif /* __EHEA_H__ */
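The deleted ehea_info/ehea_error/ehea_debug wrappers hand-rolled a "DRV_NAME: " prefix; the files below get the same effect from the standard mechanism, because defining pr_fmt() before the includes makes every pr_err()/pr_info()/pr_debug() expansion carry the module-name prefix. A minimal sketch:

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>

        pr_err("no mem for cb0\n");     /* logs "ehea: no mem for cb0" */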
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 75b099ce49c9..afebf2075779 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -26,6 +26,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ehea.h"
#include "ehea_phyp.h"
@@ -118,10 +120,10 @@ doit:
ret = ehea_set_portspeed(port, sp);
if (!ret)
- ehea_info("%s: Port speed successfully set: %dMbps "
- "%s Duplex",
- port->netdev->name, port->port_speed,
- port->full_duplex == 1 ? "Full" : "Half");
+ netdev_info(dev,
+ "Port speed successfully set: %dMbps %s Duplex\n",
+ port->port_speed,
+ port->full_duplex == 1 ? "Full" : "Half");
out:
return ret;
}
@@ -134,10 +136,10 @@ static int ehea_nway_reset(struct net_device *dev)
ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
if (!ret)
- ehea_info("%s: Port speed successfully set: %dMbps "
- "%s Duplex",
- port->netdev->name, port->port_speed,
- port->full_duplex == 1 ? "Full" : "Half");
+ netdev_info(port->netdev,
+ "Port speed successfully set: %dMbps %s Duplex\n",
+ port->port_speed,
+ port->full_duplex == 1 ? "Full" : "Half");
return ret;
}
@@ -261,6 +263,13 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
}
+static int ehea_set_flags(struct net_device *dev, u32 data)
+{
+ return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
+ | ETH_FLAG_TXVLAN
+ | ETH_FLAG_RXVLAN);
+}
+
const struct ethtool_ops ehea_ethtool_ops = {
.get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
@@ -273,6 +282,8 @@ const struct ethtool_ops ehea_ethtool_ops = {
.get_ethtool_stats = ehea_get_ethtool_stats,
.get_rx_csum = ehea_get_rx_csum,
.set_settings = ehea_set_settings,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ehea_set_flags,
.nway_reset = ehea_nway_reset, /* Restart autonegotiation */
};
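The wrapper exists because ethtool_op_set_flags() takes the mask of flags this driver supports and, in this kernel, rejects a request that touches any bit outside it; that makes LRO and both VLAN offload directions toggleable from userspace with no per-flag driver code. The helper's contract, paraphrased rather than copied:

    /* paraphrase of the generic helper's contract, not its exact body */
    if (data & ~supported)
        return -EINVAL;     /* userspace asked for a flag the driver lacks */
    /* otherwise mirror the requested feature bits into dev->features */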
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 182b2a7be8dc..1032b5bbe238 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -26,6 +26,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -101,7 +103,6 @@ MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
-struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
@@ -136,8 +137,8 @@ void ehea_dump(void *adr, int len, char *msg)
int x;
unsigned char *deb = adr;
for (x = 0; x < len; x += 16) {
- printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
- deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
+ pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
+ msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
deb += 16;
}
}
@@ -337,7 +338,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
cb2 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb2) {
- ehea_error("no mem for cb2");
+ netdev_err(dev, "no mem for cb2\n");
goto out;
}
@@ -345,7 +346,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
port->logical_port_id,
H_PORT_CB2, H_PORT_CB2_ALL, cb2);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_port failed");
+ netdev_err(dev, "query_ehea_port failed\n");
goto out_herr;
}
@@ -400,6 +401,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
skb_arr_rq1[index] = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
+ netdev_info(dev, "Unable to allocate enough skb in the array\n");
pr->rq1_skba.os_skbs = fill_wqes - i;
break;
}
@@ -422,13 +424,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
struct net_device *dev = pr->port->netdev;
int i;
- for (i = 0; i < pr->rq1_skba.len; i++) {
+ if (nr_rq1a > pr->rq1_skba.len) {
+ netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
+ return;
+ }
+
+ for (i = 0; i < nr_rq1a; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i])
+ if (!skb_arr_rq1[i]) {
+ netdev_info(dev, "Not enough memory to allocate skb array\n");
break;
+ }
}
/* Ring doorbell */
- ehea_update_rq1a(pr->qp, nr_rq1a);
+ ehea_update_rq1a(pr->qp, i);
}
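The doorbell change is the substantive fix in this hunk: the fill loop can now break early when netdev_alloc_skb() fails, and ringing ehea_update_rq1a() with the requested nr_rq1a would advertise receive WQEs whose skbs were never allocated. Passing the loop counter reports only what was actually filled:

    for (i = 0; i < nr_rq1a; i++) {
        skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
        if (!skb_arr_rq1[i])
            break;                  /* allocation failed: stop filling */
    }
    ehea_update_rq1a(pr->qp, i);    /* i == slots actually filled */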
static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -461,8 +470,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
if (!skb) {
q_skba->os_skbs = fill_wqes - i;
if (q_skba->os_skbs == q_skba->len - 2) {
- ehea_info("%s: rq%i ran dry - no mem for skb",
- pr->port->netdev->name, rq_nr);
+ netdev_info(pr->port->netdev,
+ "rq%i ran dry - no mem for skb\n",
+ rq_nr);
ret = -ENOMEM;
}
break;
@@ -627,8 +637,8 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
if (netif_msg_rx_err(pr->port)) {
- ehea_error("Critical receive error for QP %d. "
- "Resetting port.", pr->qp->init_attr.qp_nr);
+ pr_err("Critical receive error for QP %d. Resetting port.\n",
+ pr->qp->init_attr.qp_nr);
ehea_dump(cqe, sizeof(*cqe), "CQE");
}
ehea_schedule_port_reset(pr->port);
@@ -675,7 +685,7 @@ static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
pr->port->vgrp);
- if (use_lro) {
+ if (skb->dev->features & NETIF_F_LRO) {
if (vlan_extracted)
lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
pr->port->vgrp,
@@ -730,13 +740,15 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb_arr_rq1_len,
wqe_index);
if (unlikely(!skb)) {
- if (netif_msg_rx_err(port))
- ehea_error("LL rq1: skb=NULL");
+ netif_info(port, rx_err, dev,
+ "LL rq1: skb=NULL\n");
skb = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
- if (!skb)
+ if (!skb) {
+ netdev_err(dev, "Not enough memory to allocate skb\n");
break;
+ }
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
@@ -746,8 +758,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = get_skb_by_index(skb_arr_rq2,
skb_arr_rq2_len, cqe);
if (unlikely(!skb)) {
- if (netif_msg_rx_err(port))
- ehea_error("rq2: skb=NULL");
+ netif_err(port, rx_err, dev,
+ "rq2: skb=NULL\n");
break;
}
ehea_fill_skb(dev, skb, cqe);
@@ -757,8 +769,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = get_skb_by_index(skb_arr_rq3,
skb_arr_rq3_len, cqe);
if (unlikely(!skb)) {
- if (netif_msg_rx_err(port))
- ehea_error("rq3: skb=NULL");
+ netif_err(port, rx_err, dev,
+ "rq3: skb=NULL\n");
break;
}
ehea_fill_skb(dev, skb, cqe);
@@ -777,7 +789,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
}
cqe = ehea_poll_rq1(qp, &wqe_index);
}
- if (use_lro)
+ if (dev->features & NETIF_F_LRO)
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
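Testing dev->features & NETIF_F_LRO instead of the module-wide use_lro parameter makes LRO a per-device property; the parameter survives only to seed the initial feature bit in ehea_setup_single_port() near the end of this file, after which the new ethtool set_flags hook can flip it at runtime:

    /* seeding at port setup (see the hunk near the end of this file) */
    if (use_lro)
        dev->features |= NETIF_F_LRO;

    /* hot path: honour whatever the per-device bit currently says */
    if (dev->features & NETIF_F_LRO)
        lro_flush_all(&pr->lro_mgr);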
@@ -830,7 +842,7 @@ static void check_sqs(struct ehea_port *port)
msecs_to_jiffies(100));
if (!ret) {
- ehea_error("HW/SW queues out of sync");
+ pr_err("HW/SW queues out of sync\n");
ehea_schedule_port_reset(pr->port);
return;
}
@@ -863,14 +875,14 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
}
if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
- ehea_error("Bad send completion status=0x%04X",
- cqe->status);
+ pr_err("Bad send completion status=0x%04X\n",
+ cqe->status);
if (netif_msg_tx_err(pr->port))
ehea_dump(cqe, sizeof(*cqe), "Send CQE");
if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
- ehea_error("Resetting port");
+ pr_err("Resetting port\n");
ehea_schedule_port_reset(pr->port);
break;
}
@@ -988,8 +1000,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
while (eqe) {
qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
- ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
- eqe->entry, qp_token);
+ pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
+ eqe->entry, qp_token);
qp = port->port_res[qp_token].qp;
@@ -1007,7 +1019,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
}
if (reset_port) {
- ehea_error("Resetting port");
+ pr_err("Resetting port\n");
ehea_schedule_port_reset(port);
}
@@ -1035,7 +1047,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
/* may be called via ehea_neq_tasklet() */
cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
if (!cb0) {
- ehea_error("no mem for cb0");
+ pr_err("no mem for cb0\n");
ret = -ENOMEM;
goto out;
}
@@ -1127,7 +1139,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
cb4 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb4) {
- ehea_error("no mem for cb4");
+ pr_err("no mem for cb4\n");
ret = -ENOMEM;
goto out;
}
@@ -1178,16 +1190,16 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
break;
}
} else {
- ehea_error("Failed sensing port speed");
+ pr_err("Failed sensing port speed\n");
ret = -EIO;
}
} else {
if (hret == H_AUTHORITY) {
- ehea_info("Hypervisor denied setting port speed");
+ pr_info("Hypervisor denied setting port speed\n");
ret = -EPERM;
} else {
ret = -EIO;
- ehea_error("Failed setting port speed");
+ pr_err("Failed setting port speed\n");
}
}
if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
@@ -1204,80 +1216,78 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
u8 ec;
u8 portnum;
struct ehea_port *port;
+ struct net_device *dev;
ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
port = ehea_get_port(adapter, portnum);
+ dev = port ? port->netdev : NULL;
switch (ec) {
case EHEA_EC_PORTSTATE_CHG: /* port state change */
if (!port) {
- ehea_error("unknown portnum %x", portnum);
+ netdev_err(dev, "unknown portnum %x\n", portnum);
break;
}
if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
- if (!netif_carrier_ok(port->netdev)) {
+ if (!netif_carrier_ok(dev)) {
ret = ehea_sense_port_attr(port);
if (ret) {
- ehea_error("failed resensing port "
- "attributes");
+ netdev_err(dev, "failed resensing port attributes\n");
break;
}
- if (netif_msg_link(port))
- ehea_info("%s: Logical port up: %dMbps "
- "%s Duplex",
- port->netdev->name,
- port->port_speed,
- port->full_duplex ==
- 1 ? "Full" : "Half");
+ netif_info(port, link, dev,
+ "Logical port up: %dMbps %s Duplex\n",
+ port->port_speed,
+ port->full_duplex == 1 ?
+ "Full" : "Half");
- netif_carrier_on(port->netdev);
- netif_wake_queue(port->netdev);
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
}
} else
- if (netif_carrier_ok(port->netdev)) {
- if (netif_msg_link(port))
- ehea_info("%s: Logical port down",
- port->netdev->name);
- netif_carrier_off(port->netdev);
- netif_stop_queue(port->netdev);
+ if (netif_carrier_ok(dev)) {
+ netif_info(port, link, dev,
+ "Logical port down\n");
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
}
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
port->phy_link = EHEA_PHY_LINK_UP;
- if (netif_msg_link(port))
- ehea_info("%s: Physical port up",
- port->netdev->name);
+ netif_info(port, link, dev,
+ "Physical port up\n");
if (prop_carrier_state)
- netif_carrier_on(port->netdev);
+ netif_carrier_on(dev);
} else {
port->phy_link = EHEA_PHY_LINK_DOWN;
- if (netif_msg_link(port))
- ehea_info("%s: Physical port down",
- port->netdev->name);
+ netif_info(port, link, dev,
+ "Physical port down\n");
if (prop_carrier_state)
- netif_carrier_off(port->netdev);
+ netif_carrier_off(dev);
}
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
- ehea_info("External switch port is primary port");
+ netdev_info(dev,
+ "External switch port is primary port\n");
else
- ehea_info("External switch port is backup port");
+ netdev_info(dev,
+ "External switch port is backup port\n");
break;
case EHEA_EC_ADAPTER_MALFUNC:
- ehea_error("Adapter malfunction");
+ netdev_err(dev, "Adapter malfunction\n");
break;
case EHEA_EC_PORT_MALFUNC:
- ehea_info("Port malfunction: Device: %s", port->netdev->name);
- netif_carrier_off(port->netdev);
- netif_stop_queue(port->netdev);
+ netdev_info(dev, "Port malfunction\n");
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
break;
default:
- ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
+ netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
break;
}
}
@@ -1289,13 +1299,13 @@ static void ehea_neq_tasklet(unsigned long data)
u64 event_mask;
eqe = ehea_poll_eq(adapter->neq);
- ehea_debug("eqe=%p", eqe);
+ pr_debug("eqe=%p\n", eqe);
while (eqe) {
- ehea_debug("*eqe=%lx", eqe->entry);
+ pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
ehea_parse_eqe(adapter, eqe->entry);
eqe = ehea_poll_eq(adapter->neq);
- ehea_debug("next eqe=%p", eqe);
+ pr_debug("next eqe=%p\n", eqe);
}
event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
@@ -1344,14 +1354,14 @@ static int ehea_reg_interrupts(struct net_device *dev)
ehea_qp_aff_irq_handler,
IRQF_DISABLED, port->int_aff_name, port);
if (ret) {
- ehea_error("failed registering irq for qp_aff_irq_handler:"
- "ist=%X", port->qp_eq->attr.ist1);
+ netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
+ port->qp_eq->attr.ist1);
goto out_free_qpeq;
}
- if (netif_msg_ifup(port))
- ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
- "registered", port->qp_eq->attr.ist1);
+ netif_info(port, ifup, dev,
+ "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
+ port->qp_eq->attr.ist1);
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -1363,14 +1373,13 @@ static int ehea_reg_interrupts(struct net_device *dev)
IRQF_DISABLED, pr->int_send_name,
pr);
if (ret) {
- ehea_error("failed registering irq for ehea_queue "
- "port_res_nr:%d, ist=%X", i,
- pr->eq->attr.ist1);
+ netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
+ i, pr->eq->attr.ist1);
goto out_free_req;
}
- if (netif_msg_ifup(port))
- ehea_info("irq_handle 0x%X for function ehea_queue_int "
- "%d registered", pr->eq->attr.ist1, i);
+ netif_info(port, ifup, dev,
+ "irq_handle 0x%X for function ehea_queue_int %d registered\n",
+ pr->eq->attr.ist1, i);
}
out:
return ret;
@@ -1401,16 +1410,16 @@ static void ehea_free_interrupts(struct net_device *dev)
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
pr = &port->port_res[i];
ibmebus_free_irq(pr->eq->attr.ist1, pr);
- if (netif_msg_intr(port))
- ehea_info("free send irq for res %d with handle 0x%X",
- i, pr->eq->attr.ist1);
+ netif_info(port, intr, dev,
+ "free send irq for res %d with handle 0x%X\n",
+ i, pr->eq->attr.ist1);
}
/* associated events */
ibmebus_free_irq(port->qp_eq->attr.ist1, port);
- if (netif_msg_intr(port))
- ehea_info("associated event interrupt for handle 0x%X freed",
- port->qp_eq->attr.ist1);
+ netif_info(port, intr, dev,
+ "associated event interrupt for handle 0x%X freed\n",
+ port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
@@ -1479,7 +1488,7 @@ int ehea_gen_smrs(struct ehea_port_res *pr)
out_free:
ehea_rem_mr(&pr->send_mr);
out:
- ehea_error("Generating SMRS failed\n");
+ pr_err("Generating SMRS failed\n");
return -EIO;
}
@@ -1496,12 +1505,10 @@ static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
int arr_size = sizeof(void *) * max_q_entries;
- q_skba->arr = vmalloc(arr_size);
+ q_skba->arr = vzalloc(arr_size);
if (!q_skba->arr)
return -ENOMEM;
- memset(q_skba->arr, 0, arr_size);
-
q_skba->len = max_q_entries;
q_skba->index = 0;
q_skba->os_skbs = 0;
@@ -1536,7 +1543,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
if (!pr->eq) {
- ehea_error("create_eq failed (eq)");
+ pr_err("create_eq failed (eq)\n");
goto out_free;
}
@@ -1544,7 +1551,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->eq->fw_handle,
port->logical_port_id);
if (!pr->recv_cq) {
- ehea_error("create_cq failed (cq_recv)");
+ pr_err("create_cq failed (cq_recv)\n");
goto out_free;
}
@@ -1552,19 +1559,19 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->eq->fw_handle,
port->logical_port_id);
if (!pr->send_cq) {
- ehea_error("create_cq failed (cq_send)");
+ pr_err("create_cq failed (cq_send)\n");
goto out_free;
}
if (netif_msg_ifup(port))
- ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
- pr->send_cq->attr.act_nr_of_cqes,
- pr->recv_cq->attr.act_nr_of_cqes);
+ pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
+ pr->send_cq->attr.act_nr_of_cqes,
+ pr->recv_cq->attr.act_nr_of_cqes);
init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
if (!init_attr) {
ret = -ENOMEM;
- ehea_error("no mem for ehea_qp_init_attr");
+ pr_err("no mem for ehea_qp_init_attr\n");
goto out_free;
}
@@ -1589,18 +1596,18 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
if (!pr->qp) {
- ehea_error("create_qp failed");
+ pr_err("create_qp failed\n");
ret = -EIO;
goto out_free;
}
if (netif_msg_ifup(port))
- ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
- "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
- init_attr->act_nr_send_wqes,
- init_attr->act_nr_rwqes_rq1,
- init_attr->act_nr_rwqes_rq2,
- init_attr->act_nr_rwqes_rq3);
+ pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
+ init_attr->qp_nr,
+ init_attr->act_nr_send_wqes,
+ init_attr->act_nr_rwqes_rq1,
+ init_attr->act_nr_rwqes_rq2,
+ init_attr->act_nr_rwqes_rq3);
pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
@@ -1751,7 +1758,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
swqe->descriptors++;
}
} else
- ehea_error("cannot handle fragmented headers");
+ pr_err("cannot handle fragmented headers\n");
}
static void write_swqe2_nonTSO(struct sk_buff *skb,
@@ -1847,8 +1854,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
port->logical_port_id,
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
- ehea_error("%sregistering bc address failed (tagged)",
- hcallid == H_REG_BCMC ? "" : "de");
+ pr_err("%sregistering bc address failed (tagged)\n",
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
goto out_herr;
}
@@ -1859,8 +1866,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
port->logical_port_id,
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
- ehea_error("%sregistering bc address failed (vlan)",
- hcallid == H_REG_BCMC ? "" : "de");
+ pr_err("%sregistering bc address failed (vlan)\n",
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
}
out_herr:
@@ -1882,7 +1889,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0) {
- ehea_error("no mem for cb0");
+ pr_err("no mem for cb0\n");
ret = -ENOMEM;
goto out;
}
@@ -1930,11 +1937,11 @@ out:
static void ehea_promiscuous_error(u64 hret, int enable)
{
if (hret == H_AUTHORITY)
- ehea_info("Hypervisor denied %sabling promiscuous mode",
- enable == 1 ? "en" : "dis");
+ pr_info("Hypervisor denied %sabling promiscuous mode\n",
+ enable == 1 ? "en" : "dis");
else
- ehea_error("failed %sabling promiscuous mode",
- enable == 1 ? "en" : "dis");
+ pr_err("failed %sabling promiscuous mode\n",
+ enable == 1 ? "en" : "dis");
}
static void ehea_promiscuous(struct net_device *dev, int enable)
@@ -1948,7 +1955,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
if (!cb7) {
- ehea_error("no mem for cb7");
+ pr_err("no mem for cb7\n");
goto out;
}
@@ -2008,7 +2015,7 @@ static int ehea_drop_multicast_list(struct net_device *dev)
hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
H_DEREG_BCMC);
if (hret) {
- ehea_error("failed deregistering mcast MAC");
+ pr_err("failed deregistering mcast MAC\n");
ret = -EIO;
}
@@ -2031,7 +2038,8 @@ static void ehea_allmulti(struct net_device *dev, int enable)
if (!hret)
port->allmulti = 1;
else
- ehea_error("failed enabling IFF_ALLMULTI");
+ netdev_err(dev,
+ "failed enabling IFF_ALLMULTI\n");
}
} else
if (!enable) {
@@ -2040,7 +2048,8 @@ static void ehea_allmulti(struct net_device *dev, int enable)
if (!hret)
port->allmulti = 0;
else
- ehea_error("failed disabling IFF_ALLMULTI");
+ netdev_err(dev,
+ "failed disabling IFF_ALLMULTI\n");
}
}
@@ -2051,7 +2060,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
if (!ehea_mcl_entry) {
- ehea_error("no mem for mcl_entry");
+ pr_err("no mem for mcl_entry\n");
return;
}
@@ -2064,7 +2073,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
if (!hret)
list_add(&ehea_mcl_entry->list, &port->mc_list->list);
else {
- ehea_error("failed registering mcast MAC");
+ pr_err("failed registering mcast MAC\n");
kfree(ehea_mcl_entry);
}
}
@@ -2097,9 +2106,8 @@ static void ehea_set_multicast_list(struct net_device *dev)
}
if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
- ehea_info("Mcast registration limit reached (0x%llx). "
- "Use ALLMULTI!",
- port->adapter->max_mc_mac);
+ pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
+ port->adapter->max_mc_mac);
goto out;
}
@@ -2305,10 +2313,10 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
pr->swqe_id_counter += 1;
- if (netif_msg_tx_queued(port)) {
- ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
+ netif_info(port, tx_queued, dev,
+ "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
+ if (netif_msg_tx_queued(port))
ehea_dump(swqe, 512, "swqe");
- }
if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
netif_stop_queue(dev);
@@ -2344,14 +2352,14 @@ static void ehea_vlan_rx_register(struct net_device *dev,
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
- ehea_error("no mem for cb1");
+ pr_err("no mem for cb1\n");
goto out;
}
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS)
- ehea_error("modify_ehea_port failed");
+ pr_err("modify_ehea_port failed\n");
free_page((unsigned long)cb1);
out:
@@ -2368,14 +2376,14 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
- ehea_error("no mem for cb1");
+ pr_err("no mem for cb1\n");
goto out;
}
hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_port failed");
+ pr_err("query_ehea_port failed\n");
goto out;
}
@@ -2385,7 +2393,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS)
- ehea_error("modify_ehea_port failed");
+ pr_err("modify_ehea_port failed\n");
out:
free_page((unsigned long)cb1);
return;
@@ -2403,14 +2411,14 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
- ehea_error("no mem for cb1");
+ pr_err("no mem for cb1\n");
goto out;
}
hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_port failed");
+ pr_err("query_ehea_port failed\n");
goto out;
}
@@ -2420,7 +2428,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS)
- ehea_error("modify_ehea_port failed");
+ pr_err("modify_ehea_port failed\n");
out:
free_page((unsigned long)cb1);
}
@@ -2442,7 +2450,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (1)");
+ pr_err("query_ehea_qp failed (1)\n");
goto out;
}
@@ -2451,14 +2459,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
&dummy64, &dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
- ehea_error("modify_ehea_qp failed (1)");
+ pr_err("modify_ehea_qp failed (1)\n");
goto out;
}
hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (2)");
+ pr_err("query_ehea_qp failed (2)\n");
goto out;
}
@@ -2467,14 +2475,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
&dummy64, &dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
- ehea_error("modify_ehea_qp failed (2)");
+ pr_err("modify_ehea_qp failed (2)\n");
goto out;
}
hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (3)");
+ pr_err("query_ehea_qp failed (3)\n");
goto out;
}
@@ -2483,14 +2491,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
&dummy64, &dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
- ehea_error("modify_ehea_qp failed (3)");
+ pr_err("modify_ehea_qp failed (3)\n");
goto out;
}
hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (4)");
+ pr_err("query_ehea_qp failed (4)\n");
goto out;
}
@@ -2511,7 +2519,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
EHEA_MAX_ENTRIES_EQ, 1);
if (!port->qp_eq) {
ret = -EINVAL;
- ehea_error("ehea_create_eq failed (qp_eq)");
+ pr_err("ehea_create_eq failed (qp_eq)\n");
goto out_kill_eq;
}
@@ -2592,27 +2600,27 @@ static int ehea_up(struct net_device *dev)
ret = ehea_port_res_setup(port, port->num_def_qps,
port->num_add_tx_qps);
if (ret) {
- ehea_error("port_res_failed");
+ netdev_err(dev, "port_res_failed\n");
goto out;
}
/* Set default QP for this port */
ret = ehea_configure_port(port);
if (ret) {
- ehea_error("ehea_configure_port failed. ret:%d", ret);
+ netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
goto out_clean_pr;
}
ret = ehea_reg_interrupts(dev);
if (ret) {
- ehea_error("reg_interrupts failed. ret:%d", ret);
+ netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
goto out_clean_pr;
}
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
if (ret) {
- ehea_error("activate_qp failed");
+ netdev_err(dev, "activate_qp failed\n");
goto out_free_irqs;
}
}
@@ -2620,7 +2628,7 @@ static int ehea_up(struct net_device *dev)
for (i = 0; i < port->num_def_qps; i++) {
ret = ehea_fill_port_res(&port->port_res[i]);
if (ret) {
- ehea_error("out_free_irqs");
+ netdev_err(dev, "out_free_irqs\n");
goto out_free_irqs;
}
}
@@ -2643,7 +2651,7 @@ out_clean_pr:
ehea_clean_all_portres(port);
out:
if (ret)
- ehea_info("Failed starting %s. ret=%i", dev->name, ret);
+ netdev_info(dev, "Failed starting. ret=%i\n", ret);
ehea_update_bcmc_registrations();
ehea_update_firmware_handles();
@@ -2674,8 +2682,7 @@ static int ehea_open(struct net_device *dev)
mutex_lock(&port->port_lock);
- if (netif_msg_ifup(port))
- ehea_info("enabling port %s", dev->name);
+ netif_info(port, ifup, dev, "enabling port\n");
ret = ehea_up(dev);
if (!ret) {
@@ -2710,8 +2717,7 @@ static int ehea_down(struct net_device *dev)
ret = ehea_clean_all_portres(port);
if (ret)
- ehea_info("Failed freeing resources for %s. ret=%i",
- dev->name, ret);
+ netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
ehea_update_firmware_handles();
@@ -2723,8 +2729,7 @@ static int ehea_stop(struct net_device *dev)
int ret;
struct ehea_port *port = netdev_priv(dev);
- if (netif_msg_ifdown(port))
- ehea_info("disabling port %s", dev->name);
+ netif_info(port, ifdown, dev, "disabling port\n");
set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
cancel_work_sync(&port->reset_task);
@@ -2765,7 +2770,7 @@ static void ehea_flush_sq(struct ehea_port *port)
msecs_to_jiffies(100));
if (!ret) {
- ehea_error("WARNING: sq not flushed completely");
+ pr_err("WARNING: sq not flushed completely\n");
break;
}
}
@@ -2801,7 +2806,7 @@ int ehea_stop_qps(struct net_device *dev)
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (1)");
+ pr_err("query_ehea_qp failed (1)\n");
goto out;
}
@@ -2813,7 +2818,7 @@ int ehea_stop_qps(struct net_device *dev)
1), cb0, &dummy64,
&dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
- ehea_error("modify_ehea_qp failed (1)");
+ pr_err("modify_ehea_qp failed (1)\n");
goto out;
}
@@ -2821,14 +2826,14 @@ int ehea_stop_qps(struct net_device *dev)
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (2)");
+ pr_err("query_ehea_qp failed (2)\n");
goto out;
}
/* deregister shared memory regions */
dret = ehea_rem_smrs(pr);
if (dret) {
- ehea_error("unreg shared memory region failed");
+ pr_err("unreg shared memory region failed\n");
goto out;
}
}
@@ -2897,7 +2902,7 @@ int ehea_restart_qps(struct net_device *dev)
ret = ehea_gen_smrs(pr);
if (ret) {
- ehea_error("creation of shared memory regions failed");
+ netdev_err(dev, "creation of shared memory regions failed\n");
goto out;
}
@@ -2908,7 +2913,7 @@ int ehea_restart_qps(struct net_device *dev)
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (1)");
+ netdev_err(dev, "query_ehea_qp failed (1)\n");
goto out;
}
@@ -2920,7 +2925,7 @@ int ehea_restart_qps(struct net_device *dev)
1), cb0, &dummy64,
&dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
- ehea_error("modify_ehea_qp failed (1)");
+ netdev_err(dev, "modify_ehea_qp failed (1)\n");
goto out;
}
@@ -2928,7 +2933,7 @@ int ehea_restart_qps(struct net_device *dev)
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (2)");
+ netdev_err(dev, "query_ehea_qp failed (2)\n");
goto out;
}
@@ -2965,8 +2970,7 @@ static void ehea_reset_port(struct work_struct *work)
ehea_set_multicast_list(dev);
- if (netif_msg_timer(port))
- ehea_info("Device %s resetted successfully", dev->name);
+ netif_info(port, timer, dev, "reset successful\n");
port_napi_enable(port);
@@ -2976,12 +2980,12 @@ out:
mutex_unlock(&dlpar_mem_lock);
}
-static void ehea_rereg_mrs(struct work_struct *work)
+static void ehea_rereg_mrs(void)
{
int ret, i;
struct ehea_adapter *adapter;
- ehea_info("LPAR memory changed - re-initializing driver");
+ pr_info("LPAR memory changed - re-initializing driver\n");
list_for_each_entry(adapter, &adapter_list, list)
if (adapter->active_ports) {
@@ -3013,8 +3017,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
/* Unregister old memory region */
ret = ehea_rem_mr(&adapter->mr);
if (ret) {
- ehea_error("unregister MR failed - driver"
- " inoperable!");
+ pr_err("unregister MR failed - driver inoperable!\n");
goto out;
}
}
@@ -3026,8 +3029,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
/* Register new memory region */
ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
if (ret) {
- ehea_error("register MR failed - driver"
- " inoperable!");
+ pr_err("register MR failed - driver inoperable!\n");
goto out;
}
@@ -3050,7 +3052,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
}
}
}
- ehea_info("re-initializing driver complete");
+ pr_info("re-initializing driver complete\n");
out:
return;
}
@@ -3103,7 +3105,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
/* (Try to) enable *jumbo frames */
cb4 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb4) {
- ehea_error("no mem for cb4");
+ pr_err("no mem for cb4\n");
ret = -ENOMEM;
goto out;
} else {
@@ -3165,13 +3167,13 @@ static struct device *ehea_register_port(struct ehea_port *port,
ret = of_device_register(&port->ofdev);
if (ret) {
- ehea_error("failed to register device. ret=%d", ret);
+ pr_err("failed to register device. ret=%d\n", ret);
goto out;
}
ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
if (ret) {
- ehea_error("failed to register attributes, ret=%d", ret);
+ pr_err("failed to register attributes, ret=%d\n", ret);
goto out_unreg_of_dev;
}
@@ -3221,7 +3223,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev = alloc_etherdev(sizeof(struct ehea_port));
if (!dev) {
- ehea_error("no mem for net_device");
+ pr_err("no mem for net_device\n");
ret = -ENOMEM;
goto out_err;
}
@@ -3268,11 +3270,14 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
| NETIF_F_LLTX;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
+ if (use_lro)
+ dev->features |= NETIF_F_LRO;
+
INIT_WORK(&port->reset_task, ehea_reset_port);
ret = register_netdev(dev);
if (ret) {
- ehea_error("register_netdev failed. ret=%d", ret);
+ pr_err("register_netdev failed. ret=%d\n", ret);
goto out_unreg_port;
}
@@ -3280,11 +3285,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
ret = ehea_get_jumboframe_status(port, &jumbo);
if (ret)
- ehea_error("failed determining jumbo frame status for %s",
- port->netdev->name);
+ netdev_err(dev, "failed determining jumbo frame status\n");
- ehea_info("%s: Jumbo frames are %sabled", dev->name,
- jumbo == 1 ? "en" : "dis");
+ netdev_info(dev, "Jumbo frames are %sabled\n",
+ jumbo == 1 ? "en" : "dis");
adapter->active_ports++;
@@ -3300,14 +3304,16 @@ out_free_ethdev:
free_netdev(dev);
out_err:
- ehea_error("setting up logical port with id=%d failed, ret=%d",
- logical_port_id, ret);
+ pr_err("setting up logical port with id=%d failed, ret=%d\n",
+ logical_port_id, ret);
return NULL;
}
static void ehea_shutdown_single_port(struct ehea_port *port)
{
struct ehea_adapter *adapter = port->adapter;
+
+ cancel_work_sync(&port->reset_task);
unregister_netdev(port->netdev);
ehea_unregister_port(port);
kfree(port->mc_list);
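The ordering added here matters: a queued reset task would otherwise be free to run against a device that unregister_netdev() is already tearing down. Condensed:

    cancel_work_sync(&port->reset_task);    /* ensure no reset runs or can run ... */
    unregister_netdev(port->netdev);        /* ... before the netdev goes away */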
@@ -3329,13 +3335,13 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
NULL);
if (!dn_log_port_id) {
- ehea_error("bad device node: eth_dn name=%s",
- eth_dn->full_name);
+ pr_err("bad device node: eth_dn name=%s\n",
+ eth_dn->full_name);
continue;
}
if (ehea_add_adapter_mr(adapter)) {
- ehea_error("creating MR failed");
+ pr_err("creating MR failed\n");
of_node_put(eth_dn);
return -EIO;
}
@@ -3344,9 +3350,8 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
*dn_log_port_id,
eth_dn);
if (adapter->port[i])
- ehea_info("%s -> logical port id #%d",
- adapter->port[i]->netdev->name,
- *dn_log_port_id);
+ netdev_info(adapter->port[i]->netdev,
+ "logical port id #%d\n", *dn_log_port_id);
else
ehea_remove_adapter_mr(adapter);
@@ -3391,21 +3396,20 @@ static ssize_t ehea_probe_port(struct device *dev,
port = ehea_get_port(adapter, logical_port_id);
if (port) {
- ehea_info("adding port with logical port id=%d failed. port "
- "already configured as %s.", logical_port_id,
- port->netdev->name);
+ netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
+ logical_port_id);
return -EINVAL;
}
eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
if (!eth_dn) {
- ehea_info("no logical port with id %d found", logical_port_id);
+ pr_info("no logical port with id %d found\n", logical_port_id);
return -EINVAL;
}
if (ehea_add_adapter_mr(adapter)) {
- ehea_error("creating MR failed");
+ pr_err("creating MR failed\n");
return -EIO;
}
@@ -3420,8 +3424,8 @@ static ssize_t ehea_probe_port(struct device *dev,
break;
}
- ehea_info("added %s (logical port id=%d)", port->netdev->name,
- logical_port_id);
+ netdev_info(port->netdev, "added: (logical port id=%d)\n",
+ logical_port_id);
} else {
ehea_remove_adapter_mr(adapter);
return -EIO;
@@ -3444,8 +3448,8 @@ static ssize_t ehea_remove_port(struct device *dev,
port = ehea_get_port(adapter, logical_port_id);
if (port) {
- ehea_info("removed %s (logical port id=%d)", port->netdev->name,
- logical_port_id);
+ netdev_info(port->netdev, "removed: (logical port id=%d)\n",
+ logical_port_id);
ehea_shutdown_single_port(port);
@@ -3455,8 +3459,8 @@ static ssize_t ehea_remove_port(struct device *dev,
break;
}
} else {
- ehea_error("removing port with logical port id=%d failed. port "
- "not configured.", logical_port_id);
+ pr_err("removing port with logical port id=%d failed. port not configured.\n",
+ logical_port_id);
return -EINVAL;
}
@@ -3493,7 +3497,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
int ret;
if (!dev || !dev->dev.of_node) {
- ehea_error("Invalid ibmebus device probed");
+ pr_err("Invalid ibmebus device probed\n");
return -EINVAL;
}
@@ -3597,8 +3601,6 @@ static int __devexit ehea_remove(struct platform_device *dev)
ehea_remove_device_sysfs(dev);
- flush_scheduled_work();
-
ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
tasklet_kill(&adapter->neq_tasklet);
@@ -3641,21 +3643,21 @@ static int ehea_mem_notifier(struct notifier_block *nb,
switch (action) {
case MEM_CANCEL_OFFLINE:
- ehea_info("memory offlining canceled");
+ pr_info("memory offlining canceled");
/* Readd canceled memory block */
case MEM_ONLINE:
- ehea_info("memory is going online");
+ pr_info("memory is going online");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
goto out_unlock;
- ehea_rereg_mrs(NULL);
+ ehea_rereg_mrs();
break;
case MEM_GOING_OFFLINE:
- ehea_info("memory is going offline");
+ pr_info("memory is going offline");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
goto out_unlock;
- ehea_rereg_mrs(NULL);
+ ehea_rereg_mrs();
break;
default:
break;
@@ -3677,7 +3679,7 @@ static int ehea_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
if (action == SYS_RESTART) {
- ehea_info("Reboot: freeing all eHEA resources");
+ pr_info("Reboot: freeing all eHEA resources\n");
ibmebus_unregister_driver(&ehea_driver);
}
return NOTIFY_DONE;
@@ -3693,22 +3695,22 @@ static int check_module_parm(void)
if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
(rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
- ehea_info("Bad parameter: rq1_entries");
+ pr_info("Bad parameter: rq1_entries\n");
ret = -EINVAL;
}
if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
(rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
- ehea_info("Bad parameter: rq2_entries");
+ pr_info("Bad parameter: rq2_entries\n");
ret = -EINVAL;
}
if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
(rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
- ehea_info("Bad parameter: rq3_entries");
+ pr_info("Bad parameter: rq3_entries\n");
ret = -EINVAL;
}
if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
(sq_entries > EHEA_MAX_ENTRIES_SQ)) {
- ehea_info("Bad parameter: sq_entries");
+ pr_info("Bad parameter: sq_entries\n");
ret = -EINVAL;
}
@@ -3728,11 +3730,8 @@ int __init ehea_module_init(void)
{
int ret;
- printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
- DRV_VERSION);
-
+ pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
- INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
@@ -3749,27 +3748,27 @@ int __init ehea_module_init(void)
ret = register_reboot_notifier(&ehea_reboot_nb);
if (ret)
- ehea_info("failed registering reboot notifier");
+ pr_info("failed registering reboot notifier\n");
ret = register_memory_notifier(&ehea_mem_nb);
if (ret)
- ehea_info("failed registering memory remove notifier");
+ pr_info("failed registering memory remove notifier\n");
ret = crash_shutdown_register(ehea_crash_handler);
if (ret)
- ehea_info("failed registering crash handler");
+ pr_info("failed registering crash handler\n");
ret = ibmebus_register_driver(&ehea_driver);
if (ret) {
- ehea_error("failed registering eHEA device driver on ebus");
+ pr_err("failed registering eHEA device driver on ebus\n");
goto out2;
}
ret = driver_create_file(&ehea_driver.driver,
&driver_attr_capabilities);
if (ret) {
- ehea_error("failed to register capabilities attribute, ret=%d",
- ret);
+ pr_err("failed to register capabilities attribute, ret=%d\n",
+ ret);
goto out3;
}
@@ -3789,13 +3788,12 @@ static void __exit ehea_module_exit(void)
{
int ret;
- flush_scheduled_work();
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
unregister_reboot_notifier(&ehea_reboot_nb);
ret = crash_shutdown_unregister(ehea_crash_handler);
if (ret)
- ehea_info("failed unregistering crash handler");
+ pr_info("failed unregistering crash handler\n");
unregister_memory_notifier(&ehea_mem_nb);
kfree(ehea_fw_handles.arr);
kfree(ehea_bcmc_regs.arr);
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 8fe9dcaa7538..0506967b9044 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -26,6 +26,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ehea_phyp.h"
@@ -67,12 +69,11 @@ static long ehea_plpar_hcall_norets(unsigned long opcode,
}
if (ret < H_SUCCESS)
- ehea_error("opcode=%lx ret=%lx"
- " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
- " arg5=%lx arg6=%lx arg7=%lx ",
- opcode, ret,
- arg1, arg2, arg3, arg4, arg5,
- arg6, arg7);
+ pr_err("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx\n",
+ opcode, ret,
+ arg1, arg2, arg3, arg4, arg5, arg6, arg7);
return ret;
}
@@ -114,19 +115,18 @@ static long ehea_plpar_hcall9(unsigned long opcode,
&& (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
|| (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
&& (arg3 == H_PORT_CB7_DUCQPN)))))
- ehea_error("opcode=%lx ret=%lx"
- " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
- " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
- " arg9=%lx"
- " out1=%lx out2=%lx out3=%lx out4=%lx"
- " out5=%lx out6=%lx out7=%lx out8=%lx"
- " out9=%lx",
- opcode, ret,
- arg1, arg2, arg3, arg4, arg5,
- arg6, arg7, arg8, arg9,
- outs[0], outs[1], outs[2], outs[3],
- outs[4], outs[5], outs[6], outs[7],
- outs[8]);
+ pr_err("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
+ " arg9=%lx"
+ " out1=%lx out2=%lx out3=%lx out4=%lx"
+ " out5=%lx out6=%lx out7=%lx out8=%lx"
+ " out9=%lx\n",
+ opcode, ret,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9,
+ outs[0], outs[1], outs[2], outs[3], outs[4],
+ outs[5], outs[6], outs[7], outs[8]);
return ret;
}
@@ -515,7 +515,7 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
const u64 log_pageaddr, const u64 count)
{
if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
- ehea_error("not on pageboundary");
+ pr_err("not on pageboundary\n");
return H_PARAMETER;
}
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 89128b6373e3..cd44bb8017d9 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -26,6 +26,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
@@ -45,7 +47,7 @@ static void *hw_qpageit_get_inc(struct hw_queue *queue)
queue->current_q_offset -= queue->pagesize;
retvalue = NULL;
} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
- ehea_error("not on pageboundary");
+ pr_err("not on pageboundary\n");
retvalue = NULL;
}
return retvalue;
@@ -58,15 +60,15 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
int i, k;
if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
- ehea_error("pagesize conflict! kernel pagesize=%d, "
- "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
+ pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
+ (int)PAGE_SIZE, (int)pagesize);
return -EINVAL;
}
queue->queue_length = nr_of_pages * pagesize;
queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
if (!queue->queue_pages) {
- ehea_error("no mem for queue_pages");
+ pr_err("no mem for queue_pages\n");
return -ENOMEM;
}
@@ -130,7 +132,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq) {
- ehea_error("no mem for cq");
+ pr_err("no mem for cq\n");
goto out_nomem;
}
@@ -147,7 +149,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
&cq->fw_handle, &cq->epas);
if (hret != H_SUCCESS) {
- ehea_error("alloc_resource_cq failed");
+ pr_err("alloc_resource_cq failed\n");
goto out_freemem;
}
@@ -159,7 +161,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
for (counter = 0; counter < cq->attr.nr_pages; counter++) {
vpage = hw_qpageit_get_inc(&cq->hw_queue);
if (!vpage) {
- ehea_error("hw_qpageit_get_inc failed");
+ pr_err("hw_qpageit_get_inc failed\n");
goto out_kill_hwq;
}
@@ -168,9 +170,8 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
0, EHEA_CQ_REGISTER_ORIG,
cq->fw_handle, rpage, 1);
if (hret < H_SUCCESS) {
- ehea_error("register_rpage_cq failed ehea_cq=%p "
- "hret=%llx counter=%i act_pages=%i",
- cq, hret, counter, cq->attr.nr_pages);
+ pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
+ cq, hret, counter, cq->attr.nr_pages);
goto out_kill_hwq;
}
@@ -178,14 +179,14 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
vpage = hw_qpageit_get_inc(&cq->hw_queue);
if ((hret != H_SUCCESS) || (vpage)) {
- ehea_error("registration of pages not "
- "complete hret=%llx\n", hret);
+ pr_err("registration of pages not complete hret=%llx\n",
+ hret);
goto out_kill_hwq;
}
} else {
if (hret != H_PAGE_REGISTERED) {
- ehea_error("CQ: registration of page failed "
- "hret=%llx\n", hret);
+ pr_err("CQ: registration of page failed hret=%llx\n",
+ hret);
goto out_kill_hwq;
}
}
@@ -241,7 +242,7 @@ int ehea_destroy_cq(struct ehea_cq *cq)
}
if (hret != H_SUCCESS) {
- ehea_error("destroy CQ failed");
+ pr_err("destroy CQ failed\n");
return -EIO;
}
@@ -259,7 +260,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
- ehea_error("no mem for eq");
+ pr_err("no mem for eq\n");
return NULL;
}
@@ -272,21 +273,21 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
hret = ehea_h_alloc_resource_eq(adapter->handle,
&eq->attr, &eq->fw_handle);
if (hret != H_SUCCESS) {
- ehea_error("alloc_resource_eq failed");
+ pr_err("alloc_resource_eq failed\n");
goto out_freemem;
}
ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
EHEA_PAGESIZE, sizeof(struct ehea_eqe));
if (ret) {
- ehea_error("can't allocate eq pages");
+ pr_err("can't allocate eq pages\n");
goto out_freeres;
}
for (i = 0; i < eq->attr.nr_pages; i++) {
vpage = hw_qpageit_get_inc(&eq->hw_queue);
if (!vpage) {
- ehea_error("hw_qpageit_get_inc failed");
+ pr_err("hw_qpageit_get_inc failed\n");
hret = H_RESOURCE;
goto out_kill_hwq;
}
@@ -370,7 +371,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
}
if (hret != H_SUCCESS) {
- ehea_error("destroy EQ failed");
+ pr_err("destroy EQ failed\n");
return -EIO;
}
@@ -395,7 +396,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
for (cnt = 0; cnt < nr_pages; cnt++) {
vpage = hw_qpageit_get_inc(hw_queue);
if (!vpage) {
- ehea_error("hw_qpageit_get_inc failed");
+ pr_err("hw_qpageit_get_inc failed\n");
goto out_kill_hwq;
}
rpage = virt_to_abs(vpage);
@@ -403,7 +404,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
0, h_call_q_selector,
qp->fw_handle, rpage, 1);
if (hret < H_SUCCESS) {
- ehea_error("register_rpage_qp failed");
+ pr_err("register_rpage_qp failed\n");
goto out_kill_hwq;
}
}
@@ -432,7 +433,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
- ehea_error("no mem for qp");
+ pr_err("no mem for qp\n");
return NULL;
}
@@ -441,7 +442,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
&qp->fw_handle, &qp->epas);
if (hret != H_SUCCESS) {
- ehea_error("ehea_h_alloc_resource_qp failed");
+ pr_err("ehea_h_alloc_resource_qp failed\n");
goto out_freemem;
}
@@ -455,7 +456,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
init_attr->act_wqe_size_enc_sq, adapter,
0);
if (ret) {
- ehea_error("can't register for sq ret=%x", ret);
+ pr_err("can't register for sq ret=%x\n", ret);
goto out_freeres;
}
@@ -465,7 +466,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
init_attr->act_wqe_size_enc_rq1,
adapter, 1);
if (ret) {
- ehea_error("can't register for rq1 ret=%x", ret);
+ pr_err("can't register for rq1 ret=%x\n", ret);
goto out_kill_hwsq;
}
@@ -476,7 +477,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
init_attr->act_wqe_size_enc_rq2,
adapter, 2);
if (ret) {
- ehea_error("can't register for rq2 ret=%x", ret);
+ pr_err("can't register for rq2 ret=%x\n", ret);
goto out_kill_hwr1q;
}
}
@@ -488,7 +489,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
init_attr->act_wqe_size_enc_rq3,
adapter, 3);
if (ret) {
- ehea_error("can't register for rq3 ret=%x", ret);
+ pr_err("can't register for rq3 ret=%x\n", ret);
goto out_kill_hwr2q;
}
}
@@ -553,7 +554,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
}
if (hret != H_SUCCESS) {
- ehea_error("destroy QP failed");
+ pr_err("destroy QP failed\n");
return -EIO;
}
@@ -842,7 +843,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
(hret != H_PAGE_REGISTERED)) {
ehea_h_free_resource(adapter->handle, mr->handle,
FORCE_FREE);
- ehea_error("register_rpage_mr failed");
+ pr_err("register_rpage_mr failed\n");
return hret;
}
}
@@ -896,7 +897,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
pt = (void *)get_zeroed_page(GFP_KERNEL);
if (!pt) {
- ehea_error("no mem");
+ pr_err("no mem\n");
ret = -ENOMEM;
goto out;
}
@@ -906,14 +907,14 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
&mr->handle, &mr->lkey);
if (hret != H_SUCCESS) {
- ehea_error("alloc_resource_mr failed");
+ pr_err("alloc_resource_mr failed\n");
ret = -EIO;
goto out;
}
if (!ehea_bmap) {
ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
- ehea_error("no busmap available");
+ pr_err("no busmap available\n");
ret = -EIO;
goto out;
}
@@ -929,7 +930,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
if (hret != H_SUCCESS) {
ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
- ehea_error("registering mr failed");
+ pr_err("registering mr failed\n");
ret = -EIO;
goto out;
}
@@ -952,7 +953,7 @@ int ehea_rem_mr(struct ehea_mr *mr)
hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
FORCE_FREE);
if (hret != H_SUCCESS) {
- ehea_error("destroy MR failed");
+ pr_err("destroy MR failed\n");
return -EIO;
}
@@ -987,14 +988,14 @@ void print_error_data(u64 *data)
length = EHEA_PAGESIZE;
if (type == EHEA_AER_RESTYPE_QP)
- ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
- "port=%llX", resource, data[6], data[12], data[22]);
+ pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
+ resource, data[6], data[12], data[22]);
else if (type == EHEA_AER_RESTYPE_CQ)
- ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
- data[6]);
+ pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
+ resource, data[6]);
else if (type == EHEA_AER_RESTYPE_EQ)
- ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
- data[6]);
+ pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
+ resource, data[6]);
ehea_dump(data, length, "error data");
}
@@ -1008,7 +1009,7 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
rblock = (void *)get_zeroed_page(GFP_KERNEL);
if (!rblock) {
- ehea_error("Cannot allocate rblock memory.");
+ pr_err("Cannot allocate rblock memory\n");
goto out;
}
@@ -1020,9 +1021,9 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
*aerr = rblock[12];
print_error_data(rblock);
} else if (ret == H_R_STATE) {
- ehea_error("No error data available: %llX.", res_handle);
+ pr_err("No error data available: %llX\n", res_handle);
} else
- ehea_error("Error data could not be fetched: %llX", res_handle);
+ pr_err("Error data could not be fetched: %llX\n", res_handle);
free_page((unsigned long)rblock);
out:
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c91d364c5527..a937f49d9db7 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "1.4.1.6"
+#define DRV_VERSION "1.4.1.10"
#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -61,6 +61,8 @@ struct enic_port_profile {
char name[PORT_PROFILE_MAX];
u8 instance_uuid[PORT_UUID_MAX];
u8 host_uuid[PORT_UUID_MAX];
+ u8 vf_mac[ETH_ALEN];
+ u8 mac_addr[ETH_ALEN];
};
/* Per-instance private data structure */
@@ -78,8 +80,10 @@ struct enic {
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+ u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int flags;
unsigned int mc_count;
+ unsigned int uc_count;
int csum_rx_enabled;
u32 port_mtu;
u32 rx_coalesce_usecs;
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a466ef91dd43..a0af48c51fb3 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -702,7 +702,7 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
{
unsigned int head_len = skb_headlen(skb);
unsigned int len_left = skb->len - head_len;
- unsigned int hdr_len = skb_transport_offset(skb);
+ unsigned int hdr_len = skb_checksum_start_offset(skb);
unsigned int csum_offset = hdr_len + skb->csum_offset;
int eop = (len_left == 0);
@@ -1002,7 +1002,7 @@ static int enic_dev_packet_filter(struct enic *enic, int directed,
return err;
}
-static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
+static int enic_dev_add_addr(struct enic *enic, u8 *addr)
{
int err;
@@ -1013,7 +1013,7 @@ static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
return err;
}
-static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
+static int enic_dev_del_addr(struct enic *enic, u8 *addr)
{
int err;
@@ -1024,29 +1024,19 @@ static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
return err;
}
-/* netif_tx_lock held, BHs disabled */
-static void enic_set_multicast_list(struct net_device *netdev)
+static void enic_add_multicast_addr_list(struct enic *enic)
{
- struct enic *enic = netdev_priv(netdev);
+ struct net_device *netdev = enic->netdev;
struct netdev_hw_addr *ha;
- int directed = 1;
- int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
- int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
- int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
unsigned int mc_count = netdev_mc_count(netdev);
- int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
- unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int i, j;
- if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
+ if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
+ netdev_warn(netdev, "Registering only %d out of %d "
+ "multicast addresses\n",
+ ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
-
- if (enic->flags != flags) {
- enic->flags = flags;
- enic_dev_packet_filter(enic, directed,
- multicast, broadcast, promisc, allmulti);
}
 	/* Is there an easier way? Trying to minimize the
@@ -1068,7 +1058,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
mc_addr[j]) == 0)
break;
if (j == mc_count)
- enic_dev_del_multicast_addr(enic, enic->mc_addr[i]);
+ enic_dev_del_addr(enic, enic->mc_addr[i]);
}
for (i = 0; i < mc_count; i++) {
@@ -1077,7 +1067,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
enic->mc_addr[j]) == 0)
break;
if (j == enic->mc_count)
- enic_dev_add_multicast_addr(enic, mc_addr[i]);
+ enic_dev_add_addr(enic, mc_addr[i]);
}
/* Save the list to compare against next time
@@ -1089,6 +1079,89 @@ static void enic_set_multicast_list(struct net_device *netdev)
enic->mc_count = mc_count;
}
+static void enic_add_unicast_addr_list(struct enic *enic)
+{
+ struct net_device *netdev = enic->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int uc_count = netdev_uc_count(netdev);
+ u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
+ unsigned int i, j;
+
+ if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
+ netdev_warn(netdev, "Registering only %d out of %d "
+ "unicast addresses\n",
+ ENIC_UNICAST_PERFECT_FILTERS, uc_count);
+ uc_count = ENIC_UNICAST_PERFECT_FILTERS;
+ }
+
+	/* Is there an easier way? Trying to minimize the
+	 * calls to add/del unicast addrs. We keep the
+ * addrs from the last call in enic->uc_addr and
+ * look for changes to add/del.
+ */
+
+ i = 0;
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (i == uc_count)
+ break;
+ memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
+ }
+
+ for (i = 0; i < enic->uc_count; i++) {
+ for (j = 0; j < uc_count; j++)
+ if (compare_ether_addr(enic->uc_addr[i],
+ uc_addr[j]) == 0)
+ break;
+ if (j == uc_count)
+ enic_dev_del_addr(enic, enic->uc_addr[i]);
+ }
+
+ for (i = 0; i < uc_count; i++) {
+ for (j = 0; j < enic->uc_count; j++)
+ if (compare_ether_addr(uc_addr[i],
+ enic->uc_addr[j]) == 0)
+ break;
+ if (j == enic->uc_count)
+ enic_dev_add_addr(enic, uc_addr[i]);
+ }
+
+ /* Save the list to compare against next time
+ */
+
+ for (i = 0; i < uc_count; i++)
+ memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
+
+ enic->uc_count = uc_count;
+}
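
The two nested loops above compute a set difference between the cached address list and the freshly gathered one, so only additions and removals are pushed to the device. The same pattern in isolation, as a self-contained sketch (plain arrays and hypothetical add/del callbacks):

    #include <string.h>

    #define ADDR_LEN 6

    /* delete entries present only in old[], add entries present only in new[] */
    static void sync_addr_lists(unsigned char old[][ADDR_LEN], int old_n,
                                unsigned char new[][ADDR_LEN], int new_n,
                                void (*del)(unsigned char *),
                                void (*add)(unsigned char *))
    {
            int i, j;

            for (i = 0; i < old_n; i++) {
                    for (j = 0; j < new_n; j++)
                            if (!memcmp(old[i], new[j], ADDR_LEN))
                                    break;
                    if (j == new_n)
                            del(old[i]);    /* vanished from the new list */
            }
            for (i = 0; i < new_n; i++) {
                    for (j = 0; j < old_n; j++)
                            if (!memcmp(new[i], old[j], ADDR_LEN))
                                    break;
                    if (j == old_n)
                            add(new[i]);    /* newly appeared */
            }
    }
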
+
+/* netif_tx_lock held, BHs disabled */
+static void enic_set_rx_mode(struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ int directed = 1;
+ int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
+ int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
+ int promisc = (netdev->flags & IFF_PROMISC) ||
+ netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
+ int allmulti = (netdev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
+ unsigned int flags = netdev->flags |
+ (allmulti ? IFF_ALLMULTI : 0) |
+ (promisc ? IFF_PROMISC : 0);
+
+ if (enic->flags != flags) {
+ enic->flags = flags;
+ enic_dev_packet_filter(enic, directed,
+ multicast, broadcast, promisc, allmulti);
+ }
+
+ if (!promisc) {
+ enic_add_unicast_addr_list(enic);
+ if (!allmulti)
+ enic_add_multicast_addr_list(enic);
+ }
+}
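
Note the fallback in enic_set_rx_mode(): once either perfect-filter table would overflow, the driver switches to promiscuous or all-multicast mode instead of programming individual addresses. A compact sketch of that decision, with hypothetical filter capacities:

    #include <linux/if.h>

    #define UC_FILTERS 32   /* hypothetical perfect-filter capacities */
    #define MC_FILTERS 32

    static unsigned int pick_rx_mode(unsigned int ifflags,
                                     int uc_count, int mc_count)
    {
            int promisc  = (ifflags & IFF_PROMISC)  || uc_count > UC_FILTERS;
            int allmulti = (ifflags & IFF_ALLMULTI) || mc_count > MC_FILTERS;

            /* overflowing either table falls back to the coarser mode */
            return ifflags | (promisc ? IFF_PROMISC : 0)
                           | (allmulti ? IFF_ALLMULTI : 0);
    }
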
+
/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
struct vlan_group *vlan_group)
@@ -1158,11 +1231,31 @@ static int enic_dev_init_done(struct enic *enic, int *done, int *error)
return err;
}
+static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if (vf != PORT_SELF_VF)
+ return -EOPNOTSUPP;
+
+ /* Ignore the vf argument for now. We can assume the request
+ * is coming on a vf.
+ */
+ if (is_valid_ether_addr(mac)) {
+ memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
+ return 0;
+ } else
+ return -EINVAL;
+}
+
static int enic_set_port_profile(struct enic *enic, u8 *mac)
{
struct vic_provinfo *vp;
u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
+ u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
char uuid_str[38];
+ char client_mac_str[18];
+ u8 *client_mac;
int err;
err = enic_vnic_dev_deinit(enic);
@@ -1180,46 +1273,63 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
return -EADDRNOTAVAIL;
vp = vic_provinfo_alloc(GFP_KERNEL, oui,
- VIC_PROVINFO_LINUX_TYPE);
+ VIC_PROVINFO_GENERIC_TYPE);
if (!vp)
return -ENOMEM;
vic_provinfo_add_tlv(vp,
- VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
+ VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
strlen(enic->pp.name) + 1, enic->pp.name);
+ if (!is_zero_ether_addr(enic->pp.mac_addr))
+ client_mac = enic->pp.mac_addr;
+ else
+ client_mac = mac;
+
+ vic_provinfo_add_tlv(vp,
+ VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
+ ETH_ALEN, client_mac);
+
+ sprintf(client_mac_str, "%pM", client_mac);
vic_provinfo_add_tlv(vp,
- VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
- ETH_ALEN, mac);
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
+ sizeof(client_mac_str), client_mac_str);
if (enic->pp.set & ENIC_SET_INSTANCE) {
sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
vic_provinfo_add_tlv(vp,
- VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
+ VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
sizeof(uuid_str), uuid_str);
}
if (enic->pp.set & ENIC_SET_HOST) {
sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
vic_provinfo_add_tlv(vp,
- VIC_LINUX_PROV_TLV_HOST_UUID_STR,
+ VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
sizeof(uuid_str), uuid_str);
}
+ os_type = htons(os_type);
+ vic_provinfo_add_tlv(vp,
+ VIC_GENERIC_PROV_TLV_OS_TYPE,
+ sizeof(os_type), &os_type);
+
err = enic_dev_init_prov(enic, vp);
vic_provinfo_free(vp);
if (err)
return err;
+
+ enic->pp.set |= ENIC_SET_APPLIED;
break;
case PORT_REQUEST_DISASSOCIATE:
+ enic->pp.set &= ~ENIC_SET_APPLIED;
break;
default:
return -EINVAL;
}
- enic->pp.set |= ENIC_SET_APPLIED;
return 0;
}
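
The port-profile data handed to firmware is a plain type-length-value blob; each vic_provinfo_add_tlv() call appends one record. A generic sketch of such an append step, under an assumed (hypothetical) buffer layout and length encoding:

    #include <stdint.h>
    #include <string.h>

    struct tlv_buf {
            uint8_t data[512];
            size_t  used;
    };

    /* append one type/length/value record; -1 on overflow */
    static int tlv_add(struct tlv_buf *b, uint8_t type,
                       uint16_t len, const void *val)
    {
            if (b->used + 3 + len > sizeof(b->data))
                    return -1;
            b->data[b->used++] = type;
            b->data[b->used++] = len >> 8;     /* big-endian length, assumed */
            b->data[b->used++] = len & 0xff;
            memcpy(&b->data[b->used], val, len);
            b->used += len;
            return 0;
    }
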
@@ -1227,29 +1337,31 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
struct nlattr *port[])
{
struct enic *enic = netdev_priv(netdev);
+ struct enic_port_profile new_pp;
+ int err = 0;
- memset(&enic->pp, 0, sizeof(enic->pp));
+ memset(&new_pp, 0, sizeof(new_pp));
if (port[IFLA_PORT_REQUEST]) {
- enic->pp.set |= ENIC_SET_REQUEST;
- enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+ new_pp.set |= ENIC_SET_REQUEST;
+ new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
}
if (port[IFLA_PORT_PROFILE]) {
- enic->pp.set |= ENIC_SET_NAME;
- memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
+ new_pp.set |= ENIC_SET_NAME;
+ memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]),
PORT_PROFILE_MAX);
}
if (port[IFLA_PORT_INSTANCE_UUID]) {
- enic->pp.set |= ENIC_SET_INSTANCE;
- memcpy(enic->pp.instance_uuid,
+ new_pp.set |= ENIC_SET_INSTANCE;
+ memcpy(new_pp.instance_uuid,
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
}
if (port[IFLA_PORT_HOST_UUID]) {
- enic->pp.set |= ENIC_SET_HOST;
- memcpy(enic->pp.host_uuid,
+ new_pp.set |= ENIC_SET_HOST;
+ memcpy(new_pp.host_uuid,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
}
@@ -1257,21 +1369,39 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (vf != PORT_SELF_VF)
return -EOPNOTSUPP;
- if (!(enic->pp.set & ENIC_SET_REQUEST))
+ if (!(new_pp.set & ENIC_SET_REQUEST))
return -EOPNOTSUPP;
- if (enic->pp.request == PORT_REQUEST_ASSOCIATE) {
-
- /* If the interface mac addr hasn't been assigned,
- * assign a random mac addr before setting port-
- * profile.
- */
+ if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
+ /* Special case handling */
+ if (!is_zero_ether_addr(enic->pp.vf_mac))
+ memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);
if (is_zero_ether_addr(netdev->dev_addr))
random_ether_addr(netdev->dev_addr);
+ } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
+ if (!is_zero_ether_addr(enic->pp.mac_addr))
+ enic_dev_del_addr(enic, enic->pp.mac_addr);
}
- return enic_set_port_profile(enic, netdev->dev_addr);
+ memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
+
+ err = enic_set_port_profile(enic, netdev->dev_addr);
+ if (err)
+ goto set_port_profile_cleanup;
+
+ if (!is_zero_ether_addr(enic->pp.mac_addr))
+ enic_dev_add_addr(enic, enic->pp.mac_addr);
+
+set_port_profile_cleanup:
+ memset(enic->pp.vf_mac, 0, ETH_ALEN);
+
+ if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
+ memset(netdev->dev_addr, 0, ETH_ALEN);
+ memset(enic->pp.mac_addr, 0, ETH_ALEN);
+ }
+
+ return err;
}
static int enic_get_vf_port(struct net_device *netdev, int vf,
@@ -1851,8 +1981,11 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->rq_count; i++)
vnic_rq_enable(&enic->rq[i]);
- enic_dev_add_station_addr(enic);
- enic_set_multicast_list(netdev);
+ if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
+ enic_dev_add_addr(enic, enic->pp.mac_addr);
+ else
+ enic_dev_add_station_addr(enic);
+ enic_set_rx_mode(netdev);
netif_wake_queue(netdev);
@@ -1899,7 +2032,10 @@ static int enic_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- enic_dev_del_station_addr(enic);
+ if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
+ enic_dev_del_addr(enic, enic->pp.mac_addr);
+ else
+ enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_disable(&enic->wq[i]);
@@ -1962,7 +2098,8 @@ static void enic_poll_controller(struct net_device *netdev)
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->rq_count; i++) {
intr = enic_msix_rq_intr(enic, i);
- enic_isr_msix_rq(enic->msix_entry[intr].vector, enic);
+ enic_isr_msix_rq(enic->msix_entry[intr].vector,
+ &enic->napi[i]);
}
intr = enic_msix_wq_intr(enic, i);
enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
@@ -2042,7 +2179,7 @@ static int enic_dev_hang_reset(struct enic *enic)
static int enic_set_rsskey(struct enic *enic)
{
- u64 rss_key_buf_pa;
+ dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
union vnic_rss_key rss_key = {
.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
@@ -2073,7 +2210,7 @@ static int enic_set_rsskey(struct enic *enic)
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
- u64 rss_cpu_buf_pa;
+ dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
unsigned int i;
int err;
@@ -2328,7 +2465,8 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_start_xmit = enic_hard_start_xmit,
.ndo_get_stats = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = enic_set_multicast_list,
+ .ndo_set_rx_mode = enic_set_rx_mode,
+ .ndo_set_multicast_list = enic_set_rx_mode,
.ndo_set_mac_address = enic_set_mac_address_dynamic,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_register = enic_vlan_rx_register,
@@ -2337,6 +2475,9 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_tx_timeout = enic_tx_timeout,
.ndo_set_vf_port = enic_set_vf_port,
.ndo_get_vf_port = enic_get_vf_port,
+#ifdef IFLA_VF_MAX
+ .ndo_set_vf_mac = enic_set_vf_mac,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = enic_poll_controller,
#endif
@@ -2349,7 +2490,8 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_get_stats = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = enic_set_mac_address,
- .ndo_set_multicast_list = enic_set_multicast_list,
+ .ndo_set_rx_mode = enic_set_rx_mode,
+ .ndo_set_multicast_list = enic_set_rx_mode,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_register = enic_vlan_rx_register,
.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2693,7 +2835,7 @@ static void __devexit enic_remove(struct pci_dev *pdev)
if (netdev) {
struct enic *enic = netdev_priv(netdev);
- flush_scheduled_work();
+ cancel_work_sync(&enic->reset);
unregister_netdev(netdev);
enic_dev_deinit(enic);
vnic_dev_close(enic->vdev);
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 9a103d9ef9e2..25be2734c3fe 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -34,6 +34,7 @@
#define ENIC_MAX_MTU 9000
#define ENIC_MULTICAST_PERFECT_FILTERS 32
+#define ENIC_UNICAST_PERFECT_FILTERS 32
#define ENIC_NON_TSO_MAX_DESC 16
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index 7e46e5e8600f..f700f5d9e81d 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -24,14 +24,29 @@
/* Note: String field lengths include null char */
#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c }
-#define VIC_PROVINFO_LINUX_TYPE 0x2
-
-enum vic_linux_prov_tlv_type {
- VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
- VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR = 1, /* u8[6] */
- VIC_LINUX_PROV_TLV_CLIENT_NAME_STR = 2,
- VIC_LINUX_PROV_TLV_HOST_UUID_STR = 8,
- VIC_LINUX_PROV_TLV_CLIENT_UUID_STR = 9,
+#define VIC_PROVINFO_GENERIC_TYPE 0x4
+
+enum vic_generic_prov_tlv_type {
+ VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
+ VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR = 1,
+ VIC_GENERIC_PROV_TLV_CLIENT_NAME_STR = 2,
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_NAME_STR = 3,
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR = 4,
+ VIC_GENERIC_PROV_TLV_CLUSTER_UUID_STR = 5,
+ VIC_GENERIC_PROV_TLV_CLUSTER_NAME_STR = 7,
+ VIC_GENERIC_PROV_TLV_HOST_UUID_STR = 8,
+ VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR = 9,
+ VIC_GENERIC_PROV_TLV_INCARNATION_NUMBER = 10,
+ VIC_GENERIC_PROV_TLV_OS_TYPE = 11,
+ VIC_GENERIC_PROV_TLV_OS_VENDOR = 12,
+ VIC_GENERIC_PROV_TLV_CLIENT_TYPE = 15,
+};
+
+enum vic_generic_prov_os_type {
+ VIC_GENERIC_PROV_OS_TYPE_UNKNOWN = 0,
+ VIC_GENERIC_PROV_OS_TYPE_ESX = 1,
+ VIC_GENERIC_PROV_OS_TYPE_LINUX = 2,
+ VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3,
};
struct vic_provinfo {
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index c5a2fe099a8d..b79d7e1555d5 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <net/ethoc.h>
static int buffer_size = 0x8000; /* 32 KBytes */
@@ -184,7 +185,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @netdev: pointer to network device structure
* @napi: NAPI structure
* @msg_enable: device state flags
- * @rx_lock: receive lock
* @lock: device lock
* @phy: attached PHY
* @mdio: MDIO bus for PHY access
@@ -209,7 +209,6 @@ struct ethoc {
struct napi_struct napi;
u32 msg_enable;
- spinlock_t rx_lock;
spinlock_t lock;
struct phy_device *phy;
@@ -413,10 +412,21 @@ static int ethoc_rx(struct net_device *dev, int limit)
unsigned int entry;
struct ethoc_bd bd;
- entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
+ entry = priv->num_tx + priv->cur_rx;
ethoc_read_bd(priv, entry, &bd);
- if (bd.stat & RX_BD_EMPTY)
- break;
+ if (bd.stat & RX_BD_EMPTY) {
+ ethoc_ack_irq(priv, INT_MASK_RX);
+ /* If packet (interrupt) came in between checking
+			 * BD_EMPTY and clearing the interrupt source, then we
+			 * risk missing the packet as the RX interrupt won't
+			 * trigger right away when we reenable it; hence, check
+			 * BD_EMPTY here again to make sure there isn't such a
+ * packet waiting for us...
+ */
+ ethoc_read_bd(priv, entry, &bd);
+ if (bd.stat & RX_BD_EMPTY)
+ break;
+ }
if (ethoc_update_rx_stats(priv, &bd) == 0) {
int size = bd.stat >> 16;
@@ -446,13 +456,14 @@ static int ethoc_rx(struct net_device *dev, int limit)
bd.stat &= ~RX_BD_STATS;
bd.stat |= RX_BD_EMPTY;
ethoc_write_bd(priv, entry, &bd);
- priv->cur_rx++;
+ if (++priv->cur_rx == priv->num_rx)
+ priv->cur_rx = 0;
}
return count;
}
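
The ack-then-recheck added to ethoc_rx() closes an edge-triggered race: a packet arriving between the empty test and the interrupt ack would raise no new edge and could sit unserviced. The pattern in isolation, with hypothetical hardware accessors:

    static int desc_has_data(void);   /* hypothetical: descriptor not empty? */
    static void ack_rx_irq(void);     /* hypothetical: clear RX irq source */

    /* returns 1 if a descriptor is ready, having safely re-armed the IRQ */
    static int rx_ready_racefree(void)
    {
            if (desc_has_data())
                    return 1;

            ack_rx_irq();

            /* a packet landing between the check and the ack raises no
             * new edge, so it must be caught by looking once more */
            return desc_has_data();
    }
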
-static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
+static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
{
struct net_device *netdev = dev->netdev;
@@ -482,32 +493,44 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
netdev->stats.collisions += (bd->stat >> 4) & 0xf;
netdev->stats.tx_bytes += bd->stat >> 16;
netdev->stats.tx_packets++;
- return 0;
}
-static void ethoc_tx(struct net_device *dev)
+static int ethoc_tx(struct net_device *dev, int limit)
{
struct ethoc *priv = netdev_priv(dev);
+ int count;
+ struct ethoc_bd bd;
- spin_lock(&priv->lock);
+ for (count = 0; count < limit; ++count) {
+ unsigned int entry;
- while (priv->dty_tx != priv->cur_tx) {
- unsigned int entry = priv->dty_tx % priv->num_tx;
- struct ethoc_bd bd;
+ entry = priv->dty_tx & (priv->num_tx-1);
ethoc_read_bd(priv, entry, &bd);
- if (bd.stat & TX_BD_READY)
- break;
- entry = (++priv->dty_tx) % priv->num_tx;
- (void)ethoc_update_tx_stats(priv, &bd);
+ if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
+ ethoc_ack_irq(priv, INT_MASK_TX);
+ /* If interrupt came in between reading in the BD
+ * and clearing the interrupt source, then we risk
+ * missing the event as the TX interrupt won't trigger
+ * right away when we reenable it; hence, check
+			 * TX_BD_READY here again to make sure there isn't such an
+ * event pending...
+ */
+ ethoc_read_bd(priv, entry, &bd);
+ if (bd.stat & TX_BD_READY ||
+ (priv->dty_tx == priv->cur_tx))
+ break;
+ }
+
+ ethoc_update_tx_stats(priv, &bd);
+ priv->dty_tx++;
}
if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
netif_wake_queue(dev);
- ethoc_ack_irq(priv, INT_MASK_TX);
- spin_unlock(&priv->lock);
+ return count;
}
static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
@@ -515,32 +538,38 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct ethoc *priv = netdev_priv(dev);
u32 pending;
-
- ethoc_disable_irq(priv, INT_MASK_ALL);
+ u32 mask;
+
+ /* Figure out what triggered the interrupt...
+ * The tricky bit here is that the interrupt source bits get
+	 * set in INT_SOURCE for an event regardless of whether that
+ * event is masked or not. Thus, in order to figure out what
+ * triggered the interrupt, we need to remove the sources
+ * for all events that are currently masked. This behaviour
+ * is not particularly well documented but reasonable...
+ */
+ mask = ethoc_read(priv, INT_MASK);
pending = ethoc_read(priv, INT_SOURCE);
+ pending &= mask;
+
if (unlikely(pending == 0)) {
- ethoc_enable_irq(priv, INT_MASK_ALL);
return IRQ_NONE;
}
ethoc_ack_irq(priv, pending);
+ /* We always handle the dropped packet interrupt */
if (pending & INT_MASK_BUSY) {
dev_err(&dev->dev, "packet dropped\n");
dev->stats.rx_dropped++;
}
- if (pending & INT_MASK_RX) {
- if (napi_schedule_prep(&priv->napi))
- __napi_schedule(&priv->napi);
- } else {
- ethoc_enable_irq(priv, INT_MASK_RX);
+ /* Handle receive/transmit event by switching to polling */
+ if (pending & (INT_MASK_TX | INT_MASK_RX)) {
+ ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ napi_schedule(&priv->napi);
}
- if (pending & INT_MASK_TX)
- ethoc_tx(dev);
-
- ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
return IRQ_HANDLED;
}
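
Because INT_SOURCE latches events even while they are masked, the handler must AND the source with the current mask before claiming the interrupt; otherwise latched-but-masked RX/TX bits would be mistaken for live events. A sketch of the filtering step (register names and accessor are hypothetical):

    enum { INT_MASK_REG = 0x08, INT_SOURCE_REG = 0x04 };  /* assumed offsets */
    static unsigned int reg_read(unsigned int reg);       /* assumed MMIO read */

    static int irq_is_ours(void)
    {
            unsigned int mask    = reg_read(INT_MASK_REG);
            unsigned int pending = reg_read(INT_SOURCE_REG) & mask;

            /* masked events stay latched in the source register;
             * dropping them here prevents claiming foreign interrupts */
            return pending != 0;   /* 0 => caller returns IRQ_NONE */
    }
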
@@ -566,26 +595,29 @@ static int ethoc_get_mac_address(struct net_device *dev, void *addr)
static int ethoc_poll(struct napi_struct *napi, int budget)
{
struct ethoc *priv = container_of(napi, struct ethoc, napi);
- int work_done = 0;
+ int rx_work_done = 0;
+ int tx_work_done = 0;
+
+ rx_work_done = ethoc_rx(priv->netdev, budget);
+ tx_work_done = ethoc_tx(priv->netdev, budget);
- work_done = ethoc_rx(priv->netdev, budget);
- if (work_done < budget) {
- ethoc_enable_irq(priv, INT_MASK_RX);
+ if (rx_work_done < budget && tx_work_done < budget) {
napi_complete(napi);
+ ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
}
- return work_done;
+ return rx_work_done;
}
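
The rewritten poll routine follows the standard combined RX/TX NAPI shape: do bounded work for both directions, and only when both complete under budget finish NAPI and unmask the interrupts. A minimal sketch of that skeleton, with hypothetical driver helpers:

    #include <linux/netdevice.h>

    static int demo_rx(int budget);        /* bounded RX work, assumed */
    static int demo_tx(int budget);        /* bounded TX completion, assumed */
    static void demo_enable_irq(void);     /* unmask RX/TX IRQs, assumed */

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            int rx_done = demo_rx(budget);
            int tx_done = demo_tx(budget);

            if (rx_done < budget && tx_done < budget) {
                    napi_complete(napi);   /* back to interrupt mode... */
                    demo_enable_irq();     /* ...with sources unmasked */
            }
            return rx_done;                /* budget accounting: RX only */
    }
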
static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
- unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
struct ethoc *priv = bus->priv;
+ int i;
ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
- while (time_before(jiffies, timeout)) {
+	for (i = 0; i < 5; i++) {
u32 status = ethoc_read(priv, MIISTATUS);
if (!(status & MIISTATUS_BUSY)) {
u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -593,8 +625,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
ethoc_write(priv, MIICOMMAND, 0);
return data;
}
-
- schedule();
+		usleep_range(100, 200);
}
return -EBUSY;
@@ -602,22 +633,21 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
- unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
struct ethoc *priv = bus->priv;
+ int i;
ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
ethoc_write(priv, MIITX_DATA, val);
ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
- while (time_before(jiffies, timeout)) {
+	for (i = 0; i < 5; i++) {
u32 stat = ethoc_read(priv, MIISTATUS);
if (!(stat & MIISTATUS_BUSY)) {
/* reset MII command register */
ethoc_write(priv, MIICOMMAND, 0);
return 0;
}
-
- schedule();
+		usleep_range(100, 200);
}
return -EBUSY;
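
Replacing the jiffies deadline plus schedule() with a short, bounded usleep_range() loop gives the MDIO poll a predictable worst-case latency and avoids leaning on the scheduler inside what is essentially a register busy-wait. The resulting wait pattern, reduced to essentials (status accessor and busy bit hypothetical):

    #include <linux/delay.h>
    #include <linux/errno.h>

    static unsigned int read_mii_status(void);   /* assumed accessor */
    #define MII_BUSY_FLAG 0x2                    /* assumed busy bit */

    /* poll the busy flag up to 5 times, sleeping 100-200 us between tries */
    static int wait_mii_idle(void)
    {
            int i;

            for (i = 0; i < 5; i++) {
                    if (!(read_mii_status() & MII_BUSY_FLAG))
                            return 0;
                    usleep_range(100, 200);
            }
            return -EBUSY;
    }
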
@@ -971,9 +1001,17 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
/* calculate the number of TX/RX buffers, maximum 128 supported */
num_bd = min_t(unsigned int,
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
- priv->num_tx = max(2, num_bd / 4);
+ if (num_bd < 4) {
+ ret = -ENODEV;
+ goto error;
+ }
+ /* num_tx must be a power of two */
+ priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
priv->num_rx = num_bd - priv->num_tx;
+ dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
+ priv->num_tx, priv->num_rx);
+
priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
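
num_tx must be a power of two because ethoc_tx() above derives ring indices with `priv->dty_tx & (priv->num_tx - 1)`; rounddown_pow_of_two() guarantees that mask trick is valid. A tiny illustration of the split:

    #include <linux/log2.h>

    /* e.g. num_bd = 100: tx gets rounddown_pow_of_two(50) = 32, rx gets 68 */
    static void split_rings(unsigned int num_bd,
                            unsigned int *num_tx, unsigned int *num_rx)
    {
            *num_tx = rounddown_pow_of_two(num_bd >> 1);
            *num_rx = num_bd - *num_tx;
    }
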
@@ -982,10 +1020,23 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
/* Allow the platform setup code to pass in a MAC address. */
if (pdev->dev.platform_data) {
- struct ethoc_platform_data *pdata =
- (struct ethoc_platform_data *)pdev->dev.platform_data;
+ struct ethoc_platform_data *pdata = pdev->dev.platform_data;
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
+ } else {
+ priv->phy_id = -1;
+
+#ifdef CONFIG_OF
+ {
+			const uint8_t *mac;
+
+ mac = of_get_property(pdev->dev.of_node,
+ "local-mac-address",
+ NULL);
+ if (mac)
+ memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
+ }
+#endif
}
/* Check that the given MAC address is valid. If it isn't, read the
@@ -1046,7 +1097,6 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
/* setup NAPI */
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
- spin_lock_init(&priv->rx_lock);
spin_lock_init(&priv->lock);
ret = register_netdev(netdev);
@@ -1113,6 +1163,16 @@ static int ethoc_resume(struct platform_device *pdev)
# define ethoc_resume NULL
#endif
+#ifdef CONFIG_OF
+static struct of_device_id ethoc_match[] = {
+ {
+ .compatible = "opencores,ethoc",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ethoc_match);
+#endif
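
The CONFIG_OF additions tie the driver into the device tree: the match table lets the platform bus bind on a compatible string, while the probe change earlier reads local-mac-address when no platform data supplies a MAC. The same pairing for a hypothetical driver:

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static struct of_device_id demo_match[] = {
            { .compatible = "acme,demo-eth" },   /* hypothetical binding */
            { },
    };
    MODULE_DEVICE_TABLE(of, demo_match);

    static struct platform_driver demo_driver = {
            .driver = {
                    .name           = "demo-eth",
                    .owner          = THIS_MODULE,
                    .of_match_table = demo_match,
            },
    };
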
+
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
.remove = __devexit_p(ethoc_remove),
@@ -1120,6 +1180,10 @@ static struct platform_driver ethoc_driver = {
.resume = ethoc_resume,
.driver = {
.name = "ethoc",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_OF
+ .of_match_table = ethoc_match,
+#endif
},
};
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index e9f5d030bc26..50c1213f61fe 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -366,9 +366,8 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
while (bcom_buffer_done(priv->tx_dmatsk)) {
struct sk_buff *skb;
struct bcom_fec_bd *bd;
@@ -379,7 +378,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
dev_kfree_skb_irq(skb);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
netif_wake_queue(dev);
@@ -395,9 +394,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
struct bcom_fec_bd *bd;
u32 status, physaddr;
int length;
- unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
while (bcom_buffer_done(priv->rx_dmatsk)) {
@@ -429,7 +427,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
/* Process the received skb - Drop the spin lock while
* calling into the network stack */
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
DMA_FROM_DEVICE);
@@ -438,10 +436,10 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
rskb->protocol = eth_type_trans(rskb, dev);
netif_rx(rskb);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
@@ -452,7 +450,6 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
u32 ievent;
- unsigned long flags;
ievent = in_be32(&fec->ievent);
@@ -470,9 +467,9 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
mpc52xx_fec_reset(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
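
The irqsave variants are dropped because these handlers run in hard-interrupt context, where local interrupts are already disabled; the plain spin_lock() is sufficient and slightly cheaper, serializing only against other CPUs. The pattern being relied on, as a hypothetical handler:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct demo_priv {
            spinlock_t lock;
    };

    static irqreturn_t demo_irq(int irq, void *dev_id)
    {
            struct demo_priv *priv = dev_id;

            /* hardirq context: local interrupts are already off, so the
             * plain lock is enough; it only excludes other CPUs */
            spin_lock(&priv->lock);
            /* ... touch state shared with other paths ... */
            spin_unlock(&priv->lock);

            return IRQ_HANDLED;
    }
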
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0fa1776563a3..cd2d72d825df 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -39,6 +39,9 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"
@@ -60,18 +63,12 @@
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
#include <asm/system.h>
-#if 0
-#define dprintk printk
-#else
-#define dprintk(x...) do { } while (0)
-#endif
-
#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64
@@ -186,9 +183,9 @@ enum {
NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
-#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
+#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
-#define NVREG_SLOTTIME_DEFAULT 0x00007f00
+#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff
NvRegTxDeferral = 0xA0,
@@ -297,7 +294,7 @@ enum {
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
NvRegMgmtUnitGetVersion = 0x204,
-#define NVREG_MGMTUNITGETVERSION 0x01
+#define NVREG_MGMTUNITGETVERSION 0x01
NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
NvRegPowerCap = 0x268,
@@ -368,8 +365,8 @@ struct ring_desc_ex {
};
union ring_type {
- struct ring_desc* orig;
- struct ring_desc_ex* ex;
+ struct ring_desc *orig;
+ struct ring_desc_ex *ex;
};
#define FLAG_MASK_V1 0xffff0000
@@ -444,10 +441,10 @@ union ring_type {
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
 /* Miscellaneous hardware related defines: */
-#define NV_PCI_REGSZ_VER1 0x270
-#define NV_PCI_REGSZ_VER2 0x2d4
-#define NV_PCI_REGSZ_VER3 0x604
-#define NV_PCI_REGSZ_MAX 0x604
+#define NV_PCI_REGSZ_VER1 0x270
+#define NV_PCI_REGSZ_VER2 0x2d4
+#define NV_PCI_REGSZ_VER3 0x604
+#define NV_PCI_REGSZ_MAX 0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
@@ -717,7 +714,7 @@ static const struct register_test nv_registers_test[] = {
{ NvRegMulticastAddrA, 0xffffffff },
{ NvRegTxWatermark, 0x0ff },
{ NvRegWakeUpFlags, 0x07777 },
- { 0,0 }
+ { 0, 0 }
};
struct nv_skb_map {
@@ -911,7 +908,7 @@ static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
* Power down phy when interface is down (persists through reboot;
* older Linux and other OSes may not power it up again)
*/
-static int phy_power_down = 0;
+static int phy_power_down;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
@@ -948,7 +945,7 @@ static bool nv_optimized(struct fe_priv *np)
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
- int delay, int delaymax, const char *msg)
+ int delay, int delaymax)
{
u8 __iomem *base = get_hwbase(dev);
@@ -956,11 +953,8 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
do {
udelay(delay);
delaymax -= delay;
- if (delaymax < 0) {
- if (msg)
- printk("%s", msg);
+ if (delaymax < 0)
return 1;
- }
} while ((readl(base + offset) & mask) != target);
return 0;
}
@@ -984,12 +978,10 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
u8 __iomem *base = get_hwbase(dev);
if (!nv_optimized(np)) {
- if (rxtx_flags & NV_SETUP_RX_RING) {
+ if (rxtx_flags & NV_SETUP_RX_RING)
writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
- }
- if (rxtx_flags & NV_SETUP_TX_RING) {
+ if (rxtx_flags & NV_SETUP_TX_RING)
writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
- }
} else {
if (rxtx_flags & NV_SETUP_RX_RING) {
writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
@@ -1015,10 +1007,8 @@ static void free_rings(struct net_device *dev)
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
np->rx_ring.ex, np->ring_addr);
}
- if (np->rx_skb)
- kfree(np->rx_skb);
- if (np->tx_skb)
- kfree(np->tx_skb);
+ kfree(np->rx_skb);
+ kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
@@ -1145,23 +1135,15 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
writel(reg, base + NvRegMIIControl);
if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
- NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
- dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
- dev->name, miireg, addr);
+ NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
retval = -1;
} else if (value != MII_READ) {
/* it was a write operation - fewer failures are detectable */
- dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
- dev->name, value, miireg, addr);
retval = 0;
} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
- dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
- dev->name, miireg, addr);
retval = -1;
} else {
retval = readl(base + NvRegMIIData);
- dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
- dev->name, miireg, addr, retval);
}
return retval;
@@ -1174,16 +1156,15 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
unsigned int tries = 0;
miicontrol = BMCR_RESET | bmcr_setup;
- if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
return -1;
- }
/* wait for 500ms */
msleep(500);
/* must wait till reset is deasserted */
while (miicontrol & BMCR_RESET) {
- msleep(10);
+ usleep_range(10000, 20000);
miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
/* FIXME: 100 tries seem excessive */
if (tries++ > 100)
@@ -1192,106 +1173,239 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
return 0;
}
+static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
+{
+ static const struct {
+ int reg;
+ int init;
+ } ri[] = {
+ { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
+ { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
+ { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
+ { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
+ { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
+ { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
+ { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ri); i++) {
+ if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
+ return PHY_ERROR;
+ }
+
+ return 0;
+}
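
Folding the long run of repeated mii_rw() calls into a static register/value table is the heart of this cleanup: the PHY errata sequence becomes data instead of control flow. The idiom reduced to essentials (write_reg() and the values are placeholders, not real errata):

    static int write_reg(int reg, int val);   /* hypothetical register write */

    static const struct { int reg; int val; } init_seq[] = {
            { 0x1f, 0x0000 },
            { 0x19, 0x8e00 },
    };

    static int apply_init_seq(void)
    {
            unsigned int i;

            for (i = 0; i < sizeof(init_seq) / sizeof(init_seq[0]); i++)
                    if (write_reg(init_seq[i].reg, init_seq[i].val))
                            return -1;   /* abort on first failed write */
            return 0;
    }
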
+
+static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
+{
+ u32 reg;
+ u8 __iomem *base = get_hwbase(dev);
+ u32 powerstate = readl(base + NvRegPowerState2);
+
+ /* need to perform hw phy reset */
+ powerstate |= NVREG_POWERSTATE2_PHY_RESET;
+ writel(powerstate, base + NvRegPowerState2);
+ msleep(25);
+
+ powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
+ writel(powerstate, base + NvRegPowerState2);
+ msleep(25);
+
+ reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
+ reg |= PHY_REALTEK_INIT9;
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
+ return PHY_ERROR;
+ reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
+ if (!(reg & PHY_REALTEK_INIT11)) {
+ reg |= PHY_REALTEK_INIT11;
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
+ return PHY_ERROR;
+
+ return 0;
+}
+
+static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
+{
+ u32 phy_reserved;
+
+ if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG6, MII_READ);
+ phy_reserved |= PHY_REALTEK_INIT7;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG6, phy_reserved))
+ return PHY_ERROR;
+ }
+
+ return 0;
+}
+
+static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
+{
+ u32 phy_reserved;
+
+ if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG2, MII_READ);
+ phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
+ phy_reserved |= PHY_REALTEK_INIT3;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG2, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
+ return PHY_ERROR;
+ }
+
+ return 0;
+}
+
+static int init_cicada(struct net_device *dev, struct fe_priv *np,
+ u32 phyinterface)
+{
+ u32 phy_reserved;
+
+ if (phyinterface & PHY_RGMII) {
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
+ phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
+ phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
+ if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+ phy_reserved |= PHY_CICADA_INIT5;
+ if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
+ phy_reserved |= PHY_CICADA_INIT6;
+ if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
+ return PHY_ERROR;
+
+ return 0;
+}
+
+static int init_vitesse(struct net_device *dev, struct fe_priv *np)
+{
+ u32 phy_reserved;
+
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG4, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG3, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+ phy_reserved |= PHY_VITESSE_INIT3;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG4, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+ phy_reserved |= PHY_VITESSE_INIT3;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG3, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG4, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG3, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
+ phy_reserved |= PHY_VITESSE_INIT8;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
+ return PHY_ERROR;
+
+ return 0;
+}
+
static int phy_init(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
- u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
+ u32 phyinterface;
+ u32 mii_status, mii_control, mii_control_1000, reg;
/* phy errata for E3016 phy */
if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
reg &= ~PHY_MARVELL_E3016_INITMASK;
if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
- printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
+ netdev_info(dev, "%s: phy write to errata reg failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
}
if (np->phy_oui == PHY_OUI_REALTEK) {
if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
np->phy_rev == PHY_REV_REALTEK_8211B) {
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ if (init_realtek_8211b(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ } else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
+ np->phy_rev == PHY_REV_REALTEK_8211C) {
+ if (init_realtek_8211c(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
+ if (init_realtek_8201(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
- np->phy_rev == PHY_REV_REALTEK_8211C) {
- u32 powerstate = readl(base + NvRegPowerState2);
-
- /* need to perform hw phy reset */
- powerstate |= NVREG_POWERSTATE2_PHY_RESET;
- writel(powerstate, base + NvRegPowerState2);
- msleep(25);
-
- powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
- writel(powerstate, base + NvRegPowerState2);
- msleep(25);
-
- reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
- reg |= PHY_REALTEK_INIT9;
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
- if (!(reg & PHY_REALTEK_INIT11)) {
- reg |= PHY_REALTEK_INIT11;
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (np->phy_model == PHY_MODEL_REALTEK_8201) {
- if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
- phy_reserved |= PHY_REALTEK_INIT7;
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
}
}
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
+ reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL |
+ ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
- printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
+ netdev_info(dev, "%s: phy write to advertise failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
@@ -1302,7 +1416,8 @@ static int phy_init(struct net_device *dev)
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (mii_status & PHY_GIGABIT) {
np->gigabit = PHY_GIGABIT;
- mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ mii_control_1000 = mii_rw(dev, np->phyaddr,
+ MII_CTRL1000, MII_READ);
mii_control_1000 &= ~ADVERTISE_1000HALF;
if (phyinterface & PHY_RGMII)
mii_control_1000 |= ADVERTISE_1000FULL;
@@ -1310,11 +1425,11 @@ static int phy_init(struct net_device *dev)
mii_control_1000 &= ~ADVERTISE_1000FULL;
if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- }
- else
+ } else
np->gigabit = 0;
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1326,7 +1441,8 @@ static int phy_init(struct net_device *dev)
/* start autoneg since we already performed hw reset above */
mii_control |= BMCR_ANRESTART;
if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
- printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
} else {
@@ -1334,165 +1450,42 @@ static int phy_init(struct net_device *dev)
* (certain phys need bmcr to be setup with reset)
*/
if (phy_reset(dev, mii_control)) {
- printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
+ netdev_info(dev, "%s: phy reset failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
}
/* phy vendor specific configuration */
- if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
- phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
- phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
- phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
- if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
- phy_reserved |= PHY_CICADA_INIT5;
- if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (np->phy_oui == PHY_OUI_CICADA) {
- phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
- phy_reserved |= PHY_CICADA_INIT6;
- if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (np->phy_oui == PHY_OUI_VITESSE) {
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
- phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
- phy_reserved |= PHY_VITESSE_INIT3;
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
- phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
- phy_reserved |= PHY_VITESSE_INIT3;
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ if (np->phy_oui == PHY_OUI_CICADA) {
+ if (init_cicada(dev, np, phyinterface)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ } else if (np->phy_oui == PHY_OUI_VITESSE) {
+ if (init_vitesse(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
- phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
- phy_reserved |= PHY_VITESSE_INIT8;
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (np->phy_oui == PHY_OUI_REALTEK) {
+ } else if (np->phy_oui == PHY_OUI_REALTEK) {
if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
np->phy_rev == PHY_REV_REALTEK_8211B) {
/* reset could have cleared these out, set them back */
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ if (init_realtek_8211b(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
+ if (init_realtek_8201(dev, np) ||
+ init_realtek_8201_cross(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
}
- if (np->phy_model == PHY_MODEL_REALTEK_8201) {
- if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
- phy_reserved |= PHY_REALTEK_INIT7;
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
- phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
- phy_reserved |= PHY_REALTEK_INIT3;
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- }
}
/* some phys clear out pause advertisment on reset, set it back */
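The vendor-specific sequences deleted above are assumed to move into per-PHY
helpers (init_cicada, init_vitesse, init_realtek_8211b, init_realtek_8201,
init_realtek_8201_cross) introduced earlier in the patch; only their call
sites are visible in this hunk. A minimal sketch of one helper's likely shape,
replaying the same register/value pairs the inline 8211B branch used to write
one at a time (the table and return convention are reconstructed from the
deleted lines and the call site, not quoted from the patch):

static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
	/* Same seven writes as the deleted inline code, in order. */
	static const struct {
		int reg;
		int init;
	} ri[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ri); i++)
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
			return PHY_ERROR;
	return 0;
}

With this split, phy_init() logs a single "phy init failed" message per vendor
branch instead of one per register write.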
@@ -1501,12 +1494,10 @@ static int phy_init(struct net_device *dev)
/* restart auto negotiation, power down phy */
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
- if (phy_power_down) {
+ if (phy_power_down)
mii_control |= BMCR_PDOWN;
- }
- if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
return PHY_ERROR;
- }
return 0;
}
@@ -1517,7 +1508,6 @@ static void nv_start_rx(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 rx_ctrl = readl(base + NvRegReceiverControl);
- dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
/* Already running? Stop it. */
if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
rx_ctrl &= ~NVREG_RCVCTL_START;
@@ -1526,12 +1516,10 @@ static void nv_start_rx(struct net_device *dev)
}
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
- rx_ctrl |= NVREG_RCVCTL_START;
- if (np->mac_in_use)
+ rx_ctrl |= NVREG_RCVCTL_START;
+ if (np->mac_in_use)
rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
writel(rx_ctrl, base + NvRegReceiverControl);
- dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
- dev->name, np->duplex, np->linkspeed);
pci_push(base);
}
@@ -1541,15 +1529,15 @@ static void nv_stop_rx(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 rx_ctrl = readl(base + NvRegReceiverControl);
- dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
if (!np->mac_in_use)
rx_ctrl &= ~NVREG_RCVCTL_START;
else
rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
writel(rx_ctrl, base + NvRegReceiverControl);
- reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
- NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
- KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
+ if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
+ NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
+ netdev_info(dev, "%s: ReceiverStatus remained busy\n",
+ __func__);
udelay(NV_RXSTOP_DELAY2);
if (!np->mac_in_use)
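The reg_delay() calls above assume the helper was reworked earlier in the
patch to return nonzero on timeout rather than printing a caller-supplied
message itself. A minimal sketch of that assumed form, reconstructed from the
call sites in nv_stop_rx()/nv_stop_tx():

static int reg_delay(struct net_device *dev, int offset, u32 mask,
		     u32 target, int delay, int delaymax)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0)
			return 1;	/* timed out; caller decides how to log */
	} while ((readl(base + offset) & mask) != target);
	return 0;	/* register reached the target state */
}

Moving the message out of the helper lets each caller report the timeout with
netdev_info() and __func__, as the hunks above and the nv_open() hunk further
down do.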
@@ -1562,7 +1550,6 @@ static void nv_start_tx(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 tx_ctrl = readl(base + NvRegTransmitterControl);
- dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
tx_ctrl |= NVREG_XMITCTL_START;
if (np->mac_in_use)
tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
@@ -1576,15 +1563,15 @@ static void nv_stop_tx(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 tx_ctrl = readl(base + NvRegTransmitterControl);
- dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
if (!np->mac_in_use)
tx_ctrl &= ~NVREG_XMITCTL_START;
else
tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
writel(tx_ctrl, base + NvRegTransmitterControl);
- reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
- NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
- KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
+ if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
+ NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
+ netdev_info(dev, "%s: TransmitterStatus remained busy\n",
+ __func__);
udelay(NV_TXSTOP_DELAY2);
if (!np->mac_in_use)
@@ -1609,7 +1596,6 @@ static void nv_txrx_reset(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
udelay(NV_TXRX_RESET_DELAY);
@@ -1623,8 +1609,6 @@ static void nv_mac_reset(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 temp1, temp2, temp3;
- dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
-
writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
@@ -1745,7 +1729,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
static int nv_alloc_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- struct ring_desc* less_rx;
+ struct ring_desc *less_rx;
less_rx = np->get_rx.orig;
if (less_rx-- == np->first_rx.orig)
@@ -1767,9 +1751,8 @@ static int nv_alloc_rx(struct net_device *dev)
np->put_rx.orig = np->first_rx.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else {
+ } else
return 1;
- }
}
return 0;
}
@@ -1777,7 +1760,7 @@ static int nv_alloc_rx(struct net_device *dev)
static int nv_alloc_rx_optimized(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- struct ring_desc_ex* less_rx;
+ struct ring_desc_ex *less_rx;
less_rx = np->get_rx.ex;
if (less_rx-- == np->first_rx.ex)
@@ -1800,9 +1783,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
np->put_rx.ex = np->first_rx.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else {
+ } else
return 1;
- }
}
return 0;
}
@@ -2018,24 +2000,24 @@ static void nv_legacybackoff_reseed(struct net_device *dev)
/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
- {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
- {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
- {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
- {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
- {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
- {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
- {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
- {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
+ {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+ {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
+ {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+ {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
+ {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
+ {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
+ {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
+ {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
- {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
- {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
- {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
- {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
- {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
- {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
- {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
- {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
+ {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
+ {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
+ {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
static void nv_gear_backoff_reseed(struct net_device *dev)
{
@@ -2083,13 +2065,12 @@ static void nv_gear_backoff_reseed(struct net_device *dev)
temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
- writel(temp,base + NvRegBackOffControl);
+ writel(temp, base + NvRegBackOffControl);
- /* Setup seeds for all gear LFSRs. */
+ /* Setup seeds for all gear LFSRs. */
get_random_bytes(&seedset, sizeof(seedset));
seedset = seedset % BACKOFF_SEEDSET_ROWS;
- for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
- {
+ for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
temp |= main_seedset[seedset][i-1] & 0x3ff;
temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
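To make the reseed arithmetic concrete, an illustrative example (the raw
random value is invented; the constants and shifts are the ones used above):

/*
 * get_random_bytes() yields seedset = 42 -> 42 % BACKOFF_SEEDSET_ROWS = 2.
 * For gear LFSR i = 1 the word written to NvRegBackOffControl is:
 *   NVREG_BKOFFCTRL_DEFAULT | (1 << NVREG_BKOFFCTRL_SELECT)
 *     | (main_seedset[2][0] & 0x3ff)
 *     | ((gear_seedset[2][0] & 0x3ff) << NVREG_BKOFFCTRL_GEAR)
 */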
@@ -2113,10 +2094,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
- struct ring_desc* put_tx;
- struct ring_desc* start_tx;
- struct ring_desc* prev_tx;
- struct nv_skb_map* prev_tx_ctx;
+ struct ring_desc *put_tx;
+ struct ring_desc *start_tx;
+ struct ring_desc *prev_tx;
+ struct nv_skb_map *prev_tx_ctx;
unsigned long flags;
/* add fragments to entries count */
@@ -2204,18 +2185,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
- dev->name, entries, tx_flags_extra);
- {
- int j;
- for (j=0; j<64; j++) {
- if ((j%16) == 0)
- dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned char*)skb->data)[j]);
- }
- dprintk("\n");
- }
-
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2233,11 +2202,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
- struct ring_desc_ex* put_tx;
- struct ring_desc_ex* start_tx;
- struct ring_desc_ex* prev_tx;
- struct nv_skb_map* prev_tx_ctx;
- struct nv_skb_map* start_tx_ctx;
+ struct ring_desc_ex *put_tx;
+ struct ring_desc_ex *start_tx;
+ struct ring_desc_ex *prev_tx;
+ struct nv_skb_map *prev_tx_ctx;
+ struct nv_skb_map *start_tx_ctx;
unsigned long flags;
/* add fragments to entries count */
@@ -2355,18 +2324,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
spin_unlock_irqrestore(&np->lock, flags);
- dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
- dev->name, entries, tx_flags_extra);
- {
- int j;
- for (j=0; j<64; j++) {
- if ((j%16) == 0)
- dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned char*)skb->data)[j]);
- }
- dprintk("\n");
- }
-
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
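For reference, the entries computation at the top of both xmit paths is a
ceiling division of the linear skb area into NV_TX2_TSO_MAX_SIZE-byte
descriptors. Illustrative arithmetic, assuming NV_TX2_TSO_MAX_SHIFT is 14
(16 KiB per descriptor):

/*
 * size = 40000: (40000 >> 14) = 2, (40000 & 16383) != 0 -> +1 -> 3 entries
 * size = 32768: (32768 >> 14) = 2, (32768 & 16383) == 0 -> +0 -> 2 entries
 */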
@@ -2399,15 +2356,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
struct fe_priv *np = netdev_priv(dev);
u32 flags;
int tx_work = 0;
- struct ring_desc* orig_get_tx = np->get_tx.orig;
+ struct ring_desc *orig_get_tx = np->get_tx.orig;
while ((np->get_tx.orig != np->put_tx.orig) &&
!((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
(tx_work < limit)) {
- dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
- dev->name, flags);
-
nv_unmap_txskb(np, np->get_tx_ctx);
if (np->desc_ver == DESC_VER_1) {
@@ -2464,15 +2418,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
struct fe_priv *np = netdev_priv(dev);
u32 flags;
int tx_work = 0;
- struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
+ struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
while ((np->get_tx.ex != np->put_tx.ex) &&
!((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
(tx_work < limit)) {
- dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
- dev->name, flags);
-
nv_unmap_txskb(np, np->get_tx_ctx);
if (flags & NV_TX2_LASTPACKET) {
@@ -2491,9 +2442,8 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
np->get_tx_ctx->skb = NULL;
tx_work++;
- if (np->tx_limit) {
+ if (np->tx_limit)
nv_tx_flip_ownership(dev);
- }
}
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
np->get_tx.ex = np->first_tx.ex;
@@ -2518,57 +2468,56 @@ static void nv_tx_timeout(struct net_device *dev)
u32 status;
union ring_type put_tx;
int saved_tx_limit;
+ int i;
if (np->msi_flags & NV_MSI_X_ENABLED)
status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
else
status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
- printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
-
- {
- int i;
-
- printk(KERN_INFO "%s: Ring at %lx\n",
- dev->name, (unsigned long)np->ring_addr);
- printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
- for (i=0;i<=np->register_size;i+= 32) {
- printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- i,
- readl(base + i + 0), readl(base + i + 4),
- readl(base + i + 8), readl(base + i + 12),
- readl(base + i + 16), readl(base + i + 20),
- readl(base + i + 24), readl(base + i + 28));
- }
- printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
- for (i=0;i<np->tx_ring_size;i+= 4) {
- if (!nv_optimized(np)) {
- printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
- i,
- le32_to_cpu(np->tx_ring.orig[i].buf),
- le32_to_cpu(np->tx_ring.orig[i].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+1].buf),
- le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+2].buf),
- le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+3].buf),
- le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
- } else {
- printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
- i,
- le32_to_cpu(np->tx_ring.ex[i].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i].buflow),
- le32_to_cpu(np->tx_ring.ex[i].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+1].buflow),
- le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+2].buflow),
- le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+3].buflow),
- le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
- }
+ netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
+
+ netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+ netdev_info(dev, "Dumping tx registers\n");
+ for (i = 0; i <= np->register_size; i += 32) {
+ netdev_info(dev,
+ "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ i,
+ readl(base + i + 0), readl(base + i + 4),
+ readl(base + i + 8), readl(base + i + 12),
+ readl(base + i + 16), readl(base + i + 20),
+ readl(base + i + 24), readl(base + i + 28));
+ }
+ netdev_info(dev, "Dumping tx ring\n");
+ for (i = 0; i < np->tx_ring_size; i += 4) {
+ if (!nv_optimized(np)) {
+ netdev_info(dev,
+ "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.orig[i].buf),
+ le32_to_cpu(np->tx_ring.orig[i].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+1].buf),
+ le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+2].buf),
+ le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+3].buf),
+ le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+ } else {
+ netdev_info(dev,
+ "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i].buflow),
+ le32_to_cpu(np->tx_ring.ex[i].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
}
}
@@ -2616,15 +2565,13 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
int protolen; /* length as stored in the proto field */
/* 1) calculate len according to header */
- if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
- protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
+ if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
+ protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
hdrlen = VLAN_HLEN;
} else {
- protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
+ protolen = ntohs(((struct ethhdr *)packet)->h_proto);
hdrlen = ETH_HLEN;
}
- dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
- dev->name, datalen, protolen, hdrlen);
if (protolen > ETH_DATA_LEN)
return datalen; /* Value in proto field not a len, no checks possible */
@@ -2635,26 +2582,18 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
/* more data on wire than in 802 header, trim of
* additional data.
*/
- dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
- dev->name, protolen);
return protolen;
} else {
/* less data on wire than mentioned in header.
* Discard the packet.
*/
- dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
- dev->name);
return -1;
}
} else {
/* short packet. Accept only if 802 values are also short */
if (protolen > ETH_ZLEN) {
- dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
- dev->name);
return -1;
}
- dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
- dev->name, datalen);
return datalen;
}
}
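A worked example of the length check (illustrative, and assuming the elided
lines fold hdrlen into protolen before the comparisons, as the trim/discard
comments imply):

/*
 * Plain Ethernet frame, 802.3 length field = 46, ETH_HLEN = 14:
 *   protolen = 46 + 14 = 60
 *   datalen = 64 -> return 60 (the trailing 4 bytes were padding)
 *   datalen = 50 -> return -1 (header promises more data than arrived)
 * Proto field > ETH_DATA_LEN -> an EtherType, so datalen is returned as-is.
 */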
@@ -2667,13 +2606,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
struct sk_buff *skb;
int len;
- while((np->get_rx.orig != np->put_rx.orig) &&
+ while ((np->get_rx.orig != np->put_rx.orig) &&
!((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
(rx_work < limit)) {
- dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
- dev->name, flags);
-
/*
* the packet is for us - immediately tear down the pci mapping.
* TODO: check if a prefetch of the first cacheline improves
@@ -2685,16 +2621,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb = np->get_rx_ctx->skb;
np->get_rx_ctx->skb = NULL;
- {
- int j;
- dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
- for (j=0; j<64; j++) {
- if ((j%16) == 0)
- dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned char*)skb->data)[j]);
- }
- dprintk("\n");
- }
/* look at what we actually got: */
if (np->desc_ver == DESC_VER_1) {
if (likely(flags & NV_RX_DESCRIPTORVALID)) {
@@ -2710,9 +2636,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
- if (flags & NV_RX_SUBSTRACT1) {
+ if (flags & NV_RX_SUBSTRACT1)
len--;
- }
}
/* the rest are hard errors */
else {
@@ -2745,9 +2670,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
- if (flags & NV_RX2_SUBSTRACT1) {
+ if (flags & NV_RX2_SUBSTRACT1)
len--;
- }
}
/* the rest are hard errors */
else {
@@ -2771,8 +2695,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
/* got a valid packet - forward it to the network core */
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
- dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
- dev->name, len, skb->protocol);
napi_gro_receive(&np->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
@@ -2797,13 +2719,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
struct sk_buff *skb;
int len;
- while((np->get_rx.ex != np->put_rx.ex) &&
+ while ((np->get_rx.ex != np->put_rx.ex) &&
!((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
(rx_work < limit)) {
- dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
- dev->name, flags);
-
/*
* the packet is for us - immediately tear down the pci mapping.
* TODO: check if a prefetch of the first cacheline improves
@@ -2815,16 +2734,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
skb = np->get_rx_ctx->skb;
np->get_rx_ctx->skb = NULL;
- {
- int j;
- dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
- for (j=0; j<64; j++) {
- if ((j%16) == 0)
- dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned char*)skb->data)[j]);
- }
- dprintk("\n");
- }
/* look at what we actually got: */
if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
len = flags & LEN_MASK_V2;
@@ -2838,9 +2747,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
- if (flags & NV_RX2_SUBSTRACT1) {
+ if (flags & NV_RX2_SUBSTRACT1)
len--;
- }
}
/* the rest are hard errors */
else {
@@ -2858,9 +2766,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
prefetch(skb->data);
- dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
- dev->name, len, skb->protocol);
-
if (likely(!np->vlangrp)) {
napi_gro_receive(&np->napi, skb);
} else {
@@ -2949,7 +2854,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -2986,7 +2891,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
struct fe_priv *np = netdev_priv(dev);
- struct sockaddr *macaddr = (struct sockaddr*)addr;
+ struct sockaddr *macaddr = (struct sockaddr *)addr;
if (!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
@@ -3076,8 +2981,6 @@ static void nv_set_multicast(struct net_device *dev)
writel(mask[0], base + NvRegMulticastMaskA);
writel(mask[1], base + NvRegMulticastMaskB);
writel(pff, base + NvRegPacketFilterFlags);
- dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
- dev->name);
nv_start_rx(dev);
spin_unlock_irq(&np->lock);
}
@@ -3152,8 +3055,6 @@ static int nv_update_linkspeed(struct net_device *dev)
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (!(mii_status & BMSR_LSTATUS)) {
- dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
- dev->name);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
retval = 0;
@@ -3161,8 +3062,6 @@ static int nv_update_linkspeed(struct net_device *dev)
}
if (np->autoneg == 0) {
- dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
- dev->name, np->fixed_mode);
if (np->fixed_mode & LPA_100FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 1;
@@ -3185,14 +3084,11 @@ static int nv_update_linkspeed(struct net_device *dev)
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
retval = 0;
- dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
goto set_speed;
}
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
- dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
- dev->name, adv, lpa);
retval = 1;
if (np->gigabit == PHY_GIGABIT) {
@@ -3201,8 +3097,6 @@ static int nv_update_linkspeed(struct net_device *dev)
if ((control_1000 & ADVERTISE_1000FULL) &&
(status_1000 & LPA_1000FULL)) {
- dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
- dev->name);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
newdup = 1;
goto set_speed;
@@ -3224,7 +3118,6 @@ static int nv_update_linkspeed(struct net_device *dev)
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
} else {
- dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
}
@@ -3233,9 +3126,6 @@ set_speed:
if (np->duplex == newdup && np->linkspeed == newls)
return retval;
- dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
- dev->name, np->linkspeed, np->duplex, newls, newdup);
-
np->duplex = newdup;
np->linkspeed = newls;
@@ -3302,7 +3192,7 @@ set_speed:
}
writel(txreg, base + NvRegTxWatermark);
- writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
+ writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
base + NvRegMisc1);
pci_push(base);
writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -3312,8 +3202,8 @@ set_speed:
/* setup pause frame */
if (np->duplex != 0) {
if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
- adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
- lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
+ adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
switch (adv_pause) {
case ADVERTISE_PAUSE_CAP:
@@ -3324,22 +3214,17 @@ set_speed:
}
break;
case ADVERTISE_PAUSE_ASYM:
- if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
- {
+ if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
- }
break;
- case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
- if (lpa_pause & LPA_PAUSE_CAP)
- {
+ case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
+ if (lpa_pause & LPA_PAUSE_CAP) {
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
}
if (lpa_pause == LPA_PAUSE_ASYM)
- {
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
- }
break;
}
} else {
@@ -3361,14 +3246,14 @@ static void nv_linkchange(struct net_device *dev)
if (nv_update_linkspeed(dev)) {
if (!netif_carrier_ok(dev)) {
netif_carrier_on(dev);
- printk(KERN_INFO "%s: link up.\n", dev->name);
+ netdev_info(dev, "link up\n");
nv_txrx_gate(dev, false);
nv_start_rx(dev);
}
} else {
if (netif_carrier_ok(dev)) {
netif_carrier_off(dev);
- printk(KERN_INFO "%s: link down.\n", dev->name);
+ netdev_info(dev, "link down\n");
nv_txrx_gate(dev, true);
nv_stop_rx(dev);
}
@@ -3382,11 +3267,9 @@ static void nv_link_irq(struct net_device *dev)
miistat = readl(base + NvRegMIIStatus);
writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
- dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
if (miistat & (NVREG_MIISTAT_LINKCHANGE))
nv_linkchange(dev);
- dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static void nv_msi_workaround(struct fe_priv *np)
@@ -3437,8 +3320,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
-
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
np->events = readl(base + NvRegIrqStatus);
writel(np->events, base + NvRegIrqStatus);
@@ -3446,7 +3327,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
np->events = readl(base + NvRegMSIXIrqStatus);
writel(np->events, base + NvRegMSIXIrqStatus);
}
- dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
if (!(np->events & np->irqmask))
return IRQ_NONE;
@@ -3460,8 +3340,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
__napi_schedule(&np->napi);
}
- dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
-
return IRQ_HANDLED;
}
@@ -3476,8 +3354,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
-
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
np->events = readl(base + NvRegIrqStatus);
writel(np->events, base + NvRegIrqStatus);
@@ -3485,7 +3361,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
np->events = readl(base + NvRegMSIXIrqStatus);
writel(np->events, base + NvRegMSIXIrqStatus);
}
- dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
if (!(np->events & np->irqmask))
return IRQ_NONE;
@@ -3498,7 +3373,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
writel(0, base + NvRegIrqMask);
__napi_schedule(&np->napi);
}
- dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
return IRQ_HANDLED;
}
@@ -3512,12 +3386,9 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
int i;
unsigned long flags;
- dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
-
- for (i=0; ; i++) {
+ for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
- dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
@@ -3536,12 +3407,12 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
+ netdev_dbg(dev, "%s: too many iterations (%d)\n",
+ __func__, i);
break;
}
}
- dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
return IRQ_RETVAL(i);
}
@@ -3553,7 +3424,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
u8 __iomem *base = get_hwbase(dev);
unsigned long flags;
int retcode;
- int rx_count, tx_work=0, rx_work=0;
+ int rx_count, tx_work = 0, rx_work = 0;
do {
if (!nv_optimized(np)) {
@@ -3626,12 +3497,9 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
int i;
unsigned long flags;
- dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
-
- for (i=0; ; i++) {
+ for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
- dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
@@ -3655,11 +3523,11 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
+ netdev_dbg(dev, "%s: too many iterations (%d)\n",
+ __func__, i);
break;
}
}
- dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
return IRQ_RETVAL(i);
}
@@ -3673,12 +3541,9 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
int i;
unsigned long flags;
- dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
-
- for (i=0; ; i++) {
+ for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
- dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
@@ -3723,12 +3588,12 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
+ netdev_dbg(dev, "%s: too many iterations (%d)\n",
+ __func__, i);
break;
}
}
- dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
return IRQ_RETVAL(i);
}
@@ -3740,8 +3605,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
u8 __iomem *base = get_hwbase(dev);
u32 events;
- dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
-
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
@@ -3750,7 +3613,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
}
pci_push(base);
- dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
if (!(events & NVREG_IRQ_TIMER))
return IRQ_RETVAL(0);
@@ -3760,8 +3622,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
np->intr_test = 1;
spin_unlock(&np->lock);
- dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
-
return IRQ_RETVAL(1);
}
@@ -3776,17 +3636,15 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
* the remaining 8 interrupts.
*/
for (i = 0; i < 8; i++) {
- if ((irqmask >> i) & 0x1) {
+ if ((irqmask >> i) & 0x1)
msixmap |= vector << (i << 2);
- }
}
writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
msixmap = 0;
for (i = 0; i < 8; i++) {
- if ((irqmask >> (i + 8)) & 0x1) {
+ if ((irqmask >> (i + 8)) & 0x1)
msixmap |= vector << (i << 2);
- }
}
writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
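A worked example of the vector map math (values invented; the shifts are the
ones shown above): each of the eight interrupt sources covered by a map
register gets a 4-bit vector field.

/*
 * vector = 1, irqmask = 0x3:
 *   i = 0: bit 0 set -> msixmap |= 1 << (0 << 2) = 0x01
 *   i = 1: bit 1 set -> msixmap |= 1 << (1 << 2) = 0x10
 * NvRegMSIXMap0 is OR-ed with 0x11, steering both sources to vector 1.
 */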
@@ -3809,17 +3667,19 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
if (np->msi_flags & NV_MSI_X_CAPABLE) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
np->msi_x_entry[i].entry = i;
- }
- if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+ ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
+ if (ret == 0) {
np->msi_flags |= NV_MSI_X_ENABLED;
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
/* Request irq for rx handling */
sprintf(np->name_rx, "%s-rx", dev->name);
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+ netdev_info(dev,
+ "request_irq failed for rx %d\n",
+ ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_err;
@@ -3828,7 +3688,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
sprintf(np->name_tx, "%s-tx", dev->name);
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+ netdev_info(dev,
+ "request_irq failed for tx %d\n",
+ ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_free_rx;
@@ -3837,7 +3699,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
sprintf(np->name_other, "%s-other", dev->name);
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+ netdev_info(dev,
+ "request_irq failed for link %d\n",
+ ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_free_tx;
@@ -3851,7 +3715,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
} else {
/* Request irq for all interrupts */
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ netdev_info(dev,
+ "request_irq failed %d\n",
+ ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_err;
@@ -3864,11 +3730,13 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
}
if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
- if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+ ret = pci_enable_msi(np->pci_dev);
+ if (ret == 0) {
np->msi_flags |= NV_MSI_ENABLED;
dev->irq = np->pci_dev->irq;
if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ netdev_info(dev, "request_irq failed %d\n",
+ ret);
pci_disable_msi(np->pci_dev);
np->msi_flags &= ~NV_MSI_ENABLED;
dev->irq = np->pci_dev->irq;
@@ -3903,9 +3771,8 @@ static void nv_free_irq(struct net_device *dev)
int i;
if (np->msi_flags & NV_MSI_X_ENABLED) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
free_irq(np->msi_x_entry[i].vector, dev);
- }
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
} else {
@@ -3954,7 +3821,7 @@ static void nv_do_nic_poll(unsigned long data)
if (np->recover_error) {
np->recover_error = 0;
- printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name);
+ netdev_info(dev, "MAC in recoverable error state\n");
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
@@ -3975,7 +3842,7 @@ static void nv_do_nic_poll(unsigned long data)
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4105,7 +3972,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
if (netif_carrier_ok(dev)) {
- switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
+ switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
case NVREG_LINKSPEED_10:
ecmd->speed = SPEED_10;
break;
@@ -4250,14 +4117,14 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
if (netif_running(dev))
- printk(KERN_INFO "%s: link down.\n", dev->name);
+ netdev_info(dev, "link down\n");
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
bmcr |= BMCR_ANENABLE;
/* reset the phy in order for settings to stick,
* and cause autoneg to start */
if (phy_reset(dev, bmcr)) {
- printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ netdev_info(dev, "phy reset failed\n");
return -EINVAL;
}
} else {
@@ -4306,7 +4173,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (np->phy_oui == PHY_OUI_MARVELL) {
/* reset the phy in order for forced mode settings to stick */
if (phy_reset(dev, bmcr)) {
- printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ netdev_info(dev, "phy reset failed\n");
return -EINVAL;
}
} else {
@@ -4344,7 +4211,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
regs->version = FORCEDETH_REGS_VER;
spin_lock_irq(&np->lock);
- for (i = 0;i <= np->register_size/sizeof(u32); i++)
+ for (i = 0; i <= np->register_size/sizeof(u32); i++)
rbuf[i] = readl(base + i*sizeof(u32));
spin_unlock_irq(&np->lock);
}
@@ -4368,7 +4235,7 @@ static int nv_nway_reset(struct net_device *dev)
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- printk(KERN_INFO "%s: link down.\n", dev->name);
+ netdev_info(dev, "link down\n");
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -4376,7 +4243,7 @@ static int nv_nway_reset(struct net_device *dev)
bmcr |= BMCR_ANENABLE;
/* reset the phy in order for settings to stick*/
if (phy_reset(dev, bmcr)) {
- printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ netdev_info(dev, "phy reset failed\n");
return -EINVAL;
}
} else {
@@ -4464,10 +4331,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
rxtx_ring, ring_addr);
}
- if (rx_skbuff)
- kfree(rx_skbuff);
- if (tx_skbuff)
- kfree(tx_skbuff);
+
+ kfree(rx_skbuff);
+ kfree(tx_skbuff);
goto exit;
}
@@ -4491,14 +4357,14 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
np->tx_ring_size = ring->tx_pending;
if (!nv_optimized(np)) {
- np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
+ np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
} else {
- np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
+ np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
- np->rx_skb = (struct nv_skb_map*)rx_skbuff;
- np->tx_skb = (struct nv_skb_map*)tx_skbuff;
+ np->rx_skb = (struct nv_skb_map *)rx_skbuff;
+ np->tx_skb = (struct nv_skb_map *)tx_skbuff;
np->ring_addr = ring_addr;
memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
@@ -4515,7 +4381,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
/* reinit nic view of the queues */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4550,12 +4416,11 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
if ((!np->autoneg && np->duplex == 0) ||
(np->autoneg && !pause->autoneg && np->duplex == 0)) {
- printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
- dev->name);
+ netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
return -EINVAL;
}
if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
- printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
+ netdev_info(dev, "hardware does not support tx pause frames\n");
return -EINVAL;
}
@@ -4590,7 +4455,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
if (netif_running(dev))
- printk(KERN_INFO "%s: link down.\n", dev->name);
+ netdev_info(dev, "link down\n");
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
@@ -4841,7 +4706,7 @@ static int nv_loopback_test(struct net_device *dev)
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
@@ -4852,8 +4717,7 @@ static int nv_loopback_test(struct net_device *dev)
pkt_len = ETH_DATA_LEN;
tx_skb = dev_alloc_skb(pkt_len);
if (!tx_skb) {
- printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
- " of %s\n", dev->name);
+ netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
ret = 0;
goto out;
}
@@ -4893,29 +4757,22 @@ static int nv_loopback_test(struct net_device *dev)
if (flags & NV_RX_ERROR)
ret = 0;
} else {
- if (flags & NV_RX2_ERROR) {
+ if (flags & NV_RX2_ERROR)
ret = 0;
- }
}
if (ret) {
if (len != pkt_len) {
ret = 0;
- dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
- dev->name, len, pkt_len);
} else {
rx_skb = np->rx_skb[0].skb;
for (i = 0; i < pkt_len; i++) {
if (rx_skb->data[i] != (u8)(i & 0xff)) {
ret = 0;
- dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
- dev->name, i);
break;
}
}
}
- } else {
- dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
}
pci_unmap_single(np->pci_dev, test_dma_addr,
@@ -4958,11 +4815,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
netif_addr_lock(dev);
spin_lock_irq(&np->lock);
nv_disable_hw_interrupts(dev, np->irqmask);
- if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
- } else {
+ else
writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
- }
/* stop engines */
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
@@ -5003,7 +4859,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -5106,8 +4962,7 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
np->mgmt_sema = 1;
return 1;
- }
- else
+ } else
udelay(50);
}
@@ -5167,8 +5022,6 @@ static int nv_open(struct net_device *dev)
int oom, i;
u32 low;
- dprintk(KERN_DEBUG "nv_open: begin\n");
-
/* power up phy */
mii_rw(dev, np->phyaddr, MII_BMCR,
mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
@@ -5204,7 +5057,7 @@ static int nv_open(struct net_device *dev)
/* give hw rings */
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -5216,9 +5069,11 @@ static int nv_open(struct net_device *dev)
writel(np->vlanctl_bits, base + NvRegVlanControl);
pci_push(base);
writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
- reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
- NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
- KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
+ if (reg_delay(dev, NvRegUnknownSetupReg5,
+ NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
+ NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
+ netdev_info(dev,
+ "%s: SetupReg5, Bit 31 remained off\n", __func__);
writel(0, base + NvRegMIIMask);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
@@ -5251,8 +5106,7 @@ static int nv_open(struct net_device *dev)
writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
else
writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
- }
- else
+ } else
writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
@@ -5263,7 +5117,7 @@ static int nv_open(struct net_device *dev)
writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
i = readl(base + NvRegPowerState);
- if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
+ if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
pci_push(base);
@@ -5276,9 +5130,8 @@ static int nv_open(struct net_device *dev)
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
- if (nv_request_irq(dev, 0)) {
+ if (nv_request_irq(dev, 0))
goto out_drain;
- }
/* ask for interrupts */
nv_enable_hw_interrupts(dev, np->irqmask);
@@ -5296,7 +5149,6 @@ static int nv_open(struct net_device *dev)
u32 miistat;
miistat = readl(base + NvRegMIIStatus);
writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
- dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
}
/* set linkspeed to invalid value, thus force nv_update_linkspeed
* to init hw */
@@ -5309,7 +5161,7 @@ static int nv_open(struct net_device *dev)
if (ret) {
netif_carrier_on(dev);
} else {
- printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
+ netdev_info(dev, "no link during initialization\n");
netif_carrier_off(dev);
}
if (oom)
@@ -5352,7 +5204,6 @@ static int nv_close(struct net_device *dev)
base = get_hwbase(dev);
nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
- dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
spin_unlock_irq(&np->lock);
@@ -5421,8 +5272,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
static int printed_version;
if (!printed_version++)
- printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
- " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);
+ pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
+ FORCEDETH_VERSION);
dev = alloc_etherdev(sizeof(struct fe_priv));
err = -ENOMEM;
@@ -5465,10 +5316,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
err = -EINVAL;
addr = 0;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
- pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
- pci_resource_len(pci_dev, i),
- pci_resource_flags(pci_dev, i));
if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
pci_resource_len(pci_dev, i) >= np->register_size) {
addr = pci_resource_start(pci_dev, i);
@@ -5476,8 +5323,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
}
}
if (i == DEVICE_COUNT_RESOURCE) {
- dev_printk(KERN_INFO, &pci_dev->dev,
- "Couldn't find register window\n");
+ dev_info(&pci_dev->dev, "Couldn't find register window\n");
goto out_relreg;
}
@@ -5493,13 +5339,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
if (dma_64bit) {
if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
- dev_printk(KERN_INFO, &pci_dev->dev,
- "64-bit DMA failed, using 32-bit addressing\n");
+ dev_info(&pci_dev->dev,
+ "64-bit DMA failed, using 32-bit addressing\n");
else
dev->features |= NETIF_F_HIGHDMA;
if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
- dev_printk(KERN_INFO, &pci_dev->dev,
- "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
+ dev_info(&pci_dev->dev,
+ "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
}
}
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
@@ -5620,7 +5466,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
- printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
+ dev_dbg(&pci_dev->dev,
+ "%s: set workaround bit for reversed mac addr\n",
+ __func__);
}
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
@@ -5629,17 +5477,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
*/
- dev_printk(KERN_ERR, &pci_dev->dev,
- "Invalid Mac address detected: %pM\n",
- dev->dev_addr);
- dev_printk(KERN_ERR, &pci_dev->dev,
- "Please complain to your hardware vendor. Switching to a random MAC.\n");
+ dev_err(&pci_dev->dev,
+ "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
+ dev->dev_addr);
random_ether_addr(dev->dev_addr);
+ dev_err(&pci_dev->dev,
+ "Using random MAC address: %pM\n", dev->dev_addr);
}
- dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
- pci_name(pci_dev), dev->dev_addr);
-
/* set mac address */
nv_copy_mac_to_hw(dev);
@@ -5663,16 +5508,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
writel(powerstate, base + NvRegPowerState2);
}
- if (np->desc_ver == DESC_VER_1) {
+ if (np->desc_ver == DESC_VER_1)
np->tx_flags = NV_TX_VALID;
- } else {
+ else
np->tx_flags = NV_TX2_VALID;
- }
np->msi_flags = 0;
- if ((id->driver_data & DEV_HAS_MSI) && msi) {
+ if ((id->driver_data & DEV_HAS_MSI) && msi)
np->msi_flags |= NV_MSI_CAPABLE;
- }
+
if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
/* msix has had reported issues when modifying irqmask
as in the case of napi, therefore, disable for now
@@ -5702,11 +5546,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (id->driver_data & DEV_NEED_TIMERIRQ)
np->irqmask |= NVREG_IRQ_TIMER;
if (id->driver_data & DEV_NEED_LINKTIMER) {
- dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
np->need_linktimer = 1;
np->link_timeout = jiffies + LINK_TIMEOUT;
} else {
- dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
np->need_linktimer = 0;
}
@@ -5735,19 +5577,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
nv_mgmt_acquire_sema(dev) &&
nv_mgmt_get_version(dev)) {
np->mac_in_use = 1;
- if (np->mgmt_version > 0) {
+ if (np->mgmt_version > 0)
np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
- }
- dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
- pci_name(pci_dev), np->mac_in_use);
/* management unit setup the phy already? */
if (np->mac_in_use &&
((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
NVREG_XMITCTL_SYNC_PHY_INIT)) {
/* phy is inited by mgmt unit */
phyinitialized = 1;
- dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
- pci_name(pci_dev));
} else {
/* we need to init the phy */
}
@@ -5773,8 +5610,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->phy_model = id2 & PHYID2_MODEL_MASK;
id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
- dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
- pci_name(pci_dev), id1, id2, phyaddr);
np->phyaddr = phyaddr;
np->phy_oui = id1 | id2;
@@ -5788,8 +5623,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
break;
}
if (i == 33) {
- dev_printk(KERN_INFO, &pci_dev->dev,
- "open: Could not find a valid PHY.\n");
+ dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
goto out_error;
}
@@ -5799,9 +5633,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
} else {
/* see if it is a gigabit phy */
u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
- if (mii_status & PHY_GIGABIT) {
+ if (mii_status & PHY_GIGABIT)
np->gigabit = PHY_GIGABIT;
- }
}
/* set default link speed settings */
@@ -5811,37 +5644,27 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
err = register_netdev(dev);
if (err) {
- dev_printk(KERN_INFO, &pci_dev->dev,
- "unable to register netdev: %d\n", err);
+ dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
goto out_error;
}
- dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
- "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- dev->name,
- np->phy_oui,
- np->phyaddr,
- dev->dev_addr[0],
- dev->dev_addr[1],
- dev->dev_addr[2],
- dev->dev_addr[3],
- dev->dev_addr[4],
- dev->dev_addr[5]);
-
- dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
- dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
- dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
- "csum " : "",
- dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
- "vlan " : "",
- id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
- id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
- id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
- np->gigabit == PHY_GIGABIT ? "gbit " : "",
- np->need_linktimer ? "lnktim " : "",
- np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
- np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
- np->desc_ver);
+ dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
+ dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
+
+ dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+ dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
+ dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
+ "csum " : "",
+ dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
+ "vlan " : "",
+ id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
+ id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
+ id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
+ np->gigabit == PHY_GIGABIT ? "gbit " : "",
+ np->need_linktimer ? "lnktim " : "",
+ np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
+ np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
+ np->desc_ver);
return 0;
@@ -5931,13 +5754,13 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
int i;
if (netif_running(dev)) {
- // Gross.
+ /* Gross. */
nv_close(dev);
}
netif_device_detach(dev);
/* save non-pci configuration space */
- for (i = 0;i <= np->register_size/sizeof(u32); i++)
+ for (i = 0; i <= np->register_size/sizeof(u32); i++)
np->saved_config_space[i] = readl(base + i*sizeof(u32));
pci_save_state(pdev);
@@ -5960,7 +5783,7 @@ static int nv_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D0, 0);
/* restore non-pci configuration space */
- for (i = 0;i <= np->register_size/sizeof(u32); i++)
+ for (i = 0; i <= np->register_size/sizeof(u32); i++)
writel(np->saved_config_space[i], base+i*sizeof(u32));
if (np->driver_data & DEV_NEED_MSI_FIX)
@@ -5990,9 +5813,8 @@ static void nv_shutdown(struct pci_dev *pdev)
* If we really go for poweroff, we must not restore the MAC,
* otherwise the MAC for WOL will be reversed at least on some boards.
*/
- if (system_state != SYSTEM_POWER_OFF) {
+ if (system_state != SYSTEM_POWER_OFF)
nv_restore_mac_addr(pdev);
- }
pci_disable_device(pdev);
/*
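The forcedeth hunks above all follow one mechanical pattern: raw printk()/dprintk() calls become the structured logging helpers, which prefix messages with the driver and device automatically, and the always-compiled dprintk() debug output is simply dropped. A minimal sketch of the conversion (nv_example() and its messages are hypothetical):

#include <linux/netdevice.h>
#include <linux/pci.h>

static void nv_example(struct net_device *dev, struct pci_dev *pci_dev)
{
        /* was: printk(KERN_INFO "%s: no link...\n", dev->name); */
        netdev_info(dev, "no link during initialization\n");

        /* was: dev_printk(KERN_INFO, &pci_dev->dev, ...); */
        dev_info(&pci_dev->dev, "Couldn't find register window\n");

        /* dev_dbg() compiles out unless DEBUG or dynamic debug is
         * enabled, which is why unconditional dprintk() calls could
         * simply be removed */
        dev_dbg(&pci_dev->dev, "%s: probe detail\n", __func__);
}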
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 49e4ce1246a7..45c4b7bfcf39 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -143,7 +143,8 @@ void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
-static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
@@ -577,11 +578,10 @@ static int gfar_parse_group(struct device_node *np,
irq_of_parse_and_map(np, 1);
priv->gfargrp[priv->num_grps].interruptError =
irq_of_parse_and_map(np,2);
- if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
- priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
- priv->gfargrp[priv->num_grps].interruptError < 0) {
+ if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
return -EINVAL;
- }
}
priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
@@ -3095,10 +3095,10 @@ static void gfar_set_multi(struct net_device *dev)
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
- u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};
+ static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
- gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
+ gfar_set_mac_for_addr(dev, idx, zero_arr);
}
/* Set the appropriate hash bit for the given addr */
@@ -3133,7 +3133,8 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
/* There are multiple MAC Address register pairs on some controllers
* This function sets the numth pair to a given address
*/
-static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
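The interrupt-parsing fix in gfar_parse_group() deserves a note: irq_of_parse_and_map() returns an unsigned int, so the old "< 0" tests could never fire and mapping failures were silently ignored. Failure is signalled by NO_IRQ (zero on most platforms), which is what must be compared against. A small sketch, using a hypothetical example_parse_irq():

#include <linux/errno.h>
#include <linux/of_irq.h>

static int example_parse_irq(struct device_node *np, unsigned int *virq)
{
        *virq = irq_of_parse_and_map(np, 0);

        /* unsigned return value: a "< 0" check is always false here */
        if (*virq == NO_IRQ)
                return -EINVAL;
        return 0;
}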
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5c566ebc54b8..3bc8e276ba4d 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
+ device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
spin_lock_irqsave(&priv->bflock, flags);
- priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0;
- device_set_wakeup_enable(&dev->dev, priv->wol_en);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
spin_unlock_irqrestore(&priv->bflock, flags);
return 0;
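The gfar_set_wol() reordering looks cosmetic but appears to be about execution context: device_set_wakeup_enable() may sleep, so it has to run before the bflock spinlock is taken, and the cached wol_en flag is then derived from device_may_wakeup() under the lock. The shape of the pattern, with hypothetical names:

#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static void set_wol_sketch(struct device *dev, spinlock_t *lock,
                           int *cached_en, bool enable)
{
        unsigned long flags;

        device_set_wakeup_enable(dev, enable);  /* may sleep: no lock held */

        spin_lock_irqsave(lock, flags);
        *cached_en = !!device_may_wakeup(dev);  /* publish under the lock */
        spin_unlock_irqrestore(lock, flags);
}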
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index d15d2f2ba78e..ef2014375e62 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -162,9 +162,9 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
/* Snarf the interrupt now. Someday this could be moved to open(). */
if (dev->irq < 2) {
- int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
- int irq_8list[] = { 7, 5, 3, 4, 9, 0};
- int *irqp = wordmode ? irq_16list : irq_8list;
+ static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
+ static const int irq_8list[] = { 7, 5, 3, 4, 9, 0};
+ const int *irqp = wordmode ? irq_16list : irq_8list;
do {
int irq = *irqp;
if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
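hp.c here (and act200l-sir.c, donauboe.c, e1000_phy.c and ixgb_param.c below) all get the same micro-cleanup: read-only lookup tables declared inside a function become static const, so they are emitted once into .rodata instead of being rebuilt on the stack at every call, and the compiler can reject stray writes. A self-contained sketch with a stand-in probe function:

/* stand-in for the driver's real IRQ probe */
static int try_irq(int irq)
{
        return irq == 5 || irq == 7;
}

static int pick_irq(int wordmode)
{
        /* static const: one read-only copy, no per-call stack setup */
        static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0 };
        static const int irq_8list[]  = { 7, 5, 3, 4, 9, 0 };
        const int *irqp;

        for (irqp = wordmode ? irq_16list : irq_8list; *irqp; irqp++)
                if (try_irq(*irqp))
                        return *irqp;
        return 0;
}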
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc3204cb7..8f11d29a5828 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev,
SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
netif_carrier_off(ndev);
- netif_stop_queue(ndev);
err = register_netdev(ndev);
if (err) {
@@ -2951,7 +2950,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
unregister_netdev(dev->ndev);
- flush_scheduled_work();
+ cancel_work_sync(&dev->reset_work);
if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
tah_detach(dev->tah_dev, dev->tah_port);
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index c454b45ca7ec..5522d459654c 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -729,11 +729,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
sizeof(info->version) - 1);
}
-static u32 netdev_get_link(struct net_device *dev)
-{
- return 1;
-}
-
static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
@@ -918,7 +913,7 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_settings = netdev_get_settings,
- .get_link = netdev_get_link,
+ .get_link = ethtool_op_get_link,
.set_tx_csum = ibmveth_set_tx_csum,
.get_rx_csum = ibmveth_get_rx_csum,
.set_rx_csum = ibmveth_set_rx_csum,
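ibmveth here (and igbvf and iseries_veth below) delete private get_link callbacks in favor of the stock ethtool_op_get_link() helper, which just reports netif_carrier_ok(). For ibmveth and iseries_veth this is also a behavior fix: the removed callbacks returned a hard-coded 1 regardless of carrier state. Wiring it up is a single line in the ops table:

#include <linux/ethtool.h>

static const struct ethtool_ops example_ethtool_ops = {
        /* stock helper: returns netif_carrier_ok(dev), no driver code */
        .get_link = ethtool_op_get_link,
};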
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index ab9f675c5b8b..124dac4532b2 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -36,22 +36,10 @@
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
-#define TX_TIMEOUT (2*HZ)
-
#define TX_Q_LIMIT 32
struct ifb_private {
struct tasklet_struct ifb_tasklet;
int tasklet_pending;
- /* mostly debug stats leave in for now */
- unsigned long st_task_enter; /* tasklet entered */
- unsigned long st_txq_refl_try; /* transmit queue refill attempt */
- unsigned long st_rxq_enter; /* receive queue entered */
- unsigned long st_rx2tx_tran; /* receive to trasmit transfers */
- unsigned long st_rxq_notenter; /*receiveQ not entered, resched */
- unsigned long st_rx_frm_egr; /* received from egress path */
- unsigned long st_rx_frm_ing; /* received from ingress path */
- unsigned long st_rxq_check;
- unsigned long st_rxq_rsch;
struct sk_buff_head rq;
struct sk_buff_head tq;
};
@@ -73,24 +61,17 @@ static void ri_tasklet(unsigned long dev)
struct sk_buff *skb;
txq = netdev_get_tx_queue(_dev, 0);
- dp->st_task_enter++;
if ((skb = skb_peek(&dp->tq)) == NULL) {
- dp->st_txq_refl_try++;
if (__netif_tx_trylock(txq)) {
- dp->st_rxq_enter++;
- while ((skb = skb_dequeue(&dp->rq)) != NULL) {
- skb_queue_tail(&dp->tq, skb);
- dp->st_rx2tx_tran++;
- }
+ skb_queue_splice_tail_init(&dp->rq, &dp->tq);
__netif_tx_unlock(txq);
} else {
/* reschedule */
- dp->st_rxq_notenter++;
goto resched;
}
}
- while ((skb = skb_dequeue(&dp->tq)) != NULL) {
+ while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
u32 from = G_TC_FROM(skb->tc_verd);
skb->tc_verd = 0;
@@ -104,30 +85,28 @@ static void ri_tasklet(unsigned long dev)
rcu_read_unlock();
dev_kfree_skb(skb);
stats->tx_dropped++;
+ if (skb_queue_len(&dp->tq) != 0)
+ goto resched;
break;
}
rcu_read_unlock();
skb->skb_iif = _dev->ifindex;
if (from & AT_EGRESS) {
- dp->st_rx_frm_egr++;
dev_queue_xmit(skb);
} else if (from & AT_INGRESS) {
- dp->st_rx_frm_ing++;
skb_pull(skb, skb->dev->hard_header_len);
- netif_rx(skb);
+ netif_receive_skb(skb);
} else
BUG();
}
if (__netif_tx_trylock(txq)) {
- dp->st_rxq_check++;
if ((skb = skb_peek(&dp->rq)) == NULL) {
dp->tasklet_pending = 0;
if (netif_queue_stopped(_dev))
netif_wake_queue(_dev);
} else {
- dp->st_rxq_rsch++;
__netif_tx_unlock(txq);
goto resched;
}
@@ -182,7 +161,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- skb_queue_tail(&dp->rq, skb);
+ __skb_queue_tail(&dp->rq, skb);
if (!dp->tasklet_pending) {
dp->tasklet_pending = 1;
tasklet_schedule(&dp->ifb_tasklet);
@@ -197,8 +176,8 @@ static int ifb_close(struct net_device *dev)
tasklet_kill(&dp->ifb_tasklet);
netif_stop_queue(dev);
- skb_queue_purge(&dp->rq);
- skb_queue_purge(&dp->tq);
+ __skb_queue_purge(&dp->rq);
+ __skb_queue_purge(&dp->tq);
return 0;
}
@@ -207,8 +186,8 @@ static int ifb_open(struct net_device *dev)
struct ifb_private *dp = netdev_priv(dev);
tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
- skb_queue_head_init(&dp->rq);
- skb_queue_head_init(&dp->tq);
+ __skb_queue_head_init(&dp->rq);
+ __skb_queue_head_init(&dp->tq);
netif_start_queue(dev);
return 0;
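Two ideas drive the ifb rewrite: every queue access already happens under the device tx queue lock or inside the single tasklet, so the lock-free __skb_queue_*() variants are safe and the per-skb locking in skb_queue_tail() was pure overhead; and skb_queue_splice_tail_init() moves the entire rx queue onto the tx queue in O(1) instead of dequeuing one skb at a time. A hedged sketch of the splice pattern:

#include <linux/skbuff.h>

/* caller must guarantee exclusive access to both queues, e.g. by
 * holding the device tx queue lock the way ri_tasklet() does */
static void drain_rx_to_tx(struct sk_buff_head *rq, struct sk_buff_head *tq)
{
        struct sk_buff *skb;

        /* O(1): relink rq's whole list onto tq's tail and re-init rq */
        skb_queue_splice_tail_init(rq, tq);

        while ((skb = __skb_dequeue(tq)) != NULL)
                dev_kfree_skb(skb);     /* stand-in for the real forwarding */
}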
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 62222796a8b3..6319ed902bc0 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -419,6 +419,9 @@
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_MBX 15
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
@@ -580,11 +583,15 @@
/* Mask bits for fields in Word 0x1a of the NVM */
+/* length of string needed to store part num */
+#define E1000_PBANUM_LENGTH 11
+
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* NVM Commands - Microwire */
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index d83b77fa4038..6b5cc2cc453d 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -445,31 +445,112 @@ out:
}
/**
- * igb_read_part_num - Read device part number
+ * igb_read_part_string - Read device part number
* @hw: pointer to the HW structure
* @part_num: pointer to device part number
+ * @part_num_size: size of part number buffer
*
* Reads the product board assembly (PBA) number from the EEPROM and stores
* the value in part_num.
**/
-s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num)
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
{
- s32 ret_val;
+ s32 ret_val;
u16 nvm_data;
+ u16 pointer;
+ u16 offset;
+ u16 length;
+
+ if (part_num == NULL) {
+ hw_dbg("PBA string buffer was null\n");
+ ret_val = E1000_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
- *part_num = (u32)(nvm_data << 16);
- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ /*
+ * if nvm_data is not the pointer guard, the PBA is stored in the legacy
+ * format, which means "pointer" is actually our second data word for the
+ * PBA number and we can decode it into an ASCII string
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ hw_dbg("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (part_num_size < 11) {
+ hw_dbg("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pointer */
+ part_num[0] = (nvm_data >> 12) & 0xF;
+ part_num[1] = (nvm_data >> 8) & 0xF;
+ part_num[2] = (nvm_data >> 4) & 0xF;
+ part_num[3] = nvm_data & 0xF;
+ part_num[4] = (pointer >> 12) & 0xF;
+ part_num[5] = (pointer >> 8) & 0xF;
+ part_num[6] = '-';
+ part_num[7] = 0;
+ part_num[8] = (pointer >> 4) & 0xF;
+ part_num[9] = pointer & 0xF;
+
+ /* put a null character on the end of our string */
+ part_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (part_num[offset] < 0xA)
+ part_num[offset] += '0';
+ else if (part_num[offset] < 0x10)
+ part_num[offset] += 'A' - 0xA;
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
- *part_num |= nvm_data;
+
+ if (length == 0xFFFF || length == 0) {
+ hw_dbg("NVM PBA number section invalid length\n");
+ ret_val = E1000_ERR_NVM_PBA_SECTION;
+ goto out;
+ }
+ /* check if part_num buffer is big enough */
+ if (part_num_size < (((u32)length * 2) - 1)) {
+ hw_dbg("PBA string buffer too small\n");
+ ret_val = E1000_ERR_NO_SPACE;
+ goto out;
+ }
+
+ /* trim pba length from start of string */
+ pointer++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+ part_num[offset * 2] = (u8)(nvm_data >> 8);
+ part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ part_num[offset * 2] = '\0';
out:
return ret_val;
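The new igb_read_part_string() has to cope with two NVM layouts: if word 0x9 is not the 0xFAFA guard value, the PBA is two legacy words whose nibbles print as hex digits with a '-' wedged in (index 7 holds a literal 0, which the conversion loop turns into the digit '0'); otherwise word 0x9 points at a length-prefixed string section. The legacy decode can be sanity-checked in isolation; a self-contained user-space sketch with made-up input words:

#include <stdio.h>

/* decode the legacy two-word PBA exactly as the driver does */
static void decode_legacy_pba(unsigned short w0, unsigned short w1,
                              char out[11])
{
        int i;

        out[0] = (w0 >> 12) & 0xF;  out[1] = (w0 >> 8) & 0xF;
        out[2] = (w0 >> 4) & 0xF;   out[3] = w0 & 0xF;
        out[4] = (w1 >> 12) & 0xF;  out[5] = (w1 >> 8) & 0xF;
        out[6] = '-';
        out[7] = 0;                 /* becomes the digit '0' below */
        out[8] = (w1 >> 4) & 0xF;   out[9] = w1 & 0xF;
        out[10] = '\0';

        /* switch the raw nibbles to hex characters; '-' (0x2D) is
         * already printable and falls through both tests */
        for (i = 0; i < 10; i++) {
                if (out[i] < 0xA)
                        out[i] += '0';
                else if (out[i] < 0x10)
                        out[i] += 'A' - 0xA;
        }
}

int main(void)
{
        char pba[11];

        decode_legacy_pba(0xE462, 0x0745, pba); /* made-up words */
        printf("PBA: %s\n", pba);               /* prints E46207-045 */
        return 0;
}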
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
index 1041c34dcbe1..29c956a84bd0 100644
--- a/drivers/net/igb/e1000_nvm.h
+++ b/drivers/net/igb/e1000_nvm.h
@@ -32,6 +32,8 @@ s32 igb_acquire_nvm(struct e1000_hw *hw);
void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+ u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index ddd036a78999..6694bf3e5ad9 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1757,11 +1757,12 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
u16 phy_data, i, agc_value = 0;
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
- u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
- {IGP02E1000_PHY_AGC_A,
- IGP02E1000_PHY_AGC_B,
- IGP02E1000_PHY_AGC_C,
- IGP02E1000_PHY_AGC_D};
+ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
+ };
/* Read the AGC registers for all channels */
for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 892d196f17ac..62348fc60e53 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1729,12 +1729,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
struct igb_adapter *adapter;
struct e1000_hw *hw;
u16 eeprom_data = 0;
+ s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
u16 eeprom_apme_mask = IGB_EEPROM_APME;
- u32 part_num;
+ u8 part_str[E1000_PBANUM_LENGTH];
/* Catch broken hardware that put the wrong VF device ID in
* the PCIe SR-IOV capability.
@@ -2000,10 +2001,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
"unknown"),
netdev->dev_addr);
- igb_read_part_num(hw, &part_num);
- dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
- (part_num >> 8), (part_num & 0xff));
-
+ ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strcpy(part_str, "Unknown");
+ dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
dev_info(&pdev->dev,
"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
adapter->msix_entries ? "MSI-X" :
@@ -2049,13 +2050,16 @@ static void __devexit igb_remove(struct pci_dev *pdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /* flush_scheduled work may reschedule our watchdog task, so
- * explicitly disable watchdog tasks from being rescheduled */
+ /*
+ * The watchdog timer may be rescheduled, so explicitly
+ * prevent the watchdog task from being rescheduled.
+ */
set_bit(__IGB_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- flush_scheduled_work();
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
#ifdef CONFIG_IGB_DCA
if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
@@ -2436,10 +2440,9 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
int size;
size = sizeof(struct igb_buffer) * tx_ring->count;
- tx_ring->buffer_info = vmalloc(size);
+ tx_ring->buffer_info = vzalloc(size);
if (!tx_ring->buffer_info)
goto err;
- memset(tx_ring->buffer_info, 0, size);
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -2587,10 +2590,9 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
int size, desc_len;
size = sizeof(struct igb_buffer) * rx_ring->count;
- rx_ring->buffer_info = vmalloc(size);
+ rx_ring->buffer_info = vzalloc(size);
if (!rx_ring->buffer_info)
goto err;
- memset(rx_ring->buffer_info, 0, size);
desc_len = sizeof(union e1000_adv_rx_desc);
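Two recurring conversions in igb (and igbvf and ixgb below) show up in this file. vzalloc() collapses the vmalloc()+memset() pair into one call, and the remove paths stop using flush_scheduled_work() — which waits on every item on the global workqueue and can stall or deadlock on unrelated work — in favor of cancel_work_sync() on exactly the work items the driver owns. A hedged sketch with a hypothetical adapter struct:

#include <linux/vmalloc.h>
#include <linux/workqueue.h>

struct example_adapter {
        struct work_struct reset_task;
        struct work_struct watchdog_task;
};

static void *alloc_ring_info(size_t count, size_t elem)
{
        /* one call: virtually contiguous and pre-zeroed */
        return vzalloc(count * elem);
}

static void example_remove(struct example_adapter *adapter)
{
        /* cancel only what this driver queued; never flush the
         * whole global workqueue from a remove path */
        cancel_work_sync(&adapter->reset_task);
        cancel_work_sync(&adapter->watchdog_task);
}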
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile
index c2f150d8f2d9..0fa3db3dd8b6 100644
--- a/drivers/net/igbvf/Makefile
+++ b/drivers/net/igbvf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 Intel Corporation.
+# Copyright(c) 2009 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h
index 88a47537518a..79f2604673fe 100644
--- a/drivers/net/igbvf/defines.h
+++ b/drivers/net/igbvf/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 33add708bcbe..ed6e3d910247 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -110,11 +110,6 @@ static int igbvf_get_settings(struct net_device *netdev,
return 0;
}
-static u32 igbvf_get_link(struct net_device *netdev)
-{
- return netif_carrier_ok(netdev);
-}
-
static int igbvf_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -515,7 +510,7 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
.get_msglevel = igbvf_get_msglevel,
.set_msglevel = igbvf_set_msglevel,
.nway_reset = igbvf_nway_reset,
- .get_link = igbvf_get_link,
+ .get_link = ethtool_op_get_link,
.get_eeprom_len = igbvf_get_eeprom_len,
.get_eeprom = igbvf_get_eeprom,
.set_eeprom = igbvf_set_eeprom,
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index debeee2dc717..9d4d63e536d4 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -126,7 +126,6 @@ struct igbvf_buffer {
unsigned int page_offset;
};
};
- struct page *page;
};
union igbvf_desc {
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
index 819a8ec901dc..3d6f4cc3998a 100644
--- a/drivers/net/igbvf/mbx.c
+++ b/drivers/net/igbvf/mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h
index 4938609dbfb5..c2883c45d477 100644
--- a/drivers/net/igbvf/mbx.h
+++ b/drivers/net/igbvf/mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 28af019c97bb..4fb023bce785 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,12 +44,13 @@
#include "igbvf.h"
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "1.0.8-k0"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
"Intel(R) Virtual Function Network Driver";
-static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static const char igbvf_copyright[] =
+ "Copyright (c) 2009 - 2010 Intel Corporation.";
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
@@ -429,10 +430,9 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
int size;
size = sizeof(struct igbvf_buffer) * tx_ring->count;
- tx_ring->buffer_info = vmalloc(size);
+ tx_ring->buffer_info = vzalloc(size);
if (!tx_ring->buffer_info)
goto err;
- memset(tx_ring->buffer_info, 0, size);
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -469,10 +469,9 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
int size, desc_len;
size = sizeof(struct igbvf_buffer) * rx_ring->count;
- rx_ring->buffer_info = vmalloc(size);
+ rx_ring->buffer_info = vzalloc(size);
if (!rx_ring->buffer_info)
goto err;
- memset(rx_ring->buffer_info, 0, size);
desc_len = sizeof(union e1000_adv_rx_desc);
@@ -1851,8 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
- bool txb2b = 1;
-
mac->ops.get_link_up_info(&adapter->hw,
&adapter->link_speed,
&adapter->link_duplex);
@@ -1862,11 +1859,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
- txb2b = 0;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
- txb2b = 0;
/* maybe add some timeout factor ? */
break;
}
@@ -2830,13 +2825,14 @@ static void __devexit igbvf_remove(struct pci_dev *pdev)
struct e1000_hw *hw = &adapter->hw;
/*
- * flush_scheduled work may reschedule our watchdog task, so
- * explicitly disable watchdog tasks from being rescheduled
+ * The watchdog timer may be rescheduled, so explicitly
+ * disable it from being rescheduled.
*/
set_bit(__IGBVF_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
- flush_scheduled_work();
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
unregister_netdev(netdev);
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h
index b9e24ed70d0a..77e18d3d6b15 100644
--- a/drivers/net/igbvf/regs.h
+++ b/drivers/net/igbvf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index a9a61efa964c..0cc13c6ed418 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
index 1e8ce3741a67..c36ea21f17fa 100644
--- a/drivers/net/igbvf/vf.h
+++ b/drivers/net/igbvf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index dc0198092343..aa93655c3aa7 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,16 +88,14 @@ static const char *ipg_brand_name[] = {
"IC PLUS IP1000 1000/100/10 based NIC",
"Sundance Technology ST2021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
- "Tamarack Microelectronics TC9020/9021 based NIC",
"D-Link NIC IP1000A"
};
static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
- { PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
- { PCI_VDEVICE(DLINK, 0x9021), 3 },
- { PCI_VDEVICE(DLINK, 0x4020), 4 },
+ { PCI_VDEVICE(DLINK, 0x9021), 2 },
+ { PCI_VDEVICE(DLINK, 0x4020), 3 },
{ 0, }
};
diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c
index 37ab8c855719..8ff084f1d236 100644
--- a/drivers/net/irda/act200l-sir.c
+++ b/drivers/net/irda/act200l-sir.c
@@ -199,7 +199,7 @@ static int act200l_reset(struct sir_dev *dev)
{
unsigned state = dev->fsm.substate;
unsigned delay = 0;
- u8 control[9] = {
+ static const u8 control[9] = {
ACT200L_REG15,
ACT200L_REG13 | ACT200L_SHDW,
ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b626cccbccd1..f81d944fc360 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -818,9 +818,9 @@ toshoboe_probe (struct toshoboe_cb *self)
{
int i, j, n;
#ifdef USE_MIR
- int bauds[] = { 9600, 115200, 4000000, 1152000 };
+ static const int bauds[] = { 9600, 115200, 4000000, 1152000 };
#else
- int bauds[] = { 9600, 115200, 4000000 };
+ static const int bauds[] = { 9600, 115200, 4000000 };
#endif
unsigned long flags;
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 74b20f179cea..cc821de2c966 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -959,7 +959,7 @@ static void mcs_disconnect(struct usb_interface *intf)
if (!mcs)
return;
- flush_scheduled_work();
+ cancel_work_sync(&mcs->work);
unregister_netdev(mcs->netdev);
free_netdev(mcs->netdev);
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 00b38bccd6d0..52a7c86af663 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
/* Baud Rate Error Correction x 10000 */
u32 rate_err_array[] = {
- 0000, 0625, 1250, 1875,
+ 0, 625, 1250, 1875,
2500, 3125, 3750, 4375,
5000, 5625, 6250, 6875,
7500, 8125, 8750, 9375,
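The sh_sir change is a genuine bug fix, not a style cleanup: a leading zero makes a C integer literal octal, so 0625 was 405 decimal and the second entry of the baud-rate error table was wrong (0000 happened to still equal 0). A tiny demonstration:

#include <stdio.h>

int main(void)
{
        int wrong = 0625;       /* octal: 6*64 + 2*8 + 5 = 405 */
        int right = 625;        /* decimal, as the table intended */

        printf("%d vs %d\n", wrong, right);     /* prints 405 vs 625 */
        return 0;
}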
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 8df645e78f2e..9ece1fd9889d 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -885,17 +885,8 @@ static void veth_stop_connection(struct veth_lpar_connection *cnx)
veth_kick_statemachine(cnx);
spin_unlock_irq(&cnx->lock);
- /* There's a slim chance the reset code has just queued the
- * statemachine to run in five seconds. If so we need to cancel
- * that and requeue the work to run now. */
- if (cancel_delayed_work(&cnx->statemachine_wq)) {
- spin_lock_irq(&cnx->lock);
- veth_kick_statemachine(cnx);
- spin_unlock_irq(&cnx->lock);
- }
-
- /* Wait for the state machine to run. */
- flush_scheduled_work();
+ /* ensure the statemachine runs now and wait for its completion */
+ flush_delayed_work_sync(&cnx->statemachine_wq);
}
static void veth_destroy_connection(struct veth_lpar_connection *cnx)
@@ -1009,15 +1000,10 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return 0;
}
-static u32 veth_get_link(struct net_device *dev)
-{
- return 1;
-}
-
static const struct ethtool_ops ops = {
.get_drvinfo = veth_get_drvinfo,
.get_settings = veth_get_settings,
- .get_link = veth_get_link,
+ .get_link = ethtool_op_get_link,
};
static const struct net_device_ops veth_netdev_ops = {
@@ -1605,7 +1591,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
veth_dev[i] = dev;
- port = (struct veth_port*)netdev_priv(dev);
+ port = netdev_priv(dev);
/* Start the state machine on each connection on this vlan. If we're
* the first dev to do so this will commence link negotiation */
@@ -1658,15 +1644,14 @@ static void __exit veth_module_cleanup(void)
/* Disconnect our "irq" to stop events coming from the Hypervisor. */
HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
- /* Make sure any work queued from Hypervisor callbacks is finished. */
- flush_scheduled_work();
-
for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
cnx = veth_cnx[i];
if (!cnx)
continue;
+ /* Cancel work queued from Hypervisor callbacks */
+ cancel_delayed_work_sync(&cnx->statemachine_wq);
/* Remove the connection from sysfs */
kobject_del(&cnx->kobject);
/* Drop the driver's reference to the connection */
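iseries_veth replaces its cancel-and-requeue dance with flush_delayed_work_sync(), which folds both steps into one primitive: if the delayed work's timer is still pending it is fired immediately, and the call then waits for the work function to finish. Roughly:

#include <linux/workqueue.h>

static void stop_statemachine(struct delayed_work *statemachine_wq)
{
        /* fire the pending timer now and wait for completion; replaces
         * cancel_delayed_work() + requeue + flush_scheduled_work() */
        flush_delayed_work_sync(statemachine_wq);
}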
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index caa8192fff2a..5639cccb4935 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -98,6 +98,8 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);
+static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
+static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static void ixgb_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
@@ -525,7 +527,7 @@ ixgb_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev_priv(netdev);
- flush_scheduled_work();
+ cancel_work_sync(&adapter->tx_timeout_task);
unregister_netdev(netdev);
@@ -669,13 +671,12 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
int size;
size = sizeof(struct ixgb_buffer) * txdr->count;
- txdr->buffer_info = vmalloc(size);
+ txdr->buffer_info = vzalloc(size);
if (!txdr->buffer_info) {
netif_err(adapter, probe, adapter->netdev,
"Unable to allocate transmit descriptor ring memory\n");
return -ENOMEM;
}
- memset(txdr->buffer_info, 0, size);
/* round up to nearest 4K */
@@ -759,13 +760,12 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
int size;
size = sizeof(struct ixgb_buffer) * rxdr->count;
- rxdr->buffer_info = vmalloc(size);
+ rxdr->buffer_info = vzalloc(size);
if (!rxdr->buffer_info) {
netif_err(adapter, probe, adapter->netdev,
"Unable to allocate receive descriptor ring\n");
return -ENOMEM;
}
- memset(rxdr->buffer_info, 0, size);
/* Round up to nearest 4K */
@@ -1078,6 +1078,8 @@ ixgb_set_multi(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
+ /* disable VLAN filtering */
+ rctl &= ~IXGB_RCTL_CFIEN;
rctl &= ~IXGB_RCTL_VFE;
} else {
if (netdev->flags & IFF_ALLMULTI) {
@@ -1086,7 +1088,9 @@ ixgb_set_multi(struct net_device *netdev)
} else {
rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
}
+ /* enable VLAN filtering */
rctl |= IXGB_RCTL_VFE;
+ rctl &= ~IXGB_RCTL_CFIEN;
}
if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
@@ -1105,6 +1109,12 @@ ixgb_set_multi(struct net_device *netdev)
ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
+
+ if (netdev->features & NETIF_F_HW_VLAN_RX)
+ ixgb_vlan_strip_enable(adapter);
+ else
+ ixgb_vlan_strip_disable(adapter);
+
}
/**
@@ -1252,7 +1262,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
struct ixgb_buffer *buffer_info;
- css = skb_transport_offset(skb);
+ css = skb_checksum_start_offset(skb);
cso = css + skb->csum_offset;
i = adapter->tx_ring.next_to_use;
@@ -2152,33 +2162,30 @@ static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 ctrl, rctl;
- ixgb_irq_disable(adapter);
adapter->vlgrp = grp;
+}
- if (grp) {
- /* enable VLAN tag insert/strip */
- ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
- ctrl |= IXGB_CTRL0_VME;
- IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
-
- /* enable VLAN receive filtering */
+static void
+ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
+{
+ u32 ctrl;
- rctl = IXGB_READ_REG(&adapter->hw, RCTL);
- rctl &= ~IXGB_RCTL_CFIEN;
- IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
- } else {
- /* disable VLAN tag insert/strip */
+ /* enable VLAN tag insert/strip */
+ ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
+ ctrl |= IXGB_CTRL0_VME;
+ IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
+}
- ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
- ctrl &= ~IXGB_CTRL0_VME;
- IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
- }
+static void
+ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
+{
+ u32 ctrl;
- /* don't enable interrupts unless we are UP */
- if (adapter->netdev->flags & IFF_UP)
- ixgb_irq_enable(adapter);
+ /* disable VLAN tag insert/strip */
+ ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
+ ctrl &= ~IXGB_CTRL0_VME;
+ IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}
static void
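The one-line change in ixgb_tx_csum() above swaps skb_transport_offset() for skb_checksum_start_offset(). The two agree for locally generated packets, but the latter reads the offset the skb actually advertises (csum_start relative to the packet headers), which is the value CHECKSUM_PARTIAL hardware offload must honor in every case. A sketch of the context-descriptor math the driver performs:

#include <linux/skbuff.h>

/* sketch: compute the two bytes a checksum context descriptor needs */
static void fill_csum_offsets(const struct sk_buff *skb, u8 *css, u8 *cso)
{
        *css = skb_checksum_start_offset(skb);  /* where summing starts */
        *cso = *css + skb->csum_offset;         /* where the sum is stored */
}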
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index 88a08f056241..dd7fbeb1f7d1 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -191,9 +191,9 @@ struct ixgb_option {
} r;
struct { /* list_option info */
int nr;
- struct ixgb_opt_list {
+ const struct ixgb_opt_list {
int i;
- char *str;
+ const char *str;
} *p;
} l;
} arg;
@@ -226,7 +226,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
break;
case list_option: {
int i;
- struct ixgb_opt_list *ent;
+ const struct ixgb_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
@@ -322,14 +322,15 @@ ixgb_check_options(struct ixgb_adapter *adapter)
}
{ /* Flow Control */
- struct ixgb_opt_list fc_list[] =
- {{ ixgb_fc_none, "Flow Control Disabled" },
- { ixgb_fc_rx_pause,"Flow Control Receive Only" },
- { ixgb_fc_tx_pause,"Flow Control Transmit Only" },
- { ixgb_fc_full, "Flow Control Enabled" },
- { ixgb_fc_default, "Flow Control Hardware Default" }};
+ static const struct ixgb_opt_list fc_list[] = {
+ { ixgb_fc_none, "Flow Control Disabled" },
+ { ixgb_fc_rx_pause, "Flow Control Receive Only" },
+ { ixgb_fc_tx_pause, "Flow Control Transmit Only" },
+ { ixgb_fc_full, "Flow Control Enabled" },
+ { ixgb_fc_default, "Flow Control Hardware Default" }
+ };
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 8f81efb49169..7d7387fbdecd 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o
+ ixgbe_mbx.o ixgbe_x540.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ed8703cfffb7..3ae30b8cb7d6 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -61,10 +61,8 @@
#define IXGBE_MIN_RXD 64
/* flow control */
-#define IXGBE_DEFAULT_FCRTL 0x10000
#define IXGBE_MIN_FCRTL 0x40
#define IXGBE_MAX_FCRTL 0x7FF80
-#define IXGBE_DEFAULT_FCRTH 0x20000
#define IXGBE_MIN_FCRTH 0x600
#define IXGBE_MAX_FCRTH 0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
@@ -130,7 +128,9 @@ struct ixgbe_tx_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
- u16 mapped_as_page;
+ unsigned int bytecount;
+ u16 gso_segs;
+ u8 mapped_as_page;
};
struct ixgbe_rx_buffer {
@@ -146,12 +146,56 @@ struct ixgbe_queue_stats {
u64 bytes;
};
+struct ixgbe_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 completed;
+ u64 tx_done_old;
+};
+
+struct ixgbe_rx_queue_stats {
+ u64 rsc_count;
+ u64 rsc_flush;
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+};
+
+enum ixbge_ring_state_t {
+ __IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_DETECT_HANG,
+ __IXGBE_HANG_CHECK_ARMED,
+ __IXGBE_RX_PS_ENABLED,
+ __IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+ test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+ set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+ clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
struct ixgbe_ring {
void *desc; /* descriptor ring memory */
+ struct device *dev; /* device for DMA mapping */
+ struct net_device *netdev; /* netdev ring belongs to */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
+ unsigned long state;
u8 atr_sample_rate;
u8 atr_count;
u16 count; /* amount of descriptors */
@@ -160,38 +204,30 @@ struct ixgbe_ring {
u16 next_to_clean;
u8 queue_index; /* needed for multiqueue queue management */
-
-#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
- u8 flags; /* per ring feature flags */
- u16 head;
- u16 tail;
-
- unsigned int total_bytes;
- unsigned int total_packets;
-
-#ifdef CONFIG_IXGBE_DCA
- /* cpu for tx queue */
- int cpu;
-#endif
-
- u16 work_limit; /* max work per interrupt */
- u16 reg_idx; /* holds the special value that gets
+ u8 reg_idx; /* holds the special value that gets
* the hardware register offset
* associated with this ring, which is
* different for DCB and RSS modes
*/
+ u16 work_limit; /* max work per interrupt */
+
+ u8 __iomem *tail;
+
+ unsigned int total_bytes;
+ unsigned int total_packets;
+
struct ixgbe_queue_stats stats;
struct u64_stats_sync syncp;
+ union {
+ struct ixgbe_tx_queue_stats tx_stats;
+ struct ixgbe_rx_queue_stats rx_stats;
+ };
int numa_node;
- unsigned long reinit_state;
- u64 rsc_count; /* stat for coalesced packets */
- u64 rsc_flush; /* stats for flushed packets */
- u32 restart_queue; /* track tx queue restarts */
- u32 non_eop_descs; /* track hardware descriptor chaining */
-
unsigned int size; /* length in bytes */
dma_addr_t dma; /* phys. address of descriptor ring */
+ struct rcu_head rcu;
+ struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -237,6 +273,9 @@ struct ixgbe_q_vector {
unsigned int v_idx; /* index of q_vector within array, also used for
* finding the bit in EICR and friends that
* represents the vector for this ring */
+#ifdef CONFIG_IXGBE_DCA
+ int cpu; /* CPU for DCA */
+#endif
struct napi_struct napi;
DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -246,6 +285,7 @@ struct ixgbe_q_vector {
u8 rx_itr;
u32 eitr;
cpumask_var_t affinity_mask;
+ char name[IFNAMSIZ + 9];
};
/* Helper macros to switch between ints/sec and what the register uses.
@@ -294,7 +334,6 @@ struct ixgbe_adapter {
u16 bd_number;
struct work_struct reset_task;
struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
u8 dcb_set_bitmap;
@@ -417,6 +456,7 @@ struct ixgbe_adapter {
int node;
struct work_struct check_overtemp_task;
u32 interrupt_event;
+ char lsc_int_name[IFNAMSIZ + 9];
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -428,17 +468,25 @@ enum ixbge_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
- __IXGBE_FDIR_INIT_DONE,
__IXGBE_SFP_MODULE_NOT_FOUND
};
+struct ixgbe_rsc_cb {
+ dma_addr_t dma;
+ u16 skb_cnt;
+ bool delay_unmap;
+};
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+
enum ixgbe_boards {
board_82598,
board_82599,
+ board_X540,
};
extern struct ixgbe_info ixgbe_82598_info;
extern struct ixgbe_info ixgbe_82599_info;
+extern struct ixgbe_info ixgbe_X540_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
@@ -454,26 +502,24 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter);
extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
- struct net_device *,
struct ixgbe_adapter *,
struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int cleaned_count);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
+extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -498,6 +544,10 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
u16 flex_byte);
extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
u8 l4type);
+extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
+extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
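The new ixgbe ring layout replaces the ad-hoc u8 flags field and scattered per-ring counters with a single unsigned long state word driven by the atomic bit helpers, wrapped in ring_is_*/set_ring_* macros so call sites stay readable. The pattern generalizes; a minimal sketch with hypothetical names:

#include <linux/bitops.h>
#include <linux/types.h>

enum example_ring_state {
        __RING_PS_ENABLED,      /* packet split active */
        __RING_DETECT_HANG,     /* hang checker armed */
};

struct example_ring {
        unsigned long state;    /* one word of atomic flag bits */
};

#define ring_is_ps_enabled(r)   test_bit(__RING_PS_ENABLED, &(r)->state)
#define set_ring_ps_enabled(r)  set_bit(__RING_PS_ENABLED, &(r)->state)

static void configure_ring(struct example_ring *ring, bool ps)
{
        if (ps)
                set_ring_ps_enabled(ring);
        else
                clear_bit(__RING_PS_ENABLED, &ring->state);
}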
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 9c02d6014cc4..d0f1d9d2c416 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -38,9 +38,6 @@
#define IXGBE_82598_MC_TBL_SIZE 128
#define IXGBE_82598_VFT_TBL_SIZE 128
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
@@ -156,7 +153,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_82598;
+ &ixgbe_get_copper_link_capabilities_generic;
}
switch (hw->phy.type) {
@@ -274,37 +271,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- s32 status = IXGBE_ERR_LINK_SETUP;
- u16 speed_ability;
-
- *speed = 0;
- *autoneg = true;
-
- status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
- &speed_ability);
-
- if (status == 0) {
- if (speed_ability & MDIO_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & MDIO_PMA_SPEED_1000)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- }
-
- return status;
-}
-
-/**
* ixgbe_get_media_type_82598 - Determines media type
* @hw: pointer to hardware structure
*
@@ -357,6 +323,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
+ u32 rx_pba_size;
u32 link_speed = 0;
bool link_up;
@@ -459,16 +426,18 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
- (hw->fc.low_water | IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
- hw->fc.low_water);
- }
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+ reg = (rx_pba_size - hw->fc.low_water) << 6;
+ if (hw->fc.send_xon)
+ reg |= IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
+
+ reg = (rx_pba_size - hw->fc.high_water) << 10;
+ reg |= IXGBE_FCRTH_FCEN;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
}
/* Configure pause time (2 TCs per register) */
@@ -1222,6 +1191,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 0bd8fbb5bfd0..6827dddc383e 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -56,9 +56,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
@@ -68,9 +65,9 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
- if (hw->phy.multispeed_fiber) {
- /* Set up dual speed SFP+ support */
- mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+
+ /* enable the laser control functions for SFP+ fiber */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
mac->ops.disable_tx_laser =
&ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
@@ -80,6 +77,12 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
mac->ops.disable_tx_laser = NULL;
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ /* Set up dual speed SFP+ support */
+ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ } else {
if ((mac->ops.get_media_type(hw) ==
ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -93,6 +96,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
+ u32 reg_anlp1 = 0;
+ u32 i = 0;
u16 list_offset, data_offset, data_value;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -119,14 +124,34 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
hw->eeprom.ops.read(hw, ++data_offset, &data_value);
}
- /* Now restart DSP by setting Restart_AN */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
- (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
/* Release the semaphore */
ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Delay obtaining semaphore again to allow FW access */
msleep(hw->eeprom.semaphore_delay);
+
+ /* Now restart DSP by setting Restart_AN and clearing LMS */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+ IXGBE_AUTOC_AN_RESTART));
+
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ msleep(4);
+ reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+ if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+ hw_dbg(hw, "sfp module setup not complete\n");
+ ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ goto setup_sfp_out;
+ }
+
+ /* Restart DSP by setting Restart_AN and return to SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+ IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
@@ -174,7 +199,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_82599;
+ &ixgbe_get_copper_link_capabilities_generic;
}
/* Set necessary function pointers based on phy type */
@@ -184,6 +209,10 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
break;
+ case ixgbe_phy_aq:
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_generic;
+ break;
default:
break;
}
@@ -290,37 +319,6 @@ out:
}
/**
- * ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- s32 status = IXGBE_ERR_LINK_SETUP;
- u16 speed_ability;
-
- *speed = 0;
- *autoneg = true;
-
- status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
- &speed_ability);
-
- if (status == 0) {
- if (speed_ability & MDIO_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & MDIO_PMA_SPEED_1000)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- }
-
- return status;
-}
-
-/**
* ixgbe_get_media_type_82599 - Get media type
* @hw: pointer to hardware structure
*
@@ -332,7 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
/* Detect if there is a copper PHY attached. */
if (hw->phy.type == ixgbe_phy_cu_unknown ||
- hw->phy.type == ixgbe_phy_tn) {
+ hw->phy.type == ixgbe_phy_tn ||
+ hw->phy.type == ixgbe_phy_aq) {
media_type = ixgbe_media_type_copper;
goto out;
}
@@ -342,11 +341,13 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
media_type = ixgbe_media_type_fiber;
break;
@@ -1924,6 +1925,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
hw->phy.ops.identify(hw);
if (hw->phy.type == ixgbe_phy_tn ||
+ hw->phy.type == ixgbe_phy_aq ||
hw->phy.type == ixgbe_phy_cu_unknown) {
hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
&ext_ability);
@@ -2125,51 +2127,6 @@ fw_version_out:
return status;
}
-/**
- * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
- * the EEPROM
- * @hw: pointer to hardware structure
- * @wwnn_prefix: the alternative WWNN prefix
- * @wwpn_prefix: the alternative WWPN prefix
- *
- * This function will read the EEPROM from the alternative SAN MAC address
- * block to check the support for the alternative WWNN/WWPN prefix support.
- **/
-static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
-{
- u16 offset, caps;
- u16 alt_san_mac_blk_offset;
-
- /* clear output first */
- *wwnn_prefix = 0xFFFF;
- *wwpn_prefix = 0xFFFF;
-
- /* check if alternative SAN MAC is supported */
- hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
- &alt_san_mac_blk_offset);
-
- if ((alt_san_mac_blk_offset == 0) ||
- (alt_san_mac_blk_offset == 0xFFFF))
- goto wwn_prefix_out;
-
- /* check capability in alternative san mac address block */
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
- hw->eeprom.ops.read(hw, offset, &caps);
- if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
- goto wwn_prefix_out;
-
- /* get the corresponding prefix for WWNN/WWPN */
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwnn_prefix);
-
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwpn_prefix);
-
-wwn_prefix_out:
- return 0;
-}
-
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
@@ -2181,7 +2138,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
.get_device_caps = &ixgbe_get_device_caps_82599,
- .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
.stop_adapter = &ixgbe_stop_adapter_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
@@ -2214,6 +2171,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
.write = &ixgbe_write_eeprom_generic,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
@@ -2240,5 +2198,5 @@ struct ixgbe_info ixgbe_82599_info = {
.mac_ops = &mac_ops_82599,
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
- .mbx_ops = &mbx_ops_82599,
+ .mbx_ops = &mbx_ops_generic,
};
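
The SFP setup path above now bounds its wait: it restarts AN with LMS cleared, polls ANLP1 up to ten times at 4 ms intervals for the AN state machine to leave state 0, and only then flips back to 10G serial mode. A standalone sketch of that bounded-poll idiom (register behavior is simulated and the state mask is invented; the real loop sleeps via msleep(4)):

/* Simulated bounded poll mirroring the AN-state wait in the hunk above. */
#include <stdio.h>

static unsigned int fake_anlp1(int attempt)
{
	return attempt >= 3 ? 0x0000000F : 0; /* AN leaves state 0 on poll 3 */
}

int main(void)
{
	const unsigned int an_state_mask = 0x0000000F; /* invented mask */
	unsigned int reg = 0;
	int i;

	for (i = 0; i < 10; i++) {
		reg = fake_anlp1(i);
		if (reg & an_state_mask)
			break;
	}
	printf("%s\n", (reg & an_state_mask) ?
	       "AN running, switch back to SFI" :
	       "sfp module setup not complete");
	return 0;
}
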
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index e3eca1316389..cc11e422ce9b 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -45,14 +45,12 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
/**
* ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -198,30 +196,110 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_read_pba_num_generic - Reads part number from EEPROM
+ * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
* @hw: pointer to hardware structure
- * @pba_num: stores the part number from the EEPROM
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
*
- * Reads the part number from the EEPROM.
+ * Reads the part number string from the EEPROM.
**/
-s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
{
s32 ret_val;
u16 data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ if (pba_num == NULL) {
+ hw_dbg(hw, "PBA string buffer was null\n");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
if (ret_val) {
hw_dbg(hw, "NVM Read Error\n");
return ret_val;
}
- *pba_num = (u32)(data << 16);
- ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
if (ret_val) {
hw_dbg(hw, "NVM Read Error\n");
return ret_val;
}
- *pba_num |= data;
+
+ /*
+	 * if data is not the pointer guard, the PBA must be in legacy format,
+	 * which means pba_ptr is actually the second data word of the PBA
+	 * number and we can decode it into an ASCII string
+ */
+ if (data != IXGBE_PBANUM_PTR_GUARD) {
+ hw_dbg(hw, "NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ hw_dbg(hw, "PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (data >> 12) & 0xF;
+ pba_num[1] = (data >> 8) & 0xF;
+ pba_num[2] = (data >> 4) & 0xF;
+ pba_num[3] = data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return 0;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ hw_dbg(hw, "NVM PBA number section invalid length\n");
+ return IXGBE_ERR_PBA_SECTION;
+ }
+
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ hw_dbg(hw, "PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
return 0;
}
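
A worked example of the legacy branch above: when the first word is not the pointer guard, the two EEPROM words are expanded nibble-by-nibble into the 11-byte "XXXXXX-0XX" form. The sample words are invented; the algorithm mirrors the hunk:

/* Legacy PBA decode, standalone. Prints "E66810-052". */
#include <stdio.h>

int main(void)
{
	unsigned short data = 0xE668, pba_ptr = 0x1052; /* hypothetical */
	char pba[11];
	int i;

	pba[0] = (data >> 12) & 0xF;    pba[1] = (data >> 8) & 0xF;
	pba[2] = (data >> 4) & 0xF;     pba[3] = data & 0xF;
	pba[4] = (pba_ptr >> 12) & 0xF; pba[5] = (pba_ptr >> 8) & 0xF;
	pba[6] = '-';
	pba[7] = 0;                     /* becomes the literal digit '0' */
	pba[8] = (pba_ptr >> 4) & 0xF;  pba[9] = pba_ptr & 0xF;
	pba[10] = '\0';

	for (i = 0; i < 10; i++) {      /* switch nibbles to hex chars */
		if (pba[i] == '-')
			continue;
		pba[i] += (pba[i] < 0xA) ? '0' : 'A' - 0xA;
	}
	printf("%s\n", pba);
	return 0;
}
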
@@ -638,7 +716,7 @@ out:
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
* read or write is done respectively.
**/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
@@ -1009,7 +1087,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
* ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
-static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -1072,7 +1150,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status == 0) {
- checksum = ixgbe_calc_eeprom_checksum(hw);
+ checksum = hw->eeprom.ops.calc_checksum(hw);
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
@@ -1110,7 +1188,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status == 0) {
- checksum = ixgbe_calc_eeprom_checksum(hw);
+ checksum = hw->eeprom.ops.calc_checksum(hw);
status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
checksum);
} else {
@@ -1595,6 +1673,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 mflcn_reg, fccfg_reg;
u32 reg;
u32 rx_pba_size;
+ u32 fcrtl, fcrth;
#ifdef CONFIG_DCB
if (hw->fc.requested_mode == ixgbe_fc_pfc)
@@ -1671,41 +1750,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
- /* Thresholds are different for link flow control when in DCB mode */
- if (reg & IXGBE_MTQC_RT_ENA) {
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
- /* Always disable XON for LFC when in DCB mode */
- reg = (rx_pba_size >> 5) & 0xFFE0;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
+ fcrth = (rx_pba_size - hw->fc.high_water) << 10;
+ fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
- reg = (rx_pba_size >> 2) & 0xFFE0;
- if (hw->fc.current_mode & ixgbe_fc_tx_pause)
- reg |= IXGBE_FCRTH_FCEN;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
- } else {
- /*
- * Set up and enable Rx high/low water mark thresholds,
- * enable XON.
- */
- if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw,
- IXGBE_FCRTL_82599(packetbuf_num),
- (hw->fc.low_water |
- IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw,
- IXGBE_FCRTL_82599(packetbuf_num),
- hw->fc.low_water);
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
- }
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ fcrth |= IXGBE_FCRTH_FCEN;
+ if (hw->fc.send_xon)
+ fcrtl |= IXGBE_FCRTL_XONE;
}
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+
/* Configure pause time (2 TCs per register) */
reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
if ((packetbuf_num & 1) == 0)
@@ -2705,3 +2764,48 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
return 0;
}
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the EEPROM from the alternative SAN MAC address
+ * block to check support for the alternative WWNN/WWPN prefix.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+ &alt_san_mac_blk_offset);
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ hw->eeprom.ops.read(hw, offset, &caps);
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+ return 0;
+}
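
Converting ixgbe_calc_eeprom_checksum() from a file-local helper into eeprom.ops.calc_checksum lets each MAC family plug in its own variant while validate/update stay generic. A minimal sketch of that ops-table indirection (types and values are stand-ins, not the driver's):

/* Ops-table indirection sketch: callers go through the pointer, so a
 * new MAC (e.g. X540) can override calc_checksum without touching them. */
#include <stdio.h>

struct eeprom_ops {
	unsigned short (*calc_checksum)(void);
};

static unsigned short calc_checksum_generic(void)
{
	return 0xBABA - 0x1234; /* pretend sum of EEPROM words */
}

int main(void)
{
	struct eeprom_ops ops = { .calc_checksum = calc_checksum_generic };

	/* validate/update no longer hard-code the generic helper */
	printf("checksum=0x%04X\n", ops.calc_checksum());
	return 0;
}
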
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 424c223437dc..e1f980a8a09d 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -35,7 +35,8 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -49,9 +50,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
@@ -81,7 +84,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
-
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 0d44c6470ca3..d16c260c1f50 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -42,7 +42,8 @@
* It should be called only after the rules are checked by
* ixgbe_dcb_check_config().
*/
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config,
int max_frame, u8 direction)
{
struct tc_bw_alloc *p;
@@ -124,7 +125,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
* credit may not be enough to send out a TSO
* packet in descriptor plane arbitration.
*/
- if (credit_max &&
+ if ((hw->mac.type == ixgbe_mac_82598EB) &&
+ credit_max &&
(credit_max < MINIMUM_CREDIT_FOR_TSO))
credit_max = MINIMUM_CREDIT_FOR_TSO;
@@ -150,10 +152,17 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
- if (hw->mac.type == ixgbe_mac_82598EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
- else if (hw->mac.type == ixgbe_mac_82599EB)
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+ break;
+ default:
+ break;
+ }
return ret;
}
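
The credits hunk above restricts the TSO minimum-credit clamp to 82598, the only MAC whose descriptor-plane arbitration needs a whole TSO packet's worth of credit. A standalone sketch of the clamp, with MINIMUM_CREDIT_FOR_TSO invented for illustration (the real value is defined in the DCB headers):

/* TSO credit clamp sketch; the constant below is hypothetical. */
#include <stdio.h>

#define MINIMUM_CREDIT_FOR_TSO 1025 /* e.g. ~64KB / 64B granularity + 1 */

int main(void)
{
	int is_82598 = 1;
	unsigned int credit_max = 300; /* from bandwidth-percentage math */

	/* only 82598 must round the credit up to cover a full TSO burst */
	if (is_82598 && credit_max && credit_max < MINIMUM_CREDIT_FOR_TSO)
		credit_max = MINIMUM_CREDIT_FOR_TSO;

	printf("credit_max=%u\n", credit_max);
	return 0;
}
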
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 0208a87b129e..1cfe38ee1644 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,7 +150,8 @@ struct ixgbe_dcb_config {
/* DCB driver APIs */
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 50288bcadc59..9a5e89c12e05 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -256,21 +256,17 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
* for each traffic class.
*/
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- if (dcb_config->rx_pba_cfg == pba_equal) {
- rx_pba_size = IXGBE_RXPBSIZE_64KB;
- } else {
- rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
- : IXGBE_RXPBSIZE_48KB;
- }
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+ reg = (rx_pba_size - hw->fc.low_water) << 10;
- reg = ((rx_pba_size >> 5) & 0xFFF0);
if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
- reg = ((rx_pba_size >> 2) & 0xFFF0);
+ reg = (rx_pba_size - hw->fc.high_water) << 10;
if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 05f224715073..374e1f74d0f5 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -251,19 +251,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- if (dcb_config->rx_pba_cfg == pba_equal)
- rx_pba_size = IXGBE_RXPBSIZE_64KB;
- else
- rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
- : IXGBE_RXPBSIZE_48KB;
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+ reg = (rx_pba_size - hw->fc.low_water) << 10;
- reg = ((rx_pba_size >> 5) & 0xFFE0);
if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
- reg = ((rx_pba_size >> 2) & 0xFFE0);
+ reg = (rx_pba_size - hw->fc.high_water) << 10;
if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index b53b465e24af..bf566e8a455e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -130,15 +130,21 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
adapter->last_lfc_mode = adapter->hw.fc.current_mode;
adapter->hw.fc.requested_mode = ixgbe_fc_none;
- }
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ break;
+ default:
+ break;
}
+
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
@@ -155,8 +161,14 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ break;
+ default:
+ break;
+ }
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
@@ -178,9 +190,14 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
for (i = 0; i < netdev->addr_len; i++)
perm_addr[i] = adapter->hw.mac.perm_addr[i];
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
+ break;
+ default:
+ break;
}
}
@@ -366,15 +383,29 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
if (adapter->dcb_cfg.pfc_mode_enable) {
- if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
- (adapter->hw.fc.current_mode != ixgbe_fc_pfc))
- adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
+ adapter->last_lfc_mode =
+ adapter->hw.fc.current_mode;
+ break;
+ default:
+ break;
+ }
adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
} else {
- if (adapter->hw.mac.type != ixgbe_mac_82598EB)
- adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
- else
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
adapter->hw.fc.requested_mode = ixgbe_fc_none;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+ break;
+ default:
+ break;
+ }
}
if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3dc731c22ff2..23ff23e8b393 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -185,6 +185,16 @@ static int ixgbe_get_settings(struct net_device *netdev,
ADVERTISED_FIBRE);
ecmd->port = PORT_FIBRE;
ecmd->autoneg = AUTONEG_DISABLE;
+ } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
+ (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
} else {
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE);
@@ -204,6 +214,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
/* Get PHY type */
switch (adapter->hw.phy.type) {
case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
/* Copper 10G-BASET */
ecmd->port = PORT_TP;
@@ -332,13 +343,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
else
pause->autoneg = 1;
-#ifdef CONFIG_DCB
- if (hw->fc.current_mode == ixgbe_fc_pfc) {
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- }
-
-#endif
if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
pause->rx_pause = 1;
} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -346,6 +350,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
} else if (hw->fc.current_mode == ixgbe_fc_full) {
pause->rx_pause = 1;
pause->tx_pause = 1;
+#ifdef CONFIG_DCB
+ } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+#endif
}
}
@@ -363,7 +372,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
return -EINVAL;
#endif
-
fc = hw->fc;
if (pause->autoneg != AUTONEG_ENABLE)
@@ -412,11 +420,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
else
adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
-
return 0;
}
@@ -428,16 +431,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ u32 feature_list;
- if (data) {
- netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- netdev->features |= NETIF_F_SCTP_CSUM;
- } else {
- netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- netdev->features &= ~NETIF_F_SCTP_CSUM;
+ feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ feature_list |= NETIF_F_SCTP_CSUM;
+ break;
+ default:
+ break;
}
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
return 0;
}
@@ -530,10 +538,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
- for (i = 0; i < 8; i++)
- regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
- for (i = 0; i < 8; i++)
- regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+ for (i = 0; i < 8; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+ break;
+ case ixgbe_mac_82599EB:
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+ break;
+ default:
+ break;
+ }
+ }
regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
@@ -615,6 +633,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
+ /* DCB */
regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -820,9 +839,10 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
char firmware_version[32];
- strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strncpy(drvinfo->driver, ixgbe_driver_name,
+ sizeof(drvinfo->driver) - 1);
strncpy(drvinfo->version, ixgbe_driver_version,
- sizeof(drvinfo->version));
+ sizeof(drvinfo->version) - 1);
snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
(adapter->eeprom_version & 0xF000) >> 12,
@@ -905,13 +925,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
sizeof(struct ixgbe_ring));
temp_tx_ring[i].count = new_tx_count;
- err = ixgbe_setup_tx_resources(adapter,
- &temp_tx_ring[i]);
+ err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_tx_resources(adapter,
- &temp_tx_ring[i]);
+ ixgbe_free_tx_resources(&temp_tx_ring[i]);
}
goto clear_reset;
}
@@ -930,13 +948,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
sizeof(struct ixgbe_ring));
temp_rx_ring[i].count = new_rx_count;
- err = ixgbe_setup_rx_resources(adapter,
- &temp_rx_ring[i]);
+ err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_rx_resources(adapter,
- &temp_rx_ring[i]);
+ ixgbe_free_rx_resources(&temp_rx_ring[i]);
}
goto err_setup;
}
@@ -951,8 +967,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbe_free_tx_resources(adapter,
- adapter->tx_ring[i]);
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
sizeof(struct ixgbe_ring));
}
@@ -962,8 +977,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* rx */
if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbe_free_rx_resources(adapter,
- adapter->rx_ring[i]);
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
sizeof(struct ixgbe_ring));
}
@@ -1144,7 +1158,7 @@ struct ixgbe_reg_test {
#define TABLE64_TEST_HI 6
/* default 82599 register test */
-static struct ixgbe_reg_test reg_test_82599[] = {
+static const struct ixgbe_reg_test reg_test_82599[] = {
{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1168,7 +1182,7 @@ static struct ixgbe_reg_test reg_test_82599[] = {
};
/* default 82598 register test */
-static struct ixgbe_reg_test reg_test_82598[] = {
+static const struct ixgbe_reg_test reg_test_82598[] = {
{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1195,18 +1209,22 @@ static struct ixgbe_reg_test reg_test_82598[] = {
{ 0, 0, 0, 0 }
};
+static const u32 register_test_patterns[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+};
+
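
Hoisting the patterns out of REG_PATTERN_TEST into register_test_patterns avoids re-instantiating the array at every macro expansion. A standalone sketch of the masked write/read-back idiom the macro implements, with the register simulated and the mask taken from the 82599 table above:

/* Masked pattern test: write (pattern & W), read back, compare to
 * (pattern & W & M). The MMIO register is simulated with a variable. */
#include <stdio.h>

static const unsigned int patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

int main(void)
{
	unsigned int mask = 0x8007FFF0, write = 0x8007FFF0;
	unsigned int reg, val, i;

	for (i = 0; i < 4; i++) {
		reg = patterns[i] & write;  /* simulated writel() */
		val = reg;                  /* simulated readl()  */
		if (val != (patterns[i] & write & mask)) {
			printf("pattern 0x%08X failed\n", patterns[i]);
			return 1;
		}
	}
	printf("pattern test passed\n");
	return 0;
}
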
#define REG_PATTERN_TEST(R, M, W) \
{ \
u32 pat, val, before; \
- const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
- for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
before = readl(adapter->hw.hw_addr + R); \
- writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ writel((register_test_patterns[pat] & W), \
+ (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
- if (val != (_test[pat] & W & M)) { \
- e_err(drv, "pattern test reg %04X failed: got " \
- "0x%08X expected 0x%08X\n", \
- R, val, (_test[pat] & W & M)); \
+ if (val != (register_test_patterns[pat] & W & M)) { \
+ e_err(drv, "pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (register_test_patterns[pat] & W & M)); \
*data = R; \
writel(before, adapter->hw.hw_addr + R); \
return 1; \
@@ -1233,16 +1251,24 @@ static struct ixgbe_reg_test reg_test_82598[] = {
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
- struct ixgbe_reg_test *test;
+ const struct ixgbe_reg_test *test;
u32 value, before, after;
u32 i, toggle;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- toggle = 0x7FFFF30F;
- test = reg_test_82599;
- } else {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
toggle = 0x7FFFF3FF;
test = reg_test_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ toggle = 0x7FFFF30F;
+ test = reg_test_82599;
+ break;
+ default:
+ *data = 1;
+ return 1;
}
/*
@@ -1460,16 +1486,21 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
+ break;
+ default:
+ break;
}
ixgbe_reset(adapter);
- ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
- ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
+ ixgbe_free_tx_resources(&adapter->test_tx_ring);
+ ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1483,17 +1514,24 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IXGBE_DEFAULT_TXD;
tx_ring->queue_index = 0;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
tx_ring->numa_node = adapter->node;
- err = ixgbe_setup_tx_resources(adapter, tx_ring);
+ err = ixgbe_setup_tx_resources(tx_ring);
if (err)
return 1;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
+ break;
+ default:
+ break;
}
ixgbe_configure_tx_ring(adapter, tx_ring);
@@ -1501,11 +1539,13 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
/* Setup Rx Descriptor ring and Rx buffers */
rx_ring->count = IXGBE_DEFAULT_RXD;
rx_ring->queue_index = 0;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->netdev = adapter->netdev;
rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
rx_ring->numa_node = adapter->node;
- err = ixgbe_setup_rx_resources(adapter, rx_ring);
+ err = ixgbe_setup_rx_resources(rx_ring);
if (err) {
ret_val = 4;
goto err_nomem;
@@ -1604,8 +1644,7 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
return 13;
}
-static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
+static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
struct ixgbe_ring *tx_ring,
unsigned int size)
{
@@ -1627,7 +1666,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
bufsz,
DMA_FROM_DEVICE);
@@ -1639,7 +1678,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
/* unmap buffer on Tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
@@ -1655,7 +1694,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
}
/* re-map buffers to ring, store next to clean values */
- ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
+ ixgbe_alloc_rx_buffers(rx_ring, count);
rx_ring->next_to_clean = rx_ntc;
tx_ring->next_to_clean = tx_ntc;
@@ -1699,7 +1738,6 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
for (i = 0; i < 64; i++) {
skb_get(skb);
tx_ret_val = ixgbe_xmit_frame_ring(skb,
- adapter->netdev,
adapter,
tx_ring);
if (tx_ret_val == NETDEV_TX_OK)
@@ -1714,8 +1752,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
/* allow 200 milliseconds for packets to go from Tx to Rx */
msleep(200);
- good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
- tx_ring, size);
+ good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
if (good_cnt != 64) {
ret_val = 13;
break;
@@ -1847,7 +1884,25 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int retval = 1;
+ /* WOL not supported except for the following */
switch(hw->device_id) {
+ case IXGBE_DEV_ID_82599_SFP:
+ /* Only this subdevice supports WOL */
+ if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+ wol->supported = 0;
+ break;
+ }
+ retval = 0;
+ break;
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (hw->subsystem_device_id ==
+ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+ wol->supported = 0;
+ break;
+ }
+ retval = 0;
+ break;
case IXGBE_DEV_ID_82599_KX4:
retval = 0;
break;
@@ -1985,6 +2040,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
return 0;
}
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
+ struct ethtool_coalesce *ec)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+ return false;
+
+ /* if interrupt rate is too high then disable RSC */
+ if (ec->rx_coalesce_usecs != 1 &&
+ ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ e_info(probe, "rx-usecs set too low, "
+ "disabling RSC\n");
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ return true;
+ }
+ } else {
+ /* check the feature flag value and enable RSC if necessary */
+ if ((netdev->features & NETIF_F_LRO) &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ e_info(probe, "rx-usecs set to %d, "
+ "re-enabling RSC\n",
+ ec->rx_coalesce_usecs);
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ return true;
+ }
+ }
+ return false;
+}
+
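
ixgbe_update_rsc() centralizes the rule that RSC and very low rx-usecs are mutually exclusive. A numeric sketch of the gate, assuming IXGBE_MAX_RSC_INT_RATE is 162760 interrupts/s (so any rx-usecs of 6 or below, other than the dynamic value 1, disables RSC):

/* RSC vs. interrupt-rate gate; the rate limit is an assumption. */
#include <stdio.h>

int main(void)
{
	const unsigned int max_rsc_int_rate = 162760; /* assumed limit */
	unsigned int rx_usecs;

	for (rx_usecs = 2; rx_usecs <= 10; rx_usecs += 2) {
		int rsc_ok = !(rx_usecs != 1 &&
			       rx_usecs <= 1000000 / max_rsc_int_rate);
		printf("rx-usecs=%u -> RSC %s\n", rx_usecs,
		       rsc_ok ? "allowed" : "disabled");
	}
	return 0;
}
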
static int ixgbe_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -2002,17 +2092,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
- u32 max_int;
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- max_int = IXGBE_MAX_RSC_INT_RATE;
- else
- max_int = IXGBE_MAX_INT_RATE;
-
/* check the limits */
- if ((1000000/ec->rx_coalesce_usecs > max_int) ||
+ if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
(1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
return -EINVAL;
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
+
/* store the value in ints/second */
adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
@@ -2021,32 +2108,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* clear the lower bit as its used for dynamic state */
adapter->rx_itr_setting &= ~1;
} else if (ec->rx_coalesce_usecs == 1) {
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
+
/* 1 means dynamic mode */
adapter->rx_eitr_param = 20000;
adapter->rx_itr_setting = 1;
} else {
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
/*
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
adapter->rx_itr_setting = 0;
-
- /*
- * if hardware RSC is enabled, disable it when
- * setting low latency mode, to avoid errata, assuming
- * that when the user set low latency mode they want
- * it at the cost of anything else
- */
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
- if (netdev->features & NETIF_F_LRO) {
- netdev->features &= ~NETIF_F_LRO;
- e_info(probe, "rx-usecs set to 0, "
- "disabling RSC\n");
- }
- need_reset = true;
- }
}
if (ec->tx_coalesce_usecs > 1) {
@@ -2127,34 +2203,45 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
need_reset = (data & ETH_FLAG_RXVLAN) !=
(netdev->features & NETIF_F_HW_VLAN_RX);
- rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO |
+ rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
if (rc)
return rc;
/* if state changes we need to update adapter->flags and reset */
- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
- /*
- * cast both to bool and verify if they are set the same
- * but only enable RSC if itr is non-zero, as
- * itr=0 and RSC are mutually exclusive
- */
- if (((!!(data & ETH_FLAG_LRO)) !=
- (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
- adapter->rx_itr_setting) {
+ if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+ (!!(data & ETH_FLAG_LRO) !=
+ !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
+ if ((data & ETH_FLAG_LRO) &&
+ (!adapter->rx_itr_setting ||
+ (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
+ e_info(probe, "rx-usecs set too low, "
+ "not enabling RSC.\n");
+ } else {
adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
need_reset = true;
break;
+ case ixgbe_mac_X540: {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring =
+ adapter->rx_ring[i];
+ if (adapter->flags2 &
+ IXGBE_FLAG2_RSC_ENABLED) {
+ ixgbe_configure_rscctl(adapter,
+ ring);
+ } else {
+ ixgbe_clear_rscctl(adapter,
+ ring);
+ }
+ }
+ }
+ break;
default:
break;
}
- } else if (!adapter->rx_itr_setting) {
- netdev->features &= ~NETIF_F_LRO;
- if (data & ETH_FLAG_LRO)
- e_info(probe, "rx-usecs set to 0, "
- "LRO/RSC cannot be enabled.\n");
}
}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 05efa6a8ce8e..6342d4859790 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
ddp->len = 0;
- ddp->err = 0;
+ ddp->err = 1;
ddp->udl = NULL;
ddp->udp = 0UL;
ddp->sgl = NULL;
@@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
struct ixgbe_fcoe *fcoe;
struct ixgbe_adapter *adapter;
struct ixgbe_fcoe_ddp *ddp;
+ u32 fcbuff;
if (!netdev)
goto out_ddp_put;
@@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
(xid | IXGBE_FCDMARW_WE));
+
+ /* guaranteed to be invalidated after 100us */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+ (xid | IXGBE_FCDMARW_RE));
+ fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
spin_unlock_bh(&fcoe->lock);
+ if (fcbuff & IXGBE_FCBUFF_VALID)
+ udelay(100);
}
if (ddp->sgl)
pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
@@ -168,6 +176,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
return 0;
}
+ /* no DDP if we are already down or resetting */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return 0;
+
fcoe = &adapter->fcoe;
if (!fcoe->pool) {
e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2bd3eb4ee5a1..ca9036de49f9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,13 +52,14 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.84-k2"
+#define DRV_VERSION "3.0.12-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
[board_82599] = &ixgbe_82599_info,
+ [board_X540] = &ixgbe_X540_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -108,10 +109,16 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
+ board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
+ board_X540 },
/* required last entry */
{0, }
@@ -560,6 +567,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -589,29 +597,34 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
{
u32 mask;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
- } else {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+ break;
+ default:
+ break;
}
}
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
- struct ixgbe_tx_buffer
- *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *tx_buffer_info)
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_page(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
else
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
@@ -626,92 +639,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_tx_xon_state - check the tx ring xon state
- * @adapter: the ixgbe adapter
- * @tx_ring: the corresponding tx_ring
+ * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
+ * @adapter: driver private struct
+ * @index: reg idx of queue to query (0-127)
*
- * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
- * corresponding TC of this tx_ring when checking TFCS.
+ * Helper function to determine the traffic index for a particular
+ * register index.
*
- * Returns : true if in xon state (currently not paused)
+ * Returns : the TC index in range 0-7 (or 0-3 in four-TC configurations)
*/
-static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
{
- u32 txoff = IXGBE_TFCS_TXOFF;
+ int tc = -1;
+ int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
-#ifdef CONFIG_IXGBE_DCB
- if (adapter->dcb_cfg.pfc_mode_enable) {
- int tc;
- int reg_idx = tx_ring->reg_idx;
- int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+ /* if DCB is not enabled the queues have no TC */
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return tc;
+
+ /* check valid range */
+ if (reg_idx >= adapter->hw.mac.max_tx_queues)
+ return tc;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ tc = reg_idx >> 2;
+ break;
+ default:
+ if (dcb_i != 4 && dcb_i != 8)
+ break;
+
+ /* if VMDq is enabled the lowest order bits determine TC */
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED)) {
+ tc = reg_idx & (dcb_i - 1);
+ break;
+ }
+
+ /*
+ * Convert the reg_idx into the correct TC. This bitmask
+ * targets the last full 32 ring traffic class and assigns
+ * it a value of 1. From there the rest of the rings are
+ * based on shifting the mask further up to include the
+	 * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
+	 * will only ever be 8 or 4 and that reg_idx will never
+	 * be greater than 128. The code without the power of 2
+ * optimizations would be:
+ * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
+ */
+	tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
+ tc >>= 9 - (reg_idx >> 5);
+ }
+
+ return tc;
+}
+
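
A worked example of the reg_idx-to-TC bitmask math above for dcb_i == 8: the 82599-style layout gives TC0/TC1 32 queues each, TC2/TC3 16 each, and TC4-TC7 8 each, and the shift expression reproduces exactly that mapping:

/* Standalone check of the TC mapping for the eight-TC case. */
#include <stdio.h>

static int txq_to_tc(unsigned int reg_idx, int dcb_i)
{
	int tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
	return tc >> (9 - (reg_idx >> 5));
}

int main(void)
{
	unsigned int idx[] = { 0, 32, 64, 80, 96, 104, 120 };
	unsigned int i;

	for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++)
		printf("reg_idx %3u -> TC %d\n", idx[i], txq_to_tc(idx[i], 8));
	/* prints TCs 0, 1, 2, 3, 4, 5, 7 */
	return 0;
}
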
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u32 data = 0;
+ u32 xoff[8] = {0};
+ int i;
- switch (adapter->hw.mac.type) {
+ if ((hw->fc.current_mode == ixgbe_fc_full) ||
+ (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
+ switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- tc = reg_idx >> 2;
- txoff = IXGBE_TFCS_TXOFF0;
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
break;
- case ixgbe_mac_82599EB:
- tc = 0;
- txoff = IXGBE_TFCS_TXOFF;
- if (dcb_i == 8) {
- /* TC0, TC1 */
- tc = reg_idx >> 5;
- if (tc == 2) /* TC2, TC3 */
- tc += (reg_idx - 64) >> 4;
- else if (tc == 3) /* TC4, TC5, TC6, TC7 */
- tc += 1 + ((reg_idx - 96) >> 3);
- } else if (dcb_i == 4) {
- /* TC0, TC1 */
- tc = reg_idx >> 6;
- if (tc == 1) {
- tc += (reg_idx - 64) >> 5;
- if (tc == 2) /* TC2, TC3 */
- tc += (reg_idx - 96) >> 4;
- }
- }
+ default:
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ }
+ hwstats->lxoffrxc += data;
+
+ /* refill credits (no tx hang) if we received xoff */
+ if (!data)
+ return;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ clear_bit(__IXGBE_HANG_CHECK_ARMED,
+ &adapter->tx_ring[i]->state);
+ return;
+ } else if (!(adapter->dcb_cfg.pfc_mode_enable))
+ return;
+
+ /* update stats for each tc, only valid with PFC enabled */
+ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
break;
default:
- tc = 0;
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
}
- txoff <<= tc;
+ hwstats->pxoffrxc[i] += xoff[i];
+ }
+
+ /* disarm tx queues that have received xoff frames */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
+
+ if (xoff[tc])
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
-#endif
- return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}
-static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- unsigned int eop)
+static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
+ return ring->tx_stats.completed;
+}
+
+static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
struct ixgbe_hw *hw = &adapter->hw;
- /* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of eop */
- adapter->detect_tx_hung = false;
- if (tx_ring->tx_buffer_info[eop].time_stamp &&
- time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
- ixgbe_tx_xon_state(adapter, tx_ring)) {
- /* detected Tx unit hang */
- union ixgbe_adv_tx_desc *tx_desc;
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
- e_err(drv, "Detected Tx Unit Hang\n"
- " Tx Queue <%d>\n"
- " TDH, TDT <%x>, <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "tx_buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->queue_index,
- IXGBE_READ_REG(hw, tx_ring->head),
- IXGBE_READ_REG(hw, tx_ring->tail),
- tx_ring->next_to_use, eop,
- tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
- return true;
+ u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+ u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
+}
+
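
ixgbe_get_tx_pending() reduces hang detection to simple ring occupancy: the distance from head to tail modulo the ring size. A standalone sketch with invented head/tail samples:

/* Ring occupancy with wrap-around, as in ixgbe_get_tx_pending() above. */
#include <stdio.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	printf("%u\n", tx_pending(10, 200, 512)); /* 190 in-flight  */
	printf("%u\n", tx_pending(500, 20, 512)); /* 32, wrapped    */
	printf("%u\n", tx_pending(7, 7, 512));    /* 0, ring idle   */
	return 0;
}
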
+static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+{
+ u32 tx_done = ixgbe_get_tx_completed(tx_ring);
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+ u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /*
+ * Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * pfc clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_done_old == tx_done) && tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and continue */
+ tx_ring->tx_stats.tx_done_old = tx_done;
+ /* reset the countdown */
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
- return false;
+ return ret;
}
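
The two-check requirement above can be read as a tiny state machine: the first no-progress sample only arms the flag, the second reports a hang, and any completion progress disarms it. A minimal single-ring sketch (test_and_set_bit is modeled with a plain bool, so this ignores the atomicity the driver needs):

/* Two-strike hang detection, simplified to one ring and no atomics. */
#include <stdbool.h>
#include <stdio.h>

static bool armed;
static unsigned long done_old;

static bool check_tx_hang(unsigned long done, unsigned long pending)
{
	if (done == done_old && pending) {
		bool was_armed = armed;
		armed = true;  /* strike one arms, strike two fires */
		return was_armed;
	}
	done_old = done;
	armed = false;         /* progress: reset the countdown */
	return false;
}

int main(void)
{
	printf("%d\n", check_tx_hang(5, 3)); /* 0: armed only      */
	printf("%d\n", check_tx_hang(5, 3)); /* 1: hang reported   */
	printf("%d\n", check_tx_hang(6, 3)); /* 0: progress, rearm */
	return 0;
}
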
#define IXGBE_MAX_TXD_PWR 14
@@ -734,11 +821,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct net_device *netdev = adapter->netdev;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned int i, eop, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
+ u16 i, eop, count = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -749,147 +835,182 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) {
- struct sk_buff *skb;
tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- cleaned = (i == eop);
- skb = tx_buffer_info->skb;
-
- if (cleaned && skb) {
- unsigned int segs, bytecount;
- unsigned int hlen = skb_headlen(skb);
-
- /* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
-#ifdef IXGBE_FCOE
- /* adjust for FCoE Sequence Offload */
- if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- && (skb->protocol == htons(ETH_P_FCOE)) &&
- skb_is_gso(skb)) {
- hlen = skb_transport_offset(skb) +
- sizeof(struct fc_frame_header) +
- sizeof(struct fcoe_crc_eof);
- segs = DIV_ROUND_UP(skb->len - hlen,
- skb_shinfo(skb)->gso_size);
- }
-#endif /* IXGBE_FCOE */
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * hlen) + skb->len;
- total_packets += segs;
- total_bytes += bytecount;
- }
-
- ixgbe_unmap_and_free_tx_resource(adapter,
- tx_buffer_info);
tx_desc->wb.status = 0;
+ cleaned = (i == eop);
i++;
if (i == tx_ring->count)
i = 0;
+
+ if (cleaned && tx_buffer_info->skb) {
+ total_bytes += tx_buffer_info->bytecount;
+ total_packets += tx_buffer_info->gso_segs;
+ }
+
+ ixgbe_unmap_and_free_tx_resource(tx_ring,
+ tx_buffer_info);
}
+ tx_ring->tx_stats.completed++;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
}
tx_ring->next_to_clean = i;
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.packets += total_packets;
+ tx_ring->stats.bytes += total_bytes;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ struct ixgbe_hw *hw = &adapter->hw;
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+ e_err(drv, "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+ tx_ring->next_to_use, eop,
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ e_info(probe,
+ "tx hang %d detected on queue %d, resetting adapter\n",
+ adapter->tx_timeout_count + 1, tx_ring->queue_index);
+
+ /* the reset runs from the reset task, outside interrupt context */
+ ixgbe_tx_timeout(adapter->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(netdev) &&
+ if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
(IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
- if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+ if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
!test_bit(__IXGBE_DOWN, &adapter->state)) {
- netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++tx_ring->restart_queue;
- }
- }
-
- if (adapter->detect_tx_hung) {
- if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
- /* schedule immediate reset if we believe we hung */
- e_info(probe, "tx hang %d detected, resetting "
- "adapter\n", adapter->tx_timeout_count + 1);
- ixgbe_tx_timeout(adapter->netdev);
+ netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
}
}
- /* re-arm the interrupt */
- if (count >= tx_ring->work_limit)
- ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
-
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.packets += total_packets;
- tx_ring->stats.bytes += total_bytes;
- u64_stats_update_end(&tx_ring->syncp);
return count < tx_ring->work_limit;
}
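
Since ixgbe_clean_tx_irq() now publishes its 64-bit counters under tx_ring->syncp, a brief sketch of the u64_stats_sync pattern may help (hypothetical struct and fetch-side naming; a sketch, not driver code). On 32-bit SMP the seqcount lets a reader detect a torn pair of 64-bit counters and retry; on 64-bit builds it compiles away:

    #include <linux/u64_stats_sync.h>
    #include <linux/types.h>

    struct my_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
    };

    static void my_stats_add(struct my_stats *s, u64 pkts, u64 bytes)
    {
        u64_stats_update_begin(&s->syncp);
        s->packets += pkts;
        s->bytes += bytes;
        u64_stats_update_end(&s->syncp);
    }

    static void my_stats_read(struct my_stats *s, u64 *pkts, u64 *bytes)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin(&s->syncp);
            *pkts = s->packets;
            *bytes = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
    }
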
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring,
+ int cpu)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl;
- int cpu = get_cpu();
- int q = rx_ring->reg_idx;
-
- if (rx_ring->cpu != cpu) {
- rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
- rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
- rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
- }
- rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
- rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
- rx_ring->cpu = cpu;
+ u8 reg_idx = rx_ring->reg_idx;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+ rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
+ rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+ break;
+ default:
+ break;
}
- put_cpu();
+ rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+ rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring,
+ int cpu)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 txctrl;
+ u8 reg_idx = tx_ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+ txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
+ txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
int cpu = get_cpu();
- int q = tx_ring->reg_idx;
- struct ixgbe_hw *hw = &adapter->hw;
+ long r_idx;
+ int i;
- if (tx_ring->cpu != cpu) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
- txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
- txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
- }
- tx_ring->cpu = cpu;
+ if (q_vector->cpu == cpu)
+ goto out_no_update;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
}
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ q_vector->cpu = cpu;
+out_no_update:
put_cpu();
}
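
ixgbe_update_dca() now walks each q_vector's ring bitmaps rather than touching every ring in the adapter. A minimal sketch of that find_first_bit()/find_next_bit() walk (hypothetical callback, illustrative only):

    #include <linux/bitops.h>

    static void my_for_each_ring(unsigned long *ring_bitmap, int ring_count,
                                 int nr_rings, void (*visit)(unsigned long idx))
    {
        unsigned long r_idx;
        int i;

        /* visit exactly the indices set in the bitmap, lowest first */
        r_idx = find_first_bit(ring_bitmap, nr_rings);
        for (i = 0; i < ring_count; i++) {
            visit(r_idx);
            r_idx = find_next_bit(ring_bitmap, nr_rings, r_idx + 1);
        }
    }
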
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
+ int num_q_vectors;
int i;
if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -898,22 +1019,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
/* always use CB2 mode, difference is masked in the CB driver */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
- for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i]->cpu = -1;
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i]->cpu = -1;
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (i = 0; i < num_q_vectors; i++) {
+ adapter->q_vector[i]->cpu = -1;
+ ixgbe_update_dca(adapter->q_vector[i]);
}
}
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
unsigned long event = *(unsigned long *)data;
+ if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+ return 0;
+
switch (event) {
case DCA_PROVIDER_ADD:
/* if we're already enabled, don't do it again */
@@ -1012,8 +1136,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbe_ring *rx_ring, u32 val)
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
/*
* Force memory writes to complete before letting h/w
@@ -1022,72 +1145,81 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
* such as IA-64).
*/
wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
+ writel(val, rx_ring->tail);
}
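
The new tail write deserves a small sketch (hypothetical ring struct, assuming the tail pointer was set up as hw_addr plus the register offset, as the ring-configuration changes below do). The wmb() guarantees the descriptor contents are globally visible before the device observes the new tail:

    #include <linux/io.h>
    #include <linux/types.h>

    struct my_rx_ring {
        void __iomem *tail;	/* hw_addr + tail register offset */
    };

    static void my_release_rx_desc(struct my_rx_ring *ring, u32 val)
    {
        wmb();			/* order descriptor writes vs. tail bump */
        writel(val, ring->tail);
    }
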
/**
* ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
**/
-void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
- unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len;
+ struct sk_buff *skb;
+ u16 i = rx_ring->next_to_use;
- i = rx_ring->next_to_use;
- bi = &rx_ring->rx_buffer_info[i];
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev)
+ return;
while (cleaned_count--) {
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ skb = bi->skb;
- if (!bi->page_dma &&
- (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
- if (!bi->page) {
- bi->page = netdev_alloc_page(netdev);
- if (!bi->page) {
- adapter->alloc_rx_page_failed++;
- goto no_buffers;
- }
- bi->page_offset = 0;
- } else {
- /* use a half page if we're re-using */
- bi->page_offset ^= (PAGE_SIZE / 2);
- }
-
- bi->page_dma = dma_map_page(&pdev->dev, bi->page,
- bi->page_offset,
- (PAGE_SIZE / 2),
- DMA_FROM_DEVICE);
- }
-
- if (!bi->skb) {
- struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
- bufsz);
- bi->skb = skb;
-
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
if (!skb) {
- adapter->alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
goto no_buffers;
}
/* initialize queue mapping */
skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
}
if (!bi->dma) {
- bi->dma = dma_map_single(&pdev->dev,
- bi->skb->data,
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
}
- /* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(rx_ring->netdev);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info. */
rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
@@ -1098,56 +1230,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
i++;
if (i == rx_ring->count)
i = 0;
- bi = &rx_ring->rx_buffer_info[i];
}
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
+ ixgbe_release_rx_desc(rx_ring, i);
}
}
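
Two details above are easy to miss: the receive buffer alternates between the two halves of one page by XOR-ing the offset, and a failed mapping must leave the DMA handle zeroed so a later pass retries instead of handing a bad address to hardware. A condensed sketch (hypothetical names, illustrative error handling):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/mm.h>

    struct my_rx_buffer {
        struct page *page;
        dma_addr_t page_dma;
        unsigned int page_offset;
    };

    static int my_map_half_page(struct device *dev, struct my_rx_buffer *bi)
    {
        if (!bi->page_dma) {
            bi->page_offset ^= PAGE_SIZE / 2;	/* flip between halves */
            bi->page_dma = dma_map_page(dev, bi->page, bi->page_offset,
                                        PAGE_SIZE / 2, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, bi->page_dma)) {
                bi->page_dma = 0;	/* force a retry next pass */
                return -ENOMEM;
            }
        }
        return 0;
    }
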
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
{
- return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-}
-
-static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
-{
- return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_RSCCNT_MASK) >>
- IXGBE_RXDADV_RSCCNT_SHIFT;
+ /* HW will not DMA in data larger than the given buffer, even if it
+ * parses the (NFS, of course) header to be larger. In that case, it
+ * fills the header buffer and spills the rest into the page.
+ */
+ u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+ u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > IXGBE_RX_HDR_SIZE)
+ hlen = IXGBE_RX_HDR_SIZE;
+ return hlen;
}
/**
* ixgbe_transform_rsc_queue - change rsc queue into a full packet
* @skb: pointer to the last skb in the rsc queue
- * @count: pointer to number of packets coalesced in this context
*
* This function changes a queue full of hw rsc buffers into a completed
* packet. It uses the ->prev pointers to find the first packet and then
* turns it into the frag list owner.
**/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
- u64 *count)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
unsigned int frag_list_size = 0;
+ unsigned int skb_cnt = 1;
while (skb->prev) {
struct sk_buff *prev = skb->prev;
frag_list_size += skb->len;
skb->prev = NULL;
skb = prev;
- *count += 1;
+ skb_cnt++;
}
skb_shinfo(skb)->frag_list = skb->next;
@@ -1155,68 +1279,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
skb->len += frag_list_size;
skb->data_len += frag_list_size;
skb->truesize += frag_list_size;
+ IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+
return skb;
}
-struct ixgbe_rsc_cb {
- dma_addr_t dma;
- bool delay_unmap;
-};
-
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK);
+}
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int *work_done, int work_to_do)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
- unsigned int i, rsc_count = 0;
- u32 len, staterr;
- u16 hdr_info;
- bool cleaned = false;
- int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ const int current_node = numa_node_id();
#ifdef IXGBE_FCOE
int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
+ u32 staterr;
+ u16 i;
+ u16 cleaned_count = 0;
+ bool pkt_is_rsc = false;
i = rx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
while (staterr & IXGBE_RXD_STAT_DD) {
u32 upper_len = 0;
- if (*work_done >= work_to_do)
- break;
- (*work_done)++;
rmb(); /* read descriptor and rx_buffer_info after status DD */
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
- hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
- len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
- if ((len > IXGBE_RX_HDR_SIZE) ||
- (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
- len = IXGBE_RX_HDR_SIZE;
- } else {
- len = le16_to_cpu(rx_desc->wb.upper.length);
- }
- cleaned = true;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
skb = rx_buffer_info->skb;
- prefetch(skb->data);
rx_buffer_info->skb = NULL;
+ prefetch(skb->data);
+
+ if (ring_is_rsc_enabled(rx_ring))
+ pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
+ /* if this is an skb from a previous receive, DMA will be 0 */

if (rx_buffer_info->dma) {
- if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
- (!(staterr & IXGBE_RXD_STAT_EOP)) &&
- (!(skb->prev))) {
+ u16 hlen;
+ if (pkt_is_rsc &&
+ !(staterr & IXGBE_RXD_STAT_EOP) &&
+ !skb->prev) {
/*
* When HWRSC is enabled, delay unmapping
* of the first packet. It carries the
@@ -1227,29 +1342,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
IXGBE_RSC_CB(skb)->delay_unmap = true;
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
} else {
- dma_unmap_single(&pdev->dev,
+ dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
rx_buffer_info->dma = 0;
- skb_put(skb, len);
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ hlen = ixgbe_get_hlen(rx_desc);
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ hlen = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+
+ skb_put(skb, hlen);
+ } else {
+ /* assume packet split since header is unmapped */
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
}
if (upper_len) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_ring->dev,
+ rx_buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
rx_buffer_info->page_offset,
upper_len);
- if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
- (page_count(rx_buffer_info->page) != 1))
- rx_buffer_info->page = NULL;
- else
+ if ((page_count(rx_buffer_info->page) == 1) &&
+ (page_to_nid(rx_buffer_info->page) == current_node))
get_page(rx_buffer_info->page);
+ else
+ rx_buffer_info->page = NULL;
skb->len += upper_len;
skb->data_len += upper_len;
@@ -1264,10 +1392,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
prefetch(next_rxd);
cleaned_count++;
- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
- rsc_count = ixgbe_get_rsc_count(rx_desc);
-
- if (rsc_count) {
+ if (pkt_is_rsc) {
u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
IXGBE_RXDADV_NEXTP_SHIFT;
next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1275,32 +1400,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
next_buffer = &rx_ring->rx_buffer_info[i];
}
- if (staterr & IXGBE_RXD_STAT_EOP) {
- if (skb->prev)
- skb = ixgbe_transform_rsc_queue(skb,
- &(rx_ring->rsc_count));
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- if (IXGBE_RSC_CB(skb)->delay_unmap) {
- dma_unmap_single(&pdev->dev,
- IXGBE_RSC_CB(skb)->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- IXGBE_RSC_CB(skb)->dma = 0;
- IXGBE_RSC_CB(skb)->delay_unmap = false;
- }
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
- rx_ring->rsc_count +=
- skb_shinfo(skb)->nr_frags;
- else
- rx_ring->rsc_count++;
- rx_ring->rsc_flush++;
- }
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets++;
- rx_ring->stats.bytes += skb->len;
- u64_stats_update_end(&rx_ring->syncp);
- } else {
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (ring_is_ps_enabled(rx_ring)) {
rx_buffer_info->skb = next_buffer->skb;
rx_buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
@@ -1309,12 +1410,45 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->next = next_buffer->skb;
skb->next->prev = skb;
}
- rx_ring->non_eop_descs++;
+ rx_ring->rx_stats.non_eop_descs++;
goto next_desc;
}
+ if (skb->prev) {
+ skb = ixgbe_transform_rsc_queue(skb);
+ /* if we got here without RSC the packet is invalid */
+ if (!pkt_is_rsc) {
+ __pskb_trim(skb, 0);
+ rx_buffer_info->skb = skb;
+ goto next_desc;
+ }
+ }
+
+ if (ring_is_rsc_enabled(rx_ring)) {
+ if (IXGBE_RSC_CB(skb)->delay_unmap) {
+ dma_unmap_single(rx_ring->dev,
+ IXGBE_RSC_CB(skb)->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ IXGBE_RSC_CB(skb)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
+ }
+ }
+ if (pkt_is_rsc) {
+ if (ring_is_ps_enabled(rx_ring))
+ rx_ring->rx_stats.rsc_count +=
+ skb_shinfo(skb)->nr_frags;
+ else
+ rx_ring->rx_stats.rsc_count +=
+ IXGBE_RSC_CB(skb)->skb_cnt;
+ rx_ring->rx_stats.rsc_flush++;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
- dev_kfree_skb_irq(skb);
+ /* trim packet back to size 0 and recycle it */
+ __pskb_trim(skb, 0);
+ rx_buffer_info->skb = skb;
goto next_desc;
}
@@ -1324,7 +1458,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
total_rx_bytes += skb->len;
total_rx_packets++;
- skb->protocol = eth_type_trans(skb, adapter->netdev);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
#ifdef IXGBE_FCOE
/* if ddp, not passing to ULD unless for FCP_RSP or error */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
@@ -1338,16 +1472,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
next_desc:
rx_desc->wb.upper.status_error = 0;
+ (*work_done)++;
+ if (*work_done >= work_to_do)
+ break;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
- ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
/* use prefetched values */
rx_desc = next_rxd;
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
@@ -1355,14 +1491,14 @@ next_desc:
cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
if (cleaned_count)
- ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
#ifdef IXGBE_FCOE
/* include DDPed FCoE data */
if (ddp_bytes > 0) {
unsigned int mss;
- mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+ mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
sizeof(struct fc_frame_header) -
sizeof(struct fcoe_crc_eof);
if (mss > 512)
@@ -1374,8 +1510,10 @@ next_desc:
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
-
- return cleaned;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
}
static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1389,7 +1527,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector;
- int i, j, q_vectors, v_idx, r_idx;
+ int i, q_vectors, v_idx, r_idx;
u32 mask;
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1405,8 +1543,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx]->reg_idx;
- ixgbe_set_ivar(adapter, 0, j, v_idx);
+ u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
+ ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
adapter->num_rx_queues,
r_idx + 1);
@@ -1415,8 +1553,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx]->reg_idx;
- ixgbe_set_ivar(adapter, 1, j, v_idx);
+ u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
+ ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
adapter->num_tx_queues,
r_idx + 1);
@@ -1447,11 +1585,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
}
}
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
v_idx);
- else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
+ break;
+
+ default:
+ break;
+ }
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
/* set up to autoclear timer, and the vectors */
@@ -1547,12 +1693,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
int v_idx = q_vector->v_idx;
u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
/* must write high and low 16 bits to reset counter */
itr_reg |= (itr_reg << 16);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
/*
- * 82599 can support a value of zero, so allow it for
+ * 82599 and X540 can support a value of zero, so allow it for
* max interrupt rate, but there is an errata where it can
* not be zero with RSC
*/
@@ -1565,6 +1714,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
* immediate assertion of the interrupt
*/
itr_reg |= IXGBE_EITR_CNT_WDIS;
+ break;
+ default:
+ break;
}
IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}
@@ -1572,14 +1724,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
+ int i, r_idx;
u32 new_itr;
u8 current_itr, ret_itr;
- int i, r_idx;
- struct ixgbe_ring *rx_ring, *tx_ring;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = adapter->tx_ring[r_idx];
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->tx_itr,
tx_ring->total_packets,
@@ -1594,7 +1745,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = adapter->rx_ring[r_idx];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->rx_itr,
rx_ring->total_packets,
@@ -1625,7 +1776,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
if (new_itr != q_vector->eitr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ new_itr = ((q_vector->eitr * 9) + new_itr)/10;
/* save the algorithm value here, not the smoothed one */
q_vector->eitr = new_itr;
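
The smoothing change is worth a worked example (illustrative numbers, not from the driver). Both forms compute the alpha = 0.1 EWMA, new = 0.9 * old + 0.1 * sample, but the single division truncates once instead of twice. With old eitr = 955 and new_itr = 105:

    old code: (955 * 90)/100 + (105 * 10)/100 = 859 + 10 = 869
    new code: (955 * 9 + 105)/10 = 8700/10 = 870    (exact value: 870)

so the replacement is both shorter and slightly more accurate.
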
@@ -1693,17 +1844,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
+ if (eicr & IXGBE_EICR_GPI_SDP2) {
+ /* Clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ schedule_work(&adapter->sfp_config_module_task);
+ }
+
if (eicr & IXGBE_EICR_GPI_SDP1) {
/* Clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
- schedule_work(&adapter->multispeed_fiber_task);
- } else if (eicr & IXGBE_EICR_GPI_SDP2) {
- /* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
- schedule_work(&adapter->sfp_config_module_task);
- } else {
- /* Interrupt isn't for us... */
- return;
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ schedule_work(&adapter->multispeed_fiber_task);
}
}
@@ -1743,16 +1895,16 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_msg_task(adapter);
- if (hw->mac.type == ixgbe_mac_82598EB)
- ixgbe_check_fan_failure(adapter, eicr);
-
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
- adapter->interrupt_event = eicr;
if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+ adapter->interrupt_event = eicr;
schedule_work(&adapter->check_overtemp_task);
-
+ }
+ /* now fallthrough to handle Flow Director */
+ case ixgbe_mac_X540:
/* Handle Flow Director Full threshold interrupt */
if (eicr & IXGBE_EICR_FLOW_DIR) {
int i;
@@ -1762,12 +1914,18 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
adapter->tx_ring[i];
- if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state))
+ if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &tx_ring->state))
schedule_work(&adapter->fdir_reinit_task);
}
}
+ break;
+ default:
+ break;
}
+
+ ixgbe_check_fan_failure(adapter, eicr);
+
if (!test_bit(__IXGBE_DOWN, &adapter->state))
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
@@ -1778,15 +1936,24 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask = (qmask & 0xFFFFFFFF);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
mask = (qmask >> 32);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ break;
+ default:
+ break;
}
/* skip the flush */
}
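
On 82599/X540 the 64-bit queue mask is split across two 32-bit EIMS_EX banks, and zero halves are now skipped to avoid needless register writes. A stripped-down sketch (hypothetical write_reg() callback standing in for IXGBE_WRITE_REG):

    #include <linux/types.h>

    static void my_enable_queues(u64 qmask, void (*write_reg)(int bank, u32 val))
    {
        u32 mask;

        mask = qmask & 0xFFFFFFFF;	/* queues 0-31 */
        if (mask)
            write_reg(0, mask);

        mask = qmask >> 32;		/* queues 32-63 */
        if (mask)
            write_reg(1, mask);
    }
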
@@ -1795,15 +1962,24 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
- } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask = (qmask & 0xFFFFFFFF);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
mask = (qmask >> 32);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+ break;
+ default:
+ break;
}
/* skip the flush */
}
@@ -1846,8 +2022,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
int r_idx;
int i;
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
+
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ for (i = 0; i < q_vector->rxr_count; i++) {
rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
@@ -1858,7 +2039,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
if (!q_vector->rxr_count)
return IRQ_HANDLED;
- /* disable interrupts on this vector only */
/* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
@@ -1917,13 +2097,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
int work_done = 0;
long r_idx;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_rx_dca(adapter, rx_ring);
+ ixgbe_update_dca(q_vector);
#endif
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = adapter->rx_ring[r_idx];
+
ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
/* If all Rx work done, exit the polling mode */
@@ -1957,13 +2138,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
long r_idx;
bool tx_clean_complete = true;
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
+
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
ring = adapter->tx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_tx_dca(adapter, ring);
-#endif
tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx + 1);
@@ -1976,10 +2158,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
ring = adapter->rx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_rx_dca(adapter, ring);
-#endif
ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1);
@@ -2018,13 +2196,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
int work_done = 0;
long r_idx;
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_tx_dca(adapter, tx_ring);
+ ixgbe_update_dca(q_vector);
#endif
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ tx_ring = adapter->tx_ring[r_idx];
+
if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
work_done = budget;
@@ -2045,24 +2224,27 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
int r_idx)
{
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+ struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
set_bit(r_idx, q_vector->rxr_idx);
q_vector->rxr_count++;
+ rx_ring->q_vector = q_vector;
}
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
int t_idx)
{
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+ struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
set_bit(t_idx, q_vector->txr_idx);
q_vector->txr_count++;
+ tx_ring->q_vector = q_vector;
}
/**
* ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
* @adapter: board private structure to initialize
- * @vectors: allotted vector count for descriptor rings
*
* This function maps descriptor rings to the queue-specific vectors
* we were allotted through the MSI-X enabling code. Ideally, we'd have
@@ -2070,9 +2252,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
- int vectors)
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
{
+ int q_vectors;
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
int rxr_remaining = adapter->num_rx_queues;
@@ -2085,11 +2267,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
goto out;
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
/*
* The ideal configuration...
* We have enough vectors to map one per queue.
*/
- if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
map_vector_to_rxq(adapter, v_start, rxr_idx);
@@ -2105,23 +2289,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
* multiple queues per vector.
*/
/* Re-adjusting *qpv takes care of the remainder. */
- for (i = v_start; i < vectors; i++) {
- rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
for (j = 0; j < rqpv; j++) {
map_vector_to_rxq(adapter, i, rxr_idx);
rxr_idx++;
rxr_remaining--;
}
- }
- for (i = v_start; i < vectors; i++) {
- tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
for (j = 0; j < tqpv; j++) {
map_vector_to_txq(adapter, i, txr_idx);
txr_idx++;
txr_remaining--;
}
}
-
out:
return err;
}
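
The remainder-balancing in the merged loop is easiest to see with numbers. A small, runnable model (illustrative counts only), using the kernel's DIV_ROUND_UP definition:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int remaining = 10, vectors = 4;	/* illustrative counts */

        for (int i = 0; i < vectors; i++) {
            int qpv = DIV_ROUND_UP(remaining, vectors - i);
            printf("vector %d gets %d rings\n", i, qpv);
            remaining -= qpv;
        }
        return 0;	/* prints 3, 3, 2, 2 -- nothing left over */
    }

Recomputing the quotient against the vectors still unserved spreads the remainder across the earliest vectors and can never strand a ring.
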
@@ -2143,30 +2324,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
/* Decrement for Other and TCP Timer vectors */
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- /* Map the Tx/Rx rings to the vectors we were allotted. */
- err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+ err = ixgbe_map_rings_to_vectors(adapter);
if (err)
- goto out;
+ return err;
-#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
- (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
- &ixgbe_msix_clean_many)
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+ ? &ixgbe_msix_clean_many : \
+ (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
+ (_v)->txr_count ? &ixgbe_msix_clean_tx : \
+ NULL)
for (vector = 0; vector < q_vectors; vector++) {
- handler = SET_HANDLER(adapter->q_vector[vector]);
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+ handler = SET_HANDLER(q_vector);
if (handler == &ixgbe_msix_clean_rx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "rx", ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "rx", ri++);
} else if (handler == &ixgbe_msix_clean_tx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "tx", ti++);
- } else
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "TxRx", vector);
-
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "tx", ti++);
+ } else if (handler == &ixgbe_msix_clean_many) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "TxRx", ri++);
+ ti++;
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, adapter->name[vector],
- adapter->q_vector[vector]);
+ handler, 0, q_vector->name,
+ q_vector);
if (err) {
e_err(probe, "request_irq failed for MSIX interrupt "
"Error: %d\n", err);
@@ -2174,9 +2361,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
}
}
- sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+ sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+ ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
if (err) {
e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
goto free_queue_irqs;
@@ -2192,17 +2379,16 @@ free_queue_irqs:
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
-out:
return err;
}
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
- u8 current_itr;
- u32 new_itr = q_vector->eitr;
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+ u32 new_itr = q_vector->eitr;
+ u8 current_itr;
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
q_vector->tx_itr,
@@ -2232,9 +2418,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
if (new_itr != q_vector->eitr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ new_itr = ((q_vector->eitr * 9) + new_itr)/10;
- /* save the algorithm value here, not the smoothed one */
+ /* save the algorithm value here */
q_vector->eitr = new_itr;
ixgbe_write_eitr(q_vector);
@@ -2255,12 +2441,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
mask |= IXGBE_EIMS_GPI_SDP0;
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
mask |= IXGBE_EIMS_GPI_SDP1;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
if (adapter->num_vfs)
mask |= IXGBE_EIMS_MAILBOX;
+ break;
+ default:
+ break;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2316,13 +2507,20 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+ adapter->interrupt_event = eicr;
+ schedule_work(&adapter->check_overtemp_task);
+ }
+ break;
+ default:
+ break;
+ }
ixgbe_check_fan_failure(adapter, eicr);
- if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
- schedule_work(&adapter->check_overtemp_task);
if (napi_schedule_prep(&(q_vector->napi))) {
adapter->tx_ring[0]->total_packets = 0;
@@ -2415,14 +2613,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
- } else {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
if (adapter->num_vfs > 32)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+ break;
+ default:
+ break;
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2468,7 +2672,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u64 tdba = ring->dma;
int wait_loop = 10;
u32 txdctl;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2483,8 +2687,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
- ring->head = IXGBE_TDH(reg_idx);
- ring->tail = IXGBE_TDT(reg_idx);
+ ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
/* configure fetching thresholds */
if (adapter->rx_itr_setting == 0) {
@@ -2500,7 +2703,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
}
/* reinitialize flowdirector state */
- set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+ adapter->atr_sample_rate) {
+ ring->atr_sample_rate = adapter->atr_sample_rate;
+ ring->atr_count = 0;
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+ } else {
+ ring->atr_sample_rate = 0;
+ }
+
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */
txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2591,16 +2803,22 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
u32 srrctl;
- int index;
- struct ixgbe_ring_feature *feature = adapter->ring_feature;
+ u8 reg_idx = rx_ring->reg_idx;
- index = rx_ring->reg_idx;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- unsigned long mask;
- mask = (unsigned long) feature[RING_F_RSS].mask;
- index = index & mask;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB: {
+ struct ixgbe_ring_feature *feature = adapter->ring_feature;
+ const int mask = feature[RING_F_RSS].mask;
+ reg_idx = reg_idx & mask;
+ }
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
+ break;
}
- srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
+
+ srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2610,7 +2828,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(rx_ring)) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
@@ -2623,7 +2841,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2693,19 +2911,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_clear_rscctl - disable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
+ **/
+void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rscctrl;
+ u8 reg_idx = ring->reg_idx;
+
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
+ rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
+}
+
+/**
* ixgbe_configure_rscctl - enable RSC for the indicated ring
* @adapter: address of board private structure
* @index: index of ring to set
**/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rscctrl;
int rx_buf_len;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ if (!ring_is_rsc_enabled(ring))
return;
rx_buf_len = ring->rx_buf_len;
@@ -2716,7 +2951,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
* total size of max desc * buf_len is not greater
* than 65535
*/
- if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(ring)) {
#if (MAX_SKB_FRAGS > 16)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
@@ -2769,9 +3004,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
- int reg_idx = ring->reg_idx;
int wait_loop = IXGBE_MAX_RX_DESC_POLL;
u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2795,7 +3030,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
u64 rdba = ring->dma;
u32 rxdctl;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
@@ -2809,8 +3044,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_rx_desc));
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
- ring->head = IXGBE_RDH(reg_idx);
- ring->tail = IXGBE_RDT(reg_idx);
+ ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
@@ -2832,7 +3066,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
+ ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -2955,24 +3189,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
rx_ring->rx_buf_len = rx_buf_len;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
- rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+ set_ring_ps_enabled(rx_ring);
+ else
+ clear_ring_ps_enabled(rx_ring);
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ set_ring_rsc_enabled(rx_ring);
else
- rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ clear_ring_rsc_enabled(rx_ring);
#ifdef IXGBE_FCOE
if (netdev->features & NETIF_F_FCOE_MTU) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
if ((i >= f->mask) && (i < f->mask + f->indices)) {
- rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ clear_ring_ps_enabled(rx_ring);
if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
rx_ring->rx_buf_len =
IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ } else if (!ring_is_rsc_enabled(rx_ring) &&
+ !ring_is_ps_enabled(rx_ring)) {
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
}
}
#endif /* IXGBE_FCOE */
}
-
}
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -2995,6 +3237,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
rdrxctl |= IXGBE_RDRXCTL_MVMEN;
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
/* Disable RSC for ACK packets */
IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
(IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -3122,6 +3365,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3151,6 +3395,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3348,8 +3593,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- u32 txdctl;
- int i, j;
if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3365,25 +3608,18 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_RX_CONFIG);
- /* reconfigure the hardware */
- ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- /* PThresh workaround for Tx hang with DFP enabled. */
- txdctl |= 32;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
- }
/* Enable VLAN tag insert/strip */
adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+
+ /* reconfigure the hardware */
+ ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
}
#endif
@@ -3515,8 +3751,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
break;
- default:
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
break;
@@ -3560,13 +3797,24 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
else
ixgbe_configure_msi_and_legacy(adapter);
- /* enable the optics */
- if (hw->phy.multispeed_fiber)
+ /* enable the optics for both multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.enable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
hw->mac.ops.enable_tx_laser(hw);
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
+ if (ixgbe_is_sfp(hw)) {
+ ixgbe_sfp_link_config(adapter);
+ } else {
+ err = ixgbe_non_sfp_link_config(hw);
+ if (err)
+ e_err(probe, "link_config FAILED %d\n", err);
+ }
+
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_EICR);
ixgbe_irq_enable(adapter, true, true);
@@ -3589,26 +3837,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
* If we're not hot-pluggable SFP+, we just need to configure link
* and bring it up.
*/
- if (hw->phy.type == ixgbe_phy_unknown) {
- err = hw->phy.ops.identify(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- /*
- * Take the device down and schedule the sfp tasklet
- * which will unregister_netdev and log it.
- */
- ixgbe_down(adapter);
- schedule_work(&adapter->sfp_config_module_task);
- return err;
- }
- }
-
- if (ixgbe_is_sfp(hw)) {
- ixgbe_sfp_link_config(adapter);
- } else {
- err = ixgbe_non_sfp_link_config(hw);
- if (err)
- e_err(probe, "link_config FAILED %d\n", err);
- }
+ if (hw->phy.type == ixgbe_phy_unknown)
+ schedule_work(&adapter->sfp_config_module_task);
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
@@ -3686,15 +3916,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = rx_ring->dev;
unsigned long size;
- unsigned int i;
+ u16 i;
/* ring already cleared, nothing to do */
if (!rx_ring->rx_buffer_info)
@@ -3706,7 +3934,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -3717,7 +3945,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
do {
struct sk_buff *this = skb;
if (IXGBE_RSC_CB(this)->delay_unmap) {
- dma_unmap_single(&pdev->dev,
+ dma_unmap_single(dev,
IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
@@ -3731,7 +3959,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ dma_unmap_page(dev, rx_buffer_info->page_dma,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
@@ -3748,24 +3976,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
-
- if (rx_ring->head)
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- if (rx_ring->tail)
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
* ixgbe_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned long size;
- unsigned int i;
+ u16 i;
/* ring already cleared, nothing to do */
if (!tx_ring->tx_buffer_info)
@@ -3774,7 +3995,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3785,11 +4006,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
-
- if (tx_ring->head)
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- if (tx_ring->tail)
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
@@ -3801,7 +4017,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}
/**
@@ -3813,7 +4029,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3822,7 +4038,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl;
u32 txdctl;
- int i, j;
+ int i;
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
/* signal that we are down to the interrupt handler */
@@ -3880,26 +4096,36 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
(txdctl & ~IXGBE_TXDCTL_ENABLE));
}
/* Disable the Tx DMA engine on 82599 */
- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
-
- /* power down the optics */
- if (hw->phy.multispeed_fiber)
- hw->mac.ops.disable_tx_laser(hw);
+ break;
+ default:
+ break;
+ }
/* clear n-tuple filters that are cached */
ethtool_ntuple_flush(netdev);
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
+
+ /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
+ hw->mac.ops.disable_tx_laser(hw);
+
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
@@ -3924,10 +4150,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
int tx_clean_complete, work_done = 0;
#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
- }
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
#endif
tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -3955,6 +4179,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ adapter->tx_timeout_count++;
+
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
}
@@ -3969,8 +4195,6 @@ static void ixgbe_reset_task(struct work_struct *work)
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
- adapter->tx_timeout_count++;
-
ixgbe_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
ixgbe_reinit_locked(adapter);
@@ -4220,19 +4444,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i;
- bool ret = false;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
- ret = true;
- } else {
- ret = false;
- }
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ return false;
- return ret;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+
+ return true;
}
#ifdef CONFIG_IXGBE_DCB
@@ -4249,71 +4470,67 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
bool ret = false;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- /* the number of queues is assumed to be symmetric */
- for (i = 0; i < dcb_i; i++) {
- adapter->rx_ring[i]->reg_idx = i << 3;
- adapter->tx_ring[i]->reg_idx = i << 2;
- }
- ret = true;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- if (dcb_i == 8) {
- /*
- * Tx TC0 starts at: descriptor queue 0
- * Tx TC1 starts at: descriptor queue 32
- * Tx TC2 starts at: descriptor queue 64
- * Tx TC3 starts at: descriptor queue 80
- * Tx TC4 starts at: descriptor queue 96
- * Tx TC5 starts at: descriptor queue 104
- * Tx TC6 starts at: descriptor queue 112
- * Tx TC7 starts at: descriptor queue 120
- *
- * Rx TC0-TC7 are offset by 16 queues each
- */
- for (i = 0; i < 3; i++) {
- adapter->tx_ring[i]->reg_idx = i << 5;
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
- for ( ; i < 5; i++) {
- adapter->tx_ring[i]->reg_idx =
- ((i + 2) << 4);
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
- for ( ; i < dcb_i; i++) {
- adapter->tx_ring[i]->reg_idx =
- ((i + 8) << 3);
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return false;
- ret = true;
- } else if (dcb_i == 4) {
- /*
- * Tx TC0 starts at: descriptor queue 0
- * Tx TC1 starts at: descriptor queue 64
- * Tx TC2 starts at: descriptor queue 96
- * Tx TC3 starts at: descriptor queue 112
- *
- * Rx TC0-TC3 are offset by 32 queues each
- */
- adapter->tx_ring[0]->reg_idx = 0;
- adapter->tx_ring[1]->reg_idx = 64;
- adapter->tx_ring[2]->reg_idx = 96;
- adapter->tx_ring[3]->reg_idx = 112;
- for (i = 0 ; i < dcb_i; i++)
- adapter->rx_ring[i]->reg_idx = i << 5;
-
- ret = true;
- } else {
- ret = false;
+ /* the number of queues is assumed to be symmetric */
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ for (i = 0; i < dcb_i; i++) {
+ adapter->rx_ring[i]->reg_idx = i << 3;
+ adapter->tx_ring[i]->reg_idx = i << 2;
+ }
+ ret = true;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (dcb_i == 8) {
+ /*
+ * Tx TC0 starts at: descriptor queue 0
+ * Tx TC1 starts at: descriptor queue 32
+ * Tx TC2 starts at: descriptor queue 64
+ * Tx TC3 starts at: descriptor queue 80
+ * Tx TC4 starts at: descriptor queue 96
+ * Tx TC5 starts at: descriptor queue 104
+ * Tx TC6 starts at: descriptor queue 112
+ * Tx TC7 starts at: descriptor queue 120
+ *
+ * Rx TC0-TC7 are offset by 16 queues each
+ */
+ for (i = 0; i < 3; i++) {
+ adapter->tx_ring[i]->reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
- } else {
- ret = false;
+ for ( ; i < 5; i++) {
+ adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
+ adapter->rx_ring[i]->reg_idx = i << 4;
+ }
+ for ( ; i < dcb_i; i++) {
+ adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
+ adapter->rx_ring[i]->reg_idx = i << 4;
+ }
+ ret = true;
+ } else if (dcb_i == 4) {
+ /*
+ * Tx TC0 starts at: descriptor queue 0
+ * Tx TC1 starts at: descriptor queue 64
+ * Tx TC2 starts at: descriptor queue 96
+ * Tx TC3 starts at: descriptor queue 112
+ *
+ * Rx TC0-TC3 are offset by 32 queues each
+ */
+ adapter->tx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[1]->reg_idx = 64;
+ adapter->tx_ring[2]->reg_idx = 96;
+ adapter->tx_ring[3]->reg_idx = 112;
+ for (i = 0 ; i < dcb_i; i++)
+ adapter->rx_ring[i]->reg_idx = i << 5;
+ ret = true;
}
- } else {
- ret = false;
+ break;
+ default:
+ break;
}
-
return ret;
}
#endif
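The shift arithmetic in ixgbe_cache_ring_dcb() is easy to misread. A minimal user-space sketch, assuming nothing beyond the comment's own table, confirms that the three loops reproduce the 8-TC Tx starting queues 0/32/64/80/96/104/112/120 (the Rx side is simply i << 4, i.e. offsets of 16):

#include <stdio.h>

int main(void)
{
        unsigned int i, tx[8];

        for (i = 0; i < 3; i++)
                tx[i] = i << 5;          /* TC0-TC2: 0, 32, 64 */
        for (; i < 5; i++)
                tx[i] = (i + 2) << 4;    /* TC3-TC4: 80, 96 */
        for (; i < 8; i++)
                tx[i] = (i + 8) << 3;    /* TC5-TC7: 104, 112, 120 */

        for (i = 0; i < 8; i++)
                printf("Tx TC%u starts at descriptor queue %u\n", i, tx[i]);
        return 0;
}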
@@ -4353,55 +4570,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
*/
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
- int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
- bool ret = false;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ int i;
+ u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- ixgbe_cache_ring_dcb(adapter);
- /* find out queues in TC for FCoE */
- fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
- fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
- /*
- * In 82599, the number of Tx queues for each traffic
- * class for both 8-TC and 4-TC modes are:
- * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
- * 8 TCs: 32 32 16 16 8 8 8 8
- * 4 TCs: 64 64 32 32
- * We have max 8 queues for FCoE, where 8 the is
- * FCoE redirection table size. If TC for FCoE is
- * less than or equal to TC3, we have enough queues
- * to add max of 8 queues for FCoE, so we start FCoE
- * tx descriptor from the next one, i.e., reg_idx + 1.
- * If TC for FCoE is above TC3, implying 8 TC mode,
- * and we need 8 for FCoE, we have to take all queues
- * in that traffic class for FCoE.
- */
- if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
- fcoe_tx_i--;
- }
+ ixgbe_cache_ring_dcb(adapter);
+ /* find out queues in TC for FCoE */
+ fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+ fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
+ /*
+ * In 82599, the number of Tx queues for each traffic
+ * class for both 8-TC and 4-TC modes are:
+ * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
+ * 8 TCs: 32 32 16 16 8 8 8 8
+ * 4 TCs: 64 64 32 32
+ * We have max 8 queues for FCoE, where 8 is the
+ * FCoE redirection table size. If TC for FCoE is
+ * less than or equal to TC3, we have enough queues
+ * to add max of 8 queues for FCoE, so we start the
+ * FCoE Tx queues at the next one, i.e., reg_idx + 1.
+ * If TC for FCoE is above TC3, implying 8 TC mode,
+ * and we need 8 for FCoE, we have to take all queues
+ * in that traffic class for FCoE.
+ */
+ if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
+ fcoe_tx_i--;
+ }
#endif /* CONFIG_IXGBE_DCB */
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
- ixgbe_cache_ring_fdir(adapter);
- else
- ixgbe_cache_ring_rss(adapter);
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ ixgbe_cache_ring_fdir(adapter);
+ else
+ ixgbe_cache_ring_rss(adapter);
- fcoe_rx_i = f->mask;
- fcoe_tx_i = f->mask;
- }
- for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
- }
- ret = true;
+ fcoe_rx_i = f->mask;
+ fcoe_tx_i = f->mask;
}
- return ret;
+ for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+ }
+ return true;
}
#endif /* IXGBE_FCOE */
@@ -4470,65 +4687,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
**/
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
- int i;
- int orig_node = adapter->node;
+ int rx = 0, tx = 0, nid = adapter->node;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = adapter->tx_ring[i];
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ if (nid < 0 || !node_online(nid))
+ nid = first_online_node;
+
+ for (; tx < adapter->num_tx_queues; tx++) {
+ struct ixgbe_ring *ring;
+
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
if (!ring)
- ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
- goto err_tx_ring_allocation;
+ goto err_allocation;
ring->count = adapter->tx_ring_count;
- ring->queue_index = i;
- ring->numa_node = adapter->node;
+ ring->queue_index = tx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
- adapter->tx_ring[i] = ring;
+ adapter->tx_ring[tx] = ring;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
+ for (; rx < adapter->num_rx_queues; rx++) {
+ struct ixgbe_ring *ring;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
if (!ring)
- ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
- goto err_rx_ring_allocation;
+ goto err_allocation;
ring->count = adapter->rx_ring_count;
- ring->queue_index = i;
- ring->numa_node = adapter->node;
+ ring->queue_index = rx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
- adapter->rx_ring[i] = ring;
+ adapter->rx_ring[rx] = ring;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-
ixgbe_cache_ring_register(adapter);
return 0;
-err_rx_ring_allocation:
- for (i = 0; i < adapter->num_tx_queues; i++)
- kfree(adapter->tx_ring[i]);
-err_tx_ring_allocation:
+err_allocation:
+ while (tx)
+ kfree(adapter->tx_ring[--tx]);
+
+ while (rx)
+ kfree(adapter->rx_ring[--rx]);
return -ENOMEM;
}
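Both loops in ixgbe_alloc_queues() follow the same allocate-on-node-else-anywhere idiom: prefer memory local to the adapter's NUMA node, but fall back to any node rather than failing. A sketch of that idiom in isolation (the helper name is hypothetical):

#include <linux/slab.h>

/* Hypothetical helper: NUMA-preferred allocation with fallback. */
static struct ixgbe_ring *ixgbe_alloc_ring_node(int nid)
{
        struct ixgbe_ring *ring;

        ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
        if (!ring)
                ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        return ring;
}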
@@ -4750,6 +4957,11 @@ err_set_interrupt:
return err;
}
+static void ring_free_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct ixgbe_ring, rcu));
+}
+
/**
* ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
* @adapter: board private structure to clear interrupt scheme on
@@ -4766,10 +4978,18 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
adapter->tx_ring[i] = NULL;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ /* ixgbe_get_stats64() might access this ring, we must wait
+ * a grace period before freeing it.
+ */
+ call_rcu(&ring->rcu, ring_free_rcu);
adapter->rx_ring[i] = NULL;
}
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
}
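ring_free_rcu()/call_rcu() above defer the kfree() until all RCU readers, here ixgbe_get_stats64(), have finished with the ring; the rcu_barrier() added to ixgbe_exit_module() later in this patch then waits for any still-pending callbacks before the module text goes away. A compressed sketch of the whole pattern, with hypothetical names, assuming only standard kernel RCU APIs:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        struct rcu_head rcu;
        u64 stat;
};

struct foo __rcu *slot;

static void foo_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct foo, rcu));
}

/* writer: unpublish the object, then free it after a grace period */
static void foo_retire(void)
{
        struct foo *f = rcu_dereference_protected(slot, 1);

        rcu_assign_pointer(slot, NULL);
        if (f)
                call_rcu(&f->rcu, foo_free_rcu);
}

/* reader: safe against a concurrent foo_retire() */
static u64 foo_read(void)
{
        struct foo *f;
        u64 val = 0;

        rcu_read_lock();
        f = rcu_dereference(slot);
        if (f)
                val = f->stat;
        rcu_read_unlock();
        return val;
}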
@@ -4843,6 +5063,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
int j;
struct tc_configuration *tc;
#endif
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
/* PCI config space info */
@@ -4857,11 +5078,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
- if (hw->mac.type == ixgbe_mac_82598EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4890,6 +5114,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
+ break;
+ default:
+ break;
}
#ifdef CONFIG_IXGBE_DCB
@@ -4919,8 +5146,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_DCB
adapter->last_lfc_mode = hw->fc.current_mode;
#endif
- hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
- hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
@@ -4958,30 +5185,27 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
* @tx_ring: tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
-int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = tx_ring->dev;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
+ tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
if (!tx_ring->tx_buffer_info)
- tx_ring->tx_buffer_info = vmalloc(size);
+ tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
- memset(tx_ring->tx_buffer_info, 0, size);
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -4994,7 +5218,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
err:
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
@@ -5013,7 +5237,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5025,48 +5249,40 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = rx_ring->dev;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
if (!rx_ring->rx_buffer_info)
- rx_ring->rx_buffer_info = vmalloc(size);
- if (!rx_ring->rx_buffer_info) {
- e_err(probe, "vmalloc allocation failed for the Rx "
- "descriptor ring\n");
- goto alloc_failed;
- }
- memset(rx_ring->rx_buffer_info, 0, size);
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->desc) {
- e_err(probe, "Memory allocation failed for the Rx "
- "descriptor ring\n");
- vfree(rx_ring->rx_buffer_info);
- goto alloc_failed;
- }
+ if (!rx_ring->desc)
+ goto err;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
return 0;
-
-alloc_failed:
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
@@ -5080,13 +5296,12 @@ alloc_failed:
*
* Return 0 on success, negative on failure
**/
-
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5098,23 +5313,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
/**
* ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
-void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbe_clean_tx_ring(adapter, tx_ring);
+ ixgbe_clean_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -5131,28 +5346,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tx_ring[i]->desc)
- ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
}
/**
* ixgbe_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
-void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbe_clean_rx_ring(adapter, rx_ring);
+ ixgbe_clean_rx_ring(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -5169,7 +5384,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rx_ring[i]->desc)
- ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
}
/**
@@ -5182,6 +5397,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* MTU < 68 is an error and causes problems on some kernels */
@@ -5192,6 +5408,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
+
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -5287,8 +5506,8 @@ static int ixgbe_close(struct net_device *netdev)
#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
u32 err;
pci_set_power_state(pdev, PCI_D0);
@@ -5319,7 +5538,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
if (netif_running(netdev)) {
- err = ixgbe_open(adapter->netdev);
+ err = ixgbe_open(netdev);
if (err)
return err;
}
@@ -5332,8 +5551,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl, fctrl;
u32 wufc = adapter->wol;
@@ -5350,6 +5569,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_free_all_rx_resources(adapter);
}
+ ixgbe_clear_interrupt_scheme(adapter);
+
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
if (retval)
@@ -5376,15 +5597,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
}
- if (wufc && hw->mac.type == ixgbe_mac_82599EB)
- pci_wake_from_d3(pdev, true);
- else
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
pci_wake_from_d3(pdev, false);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ pci_wake_from_d3(pdev, !!wufc);
+ break;
+ default:
+ break;
+ }
*enable_wake = !!wufc;
- ixgbe_clear_interrupt_scheme(adapter);
-
ixgbe_release_hw_control(adapter);
pci_disable_device(pdev);
@@ -5433,10 +5659,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
- u64 non_eop_descs = 0, restart_queue = 0;
- struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 bytes = 0, packets = 0;
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5449,21 +5677,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i]->rsc_count;
- rsc_flush += adapter->rx_ring[i]->rsc_flush;
+ rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
}
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ bytes += rx_ring->stats.bytes;
+ packets += rx_ring->stats.packets;
+ }
+ adapter->non_eop_descs = non_eop_descs;
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ netdev->stats.rx_bytes = bytes;
+ netdev->stats.rx_packets = packets;
+
+ bytes = 0;
+ packets = 0;
/* gather some stats to the adapter struct that are per queue */
- for (i = 0; i < adapter->num_tx_queues; i++)
- restart_queue += adapter->tx_ring[i]->restart_queue;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ restart_queue += tx_ring->tx_stats.restart_queue;
+ tx_busy += tx_ring->tx_stats.tx_busy;
+ bytes += tx_ring->stats.bytes;
+ packets += tx_ring->stats.packets;
+ }
adapter->restart_queue = restart_queue;
-
- for (i = 0; i < adapter->num_rx_queues; i++)
- non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
- adapter->non_eop_descs = non_eop_descs;
+ adapter->tx_busy = tx_busy;
+ netdev->stats.tx_bytes = bytes;
+ netdev->stats.tx_packets = packets;
hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
@@ -5478,17 +5726,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
- if (hw->mac.type == ixgbe_mac_82599EB) {
- hwstats->pxonrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
- hwstats->pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
- hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- } else {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
- hwstats->pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ break;
+ default:
+ break;
}
hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5497,21 +5746,25 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* work around hardware counting issue */
hwstats->gprc -= missed_rx;
+ ixgbe_update_xoff_received(adapter);
+
/* 82598 hardware only has a 32 bit counter in the high register */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- u64 tmp;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
- /* 4 high bits of GORC */
- hwstats->gorc += (tmp << 32);
+ IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
- /* 4 high bits of GOTC */
- hwstats->gotc += (tmp << 32);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
- IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+ IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
- hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
@@ -5522,12 +5775,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
- } else {
- hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
- hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ default:
+ break;
}
bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
hwstats->bprc += bprc;
@@ -5700,8 +5950,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
- set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->reinit_state));
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i]->state));
} else {
e_err(probe, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters\n");
@@ -5763,17 +6013,27 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
bool flow_rx, flow_tx;
- if (hw->mac.type == ixgbe_mac_82599EB) {
- u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
- u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
- flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
- flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
- } else {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB: {
u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
}
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540: {
+ u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+ flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+ }
+ break;
+ default:
+ flow_tx = false;
+ flow_rx = false;
+ break;
+ }
e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5787,7 +6047,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
netif_carrier_on(netdev);
} else {
/* Force detection of hung controller */
- adapter->detect_tx_hung = true;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring = adapter->tx_ring[i];
+ set_check_for_tx_hang(tx_ring);
+ }
}
} else {
adapter->link_up = false;
@@ -5823,7 +6086,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
static int ixgbe_tso(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len)
+ u32 tx_flags, u8 *hdr_len, __be16 protocol)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5841,7 +6104,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
l4len = tcp_hdrlen(skb);
*hdr_len += l4len;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -5880,7 +6143,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
IXGBE_ADVTXD_DTYP_CTXT);
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5906,16 +6169,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
return false;
}
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+ __be16 protocol)
{
u32 rtn = 0;
- __be16 protocol;
-
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- protocol = ((const struct vlan_ethhdr *)skb->data)->
- h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
@@ -5943,7 +6200,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
default:
if (unlikely(net_ratelimit()))
e_warn(probe, "partial checksum but proto=%x!\n",
- skb->protocol);
+ protocol);
break;
}
@@ -5952,7 +6209,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5981,7 +6239,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
IXGBE_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL)
- type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+ type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
/* use index zero for tx checksum offload */
@@ -6004,15 +6262,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
- unsigned int first)
+ unsigned int first, const u8 hdr_len)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = tx_ring->dev;
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
unsigned int offset = 0, size, count = 0, i;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
+ unsigned int bytecount = skb->len;
+ u16 gso_segs = 1;
i = tx_ring->next_to_use;
@@ -6027,10 +6287,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(&pdev->dev,
+ tx_buffer_info->dma = dma_map_single(dev,
skb->data + offset,
size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -6063,12 +6323,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+ tx_buffer_info->dma = dma_map_page(dev,
frag->page,
offset, size,
DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -6082,6 +6342,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
break;
}
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+ /* adjust for FCoE Sequence Offload */
+ else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+ gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+ bytecount += (gso_segs - 1) * hdr_len; /* headers repeated per segment */
+
+ /* record the frame's wire bytes and segment count on its last buffer */
+ tx_ring->tx_buffer_info[i].bytecount = bytecount;
+ tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;
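The bytecount adjustment above makes the accounting reflect on-the-wire bytes rather than skb->len. As an illustrative (hypothetical) example: a TSO skb with hdr_len = 66, gso_size = 1448 and five segments has skb->len = 5 * 1448 + 66 = 7306 bytes, but transmits 5 * (1448 + 66) = 7570 bytes; adding (gso_segs - 1) * hdr_len = 4 * 66 = 264 to skb->len yields exactly that 7570.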
@@ -6103,14 +6376,13 @@ dma_error:
i += tx_ring->count;
i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
return 0;
}
-static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
+static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
int tx_flags, int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6175,60 +6447,46 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
wmb();
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
+ writel(i, tx_ring->tail);
}
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- int queue, u32 tx_flags)
+ u8 queue, u32 tx_flags, __be16 protocol)
{
struct ixgbe_atr_input atr_input;
- struct tcphdr *th;
struct iphdr *iph = ip_hdr(skb);
struct ethhdr *eth = (struct ethhdr *)skb->data;
- u16 vlan_id, src_port, dst_port, flex_bytes;
- u32 src_ipv4_addr, dst_ipv4_addr;
- u8 l4type = 0;
+ struct tcphdr *th;
+ u16 vlan_id;
- /* Right now, we support IPv4 only */
- if (skb->protocol != htons(ETH_P_IP))
- return;
- /* check if we're UDP or TCP */
- if (iph->protocol == IPPROTO_TCP) {
- th = tcp_hdr(skb);
- src_port = th->source;
- dst_port = th->dest;
- l4type |= IXGBE_ATR_L4TYPE_TCP;
- /* l4type IPv4 type is 0, no need to assign */
- } else {
- /* Unsupported L4 header, just bail here */
+ /* Right now, we support IPv4 w/ TCP only */
+ if (protocol != htons(ETH_P_IP) ||
+ iph->protocol != IPPROTO_TCP)
return;
- }
memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
IXGBE_TX_FLAGS_VLAN_SHIFT;
- src_ipv4_addr = iph->saddr;
- dst_ipv4_addr = iph->daddr;
- flex_bytes = eth->h_proto;
+
+ th = tcp_hdr(skb);
ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
- ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
- ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
- ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
- ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+ ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
+ ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
+ ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
+ ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
/* src and dst are inverted, think how the receiver sees them */
- ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
- ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+ ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
+ ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
/* This assumes the Rx queue and Tx queue are bound to the same CPU */
ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}
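The inversion above deserves a concrete example: for an outgoing TCP flow from local port 32768 to remote port 80, the signature filter is programmed with src port 80 and dst port 32768, with the IPv4 addresses likewise swapped, because the receive-side hardware matches the returning packets of that flow and steers them to the queue named here. (Port numbers are illustrative only.)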
-static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
- netif_stop_subqueue(netdev, tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it. */
@@ -6240,27 +6498,29 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(netdev, tx_ring->queue_index);
- ++tx_ring->restart_queue;
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
return 0;
}
-static int ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
- return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+ return __ixgbe_maybe_stop_tx(tx_ring, size);
}
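The stop/recheck dance in __ixgbe_maybe_stop_tx() closes a race with the Tx cleanup path: without the barrier and the second check, the queue could be stopped just after the consumer freed descriptors and never be woken again. A generic sketch of the pattern, where the ring type and hyp_ring_unused() are hypothetical stand-ins for the driver's ring and its IXGBE_DESC_UNUSED() macro:

#include <linux/netdevice.h>

struct hyp_ring {
        struct net_device *netdev;
        u16 queue_index;
        u16 next_to_use;
        u16 next_to_clean;
        u16 count;
};

/* free slots between the producer and consumer indices */
static u16 hyp_ring_unused(const struct hyp_ring *r)
{
        return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
               r->next_to_clean - r->next_to_use - 1;
}

static int hyp_maybe_stop_tx(struct hyp_ring *r, u16 needed)
{
        if (likely(hyp_ring_unused(r) >= needed))
                return 0;

        netif_stop_subqueue(r->netdev, r->queue_index);

        /* order the stop against the re-read of the ring indices;
         * pairs with a barrier on the Tx-cleanup (wake) side */
        smp_mb();

        if (likely(hyp_ring_unused(r) < needed))
                return -EBUSY;

        /* cleanup freed slots in the window: undo the stop */
        netif_start_subqueue(r->netdev, r->queue_index);
        return 0;
}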
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int txq = smp_processor_id();
-
#ifdef IXGBE_FCOE
- if ((skb->protocol == htons(ETH_P_FCOE)) ||
- (skb->protocol == htons(ETH_P_FIP))) {
+ __be16 protocol;
+
+ protocol = vlan_get_protocol(skb);
+
+ if ((protocol == htons(ETH_P_FCOE)) ||
+ (protocol == htons(ETH_P_FIP))) {
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6292,10 +6552,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
return skb_tx_hash(dev, skb);
}
-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
+ struct net_device *netdev = tx_ring->netdev;
struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
@@ -6303,6 +6564,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
int tso;
int count = 0;
unsigned int f;
+ __be16 protocol;
+
+ protocol = vlan_get_protocol(skb);
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
@@ -6323,8 +6587,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
/* for FCoE with DCB, we force the priority to what
* was specified by the switch */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
- (skb->protocol == htons(ETH_P_FCOE) ||
- skb->protocol == htons(ETH_P_FIP))) {
+ (protocol == htons(ETH_P_FCOE) ||
+ protocol == htons(ETH_P_FIP))) {
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6334,7 +6598,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
}
#endif
/* flag for FCoE offloads */
- if (skb->protocol == htons(ETH_P_FCOE))
+ if (protocol == htons(ETH_P_FCOE))
tx_flags |= IXGBE_TX_FLAGS_FCOE;
}
#endif
@@ -6350,8 +6614,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
- if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
- adapter->tx_busy++;
+ if (ixgbe_maybe_stop_tx(tx_ring, count)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
@@ -6368,9 +6632,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
} else {
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+ protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -6378,30 +6643,30 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
if (tso)
tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+ protocol) &&
(skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
- count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+ count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
if (count) {
/* add the ATR filter if ATR is on */
if (tx_ring->atr_sample_rate) {
++tx_ring->atr_count;
if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
- test_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state)) {
+ test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &tx_ring->state)) {
ixgbe_atr(adapter, skb, tx_ring->queue_index,
- tx_flags);
+ tx_flags, protocol);
tx_ring->atr_count = 0;
}
}
txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
txq->tx_bytes += skb->len;
txq->tx_packets++;
- ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
- hdr_len);
- ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+ ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
+ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
} else {
dev_kfree_skb_any(skb);
@@ -6418,7 +6683,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd
struct ixgbe_ring *tx_ring;
tx_ring = adapter->tx_ring[skb->queue_mapping];
- return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+ return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
/**
@@ -6559,20 +6824,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
/* accurate rx/tx bytes/packets stats */
dev_txq_stats_fold(netdev, stats);
+ rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
u64 bytes, packets;
unsigned int start;
- do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
- stats->rx_packets += packets;
- stats->rx_bytes += bytes;
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
}
-
+ rcu_read_unlock();
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = netdev->stats.multicast;
stats->rx_errors = netdev->stats.rx_errors;
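The fetch_begin/fetch_retry loop above is the reader half of a u64_stats seqcount snapshot. A sketch of the matching writer side, assuming the ring's syncp field from this patch (the real update sites live in the Rx/Tx cleanup paths, which are not in this hunk):

#include <linux/u64_stats_sync.h>

/* Writer side of the u64_stats snapshot protocol (sketch). */
static void ring_account(struct ixgbe_ring *ring, u64 pkts, u64 bytes)
{
        u64_stats_update_begin(&ring->syncp);
        ring->stats.packets += pkts;
        ring->stats.bytes += bytes;
        u64_stats_update_end(&ring->syncp);
}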
@@ -6687,11 +6955,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
+ u8 part_str[IXGBE_PBANUM_LENGTH];
unsigned int indices = num_possible_cpus();
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
- u32 part_num, eec;
+ u32 eec;
/* Catch broken hardware that put the wrong VF device ID in
* the PCIe SR-IOV capability.
@@ -6754,8 +7023,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
- pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
+ pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -6778,7 +7047,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->netdev_ops = &ixgbe_netdev_ops;
ixgbe_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- strcpy(netdev->name, pci_name(pdev));
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
adapter->bd_number = cards_found;
@@ -6828,8 +7097,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
/* Make it possible the adapter to be woken up via WOL */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+ break;
+ default:
+ break;
+ }
/*
* If there is a fan on this device and it has failed log the
@@ -6937,8 +7212,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_eeprom;
}
- /* power down the optics */
- if (hw->phy.multispeed_fiber)
+ /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
hw->mac.ops.disable_tx_laser(hw);
init_timer(&adapter->watchdog_timer);
@@ -6953,6 +7231,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
switch (pdev->device) {
+ case IXGBE_DEV_ID_82599_SFP:
+ /* Only this subdevice supports WOL */
+ if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+ adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
+ IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ break;
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+ adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
+ IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ break;
case IXGBE_DEV_ID_82599_KX4:
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
IXGBE_WUFC_MC | IXGBE_WUFC_BC);
@@ -6976,16 +7266,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
"Unknown"),
netdev->dev_addr);
- ixgbe_read_pba_num_generic(hw, &part_num);
+
+ err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+ if (err)
+ strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
- e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
- "PBA No: %06x-%03x\n",
+ e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, hw->phy.sfp_type,
- (part_num >> 8), (part_num & 0xff));
+ part_str);
else
- e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
- hw->mac.type, hw->phy.type,
- (part_num >> 8), (part_num & 0xff));
+ e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, part_str);
if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
e_dev_warn("PCI-Express bandwidth available for this card is "
@@ -7078,17 +7369,19 @@ err_dma:
**/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
set_bit(__IXGBE_DOWN, &adapter->state);
- /* clear the module not found bit to make sure the worker won't
- * reschedule
+
+ /*
+ * The watchdog and SFP timers can rearm themselves, so explicitly
+ * stop them before continuing with teardown.
*/
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
-
del_timer_sync(&adapter->sfp_timer);
+
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->sfp_task);
cancel_work_sync(&adapter->multispeed_fiber_task);
@@ -7096,7 +7389,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
cancel_work_sync(&adapter->fdir_reinit_task);
- flush_scheduled_work();
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ cancel_work_sync(&adapter->check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -7149,8 +7443,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
@@ -7173,8 +7467,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
pci_ers_result_t result;
int err;
@@ -7212,8 +7505,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
*/
static void ixgbe_io_resume(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
if (netif_running(netdev)) {
if (ixgbe_up(adapter)) {
@@ -7278,6 +7571,7 @@ static void __exit ixgbe_exit_module(void)
dca_unregister_notify(&dca_notifier);
#endif
pci_unregister_driver(&ixgbe_driver);
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
#ifdef CONFIG_IXGBE_DCA
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index 471f0f2cdb98..027c628c3aae 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -319,8 +319,14 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vflre = 0;
s32 ret_val = IXGBE_ERR_MBX;
- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+ break;
+ default:
+ break;
+ }
if (vflre & (1 << vf_shift)) {
ret_val = 0;
@@ -439,22 +445,26 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (hw->mac.type != ixgbe_mac_82599EB)
- return;
-
- mbx->timeout = 0;
- mbx->usec_delay = 0;
-
- mbx->size = IXGBE_VFMAILBOX_SIZE;
-
- mbx->stats.msgs_tx = 0;
- mbx->stats.msgs_rx = 0;
- mbx->stats.reqs = 0;
- mbx->stats.acks = 0;
- mbx->stats.rsts = 0;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+ break;
+ default:
+ break;
+ }
}
-struct ixgbe_mbx_operations mbx_ops_82599 = {
+struct ixgbe_mbx_operations mbx_ops_generic = {
.read = ixgbe_read_mbx_pf,
.write = ixgbe_write_mbx_pf,
.read_posted = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 7e0d08ff5b53..3df9b1590218 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -88,6 +88,6 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
-extern struct ixgbe_mbx_operations mbx_ops_82599;
+extern struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 6c0d42e33f21..8f7123e8fc0a 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -115,6 +115,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
+ case X540_PHY_ID:
+ phy_type = ixgbe_phy_aq;
+ break;
case QT2022_PHY_ID:
phy_type = ixgbe_phy_qt;
break;
@@ -425,6 +428,39 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the link capabilities by reading the PHY's speed
+ * ability register over MDIO.
+ */
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_ERR_LINK_SETUP;
+ u16 speed_ability;
+
+ *speed = 0;
+ *autoneg = true;
+
+ status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
+ &speed_ability);
+
+ if (status == 0) {
+ if (speed_ability & MDIO_SPEED_10G)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_1000)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_100)
+ *speed |= IXGBE_LINK_SPEED_100_FULL;
+ }
+
+ return status;
+}
+
+/**
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
**/
@@ -1378,6 +1414,22 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+**/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
+ firmware_version);
+
+ return status;
+}
+
+/**
* ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
* @hw: pointer to hardware structure
*
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index fb3898f12fc5..e2c6b7eac641 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -96,6 +96,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
@@ -103,6 +106,8 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
bool *link_up);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 5428153af8f3..6e3e94b5a5f6 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -68,7 +68,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
* addresses
*/
for (i = 0; i < entries; i++) {
- vfinfo->vf_mc_hashes[i] = hash_list[i];;
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
}
for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
@@ -178,8 +178,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
unsigned char vf_mac_addr[6];
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
bool enable = ((event_mask & 0x10000000U) != 0);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index d3cc6ce7c973..59f6d0afe0fe 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -54,9 +54,14 @@
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_X540T 0x1528
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -994,8 +999,10 @@
/* PHY IDs*/
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
+#define X540_PHY_ID 0x01540200
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
+#define AQ_FW_REV 0x20
/* PHY Types */
#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
@@ -1463,6 +1470,8 @@
#define IXGBE_ANLP1_PAUSE 0x0C00
#define IXGBE_ANLP1_SYM_PAUSE 0x0400
#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
+
/* SW Semaphore Register bitmasks */
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
@@ -1491,6 +1500,7 @@
#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
@@ -1500,12 +1510,18 @@
#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
#define IXGBE_EEPROM_OPCODE_BITS 8
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
#define IXGBE_PCIE_GENERAL_PTR 0x06
#define IXGBE_PCIE_CONFIG0_PTR 0x07
#define IXGBE_PCIE_CONFIG1_PTR 0x08
@@ -2113,6 +2129,14 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+/* Flow Control Macros */
+#define PAUSE_RTT 8
+#define PAUSE_MTU(MTU) (((MTU) + 1024 - 1) / 1024)
+
+#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
+ PAUSE_MTU(MTU))
+#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
+
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
@@ -2164,6 +2188,7 @@ struct ixgbe_atr_input_masks {
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
+ ixgbe_flash,
ixgbe_eeprom_none /* No NVM support */
};
@@ -2171,12 +2196,14 @@ enum ixgbe_mac_type {
ixgbe_mac_unknown = 0,
ixgbe_mac_82598EB,
ixgbe_mac_82599EB,
+ ixgbe_mac_X540,
ixgbe_num_macs
};
enum ixgbe_phy_type {
ixgbe_phy_unknown = 0,
ixgbe_phy_tn,
+ ixgbe_phy_aq,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -2405,6 +2432,7 @@ struct ixgbe_eeprom_operations {
s32 (*write)(struct ixgbe_hw *, u16, u16);
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
+ u16 (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
@@ -2574,6 +2602,7 @@ struct ixgbe_hw {
u16 subsystem_vendor_id;
u8 revision_id;
bool adapter_stopped;
+ bool force_full_reset;
};
struct ixgbe_info {
@@ -2614,6 +2643,9 @@ struct ixgbe_info {
#define IXGBE_ERR_NO_SPACE -25
#define IXGBE_ERR_OVERTEMP -26
#define IXGBE_ERR_RAR_INDEX -27
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define IXGBE_ERR_PBA_SECTION -31
+#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
new file mode 100644
index 000000000000..cf88515c0ef8
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -0,0 +1,717 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
+
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+ return ixgbe_media_type_copper;
+}
+
+static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ /* Call PHY identify routine to get the phy type */
+ ixgbe_identify_phy_generic(hw);
+
+ mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ **/
+static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+}
+
+/**
+ * ixgbe_reset_hw_X540 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
+ * reset.
+ **/
+static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status = 0;
+ u32 ctrl;
+ u32 ctrl_ext;
+ u32 reset_bit;
+ u32 i;
+ u32 autoc;
+ u32 autoc2;
+ bool link_up = false;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ /*
+	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * access and verify no pending requests before reset
+ */
+ status = ixgbe_disable_pcie_master(hw);
+ if (status != 0) {
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+ }
+
+ /*
+ * Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ if (hw->force_full_reset) {
+ reset_bit = IXGBE_CTRL_LNK_RST;
+ } else {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (!link_up)
+ reset_bit = IXGBE_CTRL_LNK_RST;
+ else
+ reset_bit = IXGBE_CTRL_RST;
+ }
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll for reset bit to self-clear indicating reset is complete */
+	for (i = 0; i < 10; i++) {
+		udelay(1);
+		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+		if (!(ctrl & reset_bit))
+			break;
+	}
+	if (ctrl & reset_bit) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+ /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+ msleep(50);
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store the original AUTOC/AUTOC2 values if they have not been
+ * stored off yet. Otherwise restore the stored original
+ * values since the reset operation sets back to defaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_autoc2 = autoc2;
+ hw->mac.orig_link_settings_stored = true;
+ } else {
+ if (autoc != hw->mac.orig_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
+ IXGBE_AUTOC_AN_RESTART));
+
+ if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+ (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+ autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+ autoc2 |= (hw->mac.orig_autoc2 &
+ IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ }
+ }
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u16 ext_ability = 0;
+
+ hw->phy.ops.identify(hw);
+
+ hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
+ &ext_ability);
+ if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ **/
+static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+	s32 status;
+
+	if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) != 0)
+		return IXGBE_ERR_SWFW_SYNC;
+
+	status = ixgbe_read_eerd_generic(hw, offset, data);
+
+	ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+	return status;
+}
+
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ u32 eewr;
+ s32 status;
+
+ hw->eeprom.ops.init_params(hw);
+
+	if (offset >= hw->eeprom.word_size)
+		return IXGBE_ERR_EEPROM;
+
+	eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+	       (data << IXGBE_EEPROM_RW_REG_DATA) |
+	       IXGBE_EEPROM_RW_REG_START;
+
+	if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) != 0)
+		return IXGBE_ERR_SWFW_SYNC;
+
+	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+	if (status != 0) {
+		hw_dbg(hw, "Eeprom write EEWR timed out\n");
+		goto out;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+	if (status != 0)
+		hw_dbg(hw, "Eeprom write EEWR timed out\n");
+
+out:
+	ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+	return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ **/
+static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ u16 i;
+ u16 j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+
+ /* Include 0x0-0x3F in the checksum */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+
+ /*
+ * Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (pointer + length) >= hw->eeprom.word_size)
+ continue;
+
+ for (j = pointer+1; j <= pointer+length; j++) {
+ if (hw->eeprom.ops.read(hw, j, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return checksum;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After the EEPROM image has been written to shadow RAM via the EEWR
+ * register, software recalculates the checksum, writes it back to the
+ * EEPROM, and instructs the hardware to copy shadow RAM to the flash.
+ **/
+static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_update_eeprom_checksum_generic(hw);
+
+	if (status == 0)
+ status = ixgbe_update_flash_X540(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
+ * EEPROM from shadow RAM to the flash device.
+ **/
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+ u32 flup;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_ERR_EEPROM) {
+		hw_dbg(hw, "Flash update timed out\n");
+ goto out;
+ }
+
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+	status = ixgbe_poll_flash_update_done_X540(hw);
+	if (status == 0)
+		hw_dbg(hw, "Flash update complete\n");
+	else
+		hw_dbg(hw, "Flash update timed out\n");
+
+ if (hw->revision_id == 0) {
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ if (flup & IXGBE_EEC_SEC1VAL) {
+ flup |= IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ }
+
+		status = ixgbe_poll_flash_update_done_X540(hw);
+		if (status == 0)
+			hw_dbg(hw, "Flash update complete\n");
+		else
+			hw_dbg(hw, "Flash update timed out\n");
+
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ * @hw: pointer to hardware structure
+ *
+ * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ * flash update is done.
+ **/
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (reg & IXGBE_EEC_FLUDONE) {
+ status = 0;
+ break;
+ }
+ udelay(5);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 5;
+ u32 hwmask = 0;
+ u32 timeout = 200;
+ u32 i;
+
+ if (swmask == IXGBE_GSSR_EEP_SM)
+ hwmask = IXGBE_GSSR_FLASH_SM;
+
+ for (i = 0; i < timeout; i++) {
+ /*
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ break;
+ } else {
+ /*
+ * Firmware currently using resource (fwmask),
+ * hardware currently using resource (hwmask),
+ * or other software thread currently using
+ * resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msleep(5);
+ }
+ }
+
+ /*
+ * If the resource is not released by the FW/HW the SW can assume that
+	 * the FW/HW malfunctions. In that case the SW should set the
+ * SW bit(s) of the requested resource(s) while ignoring the
+ * corresponding FW/HW bits in the SW_FW_SYNC register.
+ */
+ if (i >= timeout) {
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (swfw_sync & (fwmask | hwmask)) {
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ }
+ }
+
+ msleep(5);
+ return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
+ * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+
+ ixgbe_get_swfw_sync_semaphore(hw);
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msleep(5);
+}
+
+/**
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = 0;
+ break;
+ }
+ udelay(50);
+ }
+
+ /* Now get the semaphore between SW/FW through the REGSMP bit */
+	if (status == 0) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swsm & IXGBE_SWFW_REGSMP))
+ break;
+
+ udelay(50);
+ }
+ } else {
+ hw_dbg(hw, "Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm &= ~IXGBE_SWFW_REGSMP;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+static struct ixgbe_mac_operations mac_ops_X540 = {
+ .init_hw = &ixgbe_init_hw_generic,
+ .reset_hw = &ixgbe_reset_hw_X540,
+ .start_hw = &ixgbe_start_hw_generic,
+ .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
+ .get_media_type = &ixgbe_get_media_type_X540,
+ .get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_X540,
+ .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = &ixgbe_get_mac_addr_generic,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
+ .get_device_caps = NULL,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
+ .stop_adapter = &ixgbe_stop_adapter_generic,
+ .get_bus_info = &ixgbe_get_bus_info_generic,
+ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
+ .read_analog_reg8 = NULL,
+ .write_analog_reg8 = NULL,
+ .setup_link = &ixgbe_setup_mac_link_X540,
+ .check_link = &ixgbe_check_mac_link_generic,
+ .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
+ .led_on = &ixgbe_led_on_generic,
+ .led_off = &ixgbe_led_off_generic,
+ .blink_led_start = &ixgbe_blink_led_start_generic,
+ .blink_led_stop = &ixgbe_blink_led_stop_generic,
+ .set_rar = &ixgbe_set_rar_generic,
+ .clear_rar = &ixgbe_clear_rar_generic,
+ .set_vmdq = &ixgbe_set_vmdq_generic,
+ .clear_vmdq = &ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
+ .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
+ .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
+ .enable_mc = &ixgbe_enable_mc_generic,
+ .disable_mc = &ixgbe_disable_mc_generic,
+ .clear_vfta = &ixgbe_clear_vfta_generic,
+ .set_vfta = &ixgbe_set_vfta_generic,
+ .fc_enable = &ixgbe_fc_enable_generic,
+ .init_uta_tables = &ixgbe_init_uta_tables_generic,
+ .setup_sfp = NULL,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+ .init_params = &ixgbe_init_eeprom_params_X540,
+ .read = &ixgbe_read_eerd_X540,
+ .write = &ixgbe_write_eewr_X540,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
+ .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
+ .update_checksum = &ixgbe_update_eeprom_checksum_X540,
+};
+
+static struct ixgbe_phy_operations phy_ops_X540 = {
+ .identify = &ixgbe_identify_phy_generic,
+ .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .init = NULL,
+ .reset = &ixgbe_reset_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
+ .setup_link = &ixgbe_setup_phy_link_generic,
+ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
+ .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
+ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
+};
+
+struct ixgbe_info ixgbe_X540_info = {
+ .mac = ixgbe_mac_X540,
+ .get_invariants = &ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_X540,
+ .eeprom_ops = &eeprom_ops_X540,
+ .phy_ops = &phy_ops_X540,
+ .mbx_ops = &mbx_ops_generic,
+};
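
The checksum scheme implemented by ixgbe_calc_eeprom_checksum_X540() above is an invariant, not just a hash: the word stored at offset IXGBE_EEPROM_CHECKSUM (0x3F) is chosen so that the 16-bit sum of every covered word, stored checksum included, comes out to IXGBE_EEPROM_SUM (0xBABA). A minimal sketch of the validation side, assuming the generic validate path and the IXGBE_ERR_EEPROM_CHECKSUM code from ixgbe_type.h (hypothetical helper, not part of this patch):

	static s32 example_validate_checksum(struct ixgbe_hw *hw)
	{
		u16 calculated, stored;

		/* calc_checksum() returns 0xBABA minus the sum of the image */
		calculated = hw->eeprom.ops.calc_checksum(hw);

		if (hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &stored) != 0)
			return IXGBE_ERR_EEPROM;

		/* an intact image stores exactly the recomputed word */
		return (calculated == stored) ? 0 : IXGBE_ERR_EEPROM_CHECKSUM;
	}
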
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
index dd4e0d27e8cc..1f35d229e71a 100644
--- a/drivers/net/ixgbevf/Makefile
+++ b/drivers/net/ixgbevf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82599 Virtual Function driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index ca2c81f49a05..f8a807d606c7 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 4cc817acfb62..fa29b3c8c464 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -544,7 +544,7 @@ struct ixgbevf_reg_test {
#define TABLE64_TEST_HI 6
/* default VF register test */
-static struct ixgbevf_reg_test reg_test_vf[] = {
+static const struct ixgbevf_reg_test reg_test_vf[] = {
{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
@@ -557,19 +557,23 @@ static struct ixgbevf_reg_test reg_test_vf[] = {
{ 0, 0, 0, 0 }
};
+static const u32 register_test_patterns[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+};
+
#define REG_PATTERN_TEST(R, M, W) \
{ \
u32 pat, val, before; \
- const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
- for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
before = readl(adapter->hw.hw_addr + R); \
- writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ writel((register_test_patterns[pat] & W), \
+ (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
- if (val != (_test[pat] & W & M)) { \
+ if (val != (register_test_patterns[pat] & W & M)) { \
hw_dbg(&adapter->hw, \
"pattern test reg %04X failed: got " \
"0x%08X expected 0x%08X\n", \
- R, val, (_test[pat] & W & M)); \
+ R, val, (register_test_patterns[pat] & W & M)); \
*data = R; \
writel(before, adapter->hw.hw_addr + R); \
return 1; \
@@ -596,7 +600,7 @@ static struct ixgbevf_reg_test reg_test_vf[] = {
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
- struct ixgbevf_reg_test *test;
+ const struct ixgbevf_reg_test *test;
u32 i;
test = reg_test_vf;
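
Hoisting the test patterns to file scope gives the object file a single const array instead of a fresh stack copy per REG_PATTERN_TEST expansion, and constifying reg_test_vf moves the table into .rodata. Distilled to a standalone form, the pattern test amounts to the sketch below, where addr, mask and writable stand in for the macro's R, M and W arguments (hypothetical helper, names assumed):

	static bool reg_pattern_ok(void __iomem *addr, u32 mask, u32 writable)
	{
		u32 before = readl(addr);	/* saved so the test is non-destructive */
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(register_test_patterns); i++) {
			u32 pat = register_test_patterns[i] & writable;

			writel(pat, addr);
			if (readl(addr) != (pat & mask)) {
				writel(before, addr);
				return false;	/* a writable bit failed to stick */
			}
		}
		writel(before, addr);
		return true;
	}
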
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index da4033c6efa2..0cd6abcf9306 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index dc03c9652389..809e38ce8a13 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -51,9 +51,10 @@ char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 82599 Virtual Function";
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "1.0.12-k0"
const char ixgbevf_driver_version[] = DRV_VERSION;
-static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static char ixgbevf_copyright[] =
+ "Copyright (c) 2009 - 2010 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_vf_info,
@@ -2488,10 +2489,9 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vmalloc(size);
+ tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
- memset(tx_ring->tx_buffer_info, 0, size);
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -2555,14 +2555,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
int size;
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc(size);
+ rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info) {
hw_dbg(&adapter->hw,
"Unable to vmalloc buffer memory for "
"the receive descriptor ring\n");
goto alloc_failed;
}
- memset(rx_ring->rx_buffer_info, 0, size);
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
@@ -3424,10 +3423,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (hw->mac.ops.get_bus_info)
hw->mac.ops.get_bus_info(hw);
-
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
@@ -3436,6 +3431,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
adapter->netdev_registered = true;
+ netif_carrier_off(netdev);
+
ixgbevf_init_last_counter_stats(adapter);
/* print the MAC address */
@@ -3487,10 +3484,9 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
del_timer_sync(&adapter->watchdog_timer);
+ cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
- flush_scheduled_work();
-
if (adapter->netdev_registered) {
unregister_netdev(netdev);
adapter->netdev_registered = false;
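
Two recurring conversions appear in ixgbevf_main.c above and again in the netxen hunks below. First, vzalloc() folds the vmalloc()+memset() pair into one call that returns zeroed memory or NULL, so a forgotten memset on some path can no longer leak stale data. Second, the remove path cancels the driver's own work items with cancel_work_sync() instead of draining the whole global workqueue with flush_scheduled_work(), which narrows the wait to work the driver actually owns. The allocation half is mechanical (sketch of the before/after shapes):

	/* before: two steps, easy to miss the memset on some path */
	buf = vmalloc(size);
	if (!buf)
		goto err;
	memset(buf, 0, size);

	/* after: one step, zeroed or NULL */
	buf = vzalloc(size);
	if (!buf)
		goto err;
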
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
index 84ac486f4a65..7a8833125770 100644
--- a/drivers/net/ixgbevf/mbx.c
+++ b/drivers/net/ixgbevf/mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index 8c063bebee7f..b2b5bf5daa3d 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
index 12f75960aec1..fb80ca1bcc93 100644
--- a/drivers/net/ixgbevf/regs.h
+++ b/drivers/net/ixgbevf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index bfe42c1fcfaf..971019d819b4 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 61f9dc831424..144c99d5363a 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d85edf3119c2..e97ebef3cf47 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -135,7 +135,7 @@ jme_reset_phy_processor(struct jme_adapter *jme)
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
- u32 *mask, u32 crc, int fnr)
+ const u32 *mask, u32 crc, int fnr)
{
int i;
@@ -163,7 +163,7 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
- u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
+ static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
u32 crc = 0xCDCDCDCD;
u32 gpreg0;
int i;
@@ -2076,12 +2076,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
}
if (new_mtu > 1900) {
- netdev->features &= ~(NETIF_F_HW_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6);
+ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6);
} else {
if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
- netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (test_bit(JME_FLAG_TSO, &jme->flags))
netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
}
@@ -2514,10 +2513,12 @@ jme_set_tx_csum(struct net_device *netdev, u32 on)
if (on) {
set_bit(JME_FLAG_TXCSUM, &jme->flags);
if (netdev->mtu <= 1900)
- netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |=
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
} else {
clear_bit(JME_FLAG_TXCSUM, &jme->flags);
- netdev->features &= ~NETIF_F_HW_CSUM;
+ netdev->features &=
+ ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
}
return 0;
@@ -2797,7 +2798,8 @@ jme_init_one(struct pci_dev *pdev,
netdev->netdev_ops = &jme_netdev_ops;
netdev->ethtool_ops = &jme_ethtool_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
- netdev->features = NETIF_F_HW_CSUM |
+ netdev->features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
NETIF_F_SG |
NETIF_F_TSO |
NETIF_F_TSO6 |
@@ -2955,11 +2957,7 @@ jme_init_one(struct pci_dev *pdev,
* Tell stack that we are not ready to work until open()
*/
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- /*
- * Register netdev
- */
rc = register_netdev(netdev);
if (rc) {
pr_err("Cannot register net device\n");
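
The jme.c feature change above is a correctness fix, not a cleanup: NETIF_F_HW_CSUM advertises that the NIC can insert a checksum at any csum_start/csum_offset the stack supplies, while NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM promise only TCP/UDP over IPv4/IPv6, which is what JMC250-class hardware can do. The stack's feature check looks roughly like this (simplified from the era's net/core/dev.c, not verbatim):

	static bool can_checksum_protocol(unsigned long features, __be16 protocol)
	{
		return (features & NETIF_F_HW_CSUM) ||
		       ((features & NETIF_F_IP_CSUM) &&
			protocol == htons(ETH_P_IP)) ||
		       ((features & NETIF_F_IPV6_CSUM) &&
			protocol == htons(ETH_P_IPV6));
	}

Advertising HW_CSUM on hardware that cannot honor arbitrary offsets would let non-IP packets through with bad checksums instead of falling back to software checksumming.
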
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 51919fcd50c2..0fa4a9887ba2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -1545,6 +1545,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
/* driver bus management functions */
+#ifdef CONFIG_PM
+static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
+{
+ struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+ struct net_device *dev = ks->netdev;
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ ks8851_net_stop(dev);
+ }
+
+ return 0;
+}
+
+static int ks8851_resume(struct spi_device *spi)
+{
+ struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+ struct net_device *dev = ks->netdev;
+
+ if (netif_running(dev)) {
+ ks8851_net_open(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+#else
+#define ks8851_suspend NULL
+#define ks8851_resume NULL
+#endif
+
static int __devinit ks8851_probe(struct spi_device *spi)
{
struct net_device *ndev;
@@ -1679,6 +1710,8 @@ static struct spi_driver ks8851_driver = {
},
.probe = ks8851_probe,
.remove = __devexit_p(ks8851_remove),
+ .suspend = ks8851_suspend,
+ .resume = ks8851_resume,
};
static int __init ks8851_init(void)
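
The ks8851 hunk adds system sleep support through the legacy spi_driver suspend/resume hooks. The ordering is the important part: detach the net device (so the core stops queuing packets) before shutting the interface down, and bring the interface back up before re-attaching. Were the driver later moved to the dev_pm_ops style, the same logic would look roughly like this (a sketch under that assumption, not part of the patch):

	static int ks8851_suspend_pm(struct device *dev)
	{
		struct ks8851_net *ks = dev_get_drvdata(dev);
		struct net_device *ndev = ks->netdev;

		if (netif_running(ndev)) {
			netif_device_detach(ndev);	/* stop the stack first */
			ks8851_net_stop(ndev);
		}
		return 0;
	}

	static int ks8851_resume_pm(struct device *dev)
	{
		struct ks8851_net *ks = dev_get_drvdata(dev);
		struct net_device *ndev = ks->netdev;

		if (netif_running(ndev)) {
			ks8851_net_open(ndev);
			netif_device_attach(ndev);	/* re-attach last */
		}
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(ks8851_pm_ops, ks8851_suspend_pm, ks8851_resume_pm);
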
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 37504a398906..49ea8708d6d0 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -3570,7 +3570,7 @@ static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
* This routine is used to program Wake-on-LAN pattern.
*/
static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
- u8 *mask, uint frame_size, u8 *pattern)
+ const u8 *mask, uint frame_size, const u8 *pattern)
{
int bits;
int from;
@@ -3626,9 +3626,9 @@ static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
*
* This routine is used to add ARP pattern for waking up the host.
*/
-static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
+static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
{
- u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
+ static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
u8 pattern[42] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3651,8 +3651,8 @@ static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
*/
static void hw_add_wol_bcast(struct ksz_hw *hw)
{
- u8 mask[] = { 0x3F };
- u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ static const u8 mask[] = { 0x3F };
+ static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
}
@@ -3669,7 +3669,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw)
*/
static void hw_add_wol_mcast(struct ksz_hw *hw)
{
- u8 mask[] = { 0x3F };
+ static const u8 mask[] = { 0x3F };
u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
memcpy(&pattern[3], &hw->override_addr[3], 3);
@@ -3687,7 +3687,7 @@ static void hw_add_wol_mcast(struct ksz_hw *hw)
*/
static void hw_add_wol_ucast(struct ksz_hw *hw)
{
- u8 mask[] = { 0x3F };
+ static const u8 mask[] = { 0x3F };
hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
}
@@ -3700,7 +3700,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw)
*
* This routine is used to enable Wake-on-LAN depending on driver settings.
*/
-static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, u8 *net_addr)
+static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
{
hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
@@ -6208,7 +6208,7 @@ static int netdev_set_wol(struct net_device *dev,
struct dev_info *hw_priv = priv->adapter;
/* Need to find a way to retrieve the device IP address. */
- u8 net_addr[] = { 192, 168, 1, 1 };
+ static const u8 net_addr[] = { 192, 168, 1, 1 };
if (wol->wolopts & ~hw_priv->wol_support)
return -EINVAL;
@@ -7241,7 +7241,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
struct ksz_hw *hw = &hw_priv->hw;
/* Need to find a way to retrieve the device IP address. */
- u8 net_addr[] = { 192, 168, 1, 1 };
+ static const u8 net_addr[] = { 192, 168, 1, 1 };
for (i = 0; i < hw->dev_count; i++) {
if (info->netdev[i]) {
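
The ksz884x changes above all apply one rule: a lookup table that is never written should be static const. A plain local array is rebuilt on the stack at every call; static const emits one read-only copy in .rodata and turns accidental stores into compile errors. The one exception in this file is deliberate: hw_add_wol_mcast() keeps its pattern[] writable because it patches the last three bytes with memcpy() before use. A minimal sketch of the difference (use_mask() is a hypothetical consumer):

	static void stack_copy_version(void)
	{
		u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 }; /* rebuilt per call */

		use_mask(mask);
	}

	static void rodata_version(void)
	{
		static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 }; /* one copy */

		use_mask(mask);
	}
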
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index f06296bfe293..02336edce748 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -207,7 +207,7 @@ tx_full and tbusy flags.
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18
-#define TX_TIMEOUT 20
+#define TX_TIMEOUT (HZ/5)
/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index c27f4291b350..9e042894479b 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -161,7 +161,7 @@ enum commands {
#define RX_SUSPEND 0x0030
#define RX_ABORT 0x0040
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
struct i596_reg {
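
The lance.c and lib82596.c changes just above fix a unit bug rather than tune a value: watchdog_timeo is measured in jiffies, so a bare constant silently scales with CONFIG_HZ. At HZ=100 the old "#define TX_TIMEOUT 20" meant 200 ms, but at HZ=1000 the same define shrank to 20 ms and could fire spuriously under normal load. Writing the timeout as a fraction of HZ pins the wall-clock value:

	#define TX_TIMEOUT (HZ / 5)		/* 200 ms at any CONFIG_HZ */

	dev->watchdog_timeo = TX_TIMEOUT;	/* jiffies */

lib82596's HZ/20 is 50 ms by the same arithmetic.
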
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index e7030ceb178b..da74db4a03d4 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -203,7 +203,7 @@ static void __NS8390_init(struct net_device *dev, int startp);
static int __ei_open(struct net_device *dev)
{
unsigned long flags;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (dev->watchdog_timeo <= 0)
dev->watchdog_timeo = TX_TIMEOUT;
@@ -231,7 +231,7 @@ static int __ei_open(struct net_device *dev)
*/
static int __ei_close(struct net_device *dev)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
/*
@@ -256,7 +256,7 @@ static int __ei_close(struct net_device *dev)
static void __ei_tx_timeout(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
@@ -303,7 +303,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int send_length = skb->len, output_page;
unsigned long flags;
char buf[ETH_ZLEN];
@@ -592,7 +592,7 @@ static void ei_tx_err(struct net_device *dev)
static void ei_tx_intr(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int status = ei_inb(e8390_base + EN0_TSR);
ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
@@ -675,7 +675,7 @@ static void ei_tx_intr(struct net_device *dev)
static void ei_receive(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned char rxing_page, this_frame, next_frame;
unsigned short current_offset;
int rx_pkt_count = 0;
@@ -879,7 +879,7 @@ static void ei_rx_overrun(struct net_device *dev)
static struct net_device_stats *__ei_get_stats(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
/* If the card is stopped, just return the present stats. */
@@ -927,7 +927,7 @@ static void do_set_multicast_list(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
int i;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
{
@@ -981,7 +981,7 @@ static void do_set_multicast_list(struct net_device *dev)
static void __ei_set_multicast_list(struct net_device *dev)
{
unsigned long flags;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
spin_lock_irqsave(&ei_local->page_lock, flags);
do_set_multicast_list(dev);
@@ -998,7 +998,7 @@ static void __ei_set_multicast_list(struct net_device *dev)
static void ethdev_setup(struct net_device *dev)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (ei_debug > 1)
printk(version);
@@ -1036,7 +1036,7 @@ static struct net_device *____alloc_ei_netdev(int size)
static void __NS8390_init(struct net_device *dev, int startp)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int i;
int endcfg = ei_local->word16
? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
@@ -1099,7 +1099,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
int start_page)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
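
The lib8390.c hunks are pure cleanup: netdev_priv() returns void *, and C converts void * to any object pointer type implicitly, so every "(struct ei_device *)" cast was noise; such casts can also mask a real type mismatch if the destination type later changes. The bare form is the idiomatic one:

	struct ei_device *ei_local = netdev_priv(dev);	/* no cast needed */
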
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 9f8e7027b0b3..e2c2a7202dba 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -692,7 +692,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- unsigned int csum_start_off = skb_transport_offset(skb);
+ unsigned int csum_start_off = skb_checksum_start_offset(skb);
unsigned int csum_index_off = csum_start_off + skb->csum_offset;
cur_p->app0 |= 1; /* TX Checksum Enabled */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0fc9dc7f20db..6ed577b065df 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -38,6 +38,7 @@ struct macvlan_port {
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
struct list_head vlans;
struct rcu_head rcu;
+ bool passthru;
};
#define macvlan_port_get_rcu(dev) \
@@ -169,6 +170,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
macvlan_broadcast(skb, port, NULL,
MACVLAN_MODE_PRIVATE |
MACVLAN_MODE_VEPA |
+					  MACVLAN_MODE_PASSTHRU |
MACVLAN_MODE_BRIDGE);
else if (src->mode == MACVLAN_MODE_VEPA)
/* flood to everyone except source */
@@ -185,7 +187,10 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
return skb;
}
- vlan = macvlan_hash_lookup(port, eth->h_dest);
+ if (port->passthru)
+ vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
+ else
+ vlan = macvlan_hash_lookup(port, eth->h_dest);
if (vlan == NULL)
return skb;
@@ -243,18 +248,22 @@ xmit_world:
netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- int i = skb_get_queue_mapping(skb);
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
unsigned int len = skb->len;
int ret;
+ const struct macvlan_dev *vlan = netdev_priv(dev);
ret = macvlan_queue_xmit(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- txq->tx_packets++;
- txq->tx_bytes += len;
- } else
- txq->tx_dropped++;
+ struct macvlan_pcpu_stats *pcpu_stats;
+ pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(vlan->pcpu_stats->tx_dropped);
+ }
return ret;
}
EXPORT_SYMBOL_GPL(macvlan_start_xmit);
@@ -284,6 +293,11 @@ static int macvlan_open(struct net_device *dev)
struct net_device *lowerdev = vlan->lowerdev;
int err;
+ if (vlan->port->passthru) {
+ dev_set_promiscuity(lowerdev, 1);
+ goto hash_add;
+ }
+
err = -EBUSY;
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
@@ -296,6 +310,8 @@ static int macvlan_open(struct net_device *dev)
if (err < 0)
goto del_unicast;
}
+
+hash_add:
macvlan_hash_add(vlan);
return 0;
@@ -310,12 +326,18 @@ static int macvlan_stop(struct net_device *dev)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
+ if (vlan->port->passthru) {
+ dev_set_promiscuity(lowerdev, -1);
+ goto hash_del;
+ }
+
dev_mc_unsync(lowerdev, dev);
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
dev_uc_del(lowerdev, dev->dev_addr);
+hash_del:
macvlan_hash_del(vlan);
return 0;
}
@@ -414,14 +436,15 @@ static int macvlan_init(struct net_device *dev)
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
dev->features = lowerdev->features & MACVLAN_FEATURES;
+ dev->features |= NETIF_F_LLTX;
dev->gso_max_size = lowerdev->gso_max_size;
dev->iflink = lowerdev->ifindex;
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
- vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats);
- if (!vlan->rx_stats)
+ vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
+ if (!vlan->pcpu_stats)
return -ENOMEM;
return 0;
@@ -431,7 +454,7 @@ static void macvlan_uninit(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- free_percpu(vlan->rx_stats);
+ free_percpu(vlan->pcpu_stats);
}
static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -439,33 +462,38 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
{
struct macvlan_dev *vlan = netdev_priv(dev);
- dev_txq_stats_fold(dev, stats);
-
- if (vlan->rx_stats) {
- struct macvlan_rx_stats *p, accum = {0};
- u64 rx_packets, rx_bytes, rx_multicast;
+ if (vlan->pcpu_stats) {
+ struct macvlan_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+ u32 rx_errors = 0, tx_dropped = 0;
unsigned int start;
int i;
for_each_possible_cpu(i) {
- p = per_cpu_ptr(vlan->rx_stats, i);
+ p = per_cpu_ptr(vlan->pcpu_stats, i);
do {
start = u64_stats_fetch_begin_bh(&p->syncp);
rx_packets = p->rx_packets;
rx_bytes = p->rx_bytes;
rx_multicast = p->rx_multicast;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
- accum.rx_packets += rx_packets;
- accum.rx_bytes += rx_bytes;
- accum.rx_multicast += rx_multicast;
- /* rx_errors is an ulong, updated without syncp protection */
- accum.rx_errors += p->rx_errors;
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->multicast += rx_multicast;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* rx_errors & tx_dropped are u32, updated
+ * without syncp protection.
+ */
+ rx_errors += p->rx_errors;
+ tx_dropped += p->tx_dropped;
}
- stats->rx_packets = accum.rx_packets;
- stats->rx_bytes = accum.rx_bytes;
- stats->rx_errors = accum.rx_errors;
- stats->rx_dropped = accum.rx_errors;
- stats->multicast = accum.rx_multicast;
+ stats->rx_errors = rx_errors;
+ stats->rx_dropped = rx_errors;
+ stats->tx_dropped = tx_dropped;
}
return stats;
}
@@ -549,6 +577,7 @@ static int macvlan_port_create(struct net_device *dev)
if (port == NULL)
return -ENOMEM;
+ port->passthru = false;
port->dev = dev;
INIT_LIST_HEAD(&port->vlans);
for (i = 0; i < MACVLAN_HASH_SIZE; i++)
@@ -593,6 +622,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
case MACVLAN_MODE_PRIVATE:
case MACVLAN_MODE_VEPA:
case MACVLAN_MODE_BRIDGE:
+ case MACVLAN_MODE_PASSTHRU:
break;
default:
return -EINVAL;
@@ -601,25 +631,6 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
-static int macvlan_get_tx_queues(struct net *net,
- struct nlattr *tb[],
- unsigned int *num_tx_queues,
- unsigned int *real_num_tx_queues)
-{
- struct net_device *real_dev;
-
- if (!tb[IFLA_LINK])
- return -EINVAL;
-
- real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev)
- return -ENODEV;
-
- *num_tx_queues = real_dev->num_tx_queues;
- *real_num_tx_queues = real_dev->real_num_tx_queues;
- return 0;
-}
-
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
int (*receive)(struct sk_buff *skb),
@@ -661,6 +672,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
}
port = macvlan_port_get(lowerdev);
+ /* Only 1 macvlan device can be created in passthru mode */
+ if (port->passthru)
+ return -EINVAL;
+
vlan->lowerdev = lowerdev;
vlan->dev = dev;
vlan->port = port;
@@ -671,6 +686,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (data && data[IFLA_MACVLAN_MODE])
vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+ if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
+ if (!list_empty(&port->vlans))
+ return -EINVAL;
+ port->passthru = true;
+ memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
+ }
+
err = register_netdevice(dev);
if (err < 0)
goto destroy_port;
@@ -743,7 +765,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
{
/* common fields */
ops->priv_size = sizeof(struct macvlan_dev);
- ops->get_tx_queues = macvlan_get_tx_queues;
ops->validate = macvlan_validate;
ops->maxtype = IFLA_MACVLAN_MAX;
ops->policy = macvlan_policy;
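
Two independent features land in macvlan.c above. MACVLAN_MODE_PASSTHRU dedicates the lower device to a single macvlan, which inherits the lower device's MAC address and holds it in promiscuous mode while open. The stats rework replaces per-queue counters with per-CPU ones guarded by a u64_stats sequence counter: 64-bit counters can tear on 32-bit SMP, so readers retry until the writer's sequence count is stable, while plain u32 counters (rx_errors, tx_dropped) tolerate a racy read and stay outside the loop. The reader side, distilled (field names as in the patch):

	do {
		start = u64_stats_fetch_begin_bh(&p->syncp);
		rx_packets = p->rx_packets;	/* u64: needs a consistent snapshot */
		rx_bytes   = p->rx_bytes;
	} while (u64_stats_fetch_retry_bh(&p->syncp, start));

	rx_errors += p->rx_errors;		/* u32: a racy read is acceptable */

The switch to per-CPU counters is also what lets the patch set NETIF_F_LLTX in macvlan_init(): transmit no longer touches shared state that would need the tx lock.
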
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 42567279843e..21845affea13 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -504,8 +504,7 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- vnet_hdr->csum_start = skb->csum_start -
- skb_headroom(skb);
+ vnet_hdr->csum_start = skb_checksum_start_offset(skb);
vnet_hdr->csum_offset = skb->csum_offset;
} /* else everything is zero */
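
The skb_checksum_start_offset() conversions here and in the ll_temac and myri10ge hunks replace two different open-coded spellings of the same quantity: the offset from skb->data at which hardware checksumming must start. The helper amounts to (as defined in skbuff.h around this time):

	static inline int skb_checksum_start_offset(const struct sk_buff *skb)
	{
		return skb->csum_start - skb_headroom(skb);
	}

Unlike skb_transport_offset(), it follows skb->csum_start, so it stays correct even when checksumming must start somewhere other than the transport header.
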
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index dd2b6a71c6d7..02076e16542a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1514,11 +1514,6 @@ static int mv643xx_eth_nway_reset(struct net_device *dev)
return genphy_restart_aneg(mp->phy);
}
-static u32 mv643xx_eth_get_link(struct net_device *dev)
-{
- return !!netif_carrier_ok(dev);
-}
-
static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
@@ -1658,7 +1653,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
.set_settings = mv643xx_eth_set_settings,
.get_drvinfo = mv643xx_eth_get_drvinfo,
.nway_reset = mv643xx_eth_nway_reset,
- .get_link = mv643xx_eth_get_link,
+ .get_link = ethtool_op_get_link,
.get_coalesce = mv643xx_eth_get_coalesce,
.set_coalesce = mv643xx_eth_set_coalesce,
.get_ringparam = mv643xx_eth_get_ringparam,
@@ -2983,7 +2978,7 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
unregister_netdev(mp->dev);
if (mp->phy != NULL)
phy_detach(mp->phy);
- flush_scheduled_work();
+ cancel_work_sync(&mp->tx_timeout_task);
free_netdev(mp->dev);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 8524cc40ec57..a37fcf11ab36 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2736,7 +2736,7 @@ again:
odd_flag = 0;
flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- cksum_offset = skb_transport_offset(skb);
+ cksum_offset = skb_checksum_start_offset(skb);
pseudo_hdr_offset = cksum_offset + skb->csum_offset;
/* If the headers are excessively large, then we must
* fall back to a software checksum */
@@ -4067,7 +4067,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
if (mgp == NULL)
return;
- flush_scheduled_work();
+ cancel_work_sync(&mgp->watchdog_work);
netdev = mgp->dev;
unregister_netdev(netdev);
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index e0b0ef11f110..30be8c634ebd 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -86,7 +86,7 @@ static u32 reg_offset[16];
static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int i;
unsigned char bus_width;
@@ -218,7 +218,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
int start_page, stop_page;
int reg0, ret;
static unsigned version_printed;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned char bus_width;
if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
@@ -371,7 +371,7 @@ static int ne_close(struct net_device *dev)
static void ne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (ei_debug > 1)
printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
@@ -397,7 +397,7 @@ static void ne_reset_8390(struct net_device *dev)
static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
@@ -437,7 +437,7 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
#ifdef NE_SANITY_CHECK
int xfer_count = count;
#endif
@@ -507,7 +507,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
static void ne_block_output(struct net_device *dev, int count,
const unsigned char *buf, const int start_page)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long dma_start;
#ifdef NE_SANITY_CHECK
int retries = 0;
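The repeated cast removals in this file (and in axnet_cs.c and qla3xxx.c below) rely on netdev_priv() returning void *, which converts implicitly to any object pointer in C. A one-line sketch:

    /* Private area allocated alongside the net_device by
     * alloc_etherdev(sizeof(struct ei_device)); no cast needed. */
    struct ei_device *ei_local = netdev_priv(dev);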
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 37d3ebd65be8..e42d26e03af5 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -655,7 +655,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
}
static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
- u8 *addr, struct list_head *del_list)
+ const u8 *addr, struct list_head *del_list)
{
struct list_head *head;
nx_mac_list_t *cur;
@@ -686,7 +686,9 @@ static void netxen_p3_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
u32 mode = VPORT_MISS_MODE_DROP;
LIST_HEAD(del_list);
struct list_head *head;
@@ -869,9 +871,11 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable)
u64 word;
int i, rv;
- u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
- 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
- 0x255b0ec26d5a56daULL };
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
memset(&req, 0, sizeof(nx_nic_req_t));
@@ -895,7 +899,7 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable)
((u64)(enable & 0x1) << 8) |
((0x7ULL) << 48);
req.words[0] = cpu_to_le64(word);
- for (i = 0; i < 5; i++)
+ for (i = 0; i < ARRAY_SIZE(key); i++)
req.words[i+1] = cpu_to_le64(key[i]);
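Two idioms recur in this hunk and again in qlcnic_hw.c below: constant tables become static const, so they live in .rodata rather than being rebuilt on the stack on every call, and loop bounds come from ARRAY_SIZE() instead of a magic number. A sketch:

    static const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL };

    for (i = 0; i < ARRAY_SIZE(key); i++)   /* stays correct if key grows */
        req.words[i + 1] = cpu_to_le64(key[i]);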
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 95fe552aa279..731077d8d962 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -214,13 +214,12 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, 0);
- cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
if (cmd_buf_arr == NULL) {
dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
netdev->name);
goto err_out;
}
- memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
@@ -279,8 +278,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
break;
}
- rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
- vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
if (rds_ring->rx_buf_arr == NULL) {
printk(KERN_ERR "%s: Failed to allocate "
"rx buffer ring %d\n",
@@ -288,7 +286,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
/* free whatever was already allocated */
goto err_out;
}
- memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
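The allocator change above (repeated in pch_gbe, qlcnic and pptp later in the series) folds a vmalloc()+memset() pair into vzalloc(), which hands back zeroed memory directly:

    /* Before: buf = vmalloc(size); memset(buf, 0, size);
     * after: one call, same semantics, no window with garbage contents. */
    void *buf = vzalloc(size);
    if (!buf)
        return -ENOMEM;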
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index a75ba9517404..ceeaac989df2 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
char netxen_nic_driver_name[] = "netxen_nic";
@@ -1280,6 +1277,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int i = 0, err;
int pci_func_id = PCI_FUNC(pdev->devfn);
uint8_t revision_id;
+ u32 val;
if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
pr_warning("%s: chip revisions between 0x%x-0x%x "
@@ -1355,8 +1353,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- if (reset_devices) {
- if (adapter->portnum == 0) {
+ if (adapter->portnum == 0) {
+ val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ if (val != 0xffffffff && val != 0) {
NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
adapter->need_fw_reset = 1;
}
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 33618edc61f9..d973fc6c6b88 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -388,9 +388,9 @@ static long memend; /* e.g 0xd4000 */
struct net_device * __init ni52_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct priv));
- static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0};
+ static const int ports[] = {0x300, 0x280, 0x360, 0x320, 0x340, 0};
+ const int *port;
struct priv *p;
- int *port;
int err = 0;
if (!dev)
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index da228a0dd6cd..c75ae85eb918 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -361,8 +361,8 @@ static int dma;
struct net_device * __init ni65_probe(int unit)
{
struct net_device *dev = alloc_etherdev(0);
- static int ports[] = {0x360,0x300,0x320,0x340, 0};
- int *port;
+ static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
+ const int *port;
int err = 0;
if (!dev)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 781e368329f9..2541321bad82 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -6589,7 +6589,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
(ip_proto == IPPROTO_UDP ?
TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
- start = skb_transport_offset(skb) -
+ start = skb_checksum_start_offset(skb) -
(pad_bytes + sizeof(struct tx_pkt_hdr));
stuff = start + skb->csum_offset;
@@ -9917,7 +9917,7 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
- flush_scheduled_work();
+ flush_work_sync(&np->reset_task);
niu_netif_stop(np);
del_timer_sync(&np->timer);
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
index c8cc32c0edc9..c8c873b31a89 100644
--- a/drivers/net/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -469,18 +469,6 @@ static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
}
/**
- * pch_gbe_get_tx_csum - Report whether transmit checksums are turned on or off
- * @netdev: Network interface device structure
- * Returns
- * true(1): Checksum On
- * false(0): Checksum Off
- */
-static u32 pch_gbe_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
-/**
* pch_gbe_set_tx_csum - Turn transmit checksums on or off
* @netdev: Network interface device structure
* @data: Checksum on[true] or off[false]
@@ -493,11 +481,7 @@ static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
adapter->tx_csum = data;
- if (data)
- netdev->features |= NETIF_F_HW_CSUM;
- else
- netdev->features &= ~NETIF_F_HW_CSUM;
- return 0;
+ return ethtool_op_set_tx_ipv6_csum(netdev, data);
}
/**
@@ -572,7 +556,6 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
.set_pauseparam = pch_gbe_set_pauseparam,
.get_rx_csum = pch_gbe_get_rx_csum,
.set_rx_csum = pch_gbe_set_rx_csum,
- .get_tx_csum = pch_gbe_get_tx_csum,
.set_tx_csum = pch_gbe_set_tx_csum,
.get_strings = pch_gbe_get_strings,
.get_ethtool_stats = pch_gbe_get_ethtool_stats,
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 472056b47440..d7355306a738 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
*
* This code was derived from the Intel e1000e Linux driver.
*
@@ -1523,12 +1523,11 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
int desNo;
size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
- tx_ring->buffer_info = vmalloc(size);
+ tx_ring->buffer_info = vzalloc(size);
if (!tx_ring->buffer_info) {
pr_err("Unable to allocate memory for the buffer infomation\n");
return -ENOMEM;
}
- memset(tx_ring->buffer_info, 0, size);
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
@@ -1573,12 +1572,11 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
int desNo;
size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
- rx_ring->buffer_info = vmalloc(size);
+ rx_ring->buffer_info = vzalloc(size);
if (!rx_ring->buffer_info) {
pr_err("Unable to allocate memory for the receive descriptor ring\n");
return -ENOMEM;
}
- memset(rx_ring->buffer_info, 0, size);
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -2321,7 +2319,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
netif_napi_add(netdev, &adapter->napi,
pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
- netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO;
+ netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
pch_gbe_set_ethtool_ops(netdev);
pch_gbe_mac_reset_hw(&adapter->hw);
@@ -2360,9 +2358,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
pch_gbe_check_options(adapter);
if (adapter->tx_csum)
- netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
else
- netdev->features &= ~NETIF_F_HW_CSUM;
+ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
/* initialize the wol settings based on the eeprom settings */
adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
@@ -2464,8 +2462,8 @@ static void __exit pch_gbe_exit_module(void)
module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);
-MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver");
-MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>");
+MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
+MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
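The feature-flag change above is semantic, not cosmetic: NETIF_F_HW_CSUM advertises checksum offload for any protocol, while the pair used now limits the claim to TCP/UDP over IPv4/IPv6, which is what this MAC actually implements. A sketch of the distinction:

    /* Claim only what the hardware can do; the stack falls back to
     * software checksumming for everything else. */
    netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;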
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
index 2510146fc560..ef0996a0eaaa 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -434,8 +434,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.err = "using default of "
__MODULE_STRING(PCH_GBE_DEFAULT_TXD),
.def = PCH_GBE_DEFAULT_TXD,
- .arg = { .r = { .min = PCH_GBE_MIN_TXD } },
- .arg = { .r = { .max = PCH_GBE_MAX_TXD } }
+ .arg = { .r = { .min = PCH_GBE_MIN_TXD,
+ .max = PCH_GBE_MAX_TXD } }
};
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
tx_ring->count = TxDescriptors;
@@ -450,8 +450,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.err = "using default of "
__MODULE_STRING(PCH_GBE_DEFAULT_RXD),
.def = PCH_GBE_DEFAULT_RXD,
- .arg = { .r = { .min = PCH_GBE_MIN_RXD } },
- .arg = { .r = { .max = PCH_GBE_MAX_RXD } }
+ .arg = { .r = { .min = PCH_GBE_MIN_RXD,
+ .max = PCH_GBE_MAX_RXD } }
};
struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
rx_ring->count = RxDescriptors;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d2e166e29dda..1f42f6ac8551 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
typedef struct axnet_dev_t {
struct pcmcia_device *p_dev;
- caddr_t base;
- struct timer_list watchdog;
- int stale, fast_poll;
- u_short link_status;
- u_char duplex_flag;
- int phy_id;
- int flags;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_short link_status;
+ u_char duplex_flag;
+ int phy_id;
+ int flags;
+ int active_low;
} axnet_dev_t;
static inline axnet_dev_t *PRIV(struct net_device *dev)
@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link)
if (info->flags & IS_AX88790)
outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
+ info->active_low = 0;
+
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link)
if ((j != 0) && (j != 0xffff)) break;
}
- /* Maybe PHY is in power down mode. (PPD_SET = 1)
- Bit 2 of CCSR is active low. */
if (i == 32) {
+ /* Maybe PHY is in power down mode. (PPD_SET = 1)
+ Bit 2 of CCSR is active low. */
pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
if (j == j2) continue;
- if ((j != 0) && (j != 0xffff)) break;
+ if ((j != 0) && (j != 0xffff)) {
+ info->active_low = 1;
+ break;
+ }
}
}
@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link)
static int axnet_resume(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ axnet_dev_t *info = PRIV(dev);
if (link->open) {
+ if (info->active_low == 1)
+ pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+
axnet_reset_8390(dev);
AX88190_init(dev, 1);
netif_device_attach(dev);
@@ -680,6 +690,7 @@ static void block_output(struct net_device *dev, int count,
static struct pcmcia_device_id axnet_ids[] = {
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081),
PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301),
+ PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301),
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303),
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
@@ -865,7 +876,7 @@ static void do_set_multicast_list(struct net_device *dev);
static int ax_open(struct net_device *dev)
{
unsigned long flags;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
/*
* Grab the page lock so we own the register set, then call
@@ -916,7 +927,7 @@ static int ax_close(struct net_device *dev)
static void axnet_tx_timeout(struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
@@ -963,7 +974,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int length, send_length, output_page;
unsigned long flags;
u8 packet[ETH_ZLEN];
@@ -1260,7 +1271,7 @@ static void ei_tx_err(struct net_device *dev)
static void ei_tx_intr(struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int status = inb(e8390_base + EN0_TSR);
/*
@@ -1344,7 +1355,7 @@ static void ei_tx_intr(struct net_device *dev)
static void ei_receive(struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned char rxing_page, this_frame, next_frame;
unsigned short current_offset;
int rx_pkt_count = 0;
@@ -1529,7 +1540,7 @@ static void ei_rx_overrun(struct net_device *dev)
static struct net_device_stats *get_stats(struct net_device *dev)
{
long ioaddr = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
/* If the card is stopped, just return the present stats. */
@@ -1578,7 +1589,7 @@ static void do_set_multicast_list(struct net_device *dev)
{
long e8390_base = dev->base_addr;
int i;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
memset(ei_local->mcfilter, 0, 8);
@@ -1636,7 +1647,7 @@ static void AX88190_init(struct net_device *dev, int startp)
{
axnet_dev_t *info = PRIV(dev);
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int i;
int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
@@ -1702,7 +1713,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
int start_page)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
if (inb_p(e8390_base) & E8390_TRANS)
{
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 0a2b0f9cdf33..76683d97d83b 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1291,7 +1291,7 @@ updateCRC
static void updateCRC(int *CRC, int bit)
{
- int poly[]={
+ static const int poly[]={
1,1,1,0, 1,1,0,1,
1,0,1,1, 1,0,0,0,
1,0,0,0, 0,0,1,1,
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index d05c44692f08..2c158910f7ea 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1493,7 +1493,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530),
PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab),
PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110),
- PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041),
PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452),
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300),
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cb3d13e4e074..35fda5ac8120 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -64,7 +64,7 @@ config BCM63XX_PHY
config ICPLUS_PHY
tristate "Drivers for ICPlus PHYs"
---help---
- Currently supports the IP175C PHY.
+ Currently supports the IP175C and IP1001 PHYs.
config REALTEK_PHY
tristate "Drivers for Realtek PHYs"
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index c1d2d251fe8b..9a09e24c30bc 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -30,7 +30,7 @@
#include <asm/irq.h>
#include <asm/uaccess.h>
-MODULE_DESCRIPTION("ICPlus IP175C PHY driver");
+MODULE_DESCRIPTION("ICPlus IP175C/IC1001 PHY drivers");
MODULE_AUTHOR("Michael Barkowski");
MODULE_LICENSE("GPL");
@@ -89,6 +89,33 @@ static int ip175c_config_init(struct phy_device *phydev)
return 0;
}
+static int ip1001_config_init(struct phy_device *phydev)
+{
+ int err, value;
+
+ /* Software Reset PHY */
+ value = phy_read(phydev, MII_BMCR);
+ value |= BMCR_RESET;
+ err = phy_write(phydev, MII_BMCR, value);
+ if (err < 0)
+ return err;
+
+ do {
+ value = phy_read(phydev, MII_BMCR);
+ } while (value & BMCR_RESET);
+
+ /* Additional delay (2 ns) used to adjust the RX clock phase
+ * at the GMII/RGMII interface */
+ value = phy_read(phydev, 16);
+ value |= 0x3;
+
+ err = phy_write(phydev, 16, value);
+ if (err < 0)
+ return err;
+
+ return err;
+}
+
static int ip175c_read_status(struct phy_device *phydev)
{
if (phydev->addr == 4) /* WAN port */
@@ -121,21 +148,43 @@ static struct phy_driver ip175c_driver = {
.driver = { .owner = THIS_MODULE,},
};
-static int __init ip175c_init(void)
+static struct phy_driver ip1001_driver = {
+ .phy_id = 0x02430d90,
+ .name = "ICPlus IP1001",
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause,
+ .config_init = &ip1001_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init icplus_init(void)
{
+ int ret = 0;
+
+ ret = phy_driver_register(&ip1001_driver);
+ if (ret < 0)
+ return -ENODEV;
+
return phy_driver_register(&ip175c_driver);
}
-static void __exit ip175c_exit(void)
+static void __exit icplus_exit(void)
{
+ phy_driver_unregister(&ip1001_driver);
phy_driver_unregister(&ip175c_driver);
}
-module_init(ip175c_init);
-module_exit(ip175c_exit);
+module_init(icplus_init);
+module_exit(icplus_exit);
static struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ 0x02430d80, 0x0ffffff0 },
+ { 0x02430d90, 0x0ffffff0 },
{ }
};
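One caveat in the new ip1001_config_init() above: the BMCR_RESET poll has no upper bound, so a wedged PHY would spin forever. A defensive variant (an assumption on my part, not what the patch does) caps the wait:

    /* Hedged sketch: bound the self-clearing BMCR_RESET poll so a stuck
     * PHY cannot hang the init path. Timeout value is illustrative. */
    static int phy_wait_reset(struct phy_device *phydev)
    {
        int value, timeout = 100;

        do {
            value = phy_read(phydev, MII_BMCR);
            if (value < 0)
                return value;           /* MDIO read error */
            if (!(value & BMCR_RESET))
                return 0;               /* reset complete */
            msleep(1);
        } while (--timeout);

        return -ETIMEDOUT;
    }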
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f0bd1a1aba3a..e8b9c53c304b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -30,11 +30,14 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/marvell_phy.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
+#define MII_MARVELL_PHY_PAGE 22
+
#define MII_M1011_IEVENT 0x13
#define MII_M1011_IEVENT_CLEAR 0x0000
@@ -80,7 +83,6 @@
#define MII_88E1121_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_PAGE 3
#define MII_88E1121_PHY_LED_DEF 0x0030
-#define MII_88E1121_PHY_PAGE 22
#define MII_M1011_PHY_STATUS 0x11
#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0;
}
+#ifdef CONFIG_OF_MDIO
+/*
+ * Set and/or override some configuration registers based on the
+ * marvell,reg-init property stored in the of_node for the phydev.
+ *
+ * marvell,reg-init = <reg-page reg mask value>,...;
+ *
+ * There may be one or more sets of <reg-page reg mask value>:
+ *
+ * reg-page: which register bank to use.
+ * reg: the register.
+ * mask: if non-zero, ANDed with existing register value.
+ * value: ORed with the masked value and written to the register.
+ *
+ */
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+ const __be32 *paddr;
+ int len, i, saved_page, current_page, page_changed, ret;
+
+ if (!phydev->dev.of_node)
+ return 0;
+
+ paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len);
+ if (!paddr || len < (4 * sizeof(*paddr)))
+ return 0;
+
+ saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ if (saved_page < 0)
+ return saved_page;
+ page_changed = 0;
+ current_page = saved_page;
+
+ ret = 0;
+ len /= sizeof(*paddr);
+ for (i = 0; i < len - 3; i += 4) {
+ u16 reg_page = be32_to_cpup(paddr + i);
+ u16 reg = be32_to_cpup(paddr + i + 1);
+ u16 mask = be32_to_cpup(paddr + i + 2);
+ u16 val_bits = be32_to_cpup(paddr + i + 3);
+ int val;
+
+ if (reg_page != current_page) {
+ current_page = reg_page;
+ page_changed = 1;
+ ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
+ if (ret < 0)
+ goto err;
+ }
+
+ val = 0;
+ if (mask) {
+ val = phy_read(phydev, reg);
+ if (val < 0) {
+ ret = val;
+ goto err;
+ }
+ val &= mask;
+ }
+ val |= val_bits;
+
+ ret = phy_write(phydev, reg, val);
+ if (ret < 0)
+ goto err;
+
+ }
+err:
+ if (page_changed) {
+ i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
+ if (ret == 0)
+ ret = i;
+ }
+ return ret;
+}
+#else
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
static int m88e1121_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
MII_88E1121_PHY_MSCR_PAGE);
if (err < 0)
return err;
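The marvell_of_reg_init() helper above walks the property four cells at a time, as <reg-page reg mask value>. In device-tree source that might read marvell,reg-init = <3 16 0xff00 0x1e>; (an invented example). The raw data the parser would see for that fragment:

    /* Hypothetical property contents, big-endian as in the flattened
     * tree: select page 3, read reg 16, keep bits 0xff00, OR in 0x1e. */
    static const __be32 example_reg_init[] = {
        cpu_to_be32(3), cpu_to_be32(16),
        cpu_to_be32(0xff00), cpu_to_be32(0x001e),
    };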
@@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
- phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
@@ -229,11 +312,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
- phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
err = genphy_config_aneg(phydev);
@@ -244,9 +327,9 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
MII_88E1121_PHY_MSCR_PAGE);
if (err < 0)
return err;
@@ -258,7 +341,7 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
if (err < 0)
return err;
@@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev)
return err;
}
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
@@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
int err;
/* Change address */
- err = phy_write(phydev, 0x16, 0x0002);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
if (err < 0)
return err;
@@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
return err;
/* Change address */
- err = phy_write(phydev, 0x16, 0x0003);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
if (err < 0)
return err;
@@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev)
if (err < 0)
return err;
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
/* Reset address */
- err = phy_write(phydev, 0x16, 0x0);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int m88e1149_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ /* Change address */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
+ if (err < 0)
+ return err;
+
+ /* Enable 1000 Mbit */
+ err = phy_write(phydev, 0x15, 0x1048);
+ if (err < 0)
+ return err;
+
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
+ /* Reset address */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
if (err < 0)
return err;
@@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev)
}
}
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = {
.driver = { .owner = THIS_MODULE },
},
{
+ .phy_id = MARVELL_PHY_ID_88E1149R,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E1149R",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &m88e1149_config_init,
+ .config_aneg = &m88e1118_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .driver = { .owner = THIS_MODULE },
+ },
+ {
.phy_id = MARVELL_PHY_ID_88E1240,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1240",
@@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ 0x01410e10, 0xfffffff0 },
{ 0x01410cb0, 0xfffffff0 },
{ 0x01410cd0, 0xfffffff0 },
+ { 0x01410e50, 0xfffffff0 },
{ 0x01410e30, 0xfffffff0 },
{ 0x01410e90, 0xfffffff0 },
{ }
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7670aac0e93f..a8445c72fc13 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -47,11 +47,11 @@ void phy_print_status(struct phy_device *phydev)
pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
phydev->link ? "Up" : "Down");
if (phydev->link)
- printk(" - %d/%s", phydev->speed,
+ printk(KERN_CONT " - %d/%s", phydev->speed,
DUPLEX_FULL == phydev->duplex ?
"Full" : "Half");
- printk("\n");
+ printk(KERN_CONT "\n");
}
EXPORT_SYMBOL(phy_print_status);
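The KERN_CONT additions above follow the printk continuation rule: a printk() with no level prefix starts a new line at the default log level, so fragments meant to extend the preceding pr_info() must be marked explicitly. A sketch (up, speed and duplex are invented locals):

    pr_info("PHY: link is %s", up ? "Up" : "Down");
    if (up)
        printk(KERN_CONT " - %d/%s", speed, duplex ? "Full" : "Half");
    printk(KERN_CONT "\n");   /* terminate the same logical line */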
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 09cf56d0416a..b708f68471a6 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1136,8 +1136,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
a four-byte PPP header on each packet */
*skb_push(skb, 2) = 1;
if (ppp->pass_filter &&
- sk_run_filter(skb, ppp->pass_filter,
- ppp->pass_len) == 0) {
+ sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
printk(KERN_DEBUG "PPP: outbound frame not passed\n");
kfree_skb(skb);
@@ -1145,8 +1144,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
}
/* if this packet passes the active filter, record the time */
if (!(ppp->active_filter &&
- sk_run_filter(skb, ppp->active_filter,
- ppp->active_len) == 0))
+ sk_run_filter(skb, ppp->active_filter) == 0))
ppp->last_xmit = jiffies;
skb_pull(skb, 2);
#else
@@ -1758,8 +1756,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
*skb_push(skb, 2) = 0;
if (ppp->pass_filter &&
- sk_run_filter(skb, ppp->pass_filter,
- ppp->pass_len) == 0) {
+ sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
printk(KERN_DEBUG "PPP: inbound frame "
"not passed\n");
@@ -1767,8 +1764,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
return;
}
if (!(ppp->active_filter &&
- sk_run_filter(skb, ppp->active_filter,
- ppp->active_len) == 0))
+ sk_run_filter(skb, ppp->active_filter) == 0))
ppp->last_recv = jiffies;
__skb_pull(skb, 2);
} else
@@ -2584,16 +2580,16 @@ ppp_create_interface(struct net *net, int unit, int *retp)
*/
dev_net_set(dev, net);
- ret = -EEXIST;
mutex_lock(&pn->all_ppp_mutex);
if (unit < 0) {
unit = unit_get(&pn->units_idr, ppp);
if (unit < 0) {
- *retp = unit;
+ ret = unit;
goto out2;
}
} else {
+ ret = -EEXIST;
if (unit_find(&pn->units_idr, unit))
goto out2; /* unit already exists */
/*
@@ -2668,10 +2664,10 @@ static void ppp_shutdown_interface(struct ppp *ppp)
ppp->closing = 1;
ppp_unlock(ppp);
unregister_netdev(ppp->dev);
+ unit_put(&pn->units_idr, ppp->file.index);
} else
ppp_unlock(ppp);
- unit_put(&pn->units_idr, ppp->file.index);
ppp->file.dead = 1;
ppp->owner = NULL;
wake_up_interruptible(&ppp->file.rwait);
@@ -2859,8 +2855,7 @@ static void __exit ppp_cleanup(void)
* by holding all_ppp_mutex
*/
-/* associate pointer with specified number */
-static int unit_set(struct idr *p, void *ptr, int n)
+static int __unit_alloc(struct idr *p, void *ptr, int n)
{
int unit, err;
@@ -2871,10 +2866,24 @@ again:
}
err = idr_get_new_above(p, ptr, n, &unit);
- if (err == -EAGAIN)
- goto again;
+ if (err < 0) {
+ if (err == -EAGAIN)
+ goto again;
+ return err;
+ }
+
+ return unit;
+}
+
+/* associate pointer with specified number */
+static int unit_set(struct idr *p, void *ptr, int n)
+{
+ int unit;
- if (unit != n) {
+ unit = __unit_alloc(p, ptr, n);
+ if (unit < 0)
+ return unit;
+ else if (unit != n) {
idr_remove(p, unit);
return -EINVAL;
}
@@ -2885,19 +2894,7 @@ again:
/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr)
{
- int unit, err;
-
-again:
- if (!idr_pre_get(p, GFP_KERNEL)) {
- printk(KERN_ERR "PPP: No free memory for idr\n");
- return -ENOMEM;
- }
-
- err = idr_get_new_above(p, ptr, 0, &unit);
- if (err == -EAGAIN)
- goto again;
-
- return unit;
+ return __unit_alloc(p, ptr, 0);
}
/* put unit number back to a pool */
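The refactor above pulls the idr_pre_get()/idr_get_new_above() retry loop into __unit_alloc() and, importantly, stops swallowing errors other than -EAGAIN. The caller-side contract now looks like this sketch:

    /* unit_set() must distinguish a real failure from "got a different
     * number than requested"; the old code conflated the two. */
    unit = __unit_alloc(p, ptr, n);
    if (unit < 0)
        return unit;            /* e.g. -ENOMEM, propagated intact */
    if (unit != n) {
        idr_remove(p, unit);    /* n already taken; roll back */
        return -EINVAL;
    }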
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index d72fb0519a2a..78c0e3c9b2b5 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -948,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
abort:
kfree_skb(skb);
- return 0;
+ return 1;
}
/************************************************************************
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index ccbc91326bfa..164cfad6ce79 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -277,7 +277,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
iph->tos = 0;
iph->daddr = rt->rt_dst;
iph->saddr = rt->rt_src;
- iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
iph->tot_len = htons(skb->len);
skb_dst_drop(skb);
@@ -673,8 +673,7 @@ static int __init pptp_init_module(void)
int err = 0;
pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
- callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *),
- GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
if (!callid_sock) {
pr_err("PPTP: cann't allocate memory\n");
return -ENOMEM;
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 18c0297743f1..1b63c8aef121 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1450,16 +1450,11 @@ static void pxa168_get_drvinfo(struct net_device *dev,
strncpy(info->bus_info, "N/A", 32);
}
-static u32 pxa168_get_link(struct net_device *dev)
-{
- return !!netif_carrier_ok(dev);
-}
-
static const struct ethtool_ops pxa168_ethtool_ops = {
.get_settings = pxa168_get_settings,
.set_settings = pxa168_set_settings,
.get_drvinfo = pxa168_get_drvinfo,
- .get_link = pxa168_get_link,
+ .get_link = ethtool_op_get_link,
};
static const struct net_device_ops pxa168_eth_netdev_ops = {
@@ -1607,7 +1602,7 @@ static int pxa168_eth_remove(struct platform_device *pdev)
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
- flush_scheduled_work();
+ cancel_work_sync(&pep->tx_timeout_task);
free_netdev(dev);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 7496ed2c34ab..1a3584edd79c 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2467,7 +2467,7 @@ map_error:
static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
struct net_device *ndev)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
struct ql_tx_buf_cb *tx_cb;
@@ -3390,7 +3390,7 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
static void ql_display_dev_info(struct net_device *ndev)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
struct pci_dev *pdev = qdev->pdev;
netdev_info(ndev,
@@ -3573,7 +3573,7 @@ static int ql3xxx_open(struct net_device *ndev)
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
struct sockaddr *addr = p;
@@ -3608,7 +3608,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
static void ql3xxx_tx_timeout(struct net_device *ndev)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
netdev_err(ndev, "Resetting...\n");
/*
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 8ecc170c9b74..9c2a02d204dc 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#ifndef _QLCNIC_H_
@@ -51,8 +34,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 11
-#define QLCNIC_LINUX_VERSIONID "5.0.11"
+#define _QLCNIC_LINUX_SUBVERSION 14
+#define QLCNIC_LINUX_VERSIONID "5.0.14"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -798,7 +781,6 @@ struct qlcnic_nic_intr_coalesce {
#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
-#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
#define QLCNIC_C2C_OPCODE 22
@@ -923,6 +905,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_MACSPOOF 0x200
#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
#define QLCNIC_PROMISC_DISABLED 0x800
+#define QLCNIC_NEED_FLR 0x1000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -942,6 +925,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
+#define QLCNIC_LED_TEST 3
#define QLCNIC_FILTER_AGE 80
#define QLCNIC_READD_AGE 20
@@ -1126,8 +1110,7 @@ struct qlcnic_eswitch {
/* Return codes for Error handling */
#define QL_STATUS_INVALID_PARAM -1
-#define MAX_BW 100
-#define MIN_BW 1
+#define MAX_BW 100 /* % of link speed */
#define MAX_VLAN_ID 4095
#define MIN_VLAN_ID 2
#define MAX_TX_QUEUES 1
@@ -1135,7 +1118,7 @@ struct qlcnic_eswitch {
#define DEFAULT_MAC_LEARN 1
#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
-#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
+#define IS_VALID_BW(bw) (bw <= MAX_BW)
#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
@@ -1314,21 +1297,15 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring);
-void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
-int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
/* Functions from qlcnic_main.c */
-int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter);
-void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter);
int qlcnic_reset_context(struct qlcnic_adapter *);
u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
-int qlcnic_check_loopback_buff(unsigned char *data);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
/* Management functions */
int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
@@ -1377,6 +1354,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
"3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
{0x1077, 0x8020, 0x103c, 0x3733,
"NC523SFP 10Gb 2-port Server Adapter"},
+ {0x1077, 0x8020, 0x103c, 0x3346,
+ "CN1000Q Dual Port Converged Network Adapter"},
{0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
};
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 1cdc05dade6b..27631f23b3fd 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
@@ -480,6 +463,11 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
{
int err;
+ if (adapter->flags & QLCNIC_NEED_FLR) {
+ pci_reset_function(adapter->pdev);
+ adapter->flags &= ~QLCNIC_NEED_FLR;
+ }
+
err = qlcnic_fw_cmd_create_rx_ctx(adapter);
if (err)
return err;
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index ec21d24015c4..1e7af709d395 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/types.h>
@@ -101,8 +84,7 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Register_Test_on_offline",
"Link_Test_on_offline",
- "Interrupt_Test_offline",
- "Loopback_Test_offline"
+ "Interrupt_Test_offline"
};
#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
@@ -643,104 +625,6 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
}
}
-#define QLC_ILB_PKT_SIZE 64
-#define QLC_NUM_ILB_PKT 16
-#define QLC_ILB_MAX_RCV_LOOP 10
-
-static void qlcnic_create_loopback_buff(unsigned char *data)
-{
- unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
- memset(data, 0x4e, QLC_ILB_PKT_SIZE);
- memset(data, 0xff, 12);
- memcpy(data + 12, random_data, sizeof(random_data));
-}
-
-int qlcnic_check_loopback_buff(unsigned char *data)
-{
- unsigned char buff[QLC_ILB_PKT_SIZE];
- qlcnic_create_loopback_buff(buff);
- return memcmp(data, buff, QLC_ILB_PKT_SIZE);
-}
-
-static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
- struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
- struct sk_buff *skb;
- int i, loop, cnt = 0;
-
- for (i = 0; i < QLC_NUM_ILB_PKT; i++) {
- skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
- qlcnic_create_loopback_buff(skb->data);
- skb_put(skb, QLC_ILB_PKT_SIZE);
-
- adapter->diag_cnt = 0;
- qlcnic_xmit_frame(skb, adapter->netdev);
-
- loop = 0;
- do {
- msleep(1);
- qlcnic_process_rcv_ring_diag(sds_ring);
- } while (loop++ < QLC_ILB_MAX_RCV_LOOP &&
- !adapter->diag_cnt);
-
- dev_kfree_skb_any(skb);
-
- if (!adapter->diag_cnt)
- dev_warn(&adapter->pdev->dev, "ILB Test: %dth packet"
- " not recevied\n", i + 1);
- else
- cnt++;
- }
- if (cnt != i) {
- dev_warn(&adapter->pdev->dev, "ILB Test failed\n");
- return -1;
- }
- return 0;
-}
-
-static int qlcnic_loopback_test(struct net_device *netdev)
-{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_sds_rings = adapter->max_sds_rings;
- int ret;
-
- if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
- dev_warn(&adapter->pdev->dev, "Loopback test not supported"
- "for non privilege function\n");
- return 0;
- }
-
- if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- return -EIO;
-
- if (qlcnic_request_quiscent_mode(adapter)) {
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- return -EIO;
- }
-
- ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
- if (ret)
- goto clear_it;
-
- ret = qlcnic_set_ilb_mode(adapter);
- if (ret)
- goto done;
-
- ret = qlcnic_do_ilb_test(adapter);
-
- qlcnic_clear_ilb_mode(adapter);
-
-done:
- qlcnic_diag_free_res(netdev, max_sds_rings);
-
-clear_it:
- qlcnic_clear_quiscent_mode(adapter);
- adapter->max_sds_rings = max_sds_rings;
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- return ret;
-}
-
static int qlcnic_irq_test(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -793,9 +677,6 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
if (data[2])
eth_test->flags |= ETH_TEST_FL_FAILED;
- data[3] = qlcnic_loopback_test(dev);
- if (data[3])
- eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
@@ -925,9 +806,10 @@ static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
dev->features &= ~NETIF_F_LRO;
qlcnic_send_lro_cleanup(adapter);
+ dev_info(&adapter->pdev->dev,
+ "disabling LRO as rx_csum is off\n");
}
adapter->rx_csum = !!data;
- dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
return 0;
}
@@ -952,16 +834,27 @@ static int qlcnic_set_tso(struct net_device *dev, u32 data)
static int qlcnic_blink_led(struct net_device *dev, u32 val)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int dev_down = 0;
int ret;
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
- return -EIO;
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ dev_down = 1;
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST);
+ if (ret) {
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+ }
+ }
ret = adapter->nic_ops->config_led(adapter, 1, 0xf);
if (ret) {
dev_err(&adapter->pdev->dev,
"Failed to set LED blink state.\n");
- return ret;
+ goto done;
}
msleep_interruptible(val * 1000);
@@ -970,10 +863,16 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
if (ret) {
dev_err(&adapter->pdev->dev,
"Failed to reset LED blink state.\n");
- return ret;
+ goto done;
}
- return 0;
+done:
+ if (dev_down) {
+ qlcnic_diag_free_res(dev, max_sds_rings);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ }
+ return ret;
+
}
static void
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 4290b80cde1a..726ef555b6bc 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#ifndef __QLCNIC_HDR_H_
@@ -638,7 +621,7 @@ enum {
#define PCIX_INT_MASK (0x10104)
#define PCIX_OCM_WINDOW (0x10800)
-#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func))
#define PCIX_TARGET_STATUS (0x10118)
#define PCIX_TARGET_STATUS_F1 (0x10160)
@@ -722,7 +705,7 @@ enum {
#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
-#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4)))
+#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
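The one-character macro fix above removes a side effect: written with &=, merely evaluating the predicate assigned through its argument and clobbered the caller's value. A sketch of the bug class:

    #define CHECK_ACTIVE_BAD(VAL, FN)  ((VAL) &= (1 << ((FN) * 4))) /* mutates VAL */
    #define CHECK_ACTIVE_GOOD(VAL, FN) ((VAL) &  (1 << ((FN) * 4))) /* pure test */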
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 7a47a2a7ee27..616940f0a8d0 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
@@ -398,7 +381,7 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
-static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
+static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
struct list_head *head;
struct qlcnic_mac_list_s *cur;
@@ -432,7 +415,9 @@ void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
u32 mode = VPORT_MISS_MODE_DROP;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -638,10 +623,11 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
u64 word;
int i, rv;
- const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
- 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
- 0x255b0ec26d5a56daULL };
-
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
memset(&req, 0, sizeof(struct qlcnic_nic_req));
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
@@ -1234,56 +1220,3 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
-
-static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
-{
- struct qlcnic_nic_req req;
- int rv;
- u64 word;
-
- memset(&req, 0, sizeof(struct qlcnic_nic_req));
- req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
-
- word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
- ((u64)adapter->portnum << 16);
- req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64(flag);
-
- rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
- if (rv)
- dev_err(&adapter->pdev->dev,
- "%sting loopback mode failed.\n",
- flag ? "Set" : "Reset");
- return rv;
-}
-
-int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
-{
- if (qlcnic_set_fw_loopback(adapter, 1))
- return -EIO;
-
- if (qlcnic_nic_set_promisc(adapter,
- VPORT_MISS_MODE_ACCEPT_ALL)) {
- qlcnic_set_fw_loopback(adapter, 0);
- return -EIO;
- }
-
- msleep(1000);
- return 0;
-}
-
-void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
-{
- int mode = VPORT_MISS_MODE_DROP;
- struct net_device *netdev = adapter->netdev;
-
- qlcnic_set_fw_loopback(adapter, 0);
-
- if (netdev->flags & IFF_PROMISC)
- mode = VPORT_MISS_MODE_ACCEPT_ALL;
- else if (netdev->flags & IFF_ALLMULTI)
- mode = VPORT_MISS_MODE_ACCEPT_MULTI;
-
- qlcnic_nic_set_promisc(adapter, mode);
- msleep(1000);
-}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 0d180c6e41fe..9b9c7c39d3ee 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/netdevice.h>
@@ -236,12 +219,11 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, 0);
- cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
if (cmd_buf_arr == NULL) {
dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
goto err_out;
}
- memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
@@ -275,14 +257,12 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
rds_ring->dma_size + NET_IP_ALIGN;
break;
}
- rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
- vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
if (rds_ring->rx_buf_arr == NULL) {
dev_err(&netdev->dev, "Failed to allocate "
"rx buffer ring %d\n", ring);
goto err_out;
}
- memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
@@ -1693,99 +1673,6 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
spin_unlock(&rds_ring->lock);
}
-static void dump_skb(struct sk_buff *skb)
-{
- int i;
- unsigned char *data = skb->data;
-
- for (i = 0; i < skb->len; i++) {
- printk("%02x ", data[i]);
- if ((i & 0x0f) == 8)
- printk("\n");
- }
-}
-
-static struct qlcnic_rx_buffer *
-qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring,
- int ring, u64 sts_data0)
-{
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
- struct qlcnic_rx_buffer *buffer;
- struct sk_buff *skb;
- struct qlcnic_host_rds_ring *rds_ring;
- int index, length, cksum, pkt_offset;
-
- if (unlikely(ring >= adapter->max_rds_rings))
- return NULL;
-
- rds_ring = &recv_ctx->rds_rings[ring];
-
- index = qlcnic_get_sts_refhandle(sts_data0);
- if (unlikely(index >= rds_ring->num_desc))
- return NULL;
-
- buffer = &rds_ring->rx_buf_arr[index];
-
- length = qlcnic_get_sts_totallength(sts_data0);
- cksum = qlcnic_get_sts_status(sts_data0);
- pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
-
- skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
- if (!skb)
- return buffer;
-
- if (length > rds_ring->skb_size)
- skb_put(skb, rds_ring->skb_size);
- else
- skb_put(skb, length);
-
- if (pkt_offset)
- skb_pull(skb, pkt_offset);
-
- if (!qlcnic_check_loopback_buff(skb->data))
- adapter->diag_cnt++;
- else
- dump_skb(skb);
-
- dev_kfree_skb_any(skb);
- adapter->stats.rx_pkts++;
- adapter->stats.rxbytes += length;
-
- return buffer;
-}
-
-void
-qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
-{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
- struct status_desc *desc;
- struct qlcnic_rx_buffer *rxbuf;
- u64 sts_data0;
-
- int opcode, ring, desc_cnt;
- u32 consumer = sds_ring->consumer;
-
- desc = &sds_ring->desc_head[consumer];
- sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
-
- if (!(sts_data0 & STATUS_OWNER_HOST))
- return;
-
- desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
- opcode = qlcnic_get_sts_opcode(sts_data0);
-
- ring = qlcnic_get_sts_type(sts_data0);
- rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
- ring, sts_data0);
-
- desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
- consumer = get_next_index(consumer, sds_ring->num_desc);
-
- sds_ring->consumer = consumer;
- writel(consumer, sds_ring->crb_sts_consumer);
-}
-
void
qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
u8 alt_mac, u8 *mac)
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 7a298cdf9ab3..11e3a46c0911 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/slab.h>
@@ -1450,7 +1433,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->irq = adapter->msix_entries[0].vector;
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
err = register_netdev(netdev);
if (err) {
@@ -1547,6 +1529,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_out_iounmap;
+ adapter->flags |= QLCNIC_NEED_FLR;
+
err = adapter->nic_ops->start_firmware(adapter);
if (err) {
 dev_err(&pdev->dev, "Loading fw failed. Please reboot.\n");
@@ -2855,61 +2839,6 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
qlcnic_api_unlock(adapter);
}
-/* Caller should held RESETTING bit.
- * This should be call in sync with qlcnic_request_quiscent_mode.
- */
-void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter)
-{
- qlcnic_clr_drv_state(adapter);
- qlcnic_api_lock(adapter);
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
- qlcnic_api_unlock(adapter);
-}
-
-/* Caller should held RESETTING bit.
- */
-int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter)
-{
- u8 timeo = adapter->dev_init_timeo / 2;
- u32 state;
-
- if (qlcnic_api_lock(adapter))
- return -EIO;
-
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state != QLCNIC_DEV_READY)
- return -EIO;
-
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_QUISCENT);
- qlcnic_api_unlock(adapter);
- QLCDB(adapter, DRV, "NEED QUISCENT state set\n");
- qlcnic_idc_debug_info(adapter, 0);
-
- qlcnic_set_drv_state(adapter, QLCNIC_DEV_NEED_QUISCENT);
-
- do {
- msleep(2000);
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_QUISCENT)
- return 0;
- if (!qlcnic_check_drv_state(adapter)) {
- if (qlcnic_api_lock(adapter))
- return -EIO;
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
- QLCNIC_DEV_QUISCENT);
- qlcnic_api_unlock(adapter);
- QLCDB(adapter, DRV, "QUISCENT mode set\n");
- return 0;
- }
- } while (--timeo);
-
- dev_err(&adapter->pdev->dev, "Failed to quiesce device, DRV_STATE=%08x"
- " DRV_ACTIVE=%08x\n", QLCRD32(adapter, QLCNIC_CRB_DRV_STATE),
- QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE));
- qlcnic_clear_quiscent_mode(adapter);
- return -EIO;
-}
-
/*Transit to RESET state from READY state only */
static void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
@@ -3588,9 +3517,12 @@ validate_esw_config(struct qlcnic_adapter *adapter,
case QLCNIC_PORT_DEFAULTS:
if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
QLCNIC_NON_PRIV_FUNC) {
- esw_cfg[i].mac_anti_spoof = 0;
- esw_cfg[i].mac_override = 1;
- esw_cfg[i].promisc_mode = 1;
+ if (esw_cfg[i].mac_anti_spoof != 0)
+ return QL_STATUS_INVALID_PARAM;
+ if (esw_cfg[i].mac_override != 1)
+ return QL_STATUS_INVALID_PARAM;
+ if (esw_cfg[i].promisc_mode != 1)
+ return QL_STATUS_INVALID_PARAM;
}
break;
case QLCNIC_ADD_VLAN:
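The QLCNIC_PORT_DEFAULTS change above turns silent correction into validation: for a non-privileged function the settings must already be anti_spoof = 0, mac_override = 1, promisc_mode = 1, otherwise the request fails instead of being rewritten behind the caller's back. A sketch of the validate-don't-mutate shape, with illustrative names:

#include <linux/errno.h>
#include <linux/types.h>

/* Reject bad input rather than silently rewriting it. */
static int validate_non_priv_cfg(u8 anti_spoof, u8 mac_override, u8 promisc)
{
	if (anti_spoof != 0 || mac_override != 1 || promisc != 1)
		return -EINVAL;		/* caller's config left untouched */
	return 0;
}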
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 22821398fc63..4757c59a07a2 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.25.00.00-01"
+#define DRV_VERSION "v1.00.00.27.00.00-01"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -2083,6 +2083,7 @@ struct ql_adapter {
u32 mailbox_in;
u32 mailbox_out;
struct mbox_params idc_mbc;
+ struct mutex mpi_mutex;
int tx_ring_size;
int rx_ring_size;
@@ -2221,6 +2222,7 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
u32 ram_addr, int word_count);
int ql_core_dump(struct ql_adapter *qdev,
@@ -2236,6 +2238,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 4747492935ef..fca804f36d61 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1317,9 +1317,28 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
if (status)
return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+ /*
+ * If force dump (QL_FRC_COREDUMP) is set, the core dump has
+ * already been taken and stored in our internal buffer: take
+ * a snapshot of the general registers into the user's buffer
+ * and start the spool that dumps the stored core to the log
+ * file. Otherwise take a complete core dump directly into
+ * the user's buffer and soft-reset the MPI RISC afterwards.
+ */
- if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+ if (!ql_core_dump(qdev, buff))
+ ql_soft_reset_mpi_risc(qdev);
+ else
+ netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+ } else {
+ ql_gen_reg_dump(qdev, buff);
ql_get_core_dump(qdev);
+ }
}
/* Coredump to messages log file using separate worker thread */
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 4892d64f4e05..8149cc9de4ca 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -375,7 +375,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = 0;
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
+ else
+ drvinfo->regdump_len = sizeof(struct ql_reg_dump);
drvinfo->eedump_len = 0;
}
@@ -547,7 +550,12 @@ static void ql_self_test(struct net_device *ndev,
static int ql_get_regs_len(struct net_device *ndev)
{
- return sizeof(struct ql_reg_dump);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ return sizeof(struct ql_mpi_coredump);
+ else
+ return sizeof(struct ql_reg_dump);
}
static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@ static void ql_get_regs(struct net_device *ndev,
{
struct ql_adapter *qdev = netdev_priv(ndev);
- ql_gen_reg_dump(qdev, p);
+ ql_get_dump(qdev, p);
+ qdev->core_is_dumped = 0;
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ regs->len = sizeof(struct ql_mpi_coredump);
+ else
+ regs->len = sizeof(struct ql_reg_dump);
}
static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c30e0fe55a31..49bfa5813068 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -62,15 +62,15 @@ static const u32 default_msg =
/* NETIF_MSG_PKTDATA | */
NETIF_MSG_HW | NETIF_MSG_WOL | 0;
-static int debug = 0x00007fff; /* defaults above */
-module_param(debug, int, 0);
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
-module_param(qlge_irq_type, int, MSIX_IRQ);
+module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
@@ -3548,12 +3548,13 @@ err_irq:
static int ql_start_rss(struct ql_adapter *qdev)
{
- u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
- 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
- 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
- 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
- 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
- 0xbe, 0xac, 0x01, 0xfa};
+ static const u8 init_hash_seed[] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+ };
struct ricb *ricb = &qdev->ricb;
int status = 0;
int i;
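Making the RSS seed static const, as above, moves the 40-byte table into read-only data instead of having the compiler rebuild it on the stack on every call to ql_start_rss(). A hedged sketch of the difference:

#include <linux/types.h>

/* On-stack: the bytes are copied into the frame at each call. */
static void seed_on_stack(void)
{
	u8 seed[] = { 0x6d, 0x5a, 0x56, 0xda };	/* runtime copy */
	(void)seed;
}

/* static const: one shared read-only copy, no per-call work. */
static void seed_in_rodata(void)
{
	static const u8 seed[] = { 0x6d, 0x5a, 0x56, 0xda };
	(void)seed;
}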
@@ -3844,7 +3845,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
static void ql_display_dev_info(struct net_device *ndev)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
netif_info(qdev, probe, qdev->ndev,
"Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@ -4264,7 +4265,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
static void qlge_set_multicast_list(struct net_device *ndev)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
struct netdev_hw_addr *ha;
int i, status;
@@ -4354,7 +4355,7 @@ exit:
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
struct sockaddr *addr = p;
int status;
@@ -4377,7 +4378,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
static void qlge_tx_timeout(struct net_device *ndev)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
ql_queue_asic_error(qdev);
}
@@ -4629,6 +4630,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
init_completion(&qdev->ide_completion);
+ mutex_init(&qdev->mpi_mutex);
if (!cards_found) {
dev_info(&pdev->dev, "%s\n", DRV_STRING);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 0e7c7c7ee164..ff2bf8a4e247 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
return status;
}
-static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
int status;
status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@@ -534,6 +534,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
int status;
unsigned long count;
+ mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -603,6 +604,7 @@ done:
end:
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ mutex_unlock(&qdev->mpi_mutex);
return status;
}
@@ -1099,9 +1101,7 @@ int ql_wait_fifo_empty(struct ql_adapter *qdev)
static int ql_set_port_cfg(struct ql_adapter *qdev)
{
int status;
- rtnl_lock();
status = ql_mb_set_port_cfg(qdev);
- rtnl_unlock();
if (status)
return status;
status = ql_idc_wait(qdev);
@@ -1122,9 +1122,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
int status;
- rtnl_lock();
status = ql_mb_get_port_cfg(qdev);
- rtnl_unlock();
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Bug: Failed to get port config data.\n");
@@ -1167,7 +1165,6 @@ void ql_mpi_idc_work(struct work_struct *work)
u32 aen;
int timeout;
- rtnl_lock();
aen = mbcp->mbox_out[1] >> 16;
timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
@@ -1231,7 +1228,6 @@ void ql_mpi_idc_work(struct work_struct *work)
}
break;
}
- rtnl_unlock();
}
void ql_mpi_work(struct work_struct *work)
@@ -1242,7 +1238,7 @@ void ql_mpi_work(struct work_struct *work)
struct mbox_params *mbcp = &mbc;
int err = 0;
- rtnl_lock();
+ mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -1259,7 +1255,7 @@ void ql_mpi_work(struct work_struct *work)
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
- rtnl_unlock();
+ mutex_unlock(&qdev->mpi_mutex);
ql_enable_completion_interrupt(qdev, 0);
}
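The qlge_mpi.c hunks above replace rtnl_lock() with the new driver-private mpi_mutex, so mailbox traffic serializes only against other mailbox users instead of stalling unrelated netlink operations. A minimal sketch of the pattern, with assumed names:

#include <linux/mutex.h>

struct mpi_ctx {
	struct mutex mpi_mutex;		/* guards mailbox registers */
};

static void mpi_ctx_init(struct mpi_ctx *ctx)
{
	mutex_init(&ctx->mpi_mutex);	/* once, at probe time */
}

static int mpi_mailbox_command(struct mpi_ctx *ctx)
{
	int status;

	mutex_lock(&ctx->mpi_mutex);	/* one mailbox user at a time */
	status = 0;			/* ... poll the mailbox here ... */
	mutex_unlock(&ctx->mpi_mutex);
	return status;
}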
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9fb1cbd..e165d96ec7df 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -744,26 +744,36 @@ static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
mdio_write(ioaddr, MII_BMCR, val & 0xffff);
}
-static void rtl8169_check_link_status(struct net_device *dev,
+static void __rtl8169_check_link_status(struct net_device *dev,
struct rtl8169_private *tp,
- void __iomem *ioaddr)
+ void __iomem *ioaddr,
+ bool pm)
{
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
/* This is to cancel a scheduled suspend if there's one. */
- pm_request_resume(&tp->pci_dev->dev);
+ if (pm)
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
- pm_schedule_suspend(&tp->pci_dev->dev, 100);
+ if (pm)
+ pm_schedule_suspend(&tp->pci_dev->dev, 100);
}
spin_unlock_irqrestore(&tp->lock, flags);
}
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ __rtl8169_check_link_status(dev, tp, ioaddr, false);
+}
+
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -846,10 +856,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else
tp->features &= ~RTL_FEATURE_WOL;
__rtl8169_set_wol(tp, wol->wolopts);
- device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
spin_unlock_irq(&tp->lock);
+ device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
return 0;
}
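The reorder above matters because device_set_wakeup_enable() may sleep, and sleeping is not allowed while holding a spinlock with interrupts disabled. A self-contained sketch of the rule (lock and state are illustrative):

#include <linux/device.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(state_lock);
static u32 wol_features;

static void set_wol_sketch(struct device *dev, u32 wolopts)
{
	spin_lock_irq(&state_lock);
	wol_features = wolopts;		/* atomic-context work only */
	spin_unlock_irq(&state_lock);

	/* May sleep: must run after the lock is dropped. */
	device_set_wakeup_enable(dev, wolopts != 0);
}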
@@ -2516,7 +2526,7 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
val = mdio_read(ioaddr, 0x0d);
if ((val & 0x00ff) != 0x006c) {
- u32 set[] = {
+ static const u32 set[] = {
0x0065, 0x0066, 0x0067, 0x0068,
0x0069, 0x006a, 0x006b, 0x006c
};
@@ -2931,7 +2941,7 @@ static const struct rtl_cfg_info {
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+ .intr_event = SYSErr | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -3240,7 +3250,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&tp->task);
unregister_netdev(dev);
@@ -4440,8 +4450,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
u32 status = opts1 & RxProtoMask;
if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
- ((status == RxProtoIP) && !(opts1 & IPFail)))
+ ((status == RxProtoUDP) && !(opts1 & UDPFail)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -4588,7 +4597,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
 /* Workaround for rx fifo overflow */
- if (unlikely(status & RxFIFOOver)) {
+ if (unlikely(status & RxFIFOOver) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
@@ -4600,7 +4610,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
if (status & LinkChg)
- rtl8169_check_link_status(dev, tp, ioaddr);
+ __rtl8169_check_link_status(dev, tp, ioaddr, true);
 /* We need to see the latest version of tp->intr_mask to
* avoid ignoring an MSI interrupt and having to wait for
@@ -4890,11 +4900,7 @@ static int rtl8169_runtime_idle(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray)
- return 0;
-
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
- return -EBUSY;
+ return tp->TxDescArray ? -EBUSY : 0;
}
static const struct dev_pm_ops rtl8169_pm_ops = {
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ecc25aab896a..39c17cecb8b9 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -88,14 +88,14 @@
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.26.27"
+#define DRV_VERSION "2.0.26.28"
/* S2io Driver name & version. */
-static char s2io_driver_name[] = "Neterion";
-static char s2io_driver_version[] = DRV_VERSION;
+static const char s2io_driver_name[] = "Neterion";
+static const char s2io_driver_version[] = DRV_VERSION;
-static int rxd_size[2] = {32, 48};
-static int rxd_count[2] = {127, 85};
+static const int rxd_size[2] = {32, 48};
+static const int rxd_count[2] = {127, 85};
static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
@@ -3598,10 +3598,12 @@ static int s2io_set_swapper(struct s2io_nic *sp)
val64 = readq(&bar0->pif_rd_swapper_fb);
if (val64 != 0x0123456789ABCDEFULL) {
int i = 0;
- u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
- 0x8100008181000081ULL, /* FE=1, SE=0 */
- 0x4200004242000042ULL, /* FE=0, SE=1 */
- 0}; /* FE=0, SE=0 */
+ static const u64 value[] = {
+ 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
+ 0x8100008181000081ULL, /* FE=1, SE=0 */
+ 0x4200004242000042ULL, /* FE=0, SE=1 */
+ 0 /* FE=0, SE=0 */
+ };
while (i < 4) {
writeq(value[i], &bar0->swapper_ctrl);
@@ -3627,10 +3629,12 @@ static int s2io_set_swapper(struct s2io_nic *sp)
if (val64 != valt) {
int i = 0;
- u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
- 0x0081810000818100ULL, /* FE=1, SE=0 */
- 0x0042420000424200ULL, /* FE=0, SE=1 */
- 0}; /* FE=0, SE=0 */
+ static const u64 value[] = {
+ 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
+ 0x0081810000818100ULL, /* FE=1, SE=0 */
+ 0x0042420000424200ULL, /* FE=0, SE=1 */
+ 0 /* FE=0, SE=0 */
+ };
while (i < 4) {
writeq((value[i] | valr), &bar0->swapper_ctrl);
@@ -5568,30 +5572,27 @@ static void s2io_ethtool_gringparam(struct net_device *dev,
struct s2io_nic *sp = netdev_priv(dev);
int i, tx_desc_count = 0, rx_desc_count = 0;
- if (sp->rxd_mode == RXD_MODE_1)
+ if (sp->rxd_mode == RXD_MODE_1) {
ering->rx_max_pending = MAX_RX_DESC_1;
- else if (sp->rxd_mode == RXD_MODE_3B)
+ ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
+ } else {
ering->rx_max_pending = MAX_RX_DESC_2;
+ ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
+ }
+ ering->rx_mini_max_pending = 0;
ering->tx_max_pending = MAX_TX_DESC;
- for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
- tx_desc_count += sp->config.tx_cfg[i].fifo_len;
- DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
- ering->tx_pending = tx_desc_count;
- rx_desc_count = 0;
- for (i = 0 ; i < sp->config.rx_ring_num ; i++)
+ for (i = 0; i < sp->config.rx_ring_num; i++)
rx_desc_count += sp->config.rx_cfg[i].num_rxd;
-
ering->rx_pending = rx_desc_count;
-
- ering->rx_mini_max_pending = 0;
- ering->rx_mini_pending = 0;
- if (sp->rxd_mode == RXD_MODE_1)
- ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
- else if (sp->rxd_mode == RXD_MODE_3B)
- ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
ering->rx_jumbo_pending = rx_desc_count;
+ ering->rx_mini_pending = 0;
+
+ for (i = 0; i < sp->config.tx_fifo_num; i++)
+ tx_desc_count += sp->config.tx_cfg[i].fifo_len;
+ ering->tx_pending = tx_desc_count;
+ DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
}
/**
@@ -7692,6 +7693,8 @@ static void s2io_init_pci(struct s2io_nic *sp)
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
u8 *dev_multiq)
{
+ int i;
+
if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
"(%d) not supported\n", tx_fifo_num);
@@ -7750,6 +7753,15 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
rx_ring_mode = 1;
}
+
+ for (i = 0; i < MAX_RX_RINGS; i++)
+ if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
+ DBG_PRINT(ERR_DBG, "Requested rx ring size not "
+ "supported\nDefaulting to %d\n",
+ MAX_RX_BLOCKS_PER_RING);
+ rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
+ }
+
return SUCCESS;
}
@@ -8321,8 +8333,7 @@ mem_alloc_failed:
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
- struct net_device *dev =
- (struct net_device *)pci_get_drvdata(pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
struct s2io_nic *sp;
if (dev == NULL) {
@@ -8330,9 +8341,11 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
return;
}
- flush_scheduled_work();
-
sp = netdev_priv(dev);
+
+ cancel_work_sync(&sp->rst_timer_task);
+ cancel_work_sync(&sp->set_link_task);
+
unregister_netdev(dev);
free_shared_mem(sp);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 00b8614efe48..7d160306b651 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -355,13 +355,12 @@ struct stat_block {
#define FIFO_OTHER_MAX_NUM 1
-#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 127 )
-#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 )
-#define MAX_RX_DESC_3 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 )
+#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 128)
+#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 86)
#define MAX_TX_DESC (MAX_AVAILABLE_TXDS)
/* FIFO mappings for all possible number of fifos configured */
-static int fifo_map[][MAX_TX_FIFOS] = {
+static const int fifo_map[][MAX_TX_FIFOS] = {
{0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 1, 1, 1},
{0, 0, 0, 1, 1, 1, 2, 2},
@@ -372,7 +371,7 @@ static int fifo_map[][MAX_TX_FIFOS] = {
{0, 1, 2, 3, 4, 5, 6, 7},
};
-static u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
+static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
/* Maintains Per FIFO related information. */
struct tx_fifo_config {
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 417adf372828..76290a8c3c14 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1449,7 +1449,8 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
dev->irq = pdev->irq;
/* faked with skb_copy_and_csum_dev */
- dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+ dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->netdev_ops = &sc92031_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 05df20e47976..711449c6e675 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -23,7 +23,6 @@
#include <linux/gfp.h>
#include "net_driver.h"
#include "efx.h"
-#include "mdio_10g.h"
#include "nic.h"
#include "mcdi.h"
@@ -197,7 +196,9 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
@@ -335,8 +336,10 @@ void efx_process_channel_now(struct efx_channel *channel)
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq)
+ if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
if (channel->irq)
synchronize_irq(channel->irq);
@@ -351,6 +354,8 @@ void efx_process_channel_now(struct efx_channel *channel)
efx_channel_processed(channel);
napi_enable(&channel->napi_str);
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
}
@@ -426,6 +431,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
*channel = *old_channel;
+ channel->napi_dev = NULL;
memset(&channel->eventq, 0, sizeof(channel->eventq));
rx_queue = &channel->rx_queue;
@@ -455,9 +461,6 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
}
}
- spin_lock_init(&channel->tx_stop_lock);
- atomic_set(&channel->tx_stop_count, 1);
-
rx_queue = &channel->rx_queue;
rx_queue->efx = efx;
setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
@@ -736,9 +739,13 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
if (rc)
goto rollback;
+ efx_init_napi(efx);
+
/* Destroy old channels */
- for (i = 0; i < efx->n_channels; i++)
+ for (i = 0; i < efx->n_channels; i++) {
+ efx_fini_napi_channel(other_channel[i]);
efx_remove_channel(other_channel[i]);
+ }
out:
/* Free unused channel structures */
for (i = 0; i < efx->n_channels; i++)
@@ -910,6 +917,7 @@ static void efx_mac_work(struct work_struct *data)
static int efx_probe_port(struct efx_nic *efx)
{
+ unsigned char *perm_addr;
int rc;
netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -923,11 +931,12 @@ static int efx_probe_port(struct efx_nic *efx)
return rc;
/* Sanity check MAC address */
- if (is_valid_ether_addr(efx->mac_address)) {
- memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
+ perm_addr = efx->net_dev->perm_addr;
+ if (is_valid_ether_addr(perm_addr)) {
+ memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
} else {
netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
- efx->mac_address);
+ perm_addr);
if (!allow_bad_hwaddr) {
rc = -EINVAL;
goto err;
@@ -1394,12 +1403,14 @@ static void efx_start_all(struct efx_nic *efx)
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- efx_for_each_channel(channel, efx) {
- if (efx_dev_registered(efx))
- efx_wake_queue(channel);
+ if (efx_dev_registered(efx))
+ netif_tx_wake_all_queues(efx->net_dev);
+
+ efx_for_each_channel(channel, efx)
efx_start_channel(channel);
- }
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
/* Switch to event based MCDI completions after enabling interrupts.
@@ -1460,8 +1471,10 @@ static void efx_stop_all(struct efx_nic *efx)
/* Disable interrupts and wait for ISR to complete */
efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq)
+ if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
@@ -1482,9 +1495,7 @@ static void efx_stop_all(struct efx_nic *efx)
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
if (efx_dev_registered(efx)) {
- struct efx_channel *channel;
- efx_for_each_channel(channel, efx)
- efx_stop_queue(channel);
+ netif_tx_stop_all_queues(efx->net_dev);
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
@@ -1593,7 +1604,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
*
**************************************************************************/
-static int efx_init_napi(struct efx_nic *efx)
+static void efx_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
@@ -1602,18 +1613,21 @@ static int efx_init_napi(struct efx_nic *efx)
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
}
- return 0;
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+ channel->napi_dev = NULL;
}
static void efx_fini_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
- efx_for_each_channel(channel, efx) {
- if (channel->napi_dev)
- netif_napi_del(&channel->napi_str);
- channel->napi_dev = NULL;
- }
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
}
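Splitting out efx_fini_napi_channel(), as above, lets efx_realloc_channels() tear down NAPI on the old channel copies one by one, and the napi_dev check makes the helper safe on a channel whose NAPI was never added (note the new channel->napi_dev = NULL in efx_alloc_channel()). The idempotent-teardown shape, sketched with an illustrative struct:

#include <linux/netdevice.h>

struct chan_sketch {			/* illustrative, not the driver's */
	struct net_device *napi_dev;
	struct napi_struct napi;
};

/* Safe to call on a half-built or already-torn-down object. */
static void fini_one(struct chan_sketch *c)
{
	if (c->napi_dev)		/* only if NAPI was actually added */
		netif_napi_del(&c->napi);
	c->napi_dev = NULL;		/* mark as torn down */
}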
/**************************************************************************
@@ -1877,6 +1891,7 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
+ struct efx_channel *channel;
int rc;
net_dev->watchdog_timeo = 5 * HZ;
@@ -1899,6 +1914,14 @@ static int efx_register_netdev(struct efx_nic *efx)
if (rc)
goto fail_locked;
+ efx_for_each_channel(channel, efx) {
+ struct efx_tx_queue *tx_queue;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->core_txq = netdev_get_tx_queue(
+ efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
+ }
+ }
+
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(efx->net_dev);
@@ -1962,7 +1985,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
- mutex_lock(&efx->spi_lock);
efx_fini_channels(efx);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2004,7 +2026,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
efx_init_channels(efx);
efx_restore_filters(efx);
- mutex_unlock(&efx->spi_lock);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
@@ -2014,7 +2035,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
fail:
efx->port_initialized = false;
- mutex_unlock(&efx->spi_lock);
mutex_unlock(&efx->mac_lock);
return rc;
@@ -2202,8 +2222,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
/* Initialise common structures */
memset(efx, 0, sizeof(*efx));
spin_lock_init(&efx->biu_lock);
- mutex_init(&efx->mdio_lock);
- mutex_init(&efx->spi_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
#endif
@@ -2335,9 +2353,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
if (rc)
goto fail1;
- rc = efx_init_napi(efx);
- if (rc)
- goto fail2;
+ efx_init_napi(efx);
rc = efx->type->init(efx);
if (rc) {
@@ -2368,7 +2384,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
efx->type->fini(efx);
fail3:
efx_fini_napi(efx);
- fail2:
efx_remove_all(efx);
fail1:
return rc;
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 10a1bf40da96..d43a7e5212b1 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -36,8 +36,6 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_channel *channel);
-extern void efx_wake_queue(struct efx_channel *channel);
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,9 +72,8 @@ extern int efx_filter_insert_filter(struct efx_nic *efx,
bool replace);
extern int efx_filter_remove_filter(struct efx_nic *efx,
struct efx_filter_spec *spec);
-extern void efx_filter_table_clear(struct efx_nic *efx,
- enum efx_filter_table_id table_id,
- enum efx_filter_priority priority);
+extern void efx_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
/* Channels */
extern void efx_process_channel_now(struct efx_channel *channel);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index edb9d16b8b47..0e8bb19ed60d 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -11,14 +11,13 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
+#include <linux/in.h>
#include "net_driver.h"
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
-#include "spi.h"
-#include "mdio_10g.h"
struct ethtool_string {
char name[ETH_GSTRING_LEN];
@@ -560,12 +559,8 @@ static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
if (rc)
return rc;
- if (!(data & ETH_FLAG_NTUPLE)) {
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP,
- EFX_FILTER_PRI_MANUAL);
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_PRI_MANUAL);
- }
+ if (!(data & ETH_FLAG_NTUPLE))
+ efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
return 0;
}
@@ -584,6 +579,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
goto fail1;
}
+ netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
/* We need rx buffers and interrupts. */
already_up = (efx->net_dev->flags & IFF_UP);
if (!already_up) {
@@ -602,9 +600,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (!already_up)
dev_close(efx->net_dev);
- netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n",
- rc == 0 ? "passed" : "failed",
- (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+ netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+ rc == 0 ? "passed" : "failed",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
fail2:
fail1:
@@ -622,68 +620,6 @@ static int efx_ethtool_nway_reset(struct net_device *net_dev)
return mdio45_nway_restart(&efx->mdio);
}
-static u32 efx_ethtool_get_link(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- return efx->link_state.up;
-}
-
-static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_spi_device *spi = efx->spi_eeprom;
-
- if (!spi)
- return 0;
- return min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
- min(spi->size, EFX_EEPROM_BOOTCONFIG_START);
-}
-
-static int efx_ethtool_get_eeprom(struct net_device *net_dev,
- struct ethtool_eeprom *eeprom, u8 *buf)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_spi_device *spi = efx->spi_eeprom;
- size_t len;
- int rc;
-
- rc = mutex_lock_interruptible(&efx->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_read(efx, spi,
- eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
- eeprom->len, &len, buf);
- mutex_unlock(&efx->spi_lock);
-
- eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
- eeprom->len = len;
- return rc;
-}
-
-static int efx_ethtool_set_eeprom(struct net_device *net_dev,
- struct ethtool_eeprom *eeprom, u8 *buf)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_spi_device *spi = efx->spi_eeprom;
- size_t len;
- int rc;
-
- if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
- return -EINVAL;
-
- rc = mutex_lock_interruptible(&efx->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_write(efx, spi,
- eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
- eeprom->len, &len, buf);
- mutex_unlock(&efx->spi_lock);
-
- eeprom->len = len;
- return rc;
-}
-
static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct ethtool_coalesce *coalesce)
{
@@ -978,6 +914,7 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
struct efx_filter_spec filter;
+ int rc;
/* Range-check action */
if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
@@ -987,9 +924,16 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
if (~ntuple->fs.data_mask)
return -EINVAL;
+ efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
+ (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
+ 0xfff : ntuple->fs.action);
+
switch (ntuple->fs.flow_type) {
case TCP_V4_FLOW:
- case UDP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+
/* Must match all of destination, */
if (ip_mask->ip4dst | ip_mask->pdst)
return -EINVAL;
@@ -1001,7 +945,22 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
/* and nothing else */
if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
return -EINVAL;
+
+ if (!ip_mask->ip4src)
+ rc = efx_filter_set_ipv4_full(&filter, proto,
+ ip_entry->ip4dst,
+ ip_entry->pdst,
+ ip_entry->ip4src,
+ ip_entry->psrc);
+ else
+ rc = efx_filter_set_ipv4_local(&filter, proto,
+ ip_entry->ip4dst,
+ ip_entry->pdst);
+ if (rc)
+ return rc;
break;
+ }
+
case ETHER_FLOW:
/* Must match all of destination, */
if (!is_zero_ether_addr(mac_mask->h_dest))
@@ -1014,58 +973,24 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
if (!is_broadcast_ether_addr(mac_mask->h_source) ||
mac_mask->h_proto != htons(0xffff))
return -EINVAL;
+
+ rc = efx_filter_set_eth_local(
+ &filter,
+ (ntuple->fs.vlan_tag_mask == 0xf000) ?
+ ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
+ mac_entry->h_dest);
+ if (rc)
+ return rc;
break;
+
default:
return -EINVAL;
}
- filter.priority = EFX_FILTER_PRI_MANUAL;
- filter.flags = 0;
-
- switch (ntuple->fs.flow_type) {
- case TCP_V4_FLOW:
- if (!ip_mask->ip4src)
- efx_filter_set_rx_tcp_full(&filter,
- htonl(ip_entry->ip4src),
- htons(ip_entry->psrc),
- htonl(ip_entry->ip4dst),
- htons(ip_entry->pdst));
- else
- efx_filter_set_rx_tcp_wild(&filter,
- htonl(ip_entry->ip4dst),
- htons(ip_entry->pdst));
- break;
- case UDP_V4_FLOW:
- if (!ip_mask->ip4src)
- efx_filter_set_rx_udp_full(&filter,
- htonl(ip_entry->ip4src),
- htons(ip_entry->psrc),
- htonl(ip_entry->ip4dst),
- htons(ip_entry->pdst));
- else
- efx_filter_set_rx_udp_wild(&filter,
- htonl(ip_entry->ip4dst),
- htons(ip_entry->pdst));
- break;
- case ETHER_FLOW:
- if (ntuple->fs.vlan_tag_mask == 0xf000)
- efx_filter_set_rx_mac_full(&filter,
- ntuple->fs.vlan_tag & 0xfff,
- mac_entry->h_dest);
- else
- efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest);
- break;
- }
-
- if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) {
+ if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
return efx_filter_remove_filter(efx, &filter);
- } else {
- if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
- filter.dmaq_id = 0xfff;
- else
- filter.dmaq_id = ntuple->fs.action;
+ else
return efx_filter_insert_filter(efx, &filter, true);
- }
}
static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
@@ -1115,10 +1040,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
.nway_reset = efx_ethtool_nway_reset,
- .get_link = efx_ethtool_get_link,
- .get_eeprom_len = efx_ethtool_get_eeprom_len,
- .get_eeprom = efx_ethtool_get_eeprom,
- .set_eeprom = efx_ethtool_set_eeprom,
+ .get_link = ethtool_op_get_link,
.get_coalesce = efx_ethtool_get_coalesce,
.set_coalesce = efx_ethtool_set_coalesce,
.get_ringparam = efx_ethtool_get_ringparam,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 267019bb2b15..70e4f7dcce81 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -24,7 +24,6 @@
#include "nic.h"
#include "regs.h"
#include "io.h"
-#include "mdio_10g.h"
#include "phy.h"
#include "workarounds.h"
@@ -255,7 +254,6 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
/* Input validation */
if (len > FALCON_SPI_MAX_LEN)
return -EINVAL;
- BUG_ON(!mutex_is_locked(&efx->spi_lock));
/* Check that previous command is not still running */
rc = falcon_spi_poll(efx);
@@ -719,6 +717,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
int rc;
@@ -726,7 +725,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
"writing MDIO %d register %d.%d with 0x%04x\n",
prtad, devad, addr, value);
- mutex_lock(&efx->mdio_lock);
+ mutex_lock(&nic_data->mdio_lock);
/* Check MDIO not currently being accessed */
rc = falcon_gmii_wait(efx);
@@ -762,7 +761,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
}
out:
- mutex_unlock(&efx->mdio_lock);
+ mutex_unlock(&nic_data->mdio_lock);
return rc;
}
@@ -771,10 +770,11 @@ static int falcon_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
int rc;
- mutex_lock(&efx->mdio_lock);
+ mutex_lock(&nic_data->mdio_lock);
/* Check MDIO not currently being accessed */
rc = falcon_gmii_wait(efx);
@@ -813,7 +813,7 @@ static int falcon_mdio_read(struct net_device *net_dev,
}
out:
- mutex_unlock(&efx->mdio_lock);
+ mutex_unlock(&nic_data->mdio_lock);
return rc;
}
@@ -841,6 +841,7 @@ static int falcon_probe_port(struct efx_nic *efx)
}
/* Fill out MDIO structure and loopback modes */
+ mutex_init(&nic_data->mdio_lock);
efx->mdio.mdio_read = falcon_mdio_read;
efx->mdio.mdio_write = falcon_mdio_write;
rc = efx->phy_op->probe(efx);
@@ -880,6 +881,41 @@ static void falcon_remove_port(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
+/* Global events are basically PHY events */
+static bool
+falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
+ EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
+ EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
+ /* Ignored */
+ return true;
+
+ if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
+ EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
+ nic_data->xmac_poll_required = true;
+ return true;
+ }
+
+ if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
+ EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
+ EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen global RX_RESET event. Resetting.\n",
+ channel->channel);
+
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return true;
+ }
+
+ return false;
+}
+
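falcon_handle_global_event() above is wired into falcon_a1_nic_type and falcon_b0_nic_type later in this diff via the new .handle_global_event hook; a true return tells the common event loop the event was consumed. The ops-table hook shape, sketched with illustrative types:

#include <linux/types.h>

struct ev_sketch;			/* opaque for this sketch */

struct nic_type_sketch {
	/* return true if the event was recognized and handled */
	bool (*handle_global_event)(struct ev_sketch *ev);
};

static bool dispatch_global_event(const struct nic_type_sketch *t,
				  struct ev_sketch *ev)
{
	return t->handle_global_event && t->handle_global_event(ev);
}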
/**************************************************************************
*
* Falcon test code
@@ -889,6 +925,7 @@ static void falcon_remove_port(struct efx_nic *efx)
static int
falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
struct efx_spi_device *spi;
void *region;
@@ -896,8 +933,11 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
__le16 *word, *limit;
u32 csum;
- spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
- if (!spi)
+ if (efx_spi_present(&nic_data->spi_flash))
+ spi = &nic_data->spi_flash;
+ else if (efx_spi_present(&nic_data->spi_eeprom))
+ spi = &nic_data->spi_eeprom;
+ else
return -EINVAL;
region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
@@ -905,12 +945,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
return -ENOMEM;
nvconfig = region + FALCON_NVCONFIG_OFFSET;
- mutex_lock(&efx->spi_lock);
+ mutex_lock(&nic_data->spi_lock);
rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
- mutex_unlock(&efx->spi_lock);
+ mutex_unlock(&nic_data->spi_lock);
if (rc) {
netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
- efx->spi_flash ? "flash" : "EEPROM");
+ efx_spi_present(&nic_data->spi_flash) ?
+ "flash" : "EEPROM");
rc = -EIO;
goto out;
}
@@ -1012,7 +1053,7 @@ static int falcon_b0_test_registers(struct efx_nic *efx)
/* Resets NIC to known state. This routine must be called in process
* context and is allowed to sleep. */
-static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t glb_ctl_reg_ker;
@@ -1108,6 +1149,18 @@ fail5:
return rc;
}
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = __falcon_reset_hw(efx, method);
+ mutex_unlock(&nic_data->spi_lock);
+
+ return rc;
+}
+
static void falcon_monitor(struct efx_nic *efx)
{
bool link_changed;
@@ -1189,16 +1242,11 @@ static int falcon_reset_sram(struct efx_nic *efx)
return -ETIMEDOUT;
}
-static int falcon_spi_device_init(struct efx_nic *efx,
- struct efx_spi_device **spi_device_ret,
+static void falcon_spi_device_init(struct efx_nic *efx,
+ struct efx_spi_device *spi_device,
unsigned int device_id, u32 device_type)
{
- struct efx_spi_device *spi_device;
-
if (device_type != 0) {
- spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
- if (!spi_device)
- return -ENOMEM;
spi_device->device_id = device_id;
spi_device->size =
1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
@@ -1215,27 +1263,15 @@ static int falcon_spi_device_init(struct efx_nic *efx,
1 << SPI_DEV_TYPE_FIELD(device_type,
SPI_DEV_TYPE_BLOCK_SIZE);
} else {
- spi_device = NULL;
+ spi_device->size = 0;
}
-
- kfree(*spi_device_ret);
- *spi_device_ret = spi_device;
- return 0;
-}
-
-static void falcon_remove_spi_devices(struct efx_nic *efx)
-{
- kfree(efx->spi_eeprom);
- efx->spi_eeprom = NULL;
- kfree(efx->spi_flash);
- efx->spi_flash = NULL;
}
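falcon_spi_device_init() above now fills an embedded structure instead of kzalloc()ing one, with size == 0 serving as the 'device absent' sentinel that efx_spi_present() tests elsewhere in this diff; the allocation-failure path (and falcon_remove_spi_devices()) disappears entirely. A minimal sketch of the embed-plus-sentinel pattern, with assumed names and a simplified size decode:

#include <linux/types.h>

struct spi_dev_sketch {			/* illustrative */
	unsigned int device_id;
	size_t size;			/* 0 => device absent */
};

static bool spi_present_sketch(const struct spi_dev_sketch *spi)
{
	return spi->size != 0;
}

static void spi_init_sketch(struct spi_dev_sketch *spi,
			    unsigned int id, u32 device_type)
{
	if (device_type != 0) {
		spi->device_id = id;
		spi->size = 1u << (device_type & 0x1f); /* simplified decode */
	} else {
		spi->size = 0;		/* absent; nothing to free later */
	}
}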
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
- int board_rev;
int rc;
nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
@@ -1243,55 +1279,32 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
return -ENOMEM;
rc = falcon_read_nvram(efx, nvconfig);
- if (rc == -EINVAL) {
- netif_err(efx, probe, efx->net_dev,
- "NVRAM is invalid therefore using defaults\n");
- efx->phy_type = PHY_TYPE_NONE;
- efx->mdio.prtad = MDIO_PRTAD_NONE;
- board_rev = 0;
- rc = 0;
- } else if (rc) {
- goto fail1;
- } else {
- struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
- struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
-
- efx->phy_type = v2->port0_phy_type;
- efx->mdio.prtad = v2->port0_phy_addr;
- board_rev = le16_to_cpu(v2->board_revision);
-
- if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
- rc = falcon_spi_device_init(
- efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
- le32_to_cpu(v3->spi_device_type
- [FFE_AB_SPI_DEVICE_FLASH]));
- if (rc)
- goto fail2;
- rc = falcon_spi_device_init(
- efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
- le32_to_cpu(v3->spi_device_type
- [FFE_AB_SPI_DEVICE_EEPROM]));
- if (rc)
- goto fail2;
- }
+ if (rc)
+ goto out;
+
+ efx->phy_type = nvconfig->board_v2.port0_phy_type;
+ efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
+
+ if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
+ falcon_spi_device_init(
+ efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
+ le32_to_cpu(nvconfig->board_v3
+ .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
+ falcon_spi_device_init(
+ efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
+ le32_to_cpu(nvconfig->board_v3
+ .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
}
/* Read the MAC addresses */
- memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
+ memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
efx->phy_type, efx->mdio.prtad);
- rc = falcon_probe_board(efx, board_rev);
- if (rc)
- goto fail2;
-
- kfree(nvconfig);
- return 0;
-
- fail2:
- falcon_remove_spi_devices(efx);
- fail1:
+ rc = falcon_probe_board(efx,
+ le16_to_cpu(nvconfig->board_v2.board_revision));
+out:
kfree(nvconfig);
return rc;
}
@@ -1299,6 +1312,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
int boot_dev;
@@ -1327,12 +1341,14 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
}
+ mutex_init(&nic_data->spi_lock);
+
if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
- falcon_spi_device_init(efx, &efx->spi_flash,
+ falcon_spi_device_init(efx, &nic_data->spi_flash,
FFE_AB_SPI_DEVICE_FLASH,
default_flash_type);
if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
- falcon_spi_device_init(efx, &efx->spi_eeprom,
+ falcon_spi_device_init(efx, &nic_data->spi_eeprom,
FFE_AB_SPI_DEVICE_EEPROM,
large_eeprom_type);
}
@@ -1397,7 +1413,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
/* Now we can reset the NIC */
- rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
+ rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
@@ -1419,8 +1435,11 @@ static int falcon_probe_nic(struct efx_nic *efx)
/* Read in the non-volatile configuration */
rc = falcon_probe_nvconfig(efx);
- if (rc)
+ if (rc) {
+ if (rc == -EINVAL)
+ netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
goto fail5;
+ }
/* Initialise I2C adapter */
board = falcon_board(efx);
@@ -1452,7 +1471,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
BUG_ON(i2c_del_adapter(&board->i2c_adap));
memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
fail5:
- falcon_remove_spi_devices(efx);
efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
@@ -1606,10 +1624,9 @@ static void falcon_remove_nic(struct efx_nic *efx)
BUG_ON(rc);
memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
- falcon_remove_spi_devices(efx);
efx_nic_free_buffer(efx, &efx->irq_status);
- falcon_reset_hw(efx, RESET_TYPE_ALL);
+ __falcon_reset_hw(efx, RESET_TYPE_ALL);
/* Release the second function after the reset */
if (nic_data->pci_dev2) {
@@ -1720,6 +1737,7 @@ struct efx_nic_type falcon_a1_nic_type = {
.reset = falcon_reset_hw,
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
.prepare_flush = falcon_prepare_flush,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
@@ -1760,6 +1778,7 @@ struct efx_nic_type falcon_b0_nic_type = {
.reset = falcon_reset_hw,
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
.prepare_flush = falcon_prepare_flush,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index cfc6a5b5a477..2dd16f0b3ced 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -13,8 +13,6 @@
#include "phy.h"
#include "efx.h"
#include "nic.h"
-#include "regs.h"
-#include "io.h"
#include "workarounds.h"
/* Macros for unpacking the board revision */
@@ -30,17 +28,28 @@
#define FALCON_BOARD_SFN4112F 0x52
/* Board temperature is about 15°C above ambient when air flow is
- * limited. */
+ * limited. The maximum acceptable ambient temperature varies
+ * depending on the PHY specifications but the critical temperature
+ * above which we should shut down to avoid damage is 80°C. */
#define FALCON_BOARD_TEMP_BIAS 15
+#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
/* SFC4000 datasheet says: 'The maximum permitted junction temperature
* is 125°C; the thermal design of the environment for the SFC4000
* should aim to keep this well below 100°C.' */
+#define FALCON_JUNC_TEMP_MIN 0
#define FALCON_JUNC_TEMP_MAX 90
+#define FALCON_JUNC_TEMP_CRIT 125
/*****************************************************************************
* Support for LM87 sensor chip used on several boards
*/
+#define LM87_REG_TEMP_HW_INT_LOCK 0x13
+#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
+#define LM87_REG_TEMP_HW_INT 0x17
+#define LM87_REG_TEMP_HW_EXT 0x18
+#define LM87_REG_TEMP_EXT1 0x26
+#define LM87_REG_TEMP_INT 0x27
#define LM87_REG_ALARMS1 0x41
#define LM87_REG_ALARMS2 0x42
#define LM87_IN_LIMITS(nr, _min, _max) \
@@ -57,6 +66,27 @@
#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
+static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
+{
+ while (*reg_values) {
+ u8 reg = *reg_values++;
+ u8 value = *reg_values++;
+ int rc = i2c_smbus_write_byte_data(client, reg, value);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static const u8 falcon_lm87_common_regs[] = {
+ LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
+ LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
+ LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
+ LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
+ LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
+ 0
+};
+
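efx_poke_lm87() above walks a flat array of (register, value) byte pairs terminated by a register of 0, which is what lets falcon_lm87_common_regs be applied after each board's own table. A sketch of the pair-table consumer, with a hypothetical write_reg() standing in for i2c_smbus_write_byte_data():

#include <linux/types.h>

static int write_reg(u8 reg, u8 value)
{
	(void)reg;
	(void)value;
	return 0;			/* stub for the sketch */
}

/* Consume (reg, value) pairs until a 0 register ends the table. */
static int poke_regs(const u8 *reg_values)
{
	while (*reg_values) {
		u8 reg = *reg_values++;
		u8 value = *reg_values++;
		int rc = write_reg(reg, value);

		if (rc)
			return rc;
	}
	return 0;
}

The 0 terminator is safe here because register 0 is never a valid LM87 target in these tables.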
static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
const u8 *reg_values)
{
@@ -67,13 +97,16 @@ static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
if (!client)
return -EIO;
- while (*reg_values) {
- u8 reg = *reg_values++;
- u8 value = *reg_values++;
- rc = i2c_smbus_write_byte_data(client, reg, value);
- if (rc)
- goto err;
- }
+ /* Read-to-clear alarm/interrupt status */
+ i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+ i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+
+ rc = efx_poke_lm87(client, reg_values);
+ if (rc)
+ goto err;
+ rc = efx_poke_lm87(client, falcon_lm87_common_regs);
+ if (rc)
+ goto err;
board->hwmon_client = client;
return 0;
@@ -91,36 +124,56 @@ static void efx_fini_lm87(struct efx_nic *efx)
static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
{
struct i2c_client *client = falcon_board(efx)->hwmon_client;
- s32 alarms1, alarms2;
+ bool temp_crit, elec_fault, is_failure;
+ u16 alarms;
+ s32 reg;
/* If link is up then do not monitor temperature */
if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
return 0;
- alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
- alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
- if (alarms1 < 0)
- return alarms1;
- if (alarms2 < 0)
- return alarms2;
- alarms1 &= mask;
- alarms2 &= mask >> 8;
- if (alarms1 || alarms2) {
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+ if (reg < 0)
+ return reg;
+ alarms = reg;
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+ if (reg < 0)
+ return reg;
+ alarms |= reg << 8;
+ alarms &= mask;
+
+ temp_crit = false;
+ if (alarms & LM87_ALARM_TEMP_INT) {
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
+ if (reg < 0)
+ return reg;
+ if (reg > FALCON_BOARD_TEMP_CRIT)
+ temp_crit = true;
+ }
+ if (alarms & LM87_ALARM_TEMP_EXT1) {
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
+ if (reg < 0)
+ return reg;
+ if (reg > FALCON_JUNC_TEMP_CRIT)
+ temp_crit = true;
+ }
+ elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
+ is_failure = temp_crit || elec_fault;
+
+ if (alarms)
netif_err(efx, hw, efx->net_dev,
- "LM87 detected a hardware failure (status %02x:%02x)"
- "%s%s%s\n",
- alarms1, alarms2,
- (alarms1 & LM87_ALARM_TEMP_INT) ?
+ "LM87 detected a hardware %s (status %02x:%02x)"
+ "%s%s%s%s\n",
+ is_failure ? "failure" : "problem",
+ alarms & 0xff, alarms >> 8,
+ (alarms & LM87_ALARM_TEMP_INT) ?
"; board is overheating" : "",
- (alarms1 & LM87_ALARM_TEMP_EXT1) ?
+ (alarms & LM87_ALARM_TEMP_EXT1) ?
"; controller is overheating" : "",
- (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1)
- || alarms2) ?
- "; electrical fault" : "");
- return -ERANGE;
- }
+ temp_crit ? "; reached critical temperature" : "",
+ elec_fault ? "; electrical fault" : "");
- return 0;
+ return is_failure ? -ERANGE : 0;
}
#else /* !CONFIG_SENSORS_LM87 */
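The efx_check_lm87() rework above folds both alarm registers into one 16-bit mask and escalates to -ERANGE only when a temperature reading actually crossed its critical threshold or a non-temperature (electrical) alarm fired; a plain over-temperature warning is now logged without failing the check. The masking arithmetic, as a small sketch with illustrative bit positions:

#include <linux/types.h>

#define ALARM_TEMP_INT	0x0010		/* illustrative, not LM87's */
#define ALARM_TEMP_EXT1	0x0020

static bool is_failure_sketch(u8 alarms1, u8 alarms2, u16 mask,
			      bool temp_crit)
{
	u16 alarms = (alarms1 | (u16)alarms2 << 8) & mask;
	bool elec_fault = alarms & ~(ALARM_TEMP_INT | ALARM_TEMP_EXT1);

	return temp_crit || elec_fault;
}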
@@ -325,7 +378,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
new_mode = old_mode & ~PHY_MODE_SPECIAL;
else
new_mode = PHY_MODE_SPECIAL;
- if (old_mode == new_mode) {
+ if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
err = 0;
} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
err = -EBUSY;
@@ -362,10 +415,11 @@ static void sfe4001_fini(struct efx_nic *efx)
static int sfe4001_check_hw(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
s32 status;
/* If XAUI link is up then do not monitor */
- if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
+ if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
return 0;
/* Check the powered status of the PHY. Lack of power implies that
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index b31f595ebb5b..b49e84394641 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -16,7 +16,6 @@
#include "io.h"
#include "mac.h"
#include "mdio_10g.h"
-#include "phy.h"
#include "workarounds.h"
/**************************************************************************
@@ -88,6 +87,7 @@ int falcon_reset_xaui(struct efx_nic *efx)
static void falcon_ack_status_intr(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
@@ -99,7 +99,7 @@ static void falcon_ack_status_intr(struct efx_nic *efx)
/* We can only use this interrupt to signal the negative edge of
* xaui_align [we have to poll the positive edge]. */
- if (efx->xmac_poll_required)
+ if (nic_data->xmac_poll_required)
return;
efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
@@ -277,12 +277,14 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);
falcon_reconfigure_mac_wrapper(efx);
- efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
falcon_ack_status_intr(efx);
return 0;
@@ -350,11 +352,13 @@ static void falcon_update_stats_xmac(struct efx_nic *efx)
void falcon_poll_xmac(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
- !efx->xmac_poll_required)
+ !nic_data->xmac_poll_required)
return;
- efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
falcon_ack_status_intr(efx);
}
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index 52cb6082b910..d4722c41c4ce 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -7,6 +7,7 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <linux/in.h>
#include "efx.h"
#include "filter.h"
#include "io.h"
@@ -26,19 +27,26 @@
*/
#define FILTER_CTL_SRCH_MAX 200
+enum efx_filter_table_id {
+ EFX_FILTER_TABLE_RX_IP = 0,
+ EFX_FILTER_TABLE_RX_MAC,
+ EFX_FILTER_TABLE_COUNT,
+};
+
struct efx_filter_table {
+ enum efx_filter_table_id id;
u32 offset; /* address of table relative to BAR */
unsigned size; /* number of entries */
unsigned step; /* step between entries */
unsigned used; /* number currently used */
unsigned long *used_bitmap;
struct efx_filter_spec *spec;
+ unsigned search_depth[EFX_FILTER_TYPE_COUNT];
};
struct efx_filter_state {
spinlock_t lock;
struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
- unsigned search_depth[EFX_FILTER_TYPE_COUNT];
};
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -65,68 +73,203 @@ static u16 efx_filter_increment(u32 key)
}
static enum efx_filter_table_id
-efx_filter_type_table_id(enum efx_filter_type type)
+efx_filter_spec_table_id(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
+ EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
+ return spec->type >> 2;
+}
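
The BUILD_BUG_ON()s above pin down the encoding this relies on: each group of four type values maps onto one table by a simple shift. Working through two cases with the enum values from filter.h further down:

/* EFX_FILTER_UDP_WILD == 3:  3 >> 2 == 0 == EFX_FILTER_TABLE_RX_IP
 * EFX_FILTER_MAC_WILD == 5:  5 >> 2 == 1 == EFX_FILTER_TABLE_RX_MAC
 */
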
+
+static struct efx_filter_table *
+efx_filter_spec_table(struct efx_filter_state *state,
+ const struct efx_filter_spec *spec)
{
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_WILD >> 2));
- return type >> 2;
+ if (spec->type == EFX_FILTER_UNSPEC)
+ return NULL;
+ else
+ return &state->table[efx_filter_spec_table_id(spec)];
}
-static void
-efx_filter_table_reset_search_depth(struct efx_filter_state *state,
- enum efx_filter_table_id table_id)
+static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
{
- memset(state->search_depth + (table_id << 2), 0,
- sizeof(state->search_depth[0]) << 2);
+ memset(table->search_depth, 0, sizeof(table->search_depth));
}
static void efx_filter_push_rx_limits(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table;
efx_oword_t filter_ctl;
efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+ table = &state->table[EFX_FILTER_TABLE_RX_IP];
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_TCP_FULL] +
+ table->search_depth[EFX_FILTER_TCP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_TCP_WILD] +
+ table->search_depth[EFX_FILTER_TCP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_UDP_FULL] +
+ table->search_depth[EFX_FILTER_UDP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_UDP_WILD] +
+ table->search_depth[EFX_FILTER_UDP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
- if (state->table[EFX_FILTER_TABLE_RX_MAC].size) {
+ table = &state->table[EFX_FILTER_TABLE_RX_MAC];
+ if (table->size) {
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_MAC_FULL] +
+ table->search_depth[EFX_FILTER_MAC_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_MAC_WILD] +
+ table->search_depth[EFX_FILTER_MAC_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
}
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
+static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
+ __be32 host1, __be16 port1,
+ __be32 host2, __be16 port2)
+{
+ spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
+ spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
+ spec->data[2] = ntohl(host2);
+}
+
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port)
+{
+ __be32 host1;
+ __be16 port1;
+
+ EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+ /* This cannot currently be combined with other filtering */
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EPROTONOSUPPORT;
+
+ if (port == 0)
+ return -EINVAL;
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ spec->type = EFX_FILTER_TCP_WILD;
+ break;
+ case IPPROTO_UDP:
+ spec->type = EFX_FILTER_UDP_WILD;
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ /* Filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * (= zero for wildcard) addresses.
+ */
+ host1 = 0;
+ if (proto != IPPROTO_UDP) {
+ port1 = 0;
+ } else {
+ port1 = port;
+ port = 0;
+ }
+
+ __efx_filter_set_ipv4(spec, host1, port1, host, port);
+ return 0;
+}
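
To make the port-swap wrinkle concrete, a hedged worked example (address and port are illustrative): for local endpoint 192.168.0.1:80, __efx_filter_set_ipv4() above produces:

/* TCP wild (host1 = 0, port1 = 0, host2 = host, port2 = port):
 *	data[0] = 0x00000000
 *	data[1] = 0x00500000	(port 80 in the high half)
 *	data[2] = 0xc0a80001	(192.168.0.1)
 *
 * UDP wild (port1 = port, port2 = 0, i.e. the swap):
 *	data[0] = 0x00000050	(port 80 in the low half)
 *	data[1] = 0x00000000
 *	data[2] = 0xc0a80001
 */

This matches the layouts of the efx_filter_set_rx_{tcp,udp}_wild() helpers removed from filter.h below.
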
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port,
+ __be32 rhost, __be16 rport)
+{
+ EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+ /* This cannot currently be combined with other filtering */
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EPROTONOSUPPORT;
+
+ if (port == 0 || rport == 0)
+ return -EINVAL;
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ spec->type = EFX_FILTER_TCP_FULL;
+ break;
+ case IPPROTO_UDP:
+ spec->type = EFX_FILTER_UDP_FULL;
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ __efx_filter_set_ipv4(spec, rhost, rport, host, port);
+ return 0;
+}
+
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and optional VID
+ * @spec: Specification to initialise
+ * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address
+ */
+int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+ /* This cannot currently be combined with other filtering */
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EPROTONOSUPPORT;
+
+ if (vid == EFX_FILTER_VID_UNSPEC) {
+ spec->type = EFX_FILTER_MAC_WILD;
+ spec->data[0] = 0;
+ } else {
+ spec->type = EFX_FILTER_MAC_FULL;
+ spec->data[0] = vid;
+ }
+
+ spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
+ spec->data[2] = addr[0] << 8 | addr[1];
+ return 0;
+}
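
Again as a hedged worked example: for vid 5 and addr 00:0f:53:01:02:03, the packing above yields:

/* spec->data[0] = 0x00000005	(VID; 0 for EFX_FILTER_MAC_WILD)
 * spec->data[1] = 0x53010203	(addr[2..5])
 * spec->data[2] = 0x0000000f	(addr[0..1])
 */
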
+
/* Build a filter entry and return its n-tuple key. */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
u32 data3;
- switch (efx_filter_type_table_id(spec->type)) {
+ switch (efx_filter_spec_table_id(spec)) {
case EFX_FILTER_TABLE_RX_IP: {
- bool is_udp = (spec->type == EFX_FILTER_RX_UDP_FULL ||
- spec->type == EFX_FILTER_RX_UDP_WILD);
+ bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
+ spec->type == EFX_FILTER_UDP_WILD);
EFX_POPULATE_OWORD_7(
*filter,
FRF_BZ_RSS_EN,
@@ -143,7 +286,7 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
}
case EFX_FILTER_TABLE_RX_MAC: {
- bool is_wild = spec->type == EFX_FILTER_RX_MAC_WILD;
+ bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_8(
*filter,
FRF_CZ_RMFT_RSS_EN,
@@ -206,6 +349,14 @@ found:
return filter_idx;
}
+/* Construct/deconstruct external filter IDs */
+
+static inline int
+efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
+{
+ return table_id << 16 | index;
+}
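
The ID format is simply the table ID in the high 16 bits and the table index in the low 16. The matching deconstruction helpers are not part of this patch; a sketch of what they would look like:

static inline enum efx_filter_table_id
efx_filter_id_table_id(int filter_id)
{
	return filter_id >> 16;
}

static inline unsigned efx_filter_id_index(int filter_id)
{
	return filter_id & 0xffff;
}
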
+
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
@@ -213,30 +364,28 @@ found:
* @replace: Flag for whether the specified filter may replace a filter
* with an identical match expression and equal or lower priority
*
- * On success, return the filter index within its table.
+ * On success, return the filter ID.
* On failure, return a negative error code.
*/
int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
bool replace)
{
struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id =
- efx_filter_type_table_id(spec->type);
- struct efx_filter_table *table = &state->table[table_id];
+ struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
int filter_idx, depth;
u32 key;
int rc;
- if (table->size == 0)
+ if (!table || table->size == 0)
return -EINVAL;
key = efx_filter_build(&filter, spec);
netif_vdbg(efx, hw, efx->net_dev,
"%s: type %d search_depth=%d", __func__, spec->type,
- state->search_depth[spec->type]);
+ table->search_depth[spec->type]);
spin_lock_bh(&state->lock);
@@ -263,8 +412,8 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
}
*saved_spec = *spec;
- if (state->search_depth[spec->type] < depth) {
- state->search_depth[spec->type] = depth;
+ if (table->search_depth[spec->type] < depth) {
+ table->search_depth[spec->type] = depth;
efx_filter_push_rx_limits(efx);
}
@@ -273,6 +422,7 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
__func__, spec->type, filter_idx, spec->dmaq_id);
+ rc = efx_filter_make_id(table->id, filter_idx);
out:
spin_unlock_bh(&state->lock);
@@ -306,15 +456,16 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
{
struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id =
- efx_filter_type_table_id(spec->type);
- struct efx_filter_table *table = &state->table[table_id];
+ struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
int filter_idx, depth;
u32 key;
int rc;
+ if (!table)
+ return -EINVAL;
+
key = efx_filter_build(&filter, spec);
spin_lock_bh(&state->lock);
@@ -332,7 +483,7 @@ int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
efx_filter_table_clear_entry(efx, table, filter_idx);
if (table->used == 0)
- efx_filter_table_reset_search_depth(state, table_id);
+ efx_filter_table_reset_search_depth(table);
rc = 0;
out:
@@ -340,15 +491,9 @@ out:
return rc;
}
-/**
- * efx_filter_table_clear - remove filters from a table by priority
- * @efx: NIC from which to remove the filters
- * @table_id: Table from which to remove the filters
- * @priority: Maximum priority to remove
- */
-void efx_filter_table_clear(struct efx_nic *efx,
- enum efx_filter_table_id table_id,
- enum efx_filter_priority priority)
+static void efx_filter_table_clear(struct efx_nic *efx,
+ enum efx_filter_table_id table_id,
+ enum efx_filter_priority priority)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[table_id];
@@ -360,11 +505,22 @@ void efx_filter_table_clear(struct efx_nic *efx,
if (table->spec[filter_idx].priority <= priority)
efx_filter_table_clear_entry(efx, table, filter_idx);
if (table->used == 0)
- efx_filter_table_reset_search_depth(state, table_id);
+ efx_filter_table_reset_search_depth(table);
spin_unlock_bh(&state->lock);
}
+/**
+ * efx_filter_clear_rx - remove RX filters by priority
+ * @efx: NIC from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
+{
+ efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
+ efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
+}
+
/* Restore filter state after a reset */
void efx_restore_filters(struct efx_nic *efx)
{
@@ -407,6 +563,7 @@ int efx_probe_filters(struct efx_nic *efx)
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
table = &state->table[EFX_FILTER_TABLE_RX_IP];
+ table->id = EFX_FILTER_TABLE_RX_IP;
table->offset = FR_BZ_RX_FILTER_TBL0;
table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
table->step = FR_BZ_RX_FILTER_TBL0_STEP;
@@ -414,6 +571,7 @@ int efx_probe_filters(struct efx_nic *efx)
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
table = &state->table[EFX_FILTER_TABLE_RX_MAC];
+ table->id = EFX_FILTER_TABLE_RX_MAC;
table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
@@ -428,10 +586,9 @@ int efx_probe_filters(struct efx_nic *efx)
GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
- table->spec = vmalloc(table->size * sizeof(*table->spec));
+ table->spec = vzalloc(table->size * sizeof(*table->spec));
if (!table->spec)
goto fail;
- memset(table->spec, 0, table->size * sizeof(*table->spec));
}
return 0;
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h
index a53319ded79c..872f2132a496 100644
--- a/drivers/net/sfc/filter.h
+++ b/drivers/net/sfc/filter.h
@@ -12,31 +12,27 @@
#include <linux/types.h>
-enum efx_filter_table_id {
- EFX_FILTER_TABLE_RX_IP = 0,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_COUNT,
-};
-
/**
* enum efx_filter_type - type of hardware filter
- * @EFX_FILTER_RX_TCP_FULL: RX, matching TCP/IPv4 4-tuple
- * @EFX_FILTER_RX_TCP_WILD: RX, matching TCP/IPv4 destination (host, port)
- * @EFX_FILTER_RX_UDP_FULL: RX, matching UDP/IPv4 4-tuple
- * @EFX_FILTER_RX_UDP_WILD: RX, matching UDP/IPv4 destination (host, port)
- * @EFX_FILTER_RX_MAC_FULL: RX, matching Ethernet destination MAC address, VID
- * @EFX_FILTER_RX_MAC_WILD: RX, matching Ethernet destination MAC address
+ * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
+ * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
+ * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
+ * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
+ * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
+ * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
+ * @EFX_FILTER_UNSPEC: Match type is unspecified
*
- * Falcon NICs only support the RX TCP/IPv4 and UDP/IPv4 filter types.
+ * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
*/
enum efx_filter_type {
- EFX_FILTER_RX_TCP_FULL = 0,
- EFX_FILTER_RX_TCP_WILD,
- EFX_FILTER_RX_UDP_FULL,
- EFX_FILTER_RX_UDP_WILD,
- EFX_FILTER_RX_MAC_FULL = 4,
- EFX_FILTER_RX_MAC_WILD,
- EFX_FILTER_TYPE_COUNT,
+ EFX_FILTER_TCP_FULL = 0,
+ EFX_FILTER_TCP_WILD,
+ EFX_FILTER_UDP_FULL,
+ EFX_FILTER_UDP_WILD,
+ EFX_FILTER_MAC_FULL = 4,
+ EFX_FILTER_MAC_WILD,
+ EFX_FILTER_TYPE_COUNT, /* number of specific types */
+ EFX_FILTER_UNSPEC = 0xf,
};
/**
@@ -63,13 +59,13 @@ enum efx_filter_priority {
* @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
* any IP filter that matches the same packet. By default, IP
* filters take precedence.
- *
- * Currently, no flags are defined for TX filters.
+ * @EFX_FILTER_FLAG_RX: Filter is for RX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
+ EFX_FILTER_FLAG_RX = 0x08,
};
/**
@@ -91,99 +87,26 @@ struct efx_filter_spec {
u32 data[3];
};
-/**
- * efx_filter_set_rx_tcp_full - specify RX filter with TCP/IPv4 full match
- * @spec: Specification to initialise
- * @shost: Source host address (host byte order)
- * @sport: Source port (host byte order)
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_tcp_full(struct efx_filter_spec *spec,
- u32 shost, u16 sport, u32 dhost, u16 dport)
-{
- spec->type = EFX_FILTER_RX_TCP_FULL;
- spec->data[0] = sport | shost << 16;
- spec->data[1] = dport << 16 | shost >> 16;
- spec->data[2] = dhost;
-}
-
-/**
- * efx_filter_set_rx_tcp_wild - specify RX filter with TCP/IPv4 wildcard match
- * @spec: Specification to initialise
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_tcp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
-{
- spec->type = EFX_FILTER_RX_TCP_WILD;
- spec->data[0] = 0;
- spec->data[1] = dport << 16;
- spec->data[2] = dhost;
-}
-
-/**
- * efx_filter_set_rx_udp_full - specify RX filter with UDP/IPv4 full match
- * @spec: Specification to initialise
- * @shost: Source host address (host byte order)
- * @sport: Source port (host byte order)
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_udp_full(struct efx_filter_spec *spec,
- u32 shost, u16 sport, u32 dhost, u16 dport)
-{
- spec->type = EFX_FILTER_RX_UDP_FULL;
- spec->data[0] = sport | shost << 16;
- spec->data[1] = dport << 16 | shost >> 16;
- spec->data[2] = dhost;
-}
-
-/**
- * efx_filter_set_rx_udp_wild - specify RX filter with UDP/IPv4 wildcard match
- * @spec: Specification to initialise
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_udp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
+static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
+ enum efx_filter_priority priority,
+ enum efx_filter_flags flags,
+ unsigned rxq_id)
{
- spec->type = EFX_FILTER_RX_UDP_WILD;
- spec->data[0] = dport;
- spec->data[1] = 0;
- spec->data[2] = dhost;
+ spec->type = EFX_FILTER_UNSPEC;
+ spec->priority = priority;
+ spec->flags = EFX_FILTER_FLAG_RX | flags;
+ spec->dmaq_id = rxq_id;
}
-/**
- * efx_filter_set_rx_mac_full - specify RX filter with MAC full match
- * @spec: Specification to initialise
- * @vid: VLAN ID
- * @addr: Destination MAC address
- */
-static inline void efx_filter_set_rx_mac_full(struct efx_filter_spec *spec,
- u16 vid, const u8 *addr)
-{
- spec->type = EFX_FILTER_RX_MAC_FULL;
- spec->data[0] = vid;
- spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
- spec->data[2] = addr[0] << 8 | addr[1];
-}
-
-/**
- * efx_filter_set_rx_mac_full - specify RX filter with MAC wildcard match
- * @spec: Specification to initialise
- * @addr: Destination MAC address
- */
-static inline void efx_filter_set_rx_mac_wild(struct efx_filter_spec *spec,
- const u8 *addr)
-{
- spec->type = EFX_FILTER_RX_MAC_WILD;
- spec->data[0] = 0;
- spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
- spec->data[2] = addr[0] << 8 | addr[1];
-}
+extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port);
+extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port,
+ __be32 rhost, __be16 rport);
+extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr);
+enum {
+ EFX_FILTER_VID_UNSPEC = 0xffff,
+};
#endif /* EFX_FILTER_H */
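
Taken together, the new construction API replaces the removed per-type inline helpers. A minimal usage sketch under stated assumptions (EFX_FILTER_PRI_MANUAL is taken from the existing efx_filter_priority enum; the wrapper name, address and rxq_id are illustrative):

static int example_steer_mac(struct efx_nic *efx, unsigned rxq_id)
{
	static const u8 addr[ETH_ALEN] = {
		0x00, 0x0f, 0x53, 0x01, 0x02, 0x03
	};
	struct efx_filter_spec spec;
	int rc;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_id);
	rc = efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr);
	if (rc)
		return rc;
	/* On success this now returns the external filter ID */
	return efx_filter_insert_filter(efx, &spec, false);
}
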
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 85a99fe87437..6da4ae20a039 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -22,28 +22,39 @@
*
* Notes on locking strategy:
*
- * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes
- * which necessitates locking.
- * Under normal operation few writes to NIC registers are made and these
- * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
- * cased to allow 4-byte (hence lockless) accesses.
+ * Most CSRs are 128-bit (oword) and therefore cannot be read or
+ * written atomically. Access from the host is buffered by the Bus
+ * Interface Unit (BIU). Whenever the host reads from the lowest
+ * address of such a register, or from the address of a different such
+ * register, the BIU latches the register's value. Subsequent reads
+ * from higher addresses of the same register will read the latched
+ * value. Whenever the host writes part of such a register, the BIU
+ * collects the written value and does not write to the underlying
+ * register until all 4 dwords have been written. A similar buffering
+ * scheme applies to host access to the NIC's 64-bit SRAM.
*
- * It *is* safe to write to these 4-byte registers in the middle of an
- * access to an 8-byte or 16-byte register. We therefore use a
- * spinlock to protect accesses to the larger registers, but no locks
- * for the 4-byte registers.
+ * Access to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes or lost
+ * information from read-to-clear fields. We use efx_nic::biu_lock
+ * for this. (We could use separate locks for read and write, but
+ * this is not normally a performance bottleneck.)
*
- * A write barrier is needed to ensure that DW3 is written after DW0/1/2
- * due to the way the 16byte registers are "collected" in the BIU.
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * 128-bit but are special-cased in the BIU to avoid the need for
+ * locking in the host:
*
- * We also lock when carrying out reads, to ensure consistency of the
- * data (made possible since the BIU reads all 128 bits into a cache).
- * Reads are very rare, so this isn't a significant performance
- * impact. (Most data transferred from NIC to host is DMAed directly
- * into host memory).
- *
- * I/O BAR access uses locks for both reads and writes (but is only provided
- * for testing purposes).
+ * - They are write-only.
+ * - The semantics of writing to these registers are such that
+ * replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of such a register
+ * (i.e. the high 32 bits) the underlying register will always be
+ * written. If the collector does not hold values for the low 96
+ * bits of the register, they will be written as zero. Writing to
+ * the last qword does not have this effect and must not be done.
+ * - If the host writes to the address of any other part of such a
+ * register while the collector already holds values for some other
+ * register, the write is discarded and the collector maintains its
+ * current state.
*/
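
A hedged illustration of the last-dword rule in practice; this mirrors what efx_notify_tx_desc() in nic.c does, but the helper here is only a sketch:

/* Lockless TX doorbell: write only the last dword of TX_DESC_UPD.
 * The BIU commits the register immediately, with the low 96 bits
 * zeroed, which these registers are defined to tolerate.
 */
static inline void example_tx_doorbell(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
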
#if BITS_PER_LONG == 64
@@ -72,7 +83,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
return (__force __le32)__raw_readl(efx->membase + reg);
}
-/* Writes to a normal 16-byte Efx register, locking as appropriate. */
+/* Write a normal 128-bit CSR, locking as appropriate. */
static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg)
{
@@ -85,21 +96,18 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
_efx_writeq(efx, value->u64[0], reg + 0);
- wmb();
_efx_writeq(efx, value->u64[1], reg + 8);
#else
_efx_writed(efx, value->u32[0], reg + 0);
_efx_writed(efx, value->u32[1], reg + 4);
_efx_writed(efx, value->u32[2], reg + 8);
- wmb();
_efx_writed(efx, value->u32[3], reg + 12);
#endif
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
-/* Write an 8-byte NIC SRAM entry through the supplied mapping,
- * locking as appropriate. */
+/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
efx_qword_t *value, unsigned int index)
{
@@ -115,36 +123,25 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
__raw_writeq((__force u64)value->u64[0], membase + addr);
#else
__raw_writel((__force u32)value->u32[0], membase + addr);
- wmb();
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
-/* Write dword to NIC register that allows partial writes
- *
- * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
- * TX_DESC_UPD_REG) can be written to as a single dword. This allows
- * for lockless writes.
- */
+/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
netif_vdbg(efx, hw, efx->net_dev,
- "writing partial register %x with "EFX_DWORD_FMT"\n",
+ "writing register %x with "EFX_DWORD_FMT"\n",
reg, EFX_DWORD_VAL(*value));
/* No lock required */
_efx_writed(efx, value->u32[0], reg);
}
-/* Read from a NIC register
- *
- * This reads an entire 16-byte register in one go, locking as
- * appropriate. It is essential to read the first dword first, as this
- * prompts the NIC to load the current value into the shadow register.
- */
+/* Read a 128-bit CSR, locking as appropriate. */
static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg)
{
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
- rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
@@ -163,8 +159,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
EFX_OWORD_VAL(*value));
}
-/* Read an 8-byte SRAM entry through supplied mapping,
- * locking as appropriate. */
+/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
efx_qword_t *value, unsigned int index)
{
@@ -176,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
- rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -186,7 +180,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
addr, EFX_QWORD_VAL(*value));
}
-/* Read dword from register that allows partial writes (sic) */
+/* Read a 32-bit CSR or SRAM */
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
@@ -196,28 +190,28 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
reg, EFX_DWORD_VAL(*value));
}
-/* Write to a register forming part of a table */
+/* Write a 128-bit CSR forming part of a table */
static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int index)
{
efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Read to a register forming part of a table */
+/* Read a 128-bit CSR forming part of a table */
static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int index)
{
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Write to a dword register forming part of a table */
+/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg, unsigned int index)
{
efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Read from a dword register forming part of a table */
+/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg, unsigned int index)
{
@@ -231,29 +225,54 @@ static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
#define EFX_PAGED_REG(page, reg) \
((page) * EFX_PAGE_BLOCK_SIZE + (reg))
-/* As for efx_writeo(), but for a page-mapped register. */
-static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
- unsigned int reg, unsigned int page)
+/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
+static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int page)
{
- efx_writeo(efx, value, EFX_PAGED_REG(page, reg));
-}
+ reg = EFX_PAGED_REG(page, reg);
-/* As for efx_writed(), but for a page-mapped register. */
-static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
- unsigned int reg, unsigned int page)
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+
+#ifdef EFX_USE_QWORD_IO
+ _efx_writeq(efx, value->u64[0], reg + 0);
+#else
+ _efx_writed(efx, value->u32[0], reg + 0);
+ _efx_writed(efx, value->u32[1], reg + 4);
+#endif
+ _efx_writed(efx, value->u32[2], reg + 8);
+ _efx_writed(efx, value->u32[3], reg + 12);
+}
+#define efx_writeo_page(efx, value, reg, page) \
+ _efx_writeo_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
+ page)
+
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
+ * RX_DESC_UPD or TX_DESC_UPD)
+ */
+static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg, unsigned int page)
{
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
-
-/* Write dword to page-mapped register with an extra lock.
- *
- * As for efx_writed_page(), but for a register that suffers from
- * SFC bug 3181. Take out a lock so the BIU collector cannot be
- * confused. */
-static inline void efx_writed_page_locked(struct efx_nic *efx,
- efx_dword_t *value,
- unsigned int reg,
- unsigned int page)
+#define efx_writed_page(efx, value, reg, page) \
+ _efx_writed_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
+ && (reg) != 0xa1c), \
+ page)
+
+/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
+ * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
+ * collector register.
+ */
+static inline void _efx_writed_page_locked(struct efx_nic *efx,
+ efx_dword_t *value,
+ unsigned int reg,
+ unsigned int page)
{
unsigned long flags __attribute__ ((unused));
@@ -265,5 +284,9 @@ static inline void efx_writed_page_locked(struct efx_nic *efx,
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
}
+#define efx_writed_page_locked(efx, value, reg, page) \
+ _efx_writed_page_locked(efx, value, \
+ reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
+ page)
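
The BUILD_BUG_ON_ZERO() in these wrappers turns a wrong register offset into a build failure instead of a silent locking bug. A small illustration (timer_cmd is assumed to be a populated efx_dword_t, channel an efx_channel):

/* OK: TIMER_COMMAND is the one locked register, at offset 0x420 */
efx_writed_page_locked(efx, &timer_cmd, 0x420, channel->channel);

/* Any other offset is rejected at compile time, e.g.
 *	efx_writed_page_locked(efx, &timer_cmd, 0x424, 0);
 * trips BUILD_BUG_ON_ZERO((reg) != 0x420).
 */
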
#endif /* EFX_IO_H */
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 12cf910c2ce7..b716e827b291 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -381,7 +381,7 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
-rc);
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
} else
- netif_err(efx, hw, efx->net_dev,
+ netif_dbg(efx, hw, efx->net_dev,
"MC command 0x%x inlen %d failed rc=%d\n",
cmd, (int)inlen, -rc);
}
@@ -463,6 +463,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = rc;
mcdi->resplen = 0;
+ ++mcdi->credits;
}
} else
/* Nobody was waiting for an MCDI request, so trigger a reset */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index c992742446b1..0e97eed663c6 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -16,7 +16,6 @@
#include "phy.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
-#include "mdio_10g.h"
#include "nic.h"
#include "selftest.h"
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 98d946020429..56b0266b441f 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -15,7 +15,6 @@
#include "net_driver.h"
#include "mdio_10g.h"
#include "workarounds.h"
-#include "nic.h"
unsigned efx_mdio_id_oui(u32 id)
{
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 02e54b4f701f..d38627448c22 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -321,14 +321,15 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
struct efx_mtd *efx_mtd = mtd->priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
- rc = mutex_lock_interruptible(&efx->spi_lock);
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_read(efx, spi, part->offset + start, len,
retlen, buffer);
- mutex_unlock(&efx->spi_lock);
+ mutex_unlock(&nic_data->spi_lock);
return rc;
}
@@ -337,13 +338,14 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
- rc = mutex_lock_interruptible(&efx->spi_lock);
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = efx_spi_erase(part, part->offset + start, len);
- mutex_unlock(&efx->spi_lock);
+ mutex_unlock(&nic_data->spi_lock);
return rc;
}
@@ -354,14 +356,15 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
struct efx_mtd *efx_mtd = mtd->priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
- rc = mutex_lock_interruptible(&efx->spi_lock);
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_write(efx, spi, part->offset + start, len,
retlen, buffer);
- mutex_unlock(&efx->spi_lock);
+ mutex_unlock(&nic_data->spi_lock);
return rc;
}
@@ -370,11 +373,12 @@ static int falcon_mtd_sync(struct mtd_info *mtd)
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
- mutex_lock(&efx->spi_lock);
+ mutex_lock(&nic_data->spi_lock);
rc = efx_spi_slow_wait(part, true);
- mutex_unlock(&efx->spi_lock);
+ mutex_unlock(&nic_data->spi_lock);
return rc;
}
@@ -387,35 +391,67 @@ static struct efx_mtd_ops falcon_mtd_ops = {
static int falcon_mtd_probe(struct efx_nic *efx)
{
- struct efx_spi_device *spi = efx->spi_flash;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct efx_spi_device *spi;
struct efx_mtd *efx_mtd;
- int rc;
+ int rc = -ENODEV;
ASSERT_RTNL();
- if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
- return -ENODEV;
-
- efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->spi = spi;
- efx_mtd->name = "flash";
- efx_mtd->ops = &falcon_mtd_ops;
+ spi = &nic_data->spi_flash;
+ if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
+ efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
+ GFP_KERNEL);
+ if (!efx_mtd)
+ return -ENOMEM;
+
+ efx_mtd->spi = spi;
+ efx_mtd->name = "flash";
+ efx_mtd->ops = &falcon_mtd_ops;
+
+ efx_mtd->n_parts = 1;
+ efx_mtd->part[0].mtd.type = MTD_NORFLASH;
+ efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
+ efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+ efx_mtd->part[0].mtd.erasesize = spi->erase_size;
+ efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
+ efx_mtd->part[0].type_name = "sfc_flash_bootrom";
+
+ rc = efx_mtd_probe_device(efx, efx_mtd);
+ if (rc) {
+ kfree(efx_mtd);
+ return rc;
+ }
+ }
- efx_mtd->n_parts = 1;
- efx_mtd->part[0].mtd.type = MTD_NORFLASH;
- efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
- efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].mtd.erasesize = spi->erase_size;
- efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].type_name = "sfc_flash_bootrom";
+ spi = &nic_data->spi_eeprom;
+ if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
+ efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
+ GFP_KERNEL);
+ if (!efx_mtd)
+ return -ENOMEM;
+
+ efx_mtd->spi = spi;
+ efx_mtd->name = "EEPROM";
+ efx_mtd->ops = &falcon_mtd_ops;
+
+ efx_mtd->n_parts = 1;
+ efx_mtd->part[0].mtd.type = MTD_RAM;
+ efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
+ efx_mtd->part[0].mtd.size =
+ min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
+ EFX_EEPROM_BOOTCONFIG_START;
+ efx_mtd->part[0].mtd.erasesize = spi->erase_size;
+ efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
+ efx_mtd->part[0].type_name = "sfc_bootconfig";
+
+ rc = efx_mtd_probe_device(efx, efx_mtd);
+ if (rc) {
+ kfree(efx_mtd);
+ return rc;
+ }
+ }
- rc = efx_mtd_probe_device(efx, efx_mtd);
- if (rc)
- kfree(efx_mtd);
return rc;
}
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 0a7e26d73b52..bdce66ddf93a 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -136,14 +136,19 @@ struct efx_tx_buffer {
* @efx: The associated Efx NIC
* @queue: DMA queue number
* @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
* @buffer: The software buffer ring
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
* @flushed: Used when handling queue flushing
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
- * @stopped: Stopped count.
- * Set if this TX queue is currently stopping its port.
+ * @old_write_count: The value of @write_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of @write_count if this
+ * variable indicates that the queue is empty. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
* software ring.
@@ -163,13 +168,17 @@ struct efx_tx_buffer {
* @tso_long_headers: Number of packets with headers too long for standard
* blocks
* @tso_packets: Number of packets via the TSO xmit path
+ * @pushes: Number of times the TX push feature has been used
+ * @empty_read_count: If the completion path has seen the queue as empty
+ * and the transmission path has not yet checked this, the value of
+ * @read_count bitwise-ORed with %EFX_EMPTY_COUNT_VALID; otherwise 0.
*/
struct efx_tx_queue {
/* Members which don't change on the fast path */
struct efx_nic *efx ____cacheline_aligned_in_smp;
unsigned queue;
struct efx_channel *channel;
- struct efx_nic *nic;
+ struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer;
struct efx_special_buffer txd;
unsigned int ptr_mask;
@@ -177,7 +186,7 @@ struct efx_tx_queue {
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
- int stopped;
+ unsigned int old_write_count;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -187,6 +196,11 @@ struct efx_tx_queue {
unsigned int tso_bursts;
unsigned int tso_long_headers;
unsigned int tso_packets;
+ unsigned int pushes;
+
+ /* Members shared between paths and sometimes updated */
+ unsigned int empty_read_count ____cacheline_aligned_in_smp;
+#define EFX_EMPTY_COUNT_VALID 0x80000000
};
/**
@@ -305,7 +319,6 @@ enum efx_rx_alloc_method {
* @irq_moderation: IRQ moderation value (in hardware ticks)
* @napi_dev: Net device used with NAPI
* @napi_str: NAPI control structure
- * @reset_work: Scheduled reset work thread
* @work_pending: Is work pending via NAPI?
* @eventq: Event queue buffer
* @eventq_mask: Event queue pointer mask
@@ -326,8 +339,6 @@ enum efx_rx_alloc_method {
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
* @rx_queue: RX queue for this channel
- * @tx_stop_count: Core TX queue stop count
- * @tx_stop_lock: Core TX queue stop lock
* @tx_queue: TX queues for this channel
*/
struct efx_channel {
@@ -366,10 +377,6 @@ struct efx_channel {
bool rx_pkt_csummed;
struct efx_rx_queue rx_queue;
-
- atomic_t tx_stop_count;
- spinlock_t tx_stop_lock;
-
struct efx_tx_queue tx_queue[2];
};
@@ -621,14 +628,13 @@ struct efx_filter_state;
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
+ * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
* @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold and must not acquire RTNL.
* @workqueue_name: Name of workqueue
* @reset_work: Scheduled reset workitem
- * @monitor_work: Hardware monitor workitem
* @membase_phys: Memory BAR value as physical address
* @membase: Memory BAR value
- * @biu_lock: BIU (bus interface unit) lock
* @interrupt_mode: Interrupt mode
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irq_rx_moderation: IRQ moderation time for RX event queues
@@ -647,23 +653,14 @@ struct efx_filter_state;
* @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
* @irq_status: Interrupt status buffer
- * @last_irq_cpu: Last CPU to handle interrupt.
- * This register is written with the SMP processor ID whenever an
- * interrupt is handled. It is used by efx_nic_test_interrupt()
- * to verify that an interrupt has occurred.
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
* @fatal_irq_level: IRQ level (bit number) used for serious errors
- * @spi_flash: SPI flash device
- * This field will be %NULL if no flash device is present (or for Siena).
- * @spi_eeprom: SPI EEPROM device
- * This field will be %NULL if no EEPROM device is present (or for Siena).
- * @spi_lock: SPI bus lock
* @mtd_list: List of MTDs attached to the NIC
- * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
* @nic_data: Hardware-dependent state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
* @port_inhibited, efx_monitor() and efx_reconfigure_port()
@@ -676,21 +673,14 @@ struct efx_filter_state;
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @rx_checksum_enabled: RX checksumming enabled
- * @mac_stats: MAC statistics. These include all statistics the MACs
- * can provide. Generic code converts these into a standard
- * &struct net_device_stats.
* @stats_buffer: DMA buffer for statistics
- * @stats_lock: Statistics update lock. Serialises statistics fetches
* @mac_op: MAC interface
- * @mac_address: Permanent MAC address
* @phy_type: PHY type
- * @mdio_lock: MDIO lock
* @phy_op: PHY interface
* @phy_data: PHY private data (including PHY-specific stats)
* @mdio: PHY MDIO interface
* @mdio_bus: PHY MDIO bus ID (only used by Siena)
* @phy_mode: PHY operating mode. Serialised by @mac_lock.
- * @xmac_poll_required: XMAC link state needs polling
* @link_advertising: Autonegotiation advertising flags
* @link_state: Current state of the link
* @n_link_state_changes: Number of times the link has changed state
@@ -701,21 +691,34 @@ struct efx_filter_state;
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
+ * @monitor_work: Hardware monitor workitem
+ * @biu_lock: BIU (bus interface unit) lock
+ * @last_irq_cpu: Last CPU to handle interrupt.
+ * This register is written with the SMP processor ID whenever an
+ * interrupt is handled. It is used by efx_nic_test_interrupt()
+ * to verify that an interrupt has occurred.
+ * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
+ * @mac_stats: MAC statistics. These include all statistics the MACs
+ * can provide. Generic code converts these into a standard
+ * &struct net_device_stats.
+ * @stats_lock: Statistics update lock. Serialises statistics fetches
*
* This is stored in the private area of the &struct net_device.
*/
struct efx_nic {
+ /* The following fields should be written very rarely */
+
char name[IFNAMSIZ];
struct pci_dev *pci_dev;
const struct efx_nic_type *type;
int legacy_irq;
+ bool legacy_irq_enabled;
struct workqueue_struct *workqueue;
char workqueue_name[16];
struct work_struct reset_work;
- struct delayed_work monitor_work;
resource_size_t membase_phys;
void __iomem *membase;
- spinlock_t biu_lock;
+
enum efx_int_mode interrupt_mode;
bool irq_rx_adaptive;
unsigned int irq_rx_moderation;
@@ -742,19 +745,13 @@ struct efx_nic {
unsigned long int_error_expire;
struct efx_buffer irq_status;
- volatile signed int last_irq_cpu;
unsigned irq_zero_count;
unsigned fatal_irq_level;
- struct efx_spi_device *spi_flash;
- struct efx_spi_device *spi_eeprom;
- struct mutex spi_lock;
#ifdef CONFIG_SFC_MTD
struct list_head mtd_list;
#endif
- unsigned n_rx_nodesc_drop_cnt;
-
void *nic_data;
struct mutex mac_lock;
@@ -766,22 +763,17 @@ struct efx_nic {
struct net_device *net_dev;
bool rx_checksum_enabled;
- struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
- spinlock_t stats_lock;
struct efx_mac_operations *mac_op;
- unsigned char mac_address[ETH_ALEN];
unsigned int phy_type;
- struct mutex mdio_lock;
struct efx_phy_operations *phy_op;
void *phy_data;
struct mdio_if_info mdio;
unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
- bool xmac_poll_required;
u32 link_advertising;
struct efx_link_state link_state;
unsigned int n_link_state_changes;
@@ -797,6 +789,15 @@ struct efx_nic {
void *loopback_selftest;
struct efx_filter_state *filter_state;
+
+ /* The following fields may be written more often */
+
+ struct delayed_work monitor_work ____cacheline_aligned_in_smp;
+ spinlock_t biu_lock;
+ volatile signed int last_irq_cpu;
+ unsigned n_rx_nodesc_drop_cnt;
+ struct efx_mac_stats mac_stats;
+ spinlock_t stats_lock;
};
static inline int efx_dev_registered(struct efx_nic *efx)
@@ -829,6 +830,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* be called while the controller is uninitialised.
* @probe_port: Probe the MAC and PHY
* @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
* @prepare_flush: Prepare the hardware for flushing the DMA queues
* @update_stats: Update statistics not provided by event handling
* @start_stats: Start the regular fetching of statistics
@@ -873,6 +875,7 @@ struct efx_nic_type {
int (*reset)(struct efx_nic *efx, enum reset_type method);
int (*probe_port)(struct efx_nic *efx);
void (*remove_port)(struct efx_nic *efx);
+ bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
void (*prepare_flush)(struct efx_nic *efx);
void (*update_stats)(struct efx_nic *efx);
void (*start_stats)(struct efx_nic *efx);
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 41c36b9a4244..da386599ab68 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -362,6 +362,35 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned write_ptr;
+ efx_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+static inline bool
+efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
+{
+ unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ tx_queue->empty_read_count = 0;
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
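
efx_may_push_tx_desc() can only return true once the completion path has recorded the queue as empty. That recording side lives in the TX completion code rather than this hunk; it is expected to look roughly like this sketch:

/* Completion-path side (sketch): when the queue drains, publish
 * read_count with the VALID flag so that a later transmit may use
 * the TX push doorbell instead of efx_notify_tx_desc().
 */
if (tx_queue->read_count == ACCESS_ONCE(tx_queue->write_count)) {
	smp_mb();
	tx_queue->empty_read_count =
		tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
}
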
/* For each entry inserted into the software descriptor ring, create a
* descriptor in the hardware TX descriptor ring (in host memory), and
@@ -373,6 +402,7 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
struct efx_tx_buffer *buffer;
efx_qword_t *txd;
unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
BUG_ON(tx_queue->write_count == tx_queue->insert_count);
@@ -391,7 +421,15 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
} while (tx_queue->write_count != tx_queue->insert_count);
wmb(); /* Ensure descriptors are written before they are fetched */
- efx_notify_tx_desc(tx_queue);
+
+ if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_notify_tx_desc(tx_queue);
+ }
}
/* Allocate hardware resources for a TX queue */
@@ -894,46 +932,6 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
channel->channel, EFX_QWORD_VAL(*event));
}
-/* Global events are basically PHY events */
-static void
-efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
-{
- struct efx_nic *efx = channel->efx;
- bool handled = false;
-
- if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
- EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
- EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
- /* Ignored */
- handled = true;
- }
-
- if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
- EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
- efx->xmac_poll_required = true;
- handled = true;
- }
-
- if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
- EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
- EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
- netif_err(efx, rx_err, efx->net_dev,
- "channel %d seen global RX_RESET event. Resetting.\n",
- channel->channel);
-
- atomic_inc(&efx->rx_reset);
- efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
- RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
- handled = true;
- }
-
- if (!handled)
- netif_err(efx, hw, efx->net_dev,
- "channel %d unknown global event "
- EFX_QWORD_FMT "\n", channel->channel,
- EFX_QWORD_VAL(*event));
-}
-
static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
@@ -1050,15 +1048,17 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
case FSE_AZ_EV_CODE_DRV_GEN_EV:
efx_handle_generated_event(channel, &event);
break;
- case FSE_AZ_EV_CODE_GLOBAL_EV:
- efx_handle_global_event(channel, &event);
- break;
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_handle_driver_event(channel, &event);
break;
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (efx->type->handle_global_event &&
+ efx->type->handle_global_event(channel, &event))
+ break;
+ /* else fall through */
default:
netif_err(channel->efx, hw, channel->efx->net_dev,
"channel %d unknown event type %d (data "
@@ -1418,6 +1418,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
u32 queues;
int syserr;
+ /* Could this be ours? If interrupts are disabled then the
+ * channel state may not be valid.
+ */
+ if (!efx->legacy_irq_enabled)
+ return result;
+
/* Read the ISR which also ACKs the interrupts */
efx_readd(efx, &reg, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
@@ -1664,7 +1670,7 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
/* Enable SW_EV to inherit in char driver - assume harmless here */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 0438dc98722d..eb0586925b51 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -15,6 +15,7 @@
#include "net_driver.h"
#include "efx.h"
#include "mcdi.h"
+#include "spi.h"
/*
* Falcon hardware control
@@ -113,6 +114,11 @@ struct falcon_board {
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
* @stats_dma_done: Pointer to the flag which indicates DMA completion.
+ * @spi_flash: SPI flash device
+ * @spi_eeprom: SPI EEPROM device
+ * @spi_lock: SPI bus lock
+ * @mdio_lock: MDIO bus lock
+ * @xmac_poll_required: XMAC link state needs polling
*/
struct falcon_nic_data {
struct pci_dev *pci_dev2;
@@ -121,6 +127,11 @@ struct falcon_nic_data {
bool stats_pending;
struct timer_list stats_timer;
u32 *stats_dma_done;
+ struct efx_spi_device spi_flash;
+ struct efx_spi_device spi_eeprom;
+ struct mutex spi_lock;
+ struct mutex mdio_lock;
+ bool xmac_poll_required;
};
static inline struct falcon_board *falcon_board(struct efx_nic *efx)
@@ -135,7 +146,6 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
* @fw_build: Firmware build number
* @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
- * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
*/
struct siena_nic_data {
u64 fw_version;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 68813d1d85f3..ea3ae0089315 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -41,6 +41,8 @@
#define PCS_UC_STATUS_LBN 0
#define PCS_UC_STATUS_WIDTH 8
#define PCS_UC_STATUS_FW_SAVE 0x20
+#define PMA_PMD_MODE_REG 0xc301
+#define PMA_PMD_RXIN_SEL_LBN 6
#define PMA_PMD_FTX_CTRL2_REG 0xc309
#define PMA_PMD_FTX_STATIC_LBN 13
#define PMA_PMD_VEND1_REG 0xc001
@@ -282,6 +284,10 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
* slow) reload of the firmware image (the microcontroller's code
* memory is not affected by the microcontroller reset). */
efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+ /* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
+ * restart doesn't reset it. We need to do that ourselves. */
+ efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
+ 1 << PMA_PMD_RXIN_SEL_LBN, false);
efx_mdio_write(efx, 1, 0xc300, 0x0002);
msleep(20);
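efx_mdio_set_flag() above clears the RXIN-select bit that PMA/PMD loopback leaves behind. Assuming it is the usual read-modify-write helper over efx_mdio_read()/efx_mdio_write() (a sketch, not the driver's actual body):

    static void efx_mdio_set_flag_sketch(struct efx_nic *efx, int devad,
                                         int addr, int mask, bool state)
    {
            int value = efx_mdio_read(efx, devad, addr);

            if (state)
                    value |= mask;
            else
                    value &= ~mask; /* here: drop 1 << PMA_PMD_RXIN_SEL_LBN */
            efx_mdio_write(efx, devad, addr, value);
    }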
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 6d0959b5158e..3925fd621177 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -37,7 +37,7 @@
* This driver supports two methods for allocating and using RX buffers:
* each RX buffer may be backed by an skb or by an order-n page.
*
- * When LRO is in use then the second method has a lower overhead,
+ * When GRO is in use then the second method has a lower overhead,
* since we don't have to allocate then free skbs on reassembled frames.
*
* Values:
@@ -50,25 +50,25 @@
*
 * - Since pushing and popping descriptors are separated by the rx_queue
 * size, the watermarks should be ~rxd_size.
- * - The performance win by using page-based allocation for LRO is less
- * than the performance hit of using page-based allocation of non-LRO,
+ * - The performance win by using page-based allocation for GRO is less
+ * than the performance hit of using page-based allocation of non-GRO,
* so the watermarks should reflect this.
*
* Per channel we maintain a single variable, updated by each channel:
*
- * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
+ * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
* RX_ALLOC_FACTOR_SKB)
* Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
* limits the hysteresis), and update the allocation strategy:
*
- * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
+ * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
* RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
*/
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
-#define RX_ALLOC_LEVEL_LRO 0x2000
+#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
-#define RX_ALLOC_FACTOR_LRO 1
+#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
/* This is the percentage fill level below which new RX descriptors
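Restated as code, the strategy described in the comment above is a clamped counter nudged by each received frame (a condensed sketch using the renamed constants; the clamp bounds are taken from the defines):

    /* Per packet: bias towards pages when GRO merges frames. */
    channel->rx_alloc_level += gro_performed ? RX_ALLOC_FACTOR_GRO
                                             : RX_ALLOC_FACTOR_SKB;

    /* Per NAPI poll: clamp to [0, RX_ALLOC_LEVEL_MAX], then pick. */
    if (channel->rx_alloc_level < 0)
            channel->rx_alloc_level = 0;
    else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
            channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
    method = channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
             RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB;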
@@ -441,19 +441,19 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
-/* Pass a received packet up through the generic LRO stack
+/* Pass a received packet up through the generic GRO stack
*
* Handles driverlink veto, and passes the fragment up via
- * the appropriate LRO method
+ * the appropriate GRO method
*/
-static void efx_rx_packet_lro(struct efx_channel *channel,
+static void efx_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
bool checksummed)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
- /* Pass the skb/page into the LRO engine */
+ /* Pass the skb/page into the GRO engine */
if (rx_buf->page) {
struct efx_nic *efx = channel->efx;
struct page *page = rx_buf->page;
@@ -499,7 +499,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
if (gro_result == GRO_NORMAL) {
channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
} else if (gro_result != GRO_DROP) {
- channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+ channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
channel->irq_mod_score += 2;
}
}
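The elided body of efx_rx_packet_gro() chooses between the two GRO entry points according to how the buffer was allocated; plausibly something like the following branch (fragment setup omitted):

    if (rx_buf->page) {
            /* Attach the page as a fragment to napi->skb, then: */
            gro_result = napi_gro_frags(napi);
    } else {
            struct sk_buff *skb = rx_buf->skb;

            rx_buf->skb = NULL;     /* the GRO engine now owns the skb */
            gro_result = napi_gro_receive(napi, skb);
    }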
@@ -605,7 +605,7 @@ void __efx_rx_packet(struct efx_channel *channel,
}
if (likely(checksummed || rx_buf->page)) {
- efx_rx_packet_lro(channel, rx_buf, checksummed);
+ efx_rx_packet_gro(channel, rx_buf, checksummed);
return;
}
@@ -628,7 +628,7 @@ void efx_rx_strategy(struct efx_channel *channel)
{
enum efx_rx_alloc_method method = rx_alloc_method;
- /* Only makes sense to use page based allocation if LRO is enabled */
+ /* Only makes sense to use page based allocation if GRO is enabled */
if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
method = RX_ALLOC_METHOD_SKB;
} else if (method == RX_ALLOC_METHOD_AUTO) {
@@ -639,7 +639,7 @@ void efx_rx_strategy(struct efx_channel *channel)
channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
/* Decide on the allocation method */
- method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
+ method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
}
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 45236f58a258..bf8456176443 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -194,13 +194,7 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
static int siena_probe_nvconfig(struct efx_nic *efx)
{
- int rc;
-
- rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL);
- if (rc)
- return rc;
-
- return 0;
+ return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
}
static int siena_probe_nic(struct efx_nic *efx)
@@ -562,7 +556,7 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
if (nic_data->wol_filter_id != -1)
efx_mcdi_wol_filter_remove(efx,
nic_data->wol_filter_id);
- rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address,
+ rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
&nic_data->wol_filter_id);
if (rc)
goto fail;
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 8bf4fce0813a..879b7f6bde3d 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -61,6 +61,11 @@ struct efx_spi_device {
unsigned int block_size;
};
+static inline bool efx_spi_present(const struct efx_spi_device *spi)
+{
+ return spi->size != 0;
+}
+
int falcon_spi_cmd(struct efx_nic *efx,
const struct efx_spi_device *spi, unsigned int command,
int address, const void* in, void *out, size_t len);
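With the flash/EEPROM descriptors now embedded in falcon_nic_data rather than pointed to, a zero size is what marks an absent device, which is what efx_spi_present() tests. A hypothetical call site, assuming the usual SPI_READ opcode from spi.h:

    struct falcon_nic_data *nic_data = efx->nic_data;

    if (efx_spi_present(&nic_data->spi_eeprom))
            rc = falcon_spi_cmd(efx, &nic_data->spi_eeprom, SPI_READ,
                                start, NULL, buffer, len);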
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 1bc6c48c96ee..f102912eba91 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -15,9 +15,7 @@
#include "mdio_10g.h"
#include "nic.h"
#include "phy.h"
-#include "regs.h"
#include "workarounds.h"
-#include "selftest.h"
/* We expect these MMDs to be in the package. */
#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 11726989fe2d..2f5e9da657bf 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,50 +30,6 @@
*/
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
-/* We need to be able to nest calls to netif_tx_stop_queue(), partly
- * because of the 2 hardware queues associated with each core queue,
- * but also so that we can inhibit TX for reasons other than a full
- * hardware queue. */
-void efx_stop_queue(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
- if (!tx_queue)
- return;
-
- spin_lock_bh(&channel->tx_stop_lock);
- netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
-
- atomic_inc(&channel->tx_stop_count);
- netif_tx_stop_queue(
- netdev_get_tx_queue(efx->net_dev,
- tx_queue->queue / EFX_TXQ_TYPES));
-
- spin_unlock_bh(&channel->tx_stop_lock);
-}
-
-/* Decrement core TX queue stop count and wake it if the count is 0 */
-void efx_wake_queue(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
- if (!tx_queue)
- return;
-
- local_bh_disable();
- if (atomic_dec_and_lock(&channel->tx_stop_count,
- &channel->tx_stop_lock)) {
- netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
- netif_tx_wake_queue(
- netdev_get_tx_queue(efx->net_dev,
- tx_queue->queue / EFX_TXQ_TYPES));
- spin_unlock(&channel->tx_stop_lock);
- }
- local_bh_enable();
-}
-
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer)
{
@@ -234,21 +190,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
* checked. Update the xmit path's
* copy of read_count.
*/
- ++tx_queue->stopped;
+ netif_tx_stop_queue(tx_queue->core_txq);
/* This memory barrier protects the
- * change of stopped from the access
+ * change of queue state from the access
* of read_count. */
smp_mb();
tx_queue->old_read_count =
- *(volatile unsigned *)
- &tx_queue->read_count;
+ ACCESS_ONCE(tx_queue->read_count);
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
q_space = efx->txq_entries - 1 - fill_level;
- if (unlikely(q_space-- <= 0))
- goto stop;
+ if (unlikely(q_space-- <= 0)) {
+ rc = NETDEV_TX_BUSY;
+ goto unwind;
+ }
smp_mb();
- --tx_queue->stopped;
+ netif_tx_start_queue(tx_queue->core_txq);
}
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
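The rewrite above drops the driver-private stopped counter in favour of the core queue state. The producer-side idiom is: stop the queue, barrier, re-read the consumer index, then either back off with NETDEV_TX_BUSY or restart the queue if space appeared in the race window. Condensed:

    netif_tx_stop_queue(tx_queue->core_txq);
    smp_mb();       /* order queue-state write vs. read_count read */
    tx_queue->old_read_count = ACCESS_ONCE(tx_queue->read_count);
    fill_level = tx_queue->insert_count - tx_queue->old_read_count;
    if (efx->txq_entries - 1 - fill_level <= 0)
            return NETDEV_TX_BUSY;  /* still full: stay stopped */
    smp_mb();
    netif_tx_start_queue(tx_queue->core_txq); /* raced: space appeared */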
@@ -308,13 +265,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Mark the packet as transmitted, and free the SKB ourselves */
dev_kfree_skb_any(skb);
- goto unwind;
-
- stop:
- rc = NETDEV_TX_BUSY;
-
- if (tx_queue->stopped == 1)
- efx_stop_queue(tx_queue->channel);
unwind:
/* Work backwards until we hit the original insert pointer value */
@@ -407,22 +357,25 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
efx_dequeue_buffers(tx_queue, index);
/* See if we need to restart the netif queue. This barrier
- * separates the update of read_count from the test of
- * stopped. */
+ * separates the update of read_count from the test of the
+ * queue state. */
smp_mb();
- if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
+ if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+ likely(efx->port_enabled)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
+ netif_tx_wake_queue(tx_queue->core_txq);
+ }
+ }
- /* Do this under netif_tx_lock(), to avoid racing
- * with efx_xmit(). */
- netif_tx_lock(efx->net_dev);
- if (tx_queue->stopped) {
- tx_queue->stopped = 0;
- efx_wake_queue(tx_queue->channel);
- }
- netif_tx_unlock(efx->net_dev);
+ /* Check whether the hardware queue is now empty */
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
}
}
}
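The new empty-queue check records the read pointer together with a validity flag, so a later xmit can tell a genuinely recorded empty state from an uninitialised zero (read_count == 0 is a legal value, hence a separate bit rather than a sentinel). A sketch of the assumed encoding:

    /* Assumed encoding: high bit flags validity, low bits hold the count. */
    #define EFX_EMPTY_COUNT_VALID 0x80000000u

    if (tx_queue->read_count == tx_queue->old_write_count) {
            smp_mb();       /* order against the write_count re-read above */
            tx_queue->empty_read_count =
                    tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
    }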
@@ -470,9 +423,10 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->insert_count = 0;
tx_queue->write_count = 0;
+ tx_queue->old_write_count = 0;
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
- BUG_ON(tx_queue->stopped);
+ tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
@@ -508,12 +462,6 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Free up TSO header cache */
efx_fini_tso(tx_queue);
-
- /* Release queue's stop on port, if any */
- if (tx_queue->stopped) {
- tx_queue->stopped = 0;
- efx_wake_queue(tx_queue->channel);
- }
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
@@ -755,12 +703,12 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
* since the xmit path last checked. Update
* the xmit path's copy of read_count.
*/
- ++tx_queue->stopped;
+ netif_tx_stop_queue(tx_queue->core_txq);
/* This memory barrier protects the change of
- * stopped from the access of read_count. */
+ * queue state from the access of read_count. */
smp_mb();
tx_queue->old_read_count =
- *(volatile unsigned *)&tx_queue->read_count;
+ ACCESS_ONCE(tx_queue->read_count);
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
q_space = efx->txq_entries - 1 - fill_level;
@@ -769,7 +717,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
return 1;
}
smp_mb();
- --tx_queue->stopped;
+ netif_tx_start_queue(tx_queue->core_txq);
}
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -1109,8 +1057,10 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
while (1) {
rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
- if (unlikely(rc))
- goto stop;
+ if (unlikely(rc)) {
+ rc2 = NETDEV_TX_BUSY;
+ goto unwind;
+ }
/* Move onto the next fragment? */
if (state.in_len == 0) {
@@ -1139,14 +1089,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
netif_err(efx, tx_err, efx->net_dev,
"Out of memory for TSO headers, or PCI mapping error\n");
dev_kfree_skb_any(skb);
- goto unwind;
-
- stop:
- rc2 = NETDEV_TX_BUSY;
-
- /* Stop the queue if it wasn't stopped before. */
- if (tx_queue->stopped == 1)
- efx_stop_queue(tx_queue->channel);
unwind:
/* Free the DMA mapping we were in the process of writing out */
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 50259dfec583..819c1750e2ab 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -45,9 +45,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
u32 ioaddr = ndev->base_addr;
if (mdp->duplex) /* Full */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
else /* Half */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
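The sh_eth hunks from here on mechanically replace the SH-only ctrl_inl()/ctrl_outl() accessors with the generic readl()/writel(). The recurring read-modify-write could be factored into a single helper, sketched below; note the driver keeps register addresses in a plain u32 rather than a void __iomem *, which this mirrors:

    static inline void sh_eth_modify(u32 ioaddr, int reg, u32 clear, u32 set)
    {
            writel((readl(ioaddr + reg) & ~clear) | set, ioaddr + reg);
    }

    /* e.g. the duplex handling above becomes: */
    sh_eth_modify(ioaddr, ECMR, mdp->duplex ? 0 : ECMR_DM,
                  mdp->duplex ? ECMR_DM : 0);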
@@ -57,10 +57,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
switch (mdp->speed) {
case 10: /* 10BASE */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
break;
case 100:/* 100BASE */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
break;
default:
break;
@@ -96,9 +96,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
u32 ioaddr = ndev->base_addr;
if (mdp->duplex) /* Full */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
else /* Half */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
@@ -108,10 +108,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
switch (mdp->speed) {
case 10: /* 10BASE */
- ctrl_outl(0, ioaddr + RTRATE);
+ writel(0, ioaddr + RTRATE);
break;
case 100:/* 100BASE */
- ctrl_outl(1, ioaddr + RTRATE);
+ writel(1, ioaddr + RTRATE);
break;
default:
break;
@@ -143,7 +143,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
static void sh_eth_chip_reset(struct net_device *ndev)
{
/* reset device */
- ctrl_outl(ARSTR_ARSTR, ARSTR);
+ writel(ARSTR_ARSTR, ARSTR);
mdelay(1);
}
@@ -152,10 +152,10 @@ static void sh_eth_reset(struct net_device *ndev)
u32 ioaddr = ndev->base_addr;
int cnt = 100;
- ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
- ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+ writel(EDSR_ENALL, ioaddr + EDSR);
+ writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
while (cnt > 0) {
- if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
+ if (!(readl(ioaddr + EDMR) & 0x3))
break;
mdelay(1);
cnt--;
@@ -164,14 +164,14 @@ static void sh_eth_reset(struct net_device *ndev)
printk(KERN_ERR "Device reset fail\n");
/* Table Init */
- ctrl_outl(0x0, ioaddr + TDLAR);
- ctrl_outl(0x0, ioaddr + TDFAR);
- ctrl_outl(0x0, ioaddr + TDFXR);
- ctrl_outl(0x0, ioaddr + TDFFR);
- ctrl_outl(0x0, ioaddr + RDLAR);
- ctrl_outl(0x0, ioaddr + RDFAR);
- ctrl_outl(0x0, ioaddr + RDFXR);
- ctrl_outl(0x0, ioaddr + RDFFR);
+ writel(0x0, ioaddr + TDLAR);
+ writel(0x0, ioaddr + TDFAR);
+ writel(0x0, ioaddr + TDFXR);
+ writel(0x0, ioaddr + TDFFR);
+ writel(0x0, ioaddr + RDLAR);
+ writel(0x0, ioaddr + RDFAR);
+ writel(0x0, ioaddr + RDFXR);
+ writel(0x0, ioaddr + RDFFR);
}
static void sh_eth_set_duplex(struct net_device *ndev)
@@ -180,9 +180,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
u32 ioaddr = ndev->base_addr;
if (mdp->duplex) /* Full */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
else /* Half */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
@@ -192,13 +192,13 @@ static void sh_eth_set_rate(struct net_device *ndev)
switch (mdp->speed) {
case 10: /* 10BASE */
- ctrl_outl(GECMR_10, ioaddr + GECMR);
+ writel(GECMR_10, ioaddr + GECMR);
break;
case 100:/* 100BASE */
- ctrl_outl(GECMR_100, ioaddr + GECMR);
+ writel(GECMR_100, ioaddr + GECMR);
break;
case 1000: /* 1000BASE */
- ctrl_outl(GECMR_1000, ioaddr + GECMR);
+ writel(GECMR_1000, ioaddr + GECMR);
break;
default:
break;
@@ -283,9 +283,9 @@ static void sh_eth_reset(struct net_device *ndev)
{
u32 ioaddr = ndev->base_addr;
- ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+ writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
mdelay(3);
- ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
+ writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
}
#endif
@@ -336,10 +336,10 @@ static void update_mac_address(struct net_device *ndev)
{
u32 ioaddr = ndev->base_addr;
- ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
ioaddr + MAHR);
- ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
+ writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
ioaddr + MALR);
}
@@ -358,12 +358,12 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
memcpy(ndev->dev_addr, mac, 6);
} else {
- ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
- ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
- ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
- ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
- ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
- ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
+ ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24);
+ ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF;
+ ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF;
+ ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF);
+ ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF;
+ ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF);
}
}
@@ -379,19 +379,19 @@ struct bb_info {
/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
- ctrl_outl(ctrl_inl(addr) | msk, addr);
+ writel(readl(addr) | msk, addr);
}
/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
- ctrl_outl((ctrl_inl(addr) & ~msk), addr);
+ writel((readl(addr) & ~msk), addr);
}
/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
- return (ctrl_inl(addr) & msk) != 0;
+ return (readl(addr) & msk) != 0;
}
/* Data I/O pin control */
@@ -506,9 +506,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
/* Rx descriptor address set */
if (i == 0) {
- ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
+ writel(mdp->rx_desc_dma, ioaddr + RDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
+ writel(mdp->rx_desc_dma, ioaddr + RDFAR);
#endif
}
}
@@ -528,9 +528,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
txdesc->buffer_length = 0;
if (i == 0) {
/* Tx descriptor address set */
- ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
+ writel(mdp->tx_desc_dma, ioaddr + TDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
+ writel(mdp->tx_desc_dma, ioaddr + TDFAR);
#endif
}
}
@@ -623,71 +623,71 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
- ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR);
+ writel(mdp->cd->rpadir_value, ioaddr + RPADIR);
/* all sh_eth int mask */
- ctrl_outl(0, ioaddr + EESIPR);
+ writel(0, ioaddr + EESIPR);
#if defined(__LITTLE_ENDIAN__)
if (mdp->cd->hw_swap)
- ctrl_outl(EDMR_EL, ioaddr + EDMR);
+ writel(EDMR_EL, ioaddr + EDMR);
else
#endif
- ctrl_outl(0, ioaddr + EDMR);
+ writel(0, ioaddr + EDMR);
/* FIFO size set */
- ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR);
- ctrl_outl(0, ioaddr + TFTR);
+ writel(mdp->cd->fdr_value, ioaddr + FDR);
+ writel(0, ioaddr + TFTR);
/* Frame recv control */
- ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR);
+ writel(mdp->cd->rmcr_value, ioaddr + RMCR);
rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
- ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
+ writel(rx_int_var | tx_int_var, ioaddr + TRSCER);
if (mdp->cd->bculr)
- ctrl_outl(0x800, ioaddr + BCULR); /* Burst sycle set */
+ writel(0x800, ioaddr + BCULR); /* Burst cycle set */
- ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR);
+ writel(mdp->cd->fcftr_value, ioaddr + FCFTR);
if (!mdp->cd->no_trimd)
- ctrl_outl(0, ioaddr + TRIMD);
+ writel(0, ioaddr + TRIMD);
/* Recv frame limit set register */
- ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
+ writel(RFLR_VALUE, ioaddr + RFLR);
- ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
- ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR);
+ writel(readl(ioaddr + EESR), ioaddr + EESR);
+ writel(mdp->cd->eesipr_value, ioaddr + EESIPR);
/* PAUSE Prohibition */
- val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
+ val = (readl(ioaddr + ECMR) & ECMR_DM) |
ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
- ctrl_outl(val, ioaddr + ECMR);
+ writel(val, ioaddr + ECMR);
if (mdp->cd->set_rate)
mdp->cd->set_rate(ndev);
/* E-MAC Status Register clear */
- ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR);
+ writel(mdp->cd->ecsr_value, ioaddr + ECSR);
/* E-MAC Interrupt Enable register */
- ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
+ writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
/* mask reset */
if (mdp->cd->apr)
- ctrl_outl(APR_AP, ioaddr + APR);
+ writel(APR_AP, ioaddr + APR);
if (mdp->cd->mpr)
- ctrl_outl(MPR_MP, ioaddr + MPR);
+ writel(MPR_MP, ioaddr + MPR);
if (mdp->cd->tpauser)
- ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
+ writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
/* Setting the Rx mode will start the Rx process. */
- ctrl_outl(EDRRR_R, ioaddr + EDRRR);
+ writel(EDRRR_R, ioaddr + EDRRR);
netif_start_queue(ndev);
@@ -811,8 +811,8 @@ static int sh_eth_rx(struct net_device *ndev)
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
- if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
- ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
+ if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R))
+ writel(EDRRR_R, ndev->base_addr + EDRRR);
return 0;
}
@@ -827,8 +827,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
u32 mask;
if (intr_status & EESR_ECI) {
- felic_stat = ctrl_inl(ioaddr + ECSR);
- ctrl_outl(felic_stat, ioaddr + ECSR); /* clear int */
+ felic_stat = readl(ioaddr + ECSR);
+ writel(felic_stat, ioaddr + ECSR); /* clear int */
if (felic_stat & ECSR_ICD)
mdp->stats.tx_carrier_errors++;
if (felic_stat & ECSR_LCHNG) {
@@ -839,25 +839,25 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
else
link_stat = PHY_ST_LINK;
} else {
- link_stat = (ctrl_inl(ioaddr + PSR));
+ link_stat = (readl(ioaddr + PSR));
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
}
if (!(link_stat & PHY_ST_LINK)) {
/* Link Down : disable tx and rx */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) &
+ writel(readl(ioaddr + ECMR) &
~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
} else {
/* Link Up */
- ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
+ writel(readl(ioaddr + EESIPR) &
~DMAC_M_ECI, ioaddr + EESIPR);
/*clear int */
- ctrl_outl(ctrl_inl(ioaddr + ECSR),
+ writel(readl(ioaddr + ECSR),
ioaddr + ECSR);
- ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
+ writel(readl(ioaddr + EESIPR) |
DMAC_M_ECI, ioaddr + EESIPR);
/* enable tx and rx */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) |
+ writel(readl(ioaddr + ECMR) |
(ECMR_RE | ECMR_TE), ioaddr + ECMR);
}
}
@@ -888,8 +888,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
/* Receive Descriptor Empty int */
mdp->stats.rx_over_errors++;
- if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
- ctrl_outl(EDRRR_R, ioaddr + EDRRR);
+ if (readl(ioaddr + EDRRR) ^ EDRRR_R)
+ writel(EDRRR_R, ioaddr + EDRRR);
dev_err(&ndev->dev, "Receive Descriptor Empty\n");
}
if (intr_status & EESR_RFE) {
@@ -903,7 +903,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
mask &= ~EESR_ADE;
if (intr_status & mask) {
/* Tx error */
- u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
+ u32 edtrr = readl(ndev->base_addr + EDTRR);
/* dmesg */
dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
intr_status, mdp->cur_tx);
@@ -915,7 +915,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
/* SH7712 BUG */
if (edtrr ^ EDTRR_TRNS) {
/* tx dma start */
- ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+ writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
}
/* wakeup */
netif_wake_queue(ndev);
@@ -934,12 +934,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
spin_lock(&mdp->lock);
/* Get interrupt status */
- intr_status = ctrl_inl(ioaddr + EESR);
+ intr_status = readl(ioaddr + EESR);
/* Clear interrupt */
if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
cd->tx_check | cd->eesr_err_check)) {
- ctrl_outl(intr_status, ioaddr + EESR);
+ writel(intr_status, ioaddr + EESR);
ret = IRQ_HANDLED;
} else
goto other_irq;
@@ -1000,7 +1000,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
- ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
+ writel((readl(ioaddr + ECMR) & ~ECMR_TXF)
| ECMR_DM, ioaddr + ECMR);
new_state = 1;
mdp->link = phydev->link;
@@ -1125,7 +1125,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
/* warning message out. */
printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
- " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));
+ " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
/* tx_errors count up */
mdp->stats.tx_errors++;
@@ -1196,8 +1196,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
mdp->cur_tx++;
- if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
- ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+ if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
+ writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
return NETDEV_TX_OK;
}
@@ -1212,11 +1212,11 @@ static int sh_eth_close(struct net_device *ndev)
netif_stop_queue(ndev);
/* Disable interrupts by clearing the interrupt mask. */
- ctrl_outl(0x0000, ioaddr + EESIPR);
+ writel(0x0000, ioaddr + EESIPR);
/* Stop the chip's Tx and Rx processes. */
- ctrl_outl(0, ioaddr + EDTRR);
- ctrl_outl(0, ioaddr + EDRRR);
+ writel(0, ioaddr + EDTRR);
+ writel(0, ioaddr + EDRRR);
/* PHY Disconnect */
if (mdp->phydev) {
@@ -1251,20 +1251,20 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
pm_runtime_get_sync(&mdp->pdev->dev);
- mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
- ctrl_outl(0, ioaddr + TROCR); /* (write clear) */
- mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
- ctrl_outl(0, ioaddr + CDCR); /* (write clear) */
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
- ctrl_outl(0, ioaddr + LCCR); /* (write clear) */
+ mdp->stats.tx_dropped += readl(ioaddr + TROCR);
+ writel(0, ioaddr + TROCR); /* (write clear) */
+ mdp->stats.collisions += readl(ioaddr + CDCR);
+ writel(0, ioaddr + CDCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR);
+ writel(0, ioaddr + LCCR); /* (write clear) */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
- ctrl_outl(0, ioaddr + CERCR); /* (write clear) */
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
- ctrl_outl(0, ioaddr + CEECR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */
+ writel(0, ioaddr + CERCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */
+ writel(0, ioaddr + CEECR); /* (write clear) */
#else
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
- ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR);
+ writel(0, ioaddr + CNDCR); /* (write clear) */
#endif
pm_runtime_put_sync(&mdp->pdev->dev);
@@ -1295,11 +1295,11 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
if (ndev->flags & IFF_PROMISC) {
/* Set promiscuous. */
- ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
+ writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
ioaddr + ECMR);
} else {
/* Normal, unicast/broadcast-only mode. */
- ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
+ writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
ioaddr + ECMR);
}
}
@@ -1307,30 +1307,30 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(u32 ioaddr)
{
- ctrl_outl(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
- ctrl_outl(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
- ctrl_outl(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
- ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
- ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
- ctrl_outl(0, ioaddr + TSU_PRISL0);
- ctrl_outl(0, ioaddr + TSU_PRISL1);
- ctrl_outl(0, ioaddr + TSU_FWSL0);
- ctrl_outl(0, ioaddr + TSU_FWSL1);
- ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
+ writel(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
+ writel(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
+ writel(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
+ writel(0xc, ioaddr + TSU_BSYSL0);
+ writel(0xc, ioaddr + TSU_BSYSL1);
+ writel(0, ioaddr + TSU_PRISL0);
+ writel(0, ioaddr + TSU_PRISL1);
+ writel(0, ioaddr + TSU_FWSL0);
+ writel(0, ioaddr + TSU_FWSL1);
+ writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
- ctrl_outl(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
+ writel(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
+ writel(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
#else
- ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
- ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
+ writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
+ writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
#endif
- ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
- ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
- ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
- ctrl_outl(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
- ctrl_outl(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
- ctrl_outl(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
- ctrl_outl(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
+ writel(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
+ writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
+ writel(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
+ writel(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
+ writel(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
+ writel(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
+ writel(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
}
#endif /* SH_ETH_HAS_TSU */
@@ -1552,7 +1552,6 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
sh_mdio_release(ndev);
unregister_netdev(ndev);
- flush_scheduled_work();
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index 8b47763958f2..efa64221eede 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index a5d6a6bd0c1a..3406ed870917 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1915,9 +1915,10 @@ err_release_board:
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
+ struct sis190_private *tp = netdev_priv(dev);
sis190_mii_remove(dev);
- flush_scheduled_work();
+ cancel_work_sync(&tp->phy_task);
unregister_netdev(dev);
sis190_release_board(pdev);
pci_set_drvdata(pdev, NULL);
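Replacing the global flush_scheduled_work() with a targeted cancel keeps teardown from blocking on unrelated work items and closes the re-queue race. The general remove-path shape, sketched with a hypothetical foo_priv:

    static void __devexit foo_remove(struct pci_dev *pdev)
    {
            struct net_device *dev = pci_get_drvdata(pdev);
            struct foo_priv *tp = netdev_priv(dev);

            /* Waits if phy_task is running and stops it re-arming;
             * a global flush could miss a task queued after the flush. */
            cancel_work_sync(&tp->phy_task);
            unregister_netdev(dev);
    }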
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 2d9941c045bc..1e1bd0c201c8 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -1263,7 +1263,7 @@ void smt_set_timestamp(struct s_smc *smc, u_char *p)
static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy)
{
int i ;
- u_char *map ;
+ const u_char *map ;
u_short in ;
u_short out ;
@@ -1271,7 +1271,7 @@ static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy)
* MIB para 101b (fddiSMTConnectionPolicy) coding
* is different from 0005 coding
*/
- static u_char ansi_weirdness[16] = {
+ static const u_char ansi_weirdness[16] = {
0,7,5,3,8,1,6,4,9,10,2,11,12,13,14,15
} ;
SMTSETPARA(policy,SMT_P_POLICY) ;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bfec2e0f5275..c149e48a0f57 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1191,7 +1191,7 @@ static void genesis_init(struct skge_hw *hw)
static void genesis_reset(struct skge_hw *hw, int port)
{
- const u8 zero[8] = { 0 };
+ static const u8 zero[8] = { 0 };
u32 reg;
skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
@@ -1557,7 +1557,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
int i;
u32 r;
- const u8 zero[6] = { 0 };
+ static const u8 zero[6] = { 0 };
for (i = 0; i < 10; i++) {
skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
@@ -2764,7 +2764,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
td->dma_hi = map >> 32;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const int offset = skb_transport_offset(skb);
+ const int offset = skb_checksum_start_offset(skb);
/* This seems backwards, but it is what the sk98lin
* does. Looks like hardware is wrong?
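skb_checksum_start_offset() replaces skb_transport_offset() here because, for CHECKSUM_PARTIAL, hardware must start checksumming at csum_start, which need not coincide with the transport header (encapsulated traffic being the obvious case). Its definition is presumably the direct one:

    /* Sketch of the helper, assuming the usual skb layout fields. */
    static inline int skb_checksum_start_offset(const struct sk_buff *skb)
    {
            return skb->csum_start - skb_headroom(skb);
    }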
@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* device is off until link detection */
netif_carrier_off(dev);
- netif_stop_queue(dev);
return dev;
}
@@ -4013,8 +4012,6 @@ static void __devexit skge_remove(struct pci_dev *pdev)
if (!hw)
return;
- flush_scheduled_work();
-
dev1 = hw->dev[1];
if (dev1)
unregister_netdev(dev1);
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index d2dd8e6113ab..235a3c6c9f91 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -277,8 +277,12 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
dev->base_addr = ioaddr+ULTRA_NIC_OFFSET;
{
- int addr_tbl[4] = {0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000};
- short num_pages_tbl[4] = {0x20, 0x40, 0x80, 0xff};
+ static const int addr_tbl[4] = {
+ 0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000
+ };
+ static const short num_pages_tbl[4] = {
+ 0x20, 0x40, 0x80, 0xff
+ };
dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ;
num_pages = num_pages_tbl[(addr >> 4) & 3];
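The skge and smc-ultra hunks apply the same micro-optimisation: marking function-local lookup tables static const places them in .rodata instead of rebuilding them on the stack at every call. The pattern, reduced:

    /* Before: copied onto the stack on every entry to the function. */
    int addr_tbl[4] = { 0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000 };

    /* After: one read-only instance shared by all calls. */
    static const int addr_tbl[4] = { 0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000 };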
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 52f38e12a879..50f712e99e96 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
#define __SMSC911X_H__
#define TX_FIFO_LOW_THRESHOLD ((u32)1600)
-#define SMSC911X_EEPROM_SIZE ((u32)7)
+#define SMSC911X_EEPROM_SIZE ((u32)128)
#define USE_DEBUG 0
/* This is the maximum number of packets to be received every
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 79bdc2e13224..5f06c4706abe 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Apr_2010"
+#define DRV_MODULE_VERSION "Nov_2010"
#include <linux/platform_device.h>
#include <linux/stmmac.h>
@@ -37,7 +37,6 @@ struct stmmac_priv {
unsigned int cur_tx;
unsigned int dirty_tx;
unsigned int dma_tx_size;
- int tx_coe;
int tx_coalesce;
struct dma_desc *dma_rx ;
@@ -48,7 +47,6 @@ struct stmmac_priv {
struct sk_buff_head rx_recycle;
struct net_device *dev;
- int is_gmac;
dma_addr_t dma_rx_phy;
unsigned int dma_rx_size;
unsigned int dma_buf_sz;
@@ -60,14 +58,11 @@ struct stmmac_priv {
struct napi_struct napi;
phy_interface_t phy_interface;
- int pbl;
- int bus_id;
int phy_addr;
int phy_mask;
int (*phy_reset) (void *priv);
- void (*fix_mac_speed) (void *priv, unsigned int speed);
- void (*bus_setup)(void __iomem *ioaddr);
- void *bsp_priv;
+ int rx_coe;
+ int no_csum_insertion;
int phy_irq;
struct phy_device *phydev;
@@ -77,47 +72,20 @@ struct stmmac_priv {
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
- int mii_clk_csr;
u32 msg_enable;
spinlock_t lock;
int wolopts;
int wolenabled;
- int shutdown;
#ifdef CONFIG_STMMAC_TIMER
struct stmmac_timer *tm;
#endif
#ifdef STMMAC_VLAN_TAG_USED
struct vlan_group *vlgrp;
#endif
- int enh_desc;
- int rx_coe;
- int bugged_jumbo;
- int no_csum_insertion;
+ struct plat_stmmacenet_data *plat;
};
-#ifdef CONFIG_STM_DRIVERS
-#include <linux/stm/pad.h>
-static inline int stmmac_claim_resource(struct platform_device *pdev)
-{
- int ret = 0;
- struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
-
- /* Pad routing setup */
- if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
- dev_name(&pdev->dev)))) {
- printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
- ret = -ENODEV;
- }
- return ret;
-}
-#else
-static inline int stmmac_claim_resource(struct platform_device *pdev)
-{
- return 0;
-}
-#endif
-
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 6d65482e789a..fd719edc7f7c 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -94,7 +94,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!priv->is_gmac)
+ if (!priv->plat->has_gmac)
strcpy(info->driver, MAC100_ETHTOOL_NAME);
else
strcpy(info->driver, GMAC_ETHTOOL_NAME);
@@ -176,7 +176,7 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
memset(reg_space, 0x0, REG_SPACE_SIZE);
- if (!priv->is_gmac) {
+ if (!priv->plat->has_gmac) {
/* MAC registers */
for (i = 0; i < 12; i++)
reg_space[i] = readl(priv->ioaddr + (i * 4));
@@ -197,16 +197,6 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
}
}
-static int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
-{
- if (data)
- netdev->features |= NETIF_F_HW_CSUM;
- else
- netdev->features &= ~NETIF_F_HW_CSUM;
-
- return 0;
-}
-
static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -370,7 +360,7 @@ static struct ethtool_ops stmmac_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_rx_csum = stmmac_ethtool_get_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = stmmac_ethtool_set_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_pauseparam = stmmac_get_pauseparam,
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 06bc6034ce81..34a0af3837f9 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -186,6 +186,18 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
}
+/* On some ST platforms, some HW system configuration registers have to be
+ * set according to the link speed negotiated.
+ */
+static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (likely(priv->plat->fix_mac_speed))
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv,
+ phydev->speed);
+}
+
/**
* stmmac_adjust_link
* @dev: net device structure
@@ -228,15 +240,13 @@ static void stmmac_adjust_link(struct net_device *dev)
new_state = 1;
switch (phydev->speed) {
case 1000:
- if (likely(priv->is_gmac))
+ if (likely(priv->plat->has_gmac))
ctrl &= ~priv->hw->link.port;
- if (likely(priv->fix_mac_speed))
- priv->fix_mac_speed(priv->bsp_priv,
- phydev->speed);
+ stmmac_hw_fix_mac_speed(priv);
break;
case 100:
case 10:
- if (priv->is_gmac) {
+ if (priv->plat->has_gmac) {
ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
ctrl |= priv->hw->link.speed;
@@ -246,9 +256,7 @@ static void stmmac_adjust_link(struct net_device *dev)
} else {
ctrl &= ~priv->hw->link.port;
}
- if (likely(priv->fix_mac_speed))
- priv->fix_mac_speed(priv->bsp_priv,
- phydev->speed);
+ stmmac_hw_fix_mac_speed(priv);
break;
default:
if (netif_msg_link(priv))
@@ -305,7 +313,7 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
- snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -552,7 +560,7 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
- if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) {
+ if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
/* In case of GMAC, SF mode has to be enabled
* to perform the TX COE. This depends on:
* 1) TX COE if actually supported
@@ -814,7 +822,7 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl,
+ if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
priv->dma_tx_phy,
priv->dma_rx_phy) < 0)) {
@@ -825,19 +833,17 @@ static int stmmac_open(struct net_device *dev)
/* Copy the MAC addr into the HW */
priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
/* If required, perform hw setup of the bus. */
- if (priv->bus_setup)
- priv->bus_setup(priv->ioaddr);
+ if (priv->plat->bus_setup)
+ priv->plat->bus_setup(priv->ioaddr);
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->ioaddr);
priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
if (priv->rx_coe)
pr_info("stmmac: Rx Checksum Offload Engine supported\n");
- if (priv->tx_coe)
+ if (priv->plat->tx_coe)
pr_info("\tTX Checksum insertion supported\n");
- priv->shutdown = 0;
-
/* Initialise the MMC (if present) to disable all interrupts. */
writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
@@ -943,7 +949,7 @@ static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
skb, skb->len);
segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
- if (unlikely(IS_ERR(segs)))
+ if (IS_ERR(segs))
goto sw_tso_end;
do {
@@ -1042,7 +1048,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_sw_tso(priv, skb);
if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
- if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion)))
+ if (unlikely((!priv->plat->tx_coe) ||
+ (priv->no_csum_insertion)))
skb_checksum_help(skb);
else
csum_insertion = 1;
@@ -1146,7 +1153,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
DMA_FROM_DEVICE);
(p + entry)->des2 = priv->rx_skbuff_dma[entry];
- if (unlikely(priv->is_gmac)) {
+ if (unlikely(priv->plat->has_gmac)) {
if (bfsize >= BUF_SIZE_8KiB)
(p + entry)->des3 =
(p + entry)->des2 + BUF_SIZE_8KiB;
@@ -1356,7 +1363,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- if (priv->is_gmac)
+ if (priv->plat->has_gmac)
max_mtu = JUMBO_LEN;
else
max_mtu = ETH_DATA_LEN;
@@ -1370,7 +1377,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
* needs to have the Tx COE disabled for oversized frames
* (due to limited buffer sizes). In this case we disable
* the TX csum insertion in the TDES and not use SF. */
- if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
+ if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
priv->no_csum_insertion = 1;
else
priv->no_csum_insertion = 0;
@@ -1390,7 +1397,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- if (priv->is_gmac)
+ if (priv->plat->has_gmac)
/* To handle GMAC own interrupts */
priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
@@ -1487,7 +1494,8 @@ static int stmmac_probe(struct net_device *dev)
dev->netdev_ops = &stmmac_netdev_ops;
stmmac_set_ethtool_ops(dev);
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
+ dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
@@ -1509,6 +1517,8 @@ static int stmmac_probe(struct net_device *dev)
pr_warning("\tno valid MAC address;"
"please, use ifconfig or nwhwconfig!\n");
+ spin_lock_init(&priv->lock);
+
ret = register_netdev(dev);
if (ret) {
pr_err("%s: ERROR %i registering the device\n",
@@ -1518,9 +1528,7 @@ static int stmmac_probe(struct net_device *dev)
DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
- (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
-
- spin_lock_init(&priv->lock);
+ (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
return ret;
}
@@ -1536,7 +1544,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
struct mac_device_info *device;
- if (priv->is_gmac)
+ if (priv->plat->has_gmac)
device = dwmac1000_setup(priv->ioaddr);
else
device = dwmac100_setup(priv->ioaddr);
@@ -1544,7 +1552,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
if (!device)
return -ENOMEM;
- if (priv->enh_desc) {
+ if (priv->plat->enh_desc) {
device->desc = &enh_desc_ops;
pr_info("\tEnhanced descriptor structure\n");
} else
@@ -1598,7 +1606,7 @@ static int stmmac_associate_phy(struct device *dev, void *data)
plat_dat->bus_id);
/* Check that this phy is for the MAC being initialised */
- if (priv->bus_id != plat_dat->bus_id)
+ if (priv->plat->bus_id != plat_dat->bus_id)
return 0;
/* OK, this PHY is connected to the MAC.
@@ -1634,15 +1642,13 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *addr = NULL;
struct net_device *ndev = NULL;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = NULL;
struct plat_stmmacenet_data *plat_dat;
pr_info("STMMAC driver:\n\tplatform registration... ");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto out;
- }
+ if (!res)
+ return -ENODEV;
pr_info("\tdone!\n");
if (!request_mem_region(res->start, resource_size(res),
@@ -1650,22 +1656,21 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
pr_err("%s: ERROR: memory allocation failed"
"cannot get the I/O addr 0x%x\n",
__func__, (unsigned int)res->start);
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
addr = ioremap(res->start, resource_size(res));
if (!addr) {
pr_err("%s: ERROR: memory mapping failed\n", __func__);
ret = -ENOMEM;
- goto out;
+ goto out_release_region;
}
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
if (!ndev) {
pr_err("%s: ERROR: allocating the device\n", __func__);
ret = -ENOMEM;
- goto out;
+ goto out_unmap;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1675,21 +1680,17 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
if (ndev->irq == -ENXIO) {
pr_err("%s: ERROR: MAC IRQ configuration "
"information not found\n", __func__);
- ret = -ENODEV;
- goto out;
+ ret = -ENXIO;
+ goto out_free_ndev;
}
priv = netdev_priv(ndev);
priv->device = &(pdev->dev);
priv->dev = ndev;
plat_dat = pdev->dev.platform_data;
- priv->bus_id = plat_dat->bus_id;
- priv->pbl = plat_dat->pbl; /* TLI */
- priv->mii_clk_csr = plat_dat->clk_csr;
- priv->tx_coe = plat_dat->tx_coe;
- priv->bugged_jumbo = plat_dat->bugged_jumbo;
- priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
- priv->enh_desc = plat_dat->enh_desc;
+
+ priv->plat = plat_dat;
+
priv->ioaddr = addr;
/* PMT module is not integrated in all the MAC devices. */
@@ -1703,20 +1704,22 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
/* Set the I/O base addr */
ndev->base_addr = (unsigned long)addr;
- /* Verify embedded resource for the platform */
- ret = stmmac_claim_resource(pdev);
- if (ret < 0)
- goto out;
+ /* Custom initialisation */
+ if (priv->plat->init) {
+ ret = priv->plat->init(pdev);
+ if (unlikely(ret))
+ goto out_free_ndev;
+ }
/* MAC HW device detection */
ret = stmmac_mac_device_setup(ndev);
if (ret < 0)
- goto out;
+ goto out_plat_exit;
/* Network Device Registration */
ret = stmmac_probe(ndev);
if (ret < 0)
- goto out;
+ goto out_plat_exit;
/* associate a PHY - it is provided by another platform bus */
if (!driver_for_each_device
@@ -1724,31 +1727,33 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
stmmac_associate_phy)) {
pr_err("No PHY device is associated with this MAC!\n");
ret = -ENODEV;
- goto out;
+ goto out_unregister;
}
- priv->fix_mac_speed = plat_dat->fix_mac_speed;
- priv->bus_setup = plat_dat->bus_setup;
- priv->bsp_priv = plat_dat->bsp_priv;
-
pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
"\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
pdev->id, ndev->irq, addr);
/* MDIO bus Registration */
- pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
+ pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
ret = stmmac_mdio_register(ndev);
if (ret < 0)
- goto out;
+ goto out_unregister;
pr_debug("registered!\n");
+ return 0;
-out:
- if (ret < 0) {
- platform_set_drvdata(pdev, NULL);
- release_mem_region(res->start, resource_size(res));
- if (addr != NULL)
- iounmap(addr);
- }
+out_unregister:
+ unregister_netdev(ndev);
+out_plat_exit:
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+out_free_ndev:
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+out_unmap:
+ iounmap(addr);
+out_release_region:
+ release_mem_region(res->start, resource_size(res));
return ret;
}
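The reworked probe above replaces the single catch-all out: label with stacked labels that undo exactly what succeeded, in reverse order. The idiom, reduced to its shape with hypothetical do_a/do_b/do_c steps:

    ret = do_a();
    if (ret)
            return ret;
    ret = do_b();
    if (ret)
            goto undo_a;
    ret = do_c();
    if (ret)
            goto undo_b;
    return 0;

    undo_b:
            undo_b_action();
    undo_a:
            undo_a_action();
            return ret;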
@@ -1777,6 +1782,9 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
stmmac_mdio_unregister(ndev);
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+
platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
@@ -1790,69 +1798,54 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
+static int stmmac_suspend(struct device *dev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
int dis_ic = 0;
- if (!dev || !netif_running(dev))
+ if (!ndev || !netif_running(ndev))
return 0;
spin_lock(&priv->lock);
- if (state.event == PM_EVENT_SUSPEND) {
- netif_device_detach(dev);
- netif_stop_queue(dev);
- if (priv->phydev)
- phy_stop(priv->phydev);
+ netif_device_detach(ndev);
+ netif_stop_queue(ndev);
+ if (priv->phydev)
+ phy_stop(priv->phydev);
#ifdef CONFIG_STMMAC_TIMER
- priv->tm->timer_stop();
- if (likely(priv->tm->enable))
- dis_ic = 1;
+ priv->tm->timer_stop();
+ if (likely(priv->tm->enable))
+ dis_ic = 1;
#endif
- napi_disable(&priv->napi);
-
- /* Stop TX/RX DMA */
- priv->hw->dma->stop_tx(priv->ioaddr);
- priv->hw->dma->stop_rx(priv->ioaddr);
- /* Clear the Rx/Tx descriptors */
- priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
- dis_ic);
- priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
-
- /* Enable Power down mode by programming the PMT regs */
- if (device_can_wakeup(priv->device))
- priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
- else
- stmmac_disable_mac(priv->ioaddr);
- } else {
- priv->shutdown = 1;
- /* Although this can appear slightly redundant it actually
- * makes fast the standby operation and guarantees the driver
- * working if hibernation is on media. */
- stmmac_release(dev);
- }
+ napi_disable(&priv->napi);
+
+ /* Stop TX/RX DMA */
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
+ /* Clear the Rx/Tx descriptors */
+ priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
+ dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+
+ /* Enable Power down mode by programming the PMT regs */
+ if (device_may_wakeup(priv->device))
+ priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
+ else
+ stmmac_disable_mac(priv->ioaddr);
spin_unlock(&priv->lock);
return 0;
}
-static int stmmac_resume(struct platform_device *pdev)
+static int stmmac_resume(struct device *dev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(dev);
-
- if (!netif_running(dev))
- return 0;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
- if (priv->shutdown) {
- /* Re-open the interface and re-init the MAC/DMA
- and the rings (i.e. on hibernation stage) */
- stmmac_open(dev);
+ if (!netif_running(ndev))
return 0;
- }
spin_lock(&priv->lock);
@@ -1861,10 +1854,10 @@ static int stmmac_resume(struct platform_device *pdev)
* is received. Anyway, it's better to manually clear
* this bit because it can generate problems while resuming
* from other devices (e.g. serial console). */
- if (device_can_wakeup(priv->device))
+ if (device_may_wakeup(priv->device))
priv->hw->mac->pmt(priv->ioaddr, 0);
- netif_device_attach(dev);
+ netif_device_attach(ndev);
/* Enable the MAC and DMA */
stmmac_enable_mac(priv->ioaddr);
@@ -1872,31 +1865,59 @@ static int stmmac_resume(struct platform_device *pdev)
priv->hw->dma->start_rx(priv->ioaddr);
#ifdef CONFIG_STMMAC_TIMER
- priv->tm->timer_start(tmrate);
+ if (likely(priv->tm->enable))
+ priv->tm->timer_start(tmrate);
#endif
napi_enable(&priv->napi);
if (priv->phydev)
phy_start(priv->phydev);
- netif_start_queue(dev);
+ netif_start_queue(ndev);
spin_unlock(&priv->lock);
return 0;
}
-#endif
-static struct platform_driver stmmac_driver = {
- .driver = {
- .name = STMMAC_RESOURCE_NAME,
- },
- .probe = stmmac_dvr_probe,
- .remove = stmmac_dvr_remove,
-#ifdef CONFIG_PM
+static int stmmac_freeze(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ return stmmac_release(ndev);
+}
+
+static int stmmac_restore(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ return stmmac_open(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pm_ops = {
.suspend = stmmac_suspend,
.resume = stmmac_resume,
-#endif
+ .freeze = stmmac_freeze,
+ .thaw = stmmac_restore,
+ .restore = stmmac_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pm_ops;
+#endif /* CONFIG_PM */
+static struct platform_driver stmmac_driver = {
+ .probe = stmmac_dvr_probe,
+ .remove = stmmac_dvr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &stmmac_pm_ops,
+ },
};
/**
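Moving from the platform bus's legacy suspend/resume to dev_pm_ops also lets hibernation's freeze/thaw/restore phases take the cheap close/open path instead of the old priv->shutdown flag. When all phases can share one suspend/resume pair there is a shorter form, sketched below; here freeze/restore differ from suspend/resume, so the explicit struct above is required (and this assumes SIMPLE_DEV_PM_OPS is available in the target tree):

    /* Only valid when freeze/thaw/poweroff reuse the same pair: */
    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm   = &foo_pm_ops,
            },
    };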
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index d7441616357d..234b4068a1fc 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -53,7 +53,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
int data;
u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
((phyreg << 6) & (0x000007C0)));
- regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
+ regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
writel(regValue, priv->ioaddr + mii_address);
@@ -85,7 +85,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
(((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
| MII_WRITE;
- value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
+ value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
/* Wait until any existing MII operation is complete */
@@ -114,7 +114,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
if (priv->phy_reset) {
pr_debug("stmmac_mdio_reset: calling phy_reset\n");
- priv->phy_reset(priv->bsp_priv);
+ priv->phy_reset(priv->plat->bsp_priv);
}
/* This is a workaround for problems with the STE101P PHY.
@@ -157,7 +157,7 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->read = &stmmac_mdio_read;
new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
new_bus->priv = ndev;
new_bus->irq = irqlist;
new_bus->phy_mask = priv->phy_mask;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 3ed2a67bd6d3..e5662962c7bf 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -294,6 +294,9 @@ enum alta_offsets {
/* Aliased and bogus values! */
RxStatus = 0x0c,
};
+
+#define ASIC_HI_WORD(x) ((x) + 2)
+
enum ASICCtrl_HiWord_bit {
GlobalReset = 0x0001,
RxReset = 0x0002,
@@ -431,6 +434,7 @@ static void netdev_error(struct net_device *dev, int intr_status);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
+static int sundance_set_mac_addr(struct net_device *dev, void *data);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
@@ -464,7 +468,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = tx_timeout,
.ndo_change_mtu = change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = sundance_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1592,6 +1596,19 @@ static int __set_mac_addr(struct net_device *dev)
return 0;
}
+/* Invoked with rtnl_lock held */
+static int sundance_set_mac_addr(struct net_device *dev, void *data)
+{
+ const struct sockaddr *addr = data;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ __set_mac_addr(dev);
+
+ return 0;
+}
+
static const struct {
const char name[ETH_GSTRING_LEN];
} sundance_stats[] = {
@@ -1772,10 +1789,10 @@ static int netdev_close(struct net_device *dev)
}
iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
- ioaddr +ASICCtrl + 2);
+ ioaddr + ASIC_HI_WORD(ASICCtrl));
for (i = 2000; i > 0; i--) {
- if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
+ if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
break;
mdelay(1);
}
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 4ceb3cf6a9a9..1c5408f83937 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1004,7 +1004,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
ctrl = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const u64 csum_start_off = skb_transport_offset(skb);
+ const u64 csum_start_off = skb_checksum_start_offset(skb);
const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = (TXDCTRL_CENAB |
@@ -2380,10 +2380,8 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
*/
mutex_unlock(&gp->pm_mutex);
- /* Wait for a pending reset task to complete */
- while (gp->reset_task_pending)
- yield();
- flush_scheduled_work();
+ /* Wait for the pending reset task to complete */
+ flush_work_sync(&gp->reset_task);
/* Shut the PHY down eventually and setup WOL */
gem_stop_phy(gp, gp->asleep_wol);
@@ -2928,10 +2926,8 @@ static void gem_remove_one(struct pci_dev *pdev)
/* We shouldn't need any locking here */
gem_get_cell(gp);
- /* Wait for a pending reset task to complete */
- while (gp->reset_task_pending)
- yield();
- flush_scheduled_work();
+ /* Cancel reset task */
+ cancel_work_sync(&gp->reset_task);
/* Shut the PHY down */
gem_stop_phy(gp, 0);
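flush_work_sync() and cancel_work_sync() encode different intents: the
former waits for a queued or running work item to finish (on suspend the
reset should still take effect), while the latter removes a pending item
and only waits out one already running (on remove it is simply unwanted).
In sketch, with a hypothetical handler:

	static void foo_reset_task(struct work_struct *work)
	{
		/* recover the controller */
	}

	INIT_WORK(&gp->reset_task, foo_reset_task);

	flush_work_sync(&gp->reset_task);	/* suspend: let it complete */
	cancel_work_sync(&gp->reset_task);	/* remove: drop it instead */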
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 5e28c414421a..55bbb9c15d96 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2266,7 +2266,7 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
tx_flags = TXFLAG_OWN;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const u32 csum_start_off = skb_transport_offset(skb);
+ const u32 csum_start_off = skb_checksum_start_offset(skb);
const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 2cf84e5968b2..767e1e2b210d 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1295,17 +1295,9 @@ static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvin
strcpy(info->version, "2.02");
}
-static u32 sparc_lance_get_link(struct net_device *dev)
-{
- /* We really do not keep track of this, but this
- * is better than not reporting anything at all.
- */
- return 1;
-}
-
static const struct ethtool_ops sparc_lance_ethtool_ops = {
.get_drvinfo = sparc_lance_get_drvinfo,
- .get_link = sparc_lance_get_link,
+ .get_link = ethtool_op_get_link,
};
static const struct net_device_ops sparc_lance_ops = {
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 8b3dc1eb4015..296000bf5a25 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -324,7 +324,7 @@ static int bdx_fw_load(struct bdx_priv *priv)
ENTER;
master = READ_REG(priv, regINIT_SEMAPHORE);
if (!READ_REG(priv, regINIT_STATUS) && master) {
- rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev);
+ rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
if (rc)
goto out;
bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
@@ -2510,4 +2510,4 @@ module_exit(bdx_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(BDX_DRV_DESC);
-MODULE_FIRMWARE("tehuti/firmware.bin");
+MODULE_FIRMWARE("tehuti/bdx.bin");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 30ccbb6d097a..92fc29910c2d 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -32,6 +32,7 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
+#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
@@ -69,10 +70,10 @@
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 115
+#define TG3_MIN_NUM 116
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "October 14, 2010"
+#define DRV_MODULE_RELDATE "December 3, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -1769,9 +1770,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
if (tp->link_config.autoneg == AUTONEG_ENABLE &&
current_link_up == 1 &&
- (tp->link_config.active_speed == SPEED_1000 ||
- (tp->link_config.active_speed == SPEED_100 &&
- tp->link_config.active_duplex == DUPLEX_FULL))) {
+ tp->link_config.active_duplex == DUPLEX_FULL &&
+ (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_1000)) {
u32 eeectl;
if (tp->link_config.active_speed == SPEED_1000)
@@ -1781,7 +1782,8 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
tw32(TG3_CPMU_EEE_CTRL, eeectl);
- tg3_phy_cl45_read(tp, 0x7, TG3_CL45_D7_EEERES_STAT, &val);
+ tg3_phy_cl45_read(tp, MDIO_MMD_AN,
+ TG3_CL45_D7_EEERES_STAT, &val);
if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
@@ -2728,12 +2730,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
- mac_mode |= tp->mac_mode &
- (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
- if (mac_mode & MAC_MODE_APE_TX_EN)
- mac_mode |= MAC_MODE_TDE_ENABLE;
- }
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ mac_mode |= MAC_MODE_APE_TX_EN |
+ MAC_MODE_APE_RX_EN |
+ MAC_MODE_TDE_ENABLE;
tw32_f(MAC_MODE, mac_mode);
udelay(100);
@@ -2969,7 +2969,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
}
if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
- u32 val = 0;
+ u32 val;
tw32(TG3_CPMU_EEE_MODE,
tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
@@ -2986,19 +2986,18 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
val | MII_TG3_DSP_CH34TP2_HIBW01);
+ val = 0;
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Advertise 100-BaseTX EEE ability */
if (tp->link_config.advertising &
- (ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full))
- val |= TG3_CL45_D7_EEEADV_CAP_100TX;
+ ADVERTISED_100baseT_Full)
+ val |= MDIO_AN_EEE_ADV_100TX;
/* Advertise 1000-BaseT EEE ability */
if (tp->link_config.advertising &
- (ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full))
- val |= TG3_CL45_D7_EEEADV_CAP_1000T;
+ ADVERTISED_1000baseT_Full)
+ val |= MDIO_AN_EEE_ADV_1000T;
}
- tg3_phy_cl45_write(tp, 0x7, TG3_CL45_D7_EEEADV_CAP, val);
+ tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
/* Turn off SM_DSP clock. */
val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
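The driver-private clause 45 constants are dropped in favor of the generic
ones from <linux/mdio.h>, which carry the same values (quoted here from the
generic header as a cross-check):

	MDIO_MMD_AN           = 7	/* was the hard-coded 0x7 device address */
	MDIO_AN_EEE_ADV       = 0x3c	/* was TG3_CL45_D7_EEEADV_CAP */
	MDIO_AN_EEE_ADV_100TX = 0x0002	/* was TG3_CL45_D7_EEEADV_CAP_100TX */
	MDIO_AN_EEE_ADV_1000T = 0x0004	/* was TG3_CL45_D7_EEEADV_CAP_1000T */

The half-duplex advertising bits are dropped at the same time, since EEE is
defined only for full duplex.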
@@ -5763,7 +5762,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
- !mss && skb->len > ETH_DATA_LEN)
+ !mss && skb->len > VLAN_ETH_FRAME_LEN)
base_flags |= TXD_FLAG_JMB_PKT;
tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5997,7 +5996,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
#endif
if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
- !mss && skb->len > ETH_DATA_LEN)
+ !mss && skb->len > VLAN_ETH_FRAME_LEN)
base_flags |= TXD_FLAG_JMB_PKT;
len = skb_headlen(skb);
@@ -6339,13 +6338,13 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
kfree(tpr->rx_jmb_buffers);
tpr->rx_jmb_buffers = NULL;
if (tpr->rx_std) {
- pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
- tpr->rx_std, tpr->rx_std_mapping);
+ dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
+ tpr->rx_std, tpr->rx_std_mapping);
tpr->rx_std = NULL;
}
if (tpr->rx_jmb) {
- pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp),
- tpr->rx_jmb, tpr->rx_jmb_mapping);
+ dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
+ tpr->rx_jmb, tpr->rx_jmb_mapping);
tpr->rx_jmb = NULL;
}
}
@@ -6358,8 +6357,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
if (!tpr->rx_std_buffers)
return -ENOMEM;
- tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
- &tpr->rx_std_mapping);
+ tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_STD_RING_BYTES(tp),
+ &tpr->rx_std_mapping,
+ GFP_KERNEL);
if (!tpr->rx_std)
goto err_out;
@@ -6370,9 +6371,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
if (!tpr->rx_jmb_buffers)
goto err_out;
- tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
- TG3_RX_JMB_RING_BYTES(tp),
- &tpr->rx_jmb_mapping);
+ tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_JMB_RING_BYTES(tp),
+ &tpr->rx_jmb_mapping,
+ GFP_KERNEL);
if (!tpr->rx_jmb)
goto err_out;
}
@@ -6491,7 +6493,7 @@ static void tg3_free_consistent(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[i];
if (tnapi->tx_ring) {
- pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
+ dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
tnapi->tx_ring, tnapi->tx_desc_mapping);
tnapi->tx_ring = NULL;
}
@@ -6500,25 +6502,26 @@ static void tg3_free_consistent(struct tg3 *tp)
tnapi->tx_buffers = NULL;
if (tnapi->rx_rcb) {
- pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
- tnapi->rx_rcb,
- tnapi->rx_rcb_mapping);
+ dma_free_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ tnapi->rx_rcb,
+ tnapi->rx_rcb_mapping);
tnapi->rx_rcb = NULL;
}
tg3_rx_prodring_fini(tp, &tnapi->prodring);
if (tnapi->hw_status) {
- pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
- tnapi->hw_status,
- tnapi->status_mapping);
+ dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
+ tnapi->hw_status,
+ tnapi->status_mapping);
tnapi->hw_status = NULL;
}
}
if (tp->hw_stats) {
- pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
- tp->hw_stats, tp->stats_mapping);
+ dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+ tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
}
@@ -6531,9 +6534,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- tp->hw_stats = pci_alloc_consistent(tp->pdev,
- sizeof(struct tg3_hw_stats),
- &tp->stats_mapping);
+ tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping,
+ GFP_KERNEL);
if (!tp->hw_stats)
goto err_out;
@@ -6543,9 +6547,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
- tnapi->hw_status = pci_alloc_consistent(tp->pdev,
- TG3_HW_STATUS_SIZE,
- &tnapi->status_mapping);
+ tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping,
+ GFP_KERNEL);
if (!tnapi->hw_status)
goto err_out;
@@ -6566,9 +6571,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
if (!tnapi->tx_buffers)
goto err_out;
- tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
- TG3_TX_RING_BYTES,
- &tnapi->tx_desc_mapping);
+ tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_TX_RING_BYTES,
+ &tnapi->tx_desc_mapping,
+ GFP_KERNEL);
if (!tnapi->tx_ring)
goto err_out;
}
@@ -6601,9 +6607,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
continue;
- tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping);
+ tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
if (!tnapi->rx_rcb)
goto err_out;
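All of the pci_*_consistent conversions in this file follow one mapping:
pci_alloc_consistent() is a wrapper that always allocates with GFP_ATOMIC,
while dma_alloc_coherent() takes the struct device and an explicit gfp mask,
so sleepable setup paths can use GFP_KERNEL. In sketch:

	/* old: gfp implied (GFP_ATOMIC) */
	buf = pci_alloc_consistent(pdev, size, &dma_handle);
	pci_free_consistent(pdev, size, buf, dma_handle);

	/* new: device and gfp explicit */
	buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
	dma_free_coherent(&pdev->dev, size, buf, dma_handle);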
@@ -6987,7 +6994,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
- pcie_set_readrq(tp->pdev, 4096);
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
else {
pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
tp->pci_cacheline_sz);
@@ -7181,7 +7188,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tp->pcie_cap + PCI_EXP_DEVCTL,
val16);
- pcie_set_readrq(tp->pdev, 4096);
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
/* Clear error status */
pci_write_config_word(tp->pdev,
@@ -7222,19 +7229,21 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ tp->mac_mode = MAC_MODE_APE_TX_EN |
+ MAC_MODE_APE_RX_EN |
+ MAC_MODE_TDE_ENABLE;
+
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
- tw32_f(MAC_MODE, tp->mac_mode);
+ tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+ val = tp->mac_mode;
} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
- tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
- tw32_f(MAC_MODE, tp->mac_mode);
- } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
- tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
- if (tp->mac_mode & MAC_MODE_APE_TX_EN)
- tp->mac_mode |= MAC_MODE_TDE_ENABLE;
- tw32_f(MAC_MODE, tp->mac_mode);
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ val = tp->mac_mode;
} else
- tw32_f(MAC_MODE, 0);
+ val = 0;
+
+ tw32_f(MAC_MODE, val);
udelay(40);
tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
@@ -7801,6 +7810,37 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
tg3_abort_hw(tp, 1);
+ /* Enable MAC control of LPI */
+ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
+ tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
+ TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+ TG3_CPMU_EEE_LNKIDL_UART_IDL);
+
+ tw32_f(TG3_CPMU_EEE_CTRL,
+ TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
+
+ val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
+ TG3_CPMU_EEEMD_LPI_IN_TX |
+ TG3_CPMU_EEEMD_LPI_IN_RX |
+ TG3_CPMU_EEEMD_EEE_ENABLE;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
+
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
+
+ tw32_f(TG3_CPMU_EEE_MODE, val);
+
+ tw32_f(TG3_CPMU_EEE_DBTMR1,
+ TG3_CPMU_DBTMR1_PCIEXIT_2047US |
+ TG3_CPMU_DBTMR1_LNKIDLE_2047US);
+
+ tw32_f(TG3_CPMU_EEE_DBTMR2,
+ TG3_CPMU_DBTMR2_APE_TX_2047US |
+ TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
+ }
+
if (reset_phy)
tg3_phy_reset(tp);
@@ -7860,18 +7900,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
- u32 grc_mode = tr32(GRC_MODE);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ u32 grc_mode = tr32(GRC_MODE);
- /* Access the lower 1K of PL PCIE block registers. */
- val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
- tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
- val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
- tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
- val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+ val = tr32(TG3_PCIE_TLDLPL_PORT +
+ TG3_PCIE_PL_LO_PHYCTL5);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+ val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
- tw32(GRC_MODE, grc_mode);
+ tw32(GRC_MODE, grc_mode);
+ }
val = tr32(TG3_CPMU_LSPD_10MB_CLK);
val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
@@ -7879,22 +7922,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_CPMU_LSPD_10MB_CLK, val);
}
- /* Enable MAC control of LPI */
- if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
- tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
- TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
- TG3_CPMU_EEE_LNKIDL_UART_IDL);
-
- tw32_f(TG3_CPMU_EEE_CTRL,
- TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
-
- tw32_f(TG3_CPMU_EEE_MODE,
- TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
- TG3_CPMU_EEEMD_LPI_IN_TX |
- TG3_CPMU_EEEMD_LPI_IN_RX |
- TG3_CPMU_EEEMD_EEE_ENABLE);
- }
-
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -8162,8 +8189,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8203,6 +8229,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
val = tr32(TG3_RDMA_RSRVCTRL_REG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
+ val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
+ }
tw32(TG3_RDMA_RSRVCTRL_REG,
val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
}
@@ -8280,7 +8310,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
- tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
else
tp->mac_mode = 0;
tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
@@ -9031,8 +9061,14 @@ static bool tg3_enable_msix(struct tg3 *tp)
pci_disable_msix(tp->pdev);
return false;
}
- if (tp->irq_cnt > 1)
+
+ if (tp->irq_cnt > 1) {
tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
+ netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
+ }
+ }
return true;
}
@@ -12411,8 +12447,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if (cfg2 & (1 << 18))
tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
- if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+ if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
+ ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
(cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
@@ -12548,9 +12585,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
}
}
- if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+ ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
@@ -13047,17 +13086,15 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
return 512;
}
+DEFINE_PCI_DEVICE_TABLE(write_reorder_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
+ { },
+};
+
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
- static struct pci_device_id write_reorder_chipsets[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_FE_GATE_700C) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_8131_BRIDGE) },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA,
- PCI_DEVICE_ID_VIA_8385_0) },
- { },
- };
u32 misc_ctrl_reg;
u32 pci_state_reg, grc_misc_cfg;
u32 val;
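Hoisting the ID table to file scope lets DEFINE_PCI_DEVICE_TABLE attach its
section annotation; at this point in the tree the macro expanded to roughly

	#define DEFINE_PCI_DEVICE_TABLE(_table) \
		const struct pci_device_id _table[] __devinitconst

so a table referenced only from __devinit code can be discarded along with
it, rather than persisting as a function-local static.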
@@ -13359,7 +13396,45 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
- pcie_set_readrq(tp->pdev, 4096);
+ tp->pcie_readrq = 4096;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ u16 word;
+
+ pci_read_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKSTA,
+ &word);
+ switch (word & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ word &= PCI_EXP_LNKSTA_NLW;
+ word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
+ switch (word) {
+ case 2:
+ tp->pcie_readrq = 2048;
+ break;
+ case 4:
+ tp->pcie_readrq = 1024;
+ break;
+ }
+ break;
+
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ word &= PCI_EXP_LNKSTA_NLW;
+ word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
+ switch (word) {
+ case 1:
+ tp->pcie_readrq = 2048;
+ break;
+ case 2:
+ tp->pcie_readrq = 1024;
+ break;
+ case 4:
+ tp->pcie_readrq = 512;
+ break;
+ }
+ }
+ }
+
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
pci_read_config_word(tp->pdev,
tp->pcie_cap + PCI_EXP_LNKCTL,
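The 5719 block above derives the PCIe maximum read request size from the
negotiated link speed and width, presumably so outstanding DMA reads stay
within what the link can stream; the decode reduces to this table, with
4096 bytes kept as the default for any other combination:

	/* speed     width   readrq (bytes) */
	/* 2.5 GT/s   x2      2048 */
	/* 2.5 GT/s   x4      1024 */
	/* 5.0 GT/s   x1      2048 */
	/* 5.0 GT/s   x2      1024 */
	/* 5.0 GT/s   x4       512 */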
@@ -13722,8 +13797,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Preserve the APE MAC_MODE bits */
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
- tp->mac_mode = tr32(MAC_MODE) |
- MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
else
tp->mac_mode = TG3_DEF_MAC_MODE;
@@ -14153,13 +14227,19 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
#define TEST_BUFFER_SIZE 0x2000
+DEFINE_PCI_DEVICE_TABLE(dma_wait_state_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
+ { },
+};
+
static int __devinit tg3_test_dma(struct tg3 *tp)
{
dma_addr_t buf_dma;
u32 *buf, saved_dma_rwctrl;
int ret = 0;
- buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
+ buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
+ &buf_dma, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto out_nofree;
@@ -14321,11 +14401,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
}
if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
DMA_RWCTRL_WRITE_BNDRY_16) {
- static struct pci_device_id dma_wait_state_chipsets[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
- PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
- { },
- };
/* DMA test passed without adjusting DMA boundary,
* now look for chipsets that are known to expose the
@@ -14343,7 +14418,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
}
out:
- pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
+ dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
return ret;
}
@@ -14957,7 +15032,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
if (tp->fw)
release_firmware(tp->fw);
- flush_scheduled_work();
+ cancel_work_sync(&tp->reset_task);
if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
tg3_phy_fini(tp);
@@ -14996,7 +15071,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
- flush_scheduled_work();
+ flush_work_sync(&tp->reset_task);
tg3_phy_stop(tp);
tg3_netif_stop(tp);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4a1974804b9f..d62c8d937c82 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1094,13 +1094,19 @@
/* 0x3664 --> 0x36b0 unused */
#define TG3_CPMU_EEE_MODE 0x000036b0
-#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008
-#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080
-#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100
-#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200
-#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
-/* 0x36b4 --> 0x36b8 unused */
-
+#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004
+#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008
+#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040
+#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080
+#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100
+#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200
+#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
+#define TG3_CPMU_EEE_DBTMR1 0x000036b4
+#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
+#define TG3_CPMU_EEE_DBTMR2 0x000036b8
+#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
@@ -1327,6 +1333,8 @@
#define TG3_RDMA_RSRVCTRL_REG 0x00004900
#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
+#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
/* 0x4904 --> 0x4910 unused */
#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
@@ -2170,9 +2178,6 @@
#define MII_TG3_TEST1_CRC_EN 0x8000
/* Clause 45 expansion registers */
-#define TG3_CL45_D7_EEEADV_CAP 0x003c
-#define TG3_CL45_D7_EEEADV_CAP_100TX 0x0002
-#define TG3_CL45_D7_EEEADV_CAP_1000T 0x0004
#define TG3_CL45_D7_EEERES_STAT 0x803e
#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002
#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
@@ -2562,10 +2567,6 @@ struct ring_info {
DEFINE_DMA_UNMAP_ADDR(mapping);
};
-struct tg3_config_info {
- u32 flags;
-};
-
struct tg3_link_config {
/* Describes what we're trying to get. */
u32 advertising;
@@ -2713,17 +2714,17 @@ struct tg3_napi {
u32 last_irq_tag;
u32 int_mbox;
u32 coal_now;
- u32 tx_prod;
- u32 tx_cons;
- u32 tx_pending;
- u32 prodmbox;
- u32 consmbox;
+ u32 consmbox ____cacheline_aligned;
u32 rx_rcb_ptr;
u16 *rx_rcb_prod_idx;
struct tg3_rx_prodring_set prodring;
-
struct tg3_rx_buffer_desc *rx_rcb;
+
+ u32 tx_prod ____cacheline_aligned;
+ u32 tx_cons;
+ u32 tx_pending;
+ u32 prodmbox;
struct tg3_tx_buffer_desc *tx_ring;
struct ring_info *tx_buffers;
@@ -2946,6 +2947,7 @@ struct tg3 {
int pcix_cap;
int pcie_cap;
};
+ int pcie_readrq;
struct mii_bus *mdio_bus;
int mdio_irq[PHY_MAX_ADDR];
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 91e6c78271a3..4786497de03e 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -657,8 +657,9 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
#ifndef PCMCIA
/* finish figuring the shared RAM address */
if (cardpresent == TR_ISA) {
- static __u32 ram_bndry_mask[] =
- { 0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000 };
+ static const __u32 ram_bndry_mask[] = {
+ 0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000
+ };
__u32 new_base, rrr_32, chk_base, rbm;
rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb13db9..b13c6b040be3 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -964,7 +964,7 @@ static void de_set_media (struct de_private *de)
dw32(MacMode, macmode);
}
-static void de_next_media (struct de_private *de, u32 *media,
+static void de_next_media (struct de_private *de, const u32 *media,
unsigned int n_media)
{
unsigned int i;
@@ -1008,10 +1008,10 @@ static void de21040_media_timer (unsigned long data)
return;
if (de->media_type == DE_MEDIA_AUI) {
- u32 next_state = DE_MEDIA_TP;
+ static const u32 next_state = DE_MEDIA_TP;
de_next_media(de, &next_state, 1);
} else {
- u32 next_state = DE_MEDIA_AUI;
+ static const u32 next_state = DE_MEDIA_AUI;
de_next_media(de, &next_state, 1);
}
@@ -1136,13 +1136,19 @@ static void de21041_media_timer (unsigned long data)
* simply resets the PHY and reloads the current media settings.
*/
if (de->media_type == DE_MEDIA_AUI) {
- u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
+ static const u32 next_states[] = {
+ DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
+ };
de_next_media(de, next_states, ARRAY_SIZE(next_states));
} else if (de->media_type == DE_MEDIA_BNC) {
- u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
+ static const u32 next_states[] = {
+ DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
+ };
de_next_media(de, next_states, ARRAY_SIZE(next_states));
} else {
- u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
+ static const u32 next_states[] = {
+ DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
+ };
de_next_media(de, next_states, ARRAY_SIZE(next_states));
}
@@ -2021,7 +2027,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
de->media_timer.data = (unsigned long) de;
netif_carrier_off(dev);
- netif_stop_queue(dev);
/* wake up device, assign resources */
rc = pci_enable_device(pdev);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index a9f7d5d1a269..7064e035757a 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -688,9 +688,6 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
DMFE_DBUG(0, "dmfe_start_xmit", 0);
- /* Resource flag check */
- netif_stop_queue(dev);
-
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
pr_err("big packet = %d\n", (u16)skb->len);
@@ -698,6 +695,9 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ /* Resource flag check */
+ netif_stop_queue(dev);
+
spin_lock_irqsave(&db->lock, flags);
/* No Tx resource check, it should never happen normally */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 2c39f2591216..5c01e260f1ba 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1302,17 +1302,18 @@ static const struct net_device_ops tulip_netdev_ops = {
#endif
};
+DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
+ { },
+};
+
static int __devinit tulip_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct tulip_private *tp;
/* See note below on the multiport cards. */
static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
- static struct pci_device_id early_486_chipsets[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
- { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
- { },
- };
static int last_irq;
static int multiport_cnt; /* For four-port boards w/one EEPROM */
int i, irq;
@@ -1682,7 +1683,9 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
tp->full_duplex_lock = 1;
if (tulip_media_cap[tp->default_port] & MediaIsMII) {
- u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+ static const u16 media2advert[] = {
+ 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
+ };
tp->mii_advertise = media2advert[tp->default_port - 9];
tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 55f3a3e667a9..7599c457abd1 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -757,7 +757,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- gso.csum_start = skb->csum_start - skb_headroom(skb);
+ gso.csum_start = skb_checksum_start_offset(skb);
gso.csum_offset = skb->csum_offset;
} /* else everything is zero */
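skb_checksum_start_offset() names the computation it replaces here; assuming
the helper's usual <linux/skbuff.h> definition:

	static inline int skb_checksum_start_offset(const struct sk_buff *skb)
	{
		return skb->csum_start - skb_headroom(skb);
	}

i.e. the checksum start expressed as an offset from skb->data, which is the
form the virtio-net header field expects.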
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 5b83c3f35f47..a3c46f6a15e7 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1004,7 +1004,6 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, UTS_RELEASE);
strcpy(info->bus_info, pci_name(pci_dev));
}
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a4c3f5708246..acbdab3d66ca 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
ugeth_vdbg("%s: IN", __func__);
+ /*
+ * Tell the kernel the link is down.
+ * Must be done before disabling the controller
+ * or a deadlock may occur.
+ */
+ phy_stop(phydev);
+
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- /* Tell the kernel the link is down */
- phy_stop(phydev);
-
/* Mask all interrupts */
out_be32(ugeth->uccf->p_uccm, 0x00000000);
@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
/* Disable Rx and Tx */
clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
-
ucc_geth_memclean(ugeth);
}
@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev)
napi_disable(&ugeth->napi);
+ cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
+ phy_disconnect(ugeth->phydev);
+ ugeth->phydev = NULL;
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work)
* Must reset MAC *and* PHY. This is done by reopening
* the device.
*/
- ucc_geth_close(dev);
- ucc_geth_open(dev);
+ netif_tx_stop_all_queues(dev);
+ ucc_geth_stop(ugeth);
+ ucc_geth_init_mac(ugeth);
+ /* Must start PHY here */
+ phy_start(ugeth->phydev);
+ netif_tx_start_all_queues(dev);
}
netif_tx_schedule_all(dev);
@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
- netif_carrier_off(dev);
schedule_work(&ugeth->timeout_work);
}
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 05a95586f3c5..055b87ab4f07 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -899,7 +899,8 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
*/
#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT 512
+#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs
+ due to errata */
/* Gigabit Ethernet (1000 Mbps) */
#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
FIFO size */
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 52ffabe6db0e..6f600cced6e1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -196,6 +196,25 @@ config USB_NET_CDC_EEM
IEEE 802 "local assignment" bit is set in the address, a "usbX"
name is used instead.
+config USB_NET_CDC_NCM
+ tristate "CDC NCM support"
+ depends on USB_USBNET
+ default y
+ help
+ This driver provides support for CDC NCM (Network Control Model
+ Device USB Class Specification). The CDC NCM specification is
+ available from <http://www.usb.org/>.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module.
+
+ This driver should work with at least the following devices:
+ * ST-Ericsson M700 LTE FDD/TDD Mobile Broadband Modem (ref. design)
+ * ST-Ericsson M5730 HSPA+ Mobile Broadband Modem (reference design)
+ * ST-Ericsson M570 HSPA+ Mobile Broadband Modem (reference design)
+ * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
+ * Ericsson F5521gw Mobile Broadband Module
+
config USB_NET_DM9601
tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index a19b0259ae16..cac170301187 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -26,4 +26,5 @@ obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
+obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index aea4645be7f6..6140b56cce53 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1508,6 +1508,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x0b95, 0x1780),
.driver_info = (unsigned long) &ax88178_info,
}, {
+ // Logitec LAN-GTJ/U2A
+ USB_DEVICE (0x0789, 0x0160),
+ .driver_info = (unsigned long) &ax88178_info,
+}, {
// Linksys USB200M Rev 2
USB_DEVICE (0x13b1, 0x0018),
.driver_info = (unsigned long) &ax88772_info,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
new file mode 100644
index 000000000000..593c104ab199
--- /dev/null
+++ b/drivers/net/usb/cdc_ncm.c
@@ -0,0 +1,1213 @@
+/*
+ * cdc_ncm.c
+ *
+ * Copyright (C) ST-Ericsson 2010
+ * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
+ * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
+ *
+ * USB Host Driver for Network Control Model (NCM)
+ * http://www.usb.org/developers/devclass_docs/NCM10.zip
+ *
+ * The NCM encoding, decoding and initialization logic
+ * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ctype.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc.h>
+
+#define DRIVER_VERSION "30-Nov-2010"
+
+/* CDC NCM subclass 3.2.1 */
+#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
+
+/* Maximum NTB length */
+#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */
+#define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */
+
+/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
+#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
+
+#define CDC_NCM_MIN_TX_PKT 512 /* bytes */
+
+/* Default value for MaxDatagramSize */
+#define CDC_NCM_MAX_DATAGRAM_SIZE 2048 /* bytes */
+
+/*
+ * Maximum number of datagrams in the NCM Datagram Pointer Table, not
+ * counting the last NULL entry. Any additional datagrams in an NTB are
+ * discarded.
+ */
+#define CDC_NCM_DPT_DATAGRAMS_MAX 32
+
+/* Restart the timer if the number of queued datagrams is below this value */
+#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
+
+/* The following macro defines the minimum header space */
+#define CDC_NCM_MIN_HDR_SIZE \
+ (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
+ (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
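+/*
+ * Assuming the usual <linux/usb/cdc.h> layouts this works out to
+ * 12 (nth16) + 8 (ndp16) + 33 * 4 (dpe16, incl. the NULL entry)
+ * = 152 bytes.
+ */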
+
+struct connection_speed_change {
+ __le32 USBitRate; /* holds 3GPP downlink value, bits per second */
+ __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */
+} __attribute__ ((packed));
+
+struct cdc_ncm_data {
+ struct usb_cdc_ncm_nth16 nth16;
+ struct usb_cdc_ncm_ndp16 ndp16;
+ struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
+};
+
+struct cdc_ncm_ctx {
+ struct cdc_ncm_data rx_ncm;
+ struct cdc_ncm_data tx_ncm;
+ struct usb_cdc_ncm_ntb_parameters ncm_parm;
+ struct timer_list tx_timer;
+
+ const struct usb_cdc_ncm_desc *func_desc;
+ const struct usb_cdc_header_desc *header_desc;
+ const struct usb_cdc_union_desc *union_desc;
+ const struct usb_cdc_ether_desc *ether_desc;
+
+ struct net_device *netdev;
+ struct usb_device *udev;
+ struct usb_host_endpoint *in_ep;
+ struct usb_host_endpoint *out_ep;
+ struct usb_host_endpoint *status_ep;
+ struct usb_interface *intf;
+ struct usb_interface *control;
+ struct usb_interface *data;
+
+ struct sk_buff *tx_curr_skb;
+ struct sk_buff *tx_rem_skb;
+
+ spinlock_t mtx;
+
+ u32 tx_timer_pending;
+ u32 tx_curr_offset;
+ u32 tx_curr_last_offset;
+ u32 tx_curr_frame_num;
+ u32 rx_speed;
+ u32 tx_speed;
+ u32 rx_max;
+ u32 tx_max;
+ u32 max_datagram_size;
+ u16 tx_max_datagrams;
+ u16 tx_remainder;
+ u16 tx_modulus;
+ u16 tx_ndp_modulus;
+ u16 tx_seq;
+ u16 connected;
+ u8 data_claimed;
+ u8 control_claimed;
+};
+
+static void cdc_ncm_tx_timeout(unsigned long arg);
+static const struct driver_info cdc_ncm_info;
+static struct usb_driver cdc_ncm_driver;
+static struct ethtool_ops cdc_ncm_ethtool_ops;
+
+static const struct usb_device_id cdc_devs[] = {
+ { USB_INTERFACE_INFO(USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_ncm_info,
+ },
+ {
+ },
+};
+
+MODULE_DEVICE_TABLE(usb, cdc_devs);
+
+static void
+cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ strncpy(info->driver, dev->driver_name, sizeof(info->driver));
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strncpy(info->fw_version, dev->driver_info->description,
+ sizeof(info->fw_version));
+ usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static int
+cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
+ void *data, u16 flags, u16 *actlen, u16 timeout)
+{
+ int err;
+
+ err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
+ usb_rcvctrlpipe(ctx->udev, 0) :
+ usb_sndctrlpipe(ctx->udev, 0),
+ req->bNotificationType, req->bmRequestType,
+ req->wValue,
+ req->wIndex, data,
+ req->wLength, timeout);
+
+ if (err < 0) {
+ if (actlen)
+ *actlen = 0;
+ return err;
+ }
+
+ if (actlen)
+ *actlen = err;
+
+ return 0;
+}
+
+static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
+{
+ struct usb_cdc_notification req;
+ u32 val;
+ __le16 max_datagram_size;
+ u8 flags;
+ u8 iface_no;
+ int err;
+
+ iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
+ req.wValue = 0;
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
+
+ err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
+ if (err) {
+ pr_debug("failed GET_NTB_PARAMETERS\n");
+ return 1;
+ }
+
+ /* read correct set of parameters according to device mode */
+ ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
+ ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
+ ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
+ ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
+ ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+
+ if (ctx->func_desc != NULL)
+ flags = ctx->func_desc->bmNetworkCapabilities;
+ else
+ flags = 0;
+
+ pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
+ "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
+ "wNdpOutAlignment=%u flags=0x%x\n",
+ ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
+ ctx->tx_ndp_modulus, flags);
+
+ /* max number of tx datagrams, not counting the terminating NULL entry */
+ ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
+
+ /* verify maximum size of received NTB in bytes */
+ if ((ctx->rx_max <
+ (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
+ (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
+ pr_debug("Using default maximum receive length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_RX);
+ ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
+ }
+
+ /* verify maximum size of transmitted NTB in bytes */
+ if ((ctx->tx_max <
+ (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
+ (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
+ pr_debug("Using default maximum transmit length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_TX);
+ ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+ }
+
+ /*
+ * verify that the structure alignment is:
+ * - power of two
+ * - less than the maximum transmit length
+ * - not less than four bytes
+ */
+ val = ctx->tx_ndp_modulus;
+
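+ /*
+ * ((-val) & val) isolates the lowest set bit, so it equals val
+ * exactly when val is a nonzero power of two.
+ */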
+ if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
+ (val != ((-val) & val)) || (val >= ctx->tx_max)) {
+ pr_debug("Using default alignment: 4 bytes\n");
+ ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
+ }
+
+ /*
+ * verify that the payload alignment is:
+ * - power of two
+ * - less than the maximum transmit length
+ * - not less than four bytes
+ */
+ val = ctx->tx_modulus;
+
+ if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
+ (val != ((-val) & val)) || (val >= ctx->tx_max)) {
+ pr_debug("Using default transmit modulus: 4 bytes\n");
+ ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
+ }
+
+ /* verify the payload remainder */
+ if (ctx->tx_remainder >= ctx->tx_modulus) {
+ pr_debug("Using default transmit remainder: 0 bytes\n");
+ ctx->tx_remainder = 0;
+ }
+
+ /* adjust TX-remainder according to NCM specification. */
+ ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) &
+ (ctx->tx_modulus - 1));
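+ /*
+ * i.e. bias the remainder so that the payload after the Ethernet
+ * header, not the frame start, lands on the divisor boundary, per
+ * the definition of wNdpOutPayloadRemainder.
+ */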
+
+ /* additional configuration */
+
+ /* set CRC Mode */
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_SET_CRC_MODE;
+ req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = 0;
+
+ err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+ if (err)
+ pr_debug("Setting CRC mode off failed\n");
+
+ /* set NTB format */
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
+ req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = 0;
+
+ err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+ if (err)
+ pr_debug("Setting NTB format to 16-bit failed\n");
+
+ /* read Max Datagram Size and derive the MTU from it */
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
+ req.wValue = 0;
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = cpu_to_le16(2);
+
+ err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
+ if (err) {
+ pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
+ CDC_NCM_MIN_DATAGRAM_SIZE);
+ /* use default */
+ ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+ } else {
+ ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+
+ if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+ else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
+ }
+
+ if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
+ ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
+
+ return 0;
+}
+
+static void
+cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
+{
+ struct usb_host_endpoint *e;
+ u8 ep;
+
+ for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
+
+ e = intf->cur_altsetting->endpoint + ep;
+ switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_INT:
+ if (usb_endpoint_dir_in(&e->desc)) {
+ if (ctx->status_ep == NULL)
+ ctx->status_ep = e;
+ }
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ if (usb_endpoint_dir_in(&e->desc)) {
+ if (ctx->in_ep == NULL)
+ ctx->in_ep = e;
+ } else {
+ if (ctx->out_ep == NULL)
+ ctx->out_ep = e;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
+{
+ if (ctx == NULL)
+ return;
+
+ del_timer_sync(&ctx->tx_timer);
+
+ if (ctx->data_claimed) {
+ usb_set_intfdata(ctx->data, NULL);
+ usb_driver_release_interface(driver_of(ctx->intf), ctx->data);
+ }
+
+ if (ctx->control_claimed) {
+ usb_set_intfdata(ctx->control, NULL);
+ usb_driver_release_interface(driver_of(ctx->intf),
+ ctx->control);
+ }
+
+ if (ctx->tx_rem_skb != NULL) {
+ dev_kfree_skb_any(ctx->tx_rem_skb);
+ ctx->tx_rem_skb = NULL;
+ }
+
+ if (ctx->tx_curr_skb != NULL) {
+ dev_kfree_skb_any(ctx->tx_curr_skb);
+ ctx->tx_curr_skb = NULL;
+ }
+
+ kfree(ctx);
+}
+
+static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct cdc_ncm_ctx *ctx;
+ struct usb_driver *driver;
+ u8 *buf;
+ int len;
+ int temp;
+ u8 iface_no;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+ goto error;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ init_timer(&ctx->tx_timer);
+ spin_lock_init(&ctx->mtx);
+ ctx->netdev = dev->net;
+
+ /* store ctx pointer in device data field */
+ dev->data[0] = (unsigned long)ctx;
+
+ /* get some pointers */
+ driver = driver_of(intf);
+ buf = intf->cur_altsetting->extra;
+ len = intf->cur_altsetting->extralen;
+
+ ctx->udev = dev->udev;
+ ctx->intf = intf;
+
+ /* parse through descriptors associated with control interface */
+ while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
+
+ if (buf[1] != USB_DT_CS_INTERFACE)
+ goto advance;
+
+ switch (buf[2]) {
+ case USB_CDC_UNION_TYPE:
+ if (buf[0] < sizeof(*(ctx->union_desc)))
+ break;
+
+ ctx->union_desc =
+ (const struct usb_cdc_union_desc *)buf;
+
+ ctx->control = usb_ifnum_to_if(dev->udev,
+ ctx->union_desc->bMasterInterface0);
+ ctx->data = usb_ifnum_to_if(dev->udev,
+ ctx->union_desc->bSlaveInterface0);
+ break;
+
+ case USB_CDC_ETHERNET_TYPE:
+ if (buf[0] < sizeof(*(ctx->ether_desc)))
+ break;
+
+ ctx->ether_desc =
+ (const struct usb_cdc_ether_desc *)buf;
+
+ dev->hard_mtu =
+ le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+
+ if (dev->hard_mtu <
+ (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
+ dev->hard_mtu =
+ CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
+
+ else if (dev->hard_mtu >
+ (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
+ dev->hard_mtu =
+ CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
+ break;
+
+ case USB_CDC_NCM_TYPE:
+ if (buf[0] < sizeof(*(ctx->func_desc)))
+ break;
+
+ ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
+ break;
+
+ default:
+ break;
+ }
+advance:
+ /* advance to next descriptor */
+ temp = buf[0];
+ buf += temp;
+ len -= temp;
+ }
+
+ /* check if we got everything */
+ if ((ctx->control == NULL) || (ctx->data == NULL) ||
+ (ctx->ether_desc == NULL))
+ goto error;
+
+ /* claim interfaces, if any */
+ if (ctx->data != intf) {
+ temp = usb_driver_claim_interface(driver, ctx->data, dev);
+ if (temp)
+ goto error;
+ ctx->data_claimed = 1;
+ }
+
+ if (ctx->control != intf) {
+ temp = usb_driver_claim_interface(driver, ctx->control, dev);
+ if (temp)
+ goto error;
+ ctx->control_claimed = 1;
+ }
+
+ iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
+
+ /* reset data interface */
+ temp = usb_set_interface(dev->udev, iface_no, 0);
+ if (temp)
+ goto error;
+
+ /* initialize data interface */
+ if (cdc_ncm_setup(ctx))
+ goto error;
+
+ /* configure data interface */
+ temp = usb_set_interface(dev->udev, iface_no, 1);
+ if (temp)
+ goto error;
+
+ cdc_ncm_find_endpoints(ctx, ctx->data);
+ cdc_ncm_find_endpoints(ctx, ctx->control);
+
+ if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
+ (ctx->status_ep == NULL))
+ goto error;
+
+ dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
+
+ usb_set_intfdata(ctx->data, dev);
+ usb_set_intfdata(ctx->control, dev);
+ usb_set_intfdata(ctx->intf, dev);
+
+ temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
+ if (temp)
+ goto error;
+
+ dev_info(&dev->udev->dev, "MAC-Address: "
+ "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ dev->net->dev_addr[0], dev->net->dev_addr[1],
+ dev->net->dev_addr[2], dev->net->dev_addr[3],
+ dev->net->dev_addr[4], dev->net->dev_addr[5]);
+
+ dev->in = usb_rcvbulkpipe(dev->udev,
+ ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ dev->out = usb_sndbulkpipe(dev->udev,
+ ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ dev->status = ctx->status_ep;
+ dev->rx_urb_size = ctx->rx_max;
+
+ /*
+ * We should get an event when the network connection is "connected" or
+ * "disconnected". Set the connection "disconnected" (carrier off) during
+ * attach, so the IP network stack does not start IPv6 negotiation or
+ * similar activity prematurely.
+ */
+ netif_carrier_off(dev->net);
+ ctx->tx_speed = ctx->rx_speed = 0;
+ return 0;
+
+error:
+ cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
+ dev->data[0] = 0;
+ dev_info(&dev->udev->dev, "Descriptor failure\n");
+ return -ENODEV;
+}
+
+static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ struct usb_driver *driver;
+
+ if (ctx == NULL)
+ return; /* no setup */
+
+ driver = driver_of(intf);
+
+ usb_set_intfdata(ctx->data, NULL);
+ usb_set_intfdata(ctx->control, NULL);
+ usb_set_intfdata(ctx->intf, NULL);
+
+ /* release interfaces, if any */
+ if (ctx->data_claimed) {
+ usb_driver_release_interface(driver, ctx->data);
+ ctx->data_claimed = 0;
+ }
+
+ if (ctx->control_claimed) {
+ usb_driver_release_interface(driver, ctx->control);
+ ctx->control_claimed = 0;
+ }
+
+ cdc_ncm_free(ctx);
+}
+
+static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max)
+{
+ if (first >= max)
+ return;
+ if (first >= end)
+ return;
+ if (end > max)
+ end = max;
+ memset(ptr + first, 0, end - first);
+}
+
+static struct sk_buff *
+cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
+{
+ struct sk_buff *skb_out;
+ u32 rem;
+ u32 offset;
+ u32 last_offset;
+ u16 n = 0;
+ u8 timeout = 0;
+
+ /* if there is a remaining skb, it gets priority */
+ if (skb != NULL)
+ swap(skb, ctx->tx_rem_skb);
+ else
+ timeout = 1;
+
+ /*
+ * +----------------+
+ * | skb_out |
+ * +----------------+
+ * ^ offset
+ * ^ last_offset
+ */
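+ /*
+ * "offset" tracks the aligned write position for the next datagram;
+ * "last_offset" marks the end of valid data before alignment padding.
+ */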
+
+ /* check if we are resuming an OUT skb */
+ if (ctx->tx_curr_skb != NULL) {
+ /* pop variables */
+ skb_out = ctx->tx_curr_skb;
+ offset = ctx->tx_curr_offset;
+ last_offset = ctx->tx_curr_last_offset;
+ n = ctx->tx_curr_frame_num;
+
+ } else {
+ /* reset variables */
+ skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
+ if (skb_out == NULL) {
+ if (skb != NULL) {
+ dev_kfree_skb_any(skb);
+ ctx->netdev->stats.tx_dropped++;
+ }
+ goto exit_no_skb;
+ }
+
+ /* make room for NTH and NDP */
+ offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+ ctx->tx_ndp_modulus) +
+ sizeof(struct usb_cdc_ncm_ndp16) +
+ (ctx->tx_max_datagrams + 1) *
+ sizeof(struct usb_cdc_ncm_dpe16);
+
+ /* store last valid offset before alignment */
+ last_offset = offset;
+ /* align first Datagram offset correctly */
+ offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
+ /* zero buffer till the first IP datagram */
+ cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
+ n = 0;
+ ctx->tx_curr_frame_num = 0;
+ }
+
+ for (; n < ctx->tx_max_datagrams; n++) {
+ /* check if end of transmit buffer is reached */
+ if (offset >= ctx->tx_max)
+ break;
+
+ /* compute maximum buffer size */
+ rem = ctx->tx_max - offset;
+
+ if (skb == NULL) {
+ skb = ctx->tx_rem_skb;
+ ctx->tx_rem_skb = NULL;
+
+ /* check for end of skb */
+ if (skb == NULL)
+ break;
+ }
+
+ if (skb->len > rem) {
+ if (n == 0) {
+ /* won't fit, MTU problem? */
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ ctx->netdev->stats.tx_dropped++;
+ } else {
+ /* no room for skb - store for later */
+ if (ctx->tx_rem_skb != NULL) {
+ dev_kfree_skb_any(ctx->tx_rem_skb);
+ ctx->netdev->stats.tx_dropped++;
+ }
+ ctx->tx_rem_skb = skb;
+ skb = NULL;
+
+ /* loop one more time */
+ timeout = 1;
+ }
+ break;
+ }
+
+ memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);
+
+ ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
+ ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
+
+ /* update offset */
+ offset += skb->len;
+
+ /* store last valid offset before alignment */
+ last_offset = offset;
+
+ /* align offset correctly */
+ offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
+
+ /* zero padding */
+ cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
+ ctx->tx_max);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+
+ /* free up any dangling skb */
+ if (skb != NULL) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ ctx->netdev->stats.tx_dropped++;
+ }
+
+ ctx->tx_curr_frame_num = n;
+
+ if (n == 0) {
+ /* wait for more frames */
+ /* push variables */
+ ctx->tx_curr_skb = skb_out;
+ ctx->tx_curr_offset = offset;
+ ctx->tx_curr_last_offset = last_offset;
+ goto exit_no_skb;
+
+ } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
+ /* wait for more frames */
+ /* push variables */
+ ctx->tx_curr_skb = skb_out;
+ ctx->tx_curr_offset = offset;
+ ctx->tx_curr_last_offset = last_offset;
+ /* set the pending count */
+ if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
+ ctx->tx_timer_pending = 2;
+ goto exit_no_skb;
+
+ } else {
+ /* frame goes out */
+ /* variables will be reset at next call */
+ }
+
+ /* check for overflow */
+ if (last_offset > ctx->tx_max)
+ last_offset = ctx->tx_max;
+
+ /* revert offset */
+ offset = last_offset;
+
+ /*
+ * If the collected data size is less than or equal to
+ * CDC_NCM_MIN_TX_PKT bytes, we send the buffer as it is.
+ * If we get more data, it is more efficient for a USB HS mobile
+ * device with a DMA engine to receive a full-size NTB than to
+ * cancel the DMA transfer and receive a short packet.
+ */
+ if (offset > CDC_NCM_MIN_TX_PKT)
+ offset = ctx->tx_max;
+
+ /* final zero padding */
+ cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
+
+ /* store last offset */
+ last_offset = offset;
+
+ if ((last_offset < ctx->tx_max) && ((last_offset %
+ le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
+ /* force short packet */
+ *(((u8 *)skb_out->data) + last_offset) = 0;
+ last_offset++;
+ }
+
+ /* zero the rest of the DPEs plus the last NULL entry */
+ for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
+ ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
+ ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
+ }
+
+ /* fill out 16-bit NTB header */
+ ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ ctx->tx_ncm.nth16.wHeaderLength =
+ cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
+ ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
+ ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
+ ctx->tx_ncm.nth16.wFpIndex =
+ cpu_to_le16(ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+ ctx->tx_ndp_modulus));
+
+ memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
+ ctx->tx_seq++;
+
+ /* fill out 16-bit NDP table */
+ ctx->tx_ncm.ndp16.dwSignature =
+ cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
+ rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
+ sizeof(struct usb_cdc_ncm_dpe16));
+ ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
+ ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
+
+ memcpy(((u8 *)skb_out->data) + le16_to_cpu(ctx->tx_ncm.nth16.wFpIndex),
+ &(ctx->tx_ncm.ndp16),
+ sizeof(ctx->tx_ncm.ndp16));
+
+ memcpy(((u8 *)skb_out->data) + le16_to_cpu(ctx->tx_ncm.nth16.wFpIndex) +
+ sizeof(ctx->tx_ncm.ndp16),
+ &(ctx->tx_ncm.dpe16),
+ (ctx->tx_curr_frame_num + 1) *
+ sizeof(struct usb_cdc_ncm_dpe16));
+
+ /* set frame length */
+ skb_put(skb_out, last_offset);
+
+ /* return skb */
+ ctx->tx_curr_skb = NULL;
+ return skb_out;
+
+exit_no_skb:
+ return NULL;
+}
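
The first-datagram offset computed above is two alignment steps around the NTH16/NDP16 headers plus a per-datagram remainder. A minimal userspace sketch of that arithmetic; the structure sizes (12-byte NTH16, 8-byte NDP16, 4-byte DPE16) and the example moduli are assumptions based on the NCM16 field layout, not taken from the kernel headers:

#include <stdint.h>
#include <stdio.h>

/* power-of-two alignment, same as the kernel's ALIGN() */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

static uint32_t first_datagram_offset(uint32_t ndp_modulus, uint32_t tx_modulus,
                                      uint32_t remainder, uint32_t max_datagrams)
{
    uint32_t off = ALIGN_UP(12, ndp_modulus)    /* NDP16 follows the aligned NTH16 */
                 + 8                            /* NDP16 header */
                 + (max_datagrams + 1) * 4;     /* DPE16 array incl. the NULL entry */

    return ALIGN_UP(off, tx_modulus) + remainder; /* datagram payload alignment */
}

int main(void)
{
    /* e.g. 4-byte NDP alignment, 4-byte payload alignment, no remainder */
    printf("%u\n", first_datagram_offset(4, 4, 0, 16)); /* prints 88 */
    return 0;
}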
+
+static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
+{
+ /* start timer, if not already started */
+ if (timer_pending(&ctx->tx_timer) == 0) {
+ ctx->tx_timer.function = &cdc_ncm_tx_timeout;
+ ctx->tx_timer.data = (unsigned long)ctx;
+ ctx->tx_timer.expires = jiffies + ((HZ + 999) / 1000);
+ add_timer(&ctx->tx_timer);
+ }
+}
+
+static void cdc_ncm_tx_timeout(unsigned long arg)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)arg;
+ u8 restart;
+
+ spin_lock(&ctx->mtx);
+ if (ctx->tx_timer_pending != 0) {
+ ctx->tx_timer_pending--;
+ restart = 1;
+ } else
+ restart = 0;
+
+ spin_unlock(&ctx->mtx);
+
+ if (restart)
+ cdc_ncm_tx_timeout_start(ctx);
+ else if (ctx->netdev != NULL)
+ usbnet_start_xmit(NULL, ctx->netdev);
+}
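
The timer armed above expires after (HZ + 999) / 1000 jiffies, that is, the ceiling of HZ / 1000: the shortest representable interval of at least one millisecond regardless of the configured tick rate. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
    /* (HZ + 999) / 1000 == ceil(HZ / 1000): one jiffy for any HZ <= 1000 */
    int hz;

    for (hz = 100; hz <= 1000; hz += 150)
        printf("HZ=%d -> %d jiffies\n", hz, (hz + 999) / 1000);
    return 0;
}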
+
+static struct sk_buff *
+cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+ struct sk_buff *skb_out;
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u8 need_timer = 0;
+
+ /*
+ * The Ethernet API we are using does not support transmitting
+ * multiple Ethernet frames in a single call. This driver will
+ * accumulate multiple Ethernet frames and send out a larger
+ * USB frame when the USB buffer is full or when a one-jiffy
+ * timeout expires.
+ */
+ if (ctx == NULL)
+ goto error;
+
+ spin_lock(&ctx->mtx);
+ skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
+ if (ctx->tx_curr_skb != NULL)
+ need_timer = 1;
+ spin_unlock(&ctx->mtx);
+
+ /* Start timer, if there is a remaining skb */
+ if (need_timer)
+ cdc_ncm_tx_timeout_start(ctx);
+
+ if (skb_out)
+ dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+ return skb_out;
+
+error:
+ if (skb != NULL)
+ dev_kfree_skb_any(skb);
+
+ return NULL;
+}
+
+static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+ struct sk_buff *skb;
+ struct cdc_ncm_ctx *ctx;
+ int sumlen;
+ int actlen;
+ int temp;
+ int nframes;
+ int x;
+ int offset;
+
+ ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ if (ctx == NULL)
+ goto error;
+
+ actlen = skb_in->len;
+ sumlen = CDC_NCM_NTB_MAX_SIZE_RX;
+
+ if (actlen < (sizeof(ctx->rx_ncm.nth16) + sizeof(ctx->rx_ncm.ndp16))) {
+ pr_debug("frame too short\n");
+ goto error;
+ }
+
+ memcpy(&(ctx->rx_ncm.nth16), ((u8 *)skb_in->data),
+ sizeof(ctx->rx_ncm.nth16));
+
+ if (le32_to_cpu(ctx->rx_ncm.nth16.dwSignature) !=
+ USB_CDC_NCM_NTH16_SIGN) {
+ pr_debug("invalid NTH16 signature <%u>\n",
+ le32_to_cpu(ctx->rx_ncm.nth16.dwSignature));
+ goto error;
+ }
+
+ temp = le16_to_cpu(ctx->rx_ncm.nth16.wBlockLength);
+ if (temp > sumlen) {
+ pr_debug("unsupported NTB block length %u/%u\n", temp, sumlen);
+ goto error;
+ }
+
+ temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
+ if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
+ pr_debug("invalid DPT16 index\n");
+ goto error;
+ }
+
+ memcpy(&(ctx->rx_ncm.ndp16), ((u8 *)skb_in->data) + temp,
+ sizeof(ctx->rx_ncm.ndp16));
+
+ if (le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature) !=
+ USB_CDC_NCM_NDP16_NOCRC_SIGN) {
+ pr_debug("invalid DPT16 signature <%u>\n",
+ le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature));
+ goto error;
+ }
+
+ if (le16_to_cpu(ctx->rx_ncm.ndp16.wLength) <
+ USB_CDC_NCM_NDP16_LENGTH_MIN) {
+ pr_debug("invalid DPT16 length <%u>\n",
+ le16_to_cpu(ctx->rx_ncm.ndp16.wLength));
+ goto error;
+ }
+
+ nframes = ((le16_to_cpu(ctx->rx_ncm.ndp16.wLength) -
+ sizeof(struct usb_cdc_ncm_ndp16)) /
+ sizeof(struct usb_cdc_ncm_dpe16));
+ nframes--; /* we process NDP entries except for the last one */
+
+ pr_debug("nframes = %u\n", nframes);
+
+ temp += sizeof(ctx->rx_ncm.ndp16);
+
+ if ((temp + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) > actlen) {
+ pr_debug("Invalid nframes = %d\n", nframes);
+ goto error;
+ }
+
+ if (nframes > CDC_NCM_DPT_DATAGRAMS_MAX) {
+ pr_debug("Truncating number of frames from %u to %u\n",
+ nframes, CDC_NCM_DPT_DATAGRAMS_MAX);
+ nframes = CDC_NCM_DPT_DATAGRAMS_MAX;
+ }
+
+ memcpy(&(ctx->rx_ncm.dpe16), ((u8 *)skb_in->data) + temp,
+ nframes * (sizeof(struct usb_cdc_ncm_dpe16)));
+
+ for (x = 0; x < nframes; x++) {
+ offset = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramIndex);
+ temp = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramLength);
+
+ /*
+ * CDC NCM ch. 3.7
+ * All entries after first NULL entry are to be ignored
+ */
+ if ((offset == 0) || (temp == 0)) {
+ if (!x)
+ goto error; /* empty NTB */
+ break;
+ }
+
+ /* sanity checking */
+ if (((offset + temp) > actlen) ||
+ (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
+ pr_debug("invalid frame detected (ignored)"
+ "offset[%u]=%u, length=%u, skb=%p\n",
+ x, offset, temp, skb);
+ if (!x)
+ goto error;
+ break;
+
+ } else {
+ skb = skb_clone(skb_in, GFP_ATOMIC);
+ if (skb == NULL)
+ goto error;
+ skb->len = temp;
+ skb->data = ((u8 *)skb_in->data) + offset;
+ skb_set_tail_pointer(skb, temp);
+ usbnet_skb_return(dev, skb);
+ }
+ }
+ return 1;
+error:
+ return 0;
+}
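
The validation sequence above (NTH16 signature and block length, then NDP16 signature and length, then the DPE16 walk terminated by the first NULL entry) can be restated as a stripped-down userspace parser. The two signature constants are the spec's "NCMH" and no-CRC "NCM0" values, assumed here to match USB_CDC_NCM_NTH16_SIGN and USB_CDC_NCM_NDP16_NOCRC_SIGN; field offsets are assumptions from the NCM16 layout:

#include <stdint.h>
#include <stdio.h>

#define NTH16_SIGN 0x484d434eU /* "NCMH" */
#define NDP16_SIGN 0x304d434eU /* "NCM0" */

static uint16_t get_le16(const uint8_t *p) { return (uint16_t)(p[0] | p[1] << 8); }
static uint32_t get_le32(const uint8_t *p)
{
    return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}
static void put_le16(uint8_t *p, uint16_t v) { p[0] = (uint8_t)v; p[1] = (uint8_t)(v >> 8); }
static void put_le32(uint8_t *p, uint32_t v) { put_le16(p, (uint16_t)v); put_le16(p + 2, (uint16_t)(v >> 16)); }

/* walk one NTB16 block; returns the number of datagrams, or -1 on error */
static int walk_ntb16(const uint8_t *buf, uint16_t len)
{
    uint16_t ndp_off, ndp_len, i;
    int n = 0;

    if (len < 12 + 8 || get_le32(buf) != NTH16_SIGN)
        return -1;
    ndp_off = get_le16(buf + 10);               /* wFpIndex */
    if (ndp_off + 8u > len || get_le32(buf + ndp_off) != NDP16_SIGN)
        return -1;
    ndp_len = get_le16(buf + ndp_off + 4);      /* wLength */
    for (i = ndp_off + 8; i + 4u <= ndp_off + ndp_len; i += 4) {
        uint16_t idx = get_le16(buf + i);       /* wDatagramIndex */
        uint16_t dlen = get_le16(buf + i + 2);  /* wDatagramLength */

        if (idx == 0 || dlen == 0)              /* first NULL entry terminates */
            break;
        if (idx + (uint32_t)dlen > len)
            return -1;
        printf("datagram %d: offset %u, length %u\n", n++, idx, dlen);
    }
    return n;
}

int main(void)
{
    uint8_t ntb[64] = { 0 };

    put_le32(ntb, NTH16_SIGN);      put_le16(ntb + 4, 12);  /* wHeaderLength */
    put_le16(ntb + 8, 64);          put_le16(ntb + 10, 12); /* wBlockLength, wFpIndex */
    put_le32(ntb + 12, NDP16_SIGN); put_le16(ntb + 16, 16); /* NDP16 + two DPE16 entries */
    put_le16(ntb + 20, 32);         put_le16(ntb + 22, 14); /* one 14-byte datagram at 32 */
    return walk_ntb16(ntb, 64) == 1 ? 0 : 1;
}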
+
+static void
+cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
+ struct connection_speed_change *data)
+{
+ uint32_t rx_speed = le32_to_cpu(data->USBitRate);
+ uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
+
+ /*
+ * Currently the USB-NET API does not support reporting the actual
+ * device speed, so just log it instead.
+ */
+ if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
+ ctx->tx_speed = tx_speed;
+ ctx->rx_speed = rx_speed;
+
+ if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
+ printk(KERN_INFO KBUILD_MODNAME
+ ": %s: %u mbit/s downlink "
+ "%u mbit/s uplink\n",
+ ctx->netdev->name,
+ (unsigned int)(rx_speed / 1000000U),
+ (unsigned int)(tx_speed / 1000000U));
+ } else {
+ printk(KERN_INFO KBUILD_MODNAME
+ ": %s: %u kbit/s downlink "
+ "%u kbit/s uplink\n",
+ ctx->netdev->name,
+ (unsigned int)(rx_speed / 1000U),
+ (unsigned int)(tx_speed / 1000U));
+ }
+ }
+}
+
+static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
+{
+ struct cdc_ncm_ctx *ctx;
+ struct usb_cdc_notification *event;
+
+ ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ if (urb->actual_length < sizeof(*event))
+ return;
+
+ /* test for split data in 8-byte chunks */
+ if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
+ cdc_ncm_speed_change(ctx,
+ (struct connection_speed_change *)urb->transfer_buffer);
+ return;
+ }
+
+ event = urb->transfer_buffer;
+
+ switch (event->bNotificationType) {
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+ /*
+ * According to the CDC NCM specification ch.7.1
+ * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
+ * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
+ */
+ ctx->connected = le16_to_cpu(event->wValue);
+
+ printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
+ " %sconnected\n",
+ ctx->netdev->name, ctx->connected ? "" : "dis");
+
+ if (ctx->connected)
+ netif_carrier_on(dev->net);
+ else {
+ netif_carrier_off(dev->net);
+ ctx->tx_speed = ctx->rx_speed = 0;
+ }
+ break;
+
+ case USB_CDC_NOTIFY_SPEED_CHANGE:
+ if (urb->actual_length <
+ (sizeof(*event) + sizeof(struct connection_speed_change)))
+ set_bit(EVENT_STS_SPLIT, &dev->flags);
+ else
+ cdc_ncm_speed_change(ctx,
+ (struct connection_speed_change *) &event[1]);
+ break;
+
+ default:
+ dev_err(&dev->udev->dev, "NCM: unexpected "
+ "notification 0x%02x!\n", event->bNotificationType);
+ break;
+ }
+}
+
+static int cdc_ncm_check_connect(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx;
+
+ ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ if (ctx == NULL)
+ return 1; /* disconnected */
+
+ return !ctx->connected;
+}
+
+static int
+cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
+{
+ return usbnet_probe(udev, prod);
+}
+
+static void cdc_ncm_disconnect(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ if (dev == NULL)
+ return; /* already disconnected */
+
+ usbnet_disconnect(intf);
+}
+
+static int cdc_ncm_manage_power(struct usbnet *dev, int status)
+{
+ dev->intf->needs_remote_wakeup = status;
+ return 0;
+}
+
+static const struct driver_info cdc_ncm_info = {
+ .description = "CDC NCM",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .check_connect = cdc_ncm_check_connect,
+ .manage_power = cdc_ncm_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+};
+
+static struct usb_driver cdc_ncm_driver = {
+ .name = "cdc_ncm",
+ .id_table = cdc_devs,
+ .probe = cdc_ncm_probe,
+ .disconnect = cdc_ncm_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .supports_autosuspend = 1,
+};
+
+static struct ethtool_ops cdc_ncm_ethtool_ops = {
+ .get_drvinfo = cdc_ncm_get_drvinfo,
+ .get_link = usbnet_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+static int __init cdc_ncm_init(void)
+{
+ printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION "\n");
+ return usb_register(&cdc_ncm_driver);
+}
+
+module_init(cdc_ncm_init);
+
+static void __exit cdc_ncm_exit(void)
+{
+ usb_deregister(&cdc_ncm_driver);
+}
+
+module_exit(cdc_ncm_exit);
+
+MODULE_AUTHOR("Hans Petter Selasky");
+MODULE_DESCRIPTION("USB CDC NCM host driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index b154a94de03e..bed8fcedff49 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -958,10 +958,6 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
/* Packet is complete. Inject into stack. */
/* We have IP packet here */
odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP);
- /* don't check it */
- odev->skb_rx_buf->ip_summed =
- CHECKSUM_UNNECESSARY;
-
skb_reset_mac_header(odev->skb_rx_buf);
/* Ship it off to the kernel */
@@ -1001,6 +997,18 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
}
}
+static void fix_crc_bug(struct urb *urb, __le16 max_packet_size)
+{
+ static const u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
+ u32 rest = urb->actual_length % le16_to_cpu(max_packet_size);
+
+ if (((rest == 5) || (rest == 6)) &&
+ !memcmp(((u8 *)urb->transfer_buffer) + urb->actual_length - 4,
+ crc_check, 4)) {
+ urb->actual_length -= 4;
+ }
+}
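
The quirk above strips a four-byte 0xDEADBEEF trailer that buggy firmware appends, but only when the residue of the final USB packet would be 5 or 6 bytes. A hypothetical unit-style restatement of the same heuristic, with struct urb replaced by plain arguments:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t fixed_length(const uint8_t *buf, uint32_t len, uint16_t max_packet)
{
    static const uint8_t marker[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
    uint32_t rest = len % max_packet;

    if ((rest == 5 || rest == 6) && len >= 4 &&
        !memcmp(buf + len - 4, marker, 4))
        len -= 4; /* drop the bogus trailer */
    return len;
}

int main(void)
{
    uint8_t buf[69] = { 0 };

    memcpy(buf + 65, "\xde\xad\xbe\xef", 4);
    assert(fixed_length(buf, 69, 64) == 65); /* 69 % 64 == 5: stripped */
    assert(fixed_length(buf, 68, 64) == 68); /* 68 % 64 == 4: untouched */
    return 0;
}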
+
/* Moving data from usb to kernel (in interrupt state) */
static void read_bulk_callback(struct urb *urb)
{
@@ -1029,17 +1037,8 @@ static void read_bulk_callback(struct urb *urb)
return;
}
- if (odev->parent->port_spec & HSO_INFO_CRC_BUG) {
- u32 rest;
- u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
- rest = urb->actual_length %
- le16_to_cpu(odev->in_endp->wMaxPacketSize);
- if (((rest == 5) || (rest == 6)) &&
- !memcmp(((u8 *) urb->transfer_buffer) +
- urb->actual_length - 4, crc_check, 4)) {
- urb->actual_length -= 4;
- }
- }
+ if (odev->parent->port_spec & HSO_INFO_CRC_BUG)
+ fix_crc_bug(urb, odev->in_endp->wMaxPacketSize);
/* do we even have a packet? */
if (urb->actual_length) {
@@ -1231,18 +1230,8 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
return;
if (status == 0) {
- if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
- u32 rest;
- u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
- rest =
- urb->actual_length %
- le16_to_cpu(serial->in_endp->wMaxPacketSize);
- if (((rest == 5) || (rest == 6)) &&
- !memcmp(((u8 *) urb->transfer_buffer) +
- urb->actual_length - 4, crc_check, 4)) {
- urb->actual_length -= 4;
- }
- }
+ if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
+ fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
/* Valid data, handle RX data */
spin_lock(&serial->serial_lock);
serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
@@ -1745,7 +1734,6 @@ static int hso_serial_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct hso_serial *serial = get_serial_by_tty(tty);
- void __user *uarg = (void __user *)arg;
int ret = 0;
D4("IOCTL cmd: %d, arg: %ld", cmd, arg);
@@ -2994,12 +2982,14 @@ static int hso_probe(struct usb_interface *interface,
case HSO_INTF_BULK:
/* It's a regular bulk interface */
- if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) &&
- !disable_net)
- hso_dev = hso_create_net_device(interface, port_spec);
- else
+ if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
+ if (!disable_net)
+ hso_dev =
+ hso_create_net_device(interface, port_spec);
+ } else {
hso_dev =
hso_create_bulk_serial_device(interface, port_spec);
+ }
if (!hso_dev)
goto exit;
break;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index b2bcf99e6f08..7d42f9a2c068 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -363,7 +363,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
/* Paranoid */
if (skb->len > IPHETH_BUF_SIZE) {
- WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+ WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
dev->net->stats.tx_dropped++;
dev_kfree_skb_irq(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index a6281e3987b5..b701f593cd59 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -1,5 +1,5 @@
/*
- * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices
+ * MOSCHIP MCS7830 based (7730/7830/7832) USB 2.0 Ethernet Devices
*
* based on usbnet.c, asix.c and the vendor provided mcs7830 driver
*
@@ -11,6 +11,9 @@
*
* Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
*
+ * 2010-12-19: add 7832 USB PID ("functionality same as MCS7830"),
+ * as proactively reported by the manufacturer
+ *
* TODO:
* - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
* - implement ethtool_ops get_pauseparam/set_pauseparam
@@ -60,6 +63,7 @@
#define MCS7830_MAX_MCAST 64
#define MCS7830_VENDOR_ID 0x9710
+#define MCS7832_PRODUCT_ID 0x7832
#define MCS7830_PRODUCT_ID 0x7830
#define MCS7730_PRODUCT_ID 0x7730
@@ -626,7 +630,7 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
static const struct driver_info moschip_info = {
- .description = "MOSCHIP 7830/7730 usb-NET adapter",
+ .description = "MOSCHIP 7830/7832/7730 usb-NET adapter",
.bind = mcs7830_bind,
.rx_fixup = mcs7830_rx_fixup,
.flags = FLAG_ETHER,
@@ -645,6 +649,10 @@ static const struct driver_info sitecom_info = {
static const struct usb_device_id products[] = {
{
+ USB_DEVICE(MCS7830_VENDOR_ID, MCS7832_PRODUCT_ID),
+ .driver_info = (unsigned long) &moschip_info,
+ },
+ {
USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID),
.driver_info = (unsigned long) &moschip_info,
},
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6710f09346d6..ef3667690b12 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -359,7 +359,7 @@ fail:
static int mdio_read(struct net_device *dev, int phy_id, int loc)
{
- pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+ pegasus_t *pegasus = netdev_priv(dev);
u16 res;
read_mii_word(pegasus, phy_id, loc, &res);
@@ -397,7 +397,7 @@ fail:
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
{
- pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+ pegasus_t *pegasus = netdev_priv(dev);
write_mii_word(pegasus, phy_id, loc, val);
}
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index d1ac15c95faf..ed1b43210584 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -802,10 +802,9 @@ static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
dev_dbg(&dev->udev->dev, "%s", __func__);
- /* Kill the timer then flush the work queue */
+ /* kill the timer and work */
del_timer_sync(&priv->sync_timer);
-
- flush_scheduled_work();
+ cancel_work_sync(&priv->sierra_net_kevent);
/* tell modem we are going away */
status = sierra_net_send_cmd(dev, priv->shdwn_msg,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 65cb1abfbe57..bc86f4b6ecc2 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1163,9 +1163,8 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
{
- int len = skb->data - skb->head;
- u16 high_16 = (u16)(skb->csum_offset + skb->csum_start - len);
- u16 low_16 = (u16)(skb->csum_start - len);
+ u16 low_16 = (u16)skb_checksum_start_offset(skb);
+ u16 high_16 = low_16 + skb->csum_offset;
return (high_16 << 16) | low_16;
}
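
The preamble above packs two cursor offsets into one 32-bit word: the low half is where checksumming starts, the high half is where the result is stored. A standalone restatement, with illustrative offset values:

#include <stdint.h>
#include <stdio.h>

static uint32_t csum_preamble(uint16_t csum_start_off, uint16_t csum_offset)
{
    uint16_t low_16 = csum_start_off;                    /* start of checksum data */
    uint16_t high_16 = (uint16_t)(low_16 + csum_offset); /* where the result goes */

    return (uint32_t)high_16 << 16 | low_16;
}

int main(void)
{
    /* e.g. TCP over IPv4: start at byte 34, result 16 bytes further in */
    printf("0x%08x\n", csum_preamble(34, 16)); /* prints 0x00320022 */
    return 0;
}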
@@ -1193,7 +1192,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
if (skb->len <= 45) {
/* workaround - hardware tx checksum does not work
* properly with extremely small packets */
- long csstart = skb->csum_start - skb_headroom(skb);
+ long csstart = skb_checksum_start_offset(skb);
__wsum calc = csum_partial(skb->data + csstart,
skb->len - csstart, 0);
*((__sum16 *)(skb->data + csstart
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9df1ccf..ed9a41643ff4 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
#define DRIVER_VERSION "22-Aug-2005"
@@ -390,14 +391,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
goto error;
// else network stack removes extra byte if we forced a short packet
- if (skb->len)
- usbnet_skb_return (dev, skb);
- else {
- netif_dbg(dev, rx_err, dev->net, "drop\n");
-error:
- dev->net->stats.rx_errors++;
- skb_queue_tail (&dev->done, skb);
+ if (skb->len) {
+ /* all data was already cloned from skb inside the driver */
+ if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+ dev_kfree_skb_any(skb);
+ else
+ usbnet_skb_return(dev, skb);
+ return;
}
+
+ netif_dbg(dev, rx_err, dev->net, "drop\n");
+error:
+ dev->net->stats.rx_errors++;
+ skb_queue_tail(&dev->done, skb);
}
/*-------------------------------------------------------------------------*/
@@ -970,7 +976,8 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- dev->net->stats.tx_packets++;
+ if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
+ dev->net->stats.tx_packets++;
dev->net->stats.tx_bytes += entry->length;
} else {
dev->net->stats.tx_errors++;
@@ -1043,8 +1050,13 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
if (info->tx_fixup) {
skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
if (!skb) {
- netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
- goto drop;
+ if (info->flags & FLAG_MULTI_PACKET) {
+ /* cdc_ncm has collected the packet; it waits for more */
+ goto not_drop;
+ }
+ netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+ goto drop;
}
}
length = skb->len;
@@ -1066,13 +1078,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
/* don't assume the hardware handles USB_ZERO_PACKET
* NOTE: strictly conforming cdc-ether devices should expect
* the ZLP here, but ignore the one-byte packet.
+ * NOTE2: CDC NCM specification is different from CDC ECM when
+ * handling ZLP/short packets, so cdc_ncm driver will make short
+ * packet itself if needed.
*/
if (length % dev->maxpacket == 0) {
if (!(info->flags & FLAG_SEND_ZLP)) {
- urb->transfer_buffer_length++;
- if (skb_tailroom(skb)) {
- skb->data[skb->len] = 0;
- __skb_put(skb, 1);
+ if (!(info->flags & FLAG_MULTI_PACKET)) {
+ urb->transfer_buffer_length++;
+ if (skb_tailroom(skb)) {
+ skb->data[skb->len] = 0;
+ __skb_put(skb, 1);
+ }
}
} else
urb->transfer_flags |= URB_ZERO_PACKET;
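
Three behaviours are now possible when a transfer length lands exactly on a multiple of maxpacket. A compact restatement of the decision above; the enum names are illustrative, not kernel symbols:

/* sketch: how a multiple-of-maxpacket USB transfer gets terminated */
enum zlp_action { SEND_ZLP, PAD_ONE_BYTE, DRIVER_HANDLES_IT };

static enum zlp_action zlp_policy(int send_zlp, int multi_packet)
{
    if (send_zlp)
        return SEND_ZLP;          /* FLAG_SEND_ZLP: set URB_ZERO_PACKET */
    if (multi_packet)
        return DRIVER_HANDLES_IT; /* FLAG_MULTI_PACKET: cdc_ncm pads itself */
    return PAD_ONE_BYTE;          /* legacy: append one zero byte */
}

int main(void)
{
    return zlp_policy(0, 1) == DRIVER_HANDLES_IT ? 0 : 1;
}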
@@ -1121,6 +1138,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
dev->net->stats.tx_dropped++;
+not_drop:
if (skb)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
@@ -1230,8 +1248,7 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
- /* we don't hold rtnl here ... */
- flush_scheduled_work ();
+ cancel_work_sync(&dev->kevent);
if (dev->driver_info->unbind)
dev->driver_info->unbind (dev, intf);
@@ -1273,6 +1290,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
struct usb_device *xdev;
int status;
const char *name;
+ struct usb_driver *driver = to_usb_driver(udev->dev.driver);
+
+ /* usbnet already took usb runtime pm, so have to enable the feature
+ * for usb interface, otherwise usb_autopm_get_interface may return
+ * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
+ */
+ if (!driver->supports_autosuspend) {
+ driver->supports_autosuspend = 1;
+ pm_runtime_enable(&udev->dev);
+ }
name = udev->dev.driver->name;
info = (struct driver_info *) prod->driver_info;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 0bbc0c323135..cc83fa71c3ff 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -166,7 +166,9 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(rcv->flags & IFF_UP))
goto tx_drop;
- if (dev->features & NETIF_F_NO_CSUM)
+ /* don't change ip_summed == CHECKSUM_PARTIAL, as that
+ * would cause bad checksums on forwarded packets */
+ if (skb->ip_summed == CHECKSUM_NONE)
skb->ip_summed = rcv_priv->ip_summed;
length = skb->len + ETH_HLEN;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 4930f9dbc493..5e7f069eab53 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
*/
#define DRV_NAME "via-rhine"
-#define DRV_VERSION "1.4.3"
-#define DRV_RELDATE "2007-03-06"
+#define DRV_VERSION "1.5.0"
+#define DRV_RELDATE "2010-10-09"
/* A few user-configurable values.
@@ -100,6 +100,7 @@ static const int multicast_filter_limit = 32;
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
+#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
@@ -133,6 +134,9 @@ MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
+#define MCAM_SIZE 32
+#define VCAM_SIZE 32
+
/*
Theory of Operation
@@ -279,15 +283,16 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
/* Offsets to the device registers. */
enum register_offsets {
StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
- ChipCmd1=0x09,
+ ChipCmd1=0x09, TQWake=0x0A,
IntrStatus=0x0C, IntrEnable=0x0E,
MulticastFilter0=0x10, MulticastFilter1=0x14,
RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
- MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+ MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
StickyHW=0x83, IntrStatus2=0x84,
+ CamMask=0x88, CamCon=0x92, CamAddr=0x93,
WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
WOLcrClr1=0xA6, WOLcgClr=0xA7,
PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
@@ -299,6 +304,40 @@ enum backoff_bits {
BackCaptureEffect=0x04, BackRandom=0x08
};
+/* Bits in the TxConfig (TCR) register */
+enum tcr_bits {
+ TCR_PQEN=0x01,
+ TCR_LB0=0x02, /* loopback[0] */
+ TCR_LB1=0x04, /* loopback[1] */
+ TCR_OFSET=0x08,
+ TCR_RTGOPT=0x10,
+ TCR_RTFT0=0x20,
+ TCR_RTFT1=0x40,
+ TCR_RTSF=0x80,
+};
+
+/* Bits in the CamCon (CAMC) register */
+enum camcon_bits {
+ CAMC_CAMEN=0x01,
+ CAMC_VCAMSL=0x02,
+ CAMC_CAMWR=0x04,
+ CAMC_CAMRD=0x08,
+};
+
+/* Bits in the PCIBusConfig1 (BCR1) register */
+enum bcr1_bits {
+ BCR1_POT0=0x01,
+ BCR1_POT1=0x02,
+ BCR1_POT2=0x04,
+ BCR1_CTFT0=0x08,
+ BCR1_CTFT1=0x10,
+ BCR1_CTSF=0x20,
+ BCR1_TXQNOBK=0x40, /* for VT6105 */
+ BCR1_VIDFR=0x80, /* for VT6105 */
+ BCR1_MED0=0x40, /* for VT6102 */
+ BCR1_MED1=0x80, /* for VT6102 */
+};
+
#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
@@ -356,6 +395,11 @@ enum desc_status_bits {
DescOwn=0x80000000
};
+/* Bits in *_desc.*_length */
+enum desc_length_bits {
+ DescTag=0x00010000
+};
+
/* Bits in ChipCmd. */
enum chip_cmd_bits {
CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
@@ -365,6 +409,9 @@ enum chip_cmd_bits {
};
struct rhine_private {
+ /* Bit mask for configured VLAN ids */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
/* Descriptor rings */
struct rx_desc *rx_ring;
struct tx_desc *tx_ring;
@@ -405,6 +452,23 @@ struct rhine_private {
void __iomem *base;
};
+#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
+#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
+#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
+#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
+#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
+
+#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
+#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
+#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
+#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
+#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
+
+
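These read-modify-write helpers clear a mask field before OR-ing in the new value. A standalone check of the BYTE_REG_BITS_SET semantics on a plain byte, with illustrative values matching the TxConfig use later in this patch:

#include <assert.h>
#include <stdint.h>

static uint8_t byte_reg_bits_set(uint8_t reg, uint8_t x, uint8_t m)
{
    return (uint8_t)((reg & ~m) | x); /* clear mask bits, then set x */
}

int main(void)
{
    /* as in the TxConfig use below: mask 0x80, new threshold value 0x40 */
    assert(byte_reg_bits_set(0xA0, 0x40, 0x80) == 0x60);
    return 0;
}
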
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
@@ -422,6 +486,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_init_cam_filter(struct net_device *dev);
+static void rhine_update_vcam(struct net_device *dev);
#define RHINE_WAIT_FOR(condition) do { \
int i=1024; \
@@ -629,6 +701,8 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = rhine_tx_timeout,
+ .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rhine_poll,
#endif
@@ -795,6 +869,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
+ if (pdev->revision >= VT6105M)
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
/* dev->name not defined before register_netdev()! */
rc = register_netdev(dev);
if (rc)
@@ -1040,6 +1118,167 @@ static void rhine_set_carrier(struct mii_if_info *mii)
netif_carrier_ok(mii->dev));
}
+/**
+ * rhine_set_cam - set CAM multicast filters
+ * @ioaddr: register block of this Rhine
+ * @idx: multicast CAM index [0..MCAM_SIZE-1]
+ * @addr: multicast address (6 bytes)
+ *
+ * Load addresses into multicast filters.
+ */
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+ int i;
+
+ iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+ wmb();
+
+ /* Paranoid -- idx out of range should never happen */
+ idx &= (MCAM_SIZE - 1);
+
+ iowrite8((u8) idx, ioaddr + CamAddr);
+
+ for (i = 0; i < 6; i++, addr++)
+ iowrite8(*addr, ioaddr + MulticastFilter0 + i);
+ udelay(10);
+ wmb();
+
+ iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+ udelay(10);
+
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam - set CAM VLAN filters
+ * @ioaddr: register block of this Rhine
+ * @idx: VLAN CAM index [0..VCAM_SIZE-1]
+ * @addr: VLAN ID (2 bytes)
+ *
+ * Load addresses into VLAN filters.
+ */
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+ iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+ wmb();
+
+ /* Paranoid -- idx out of range should never happen */
+ idx &= (VCAM_SIZE - 1);
+
+ iowrite8((u8) idx, ioaddr + CamAddr);
+
+ iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
+ udelay(10);
+ wmb();
+
+ iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+ udelay(10);
+
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_cam_mask - set multicast CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: multicast CAM mask
+ *
+ * Mask sets multicast filters active/inactive.
+ */
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+ iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+ wmb();
+
+ /* write mask */
+ iowrite32(mask, ioaddr + CamMask);
+
+ /* disable CAMEN */
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam_mask - set VLAN CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: VLAN CAM mask
+ *
+ * Mask sets VLAN filters active/inactive.
+ */
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+ iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+ wmb();
+
+ /* write mask */
+ iowrite32(mask, ioaddr + CamMask);
+
+ /* disable CAMEN */
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_init_cam_filter - initialize CAM filters
+ * @dev: network device
+ *
+ * Initialize (disable) hardware VLAN and multicast support on this
+ * Rhine.
+ */
+static void rhine_init_cam_filter(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ /* Disable all CAMs */
+ rhine_set_vlan_cam_mask(ioaddr, 0);
+ rhine_set_cam_mask(ioaddr, 0);
+
+ /* disable hardware VLAN support */
+ BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
+ BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+}
+
+/**
+ * rhine_update_vcam - update VLAN CAM filters
+ * @rp: rhine_private data of this Rhine
+ *
+ * Update VLAN CAM filters to match configuration change.
+ */
+static void rhine_update_vcam(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u16 vid;
+ u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
+ unsigned int i = 0;
+
+ for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
+ rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
+ vCAMmask |= 1 << i;
+ if (++i >= VCAM_SIZE)
+ break;
+ }
+ rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
+}
+
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ spin_lock_irq(&rp->lock);
+ set_bit(vid, rp->active_vlans);
+ rhine_update_vcam(dev);
+ spin_unlock_irq(&rp->lock);
+}
+
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ spin_lock_irq(&rp->lock);
+ clear_bit(vid, rp->active_vlans);
+ rhine_update_vcam(dev);
+ spin_unlock_irq(&rp->lock);
+}
+
static void init_registers(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1061,6 +1300,9 @@ static void init_registers(struct net_device *dev)
rhine_set_rx_mode(dev);
+ if (rp->pdev->revision >= VT6105M)
+ rhine_init_cam_filter(dev);
+
napi_enable(&rp->napi);
/* Enable interrupts by setting the interrupt mask. */
@@ -1276,16 +1518,28 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_ring[entry].desc_length =
cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+ if (unlikely(vlan_tx_tag_present(skb))) {
+ rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+ /* request tagging */
+ rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
+ } else
+ rp->tx_ring[entry].tx_status = 0;
+
/* lock eth irq */
spin_lock_irqsave(&rp->lock, flags);
wmb();
- rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+ rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
wmb();
rp->cur_tx++;
/* Non-x86 Todo: explicitly flush cache lines here. */
+ if (vlan_tx_tag_present(skb))
+ /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+ BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
/* Wake the potentially-idle transmit channel */
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
ioaddr + ChipCmd1);
@@ -1437,6 +1691,21 @@ static void rhine_tx(struct net_device *dev)
spin_unlock(&rp->lock);
}
+/**
+ * rhine_get_vlan_tci - extract TCI from Rx data buffer
+ * @skb: pointer to sk_buff
+ * @data_size: used data area of the buffer including CRC
+ *
+ * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
+ * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
+ * aligned following the CRC.
+ */
+static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
+{
+ u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
+ return ntohs(*(u16 *)trailer);
+}
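
A worked example of the trailer arithmetic documented above: the used data area (including CRC) is rounded up to a 4-byte boundary, then the 2-byte TPID is skipped to reach the TCI.

#include <stdio.h>

/* trailer offset used by rhine_get_vlan_tci() */
static int tci_offset(int data_size)
{
    return ((data_size + 3) & ~3) + 2;
}

int main(void)
{
    printf("%d %d\n", tci_offset(60), tci_offset(61)); /* prints: 62 66 */
    return 0;
}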
+
/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
@@ -1454,6 +1723,7 @@ static int rhine_rx(struct net_device *dev, int limit)
for (count = 0; count < limit; ++count) {
struct rx_desc *desc = rp->rx_head_desc;
u32 desc_status = le32_to_cpu(desc->rx_status);
+ u32 desc_length = le32_to_cpu(desc->desc_length);
int data_size = desc_status >> 16;
if (desc_status & DescOwn)
@@ -1498,6 +1768,7 @@ static int rhine_rx(struct net_device *dev, int limit)
struct sk_buff *skb = NULL;
/* Length should omit the CRC */
int pkt_len = data_size - 4;
+ u16 vlan_tci = 0;
/* Check if the packet is long enough to accept without
copying to a minimally-sized skbuff. */
@@ -1532,7 +1803,14 @@ static int rhine_rx(struct net_device *dev, int limit)
rp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
}
+
+ if (unlikely(desc_length & DescTag))
+ vlan_tci = rhine_get_vlan_tci(skb, data_size);
+
skb->protocol = eth_type_trans(skb, dev);
+
+ if (unlikely(desc_length & DescTag))
+ __vlan_hwaccel_put_tag(skb, vlan_tci);
netif_receive_skb(skb);
dev->stats.rx_bytes += pkt_len;
dev->stats.rx_packets++;
@@ -1596,6 +1874,11 @@ static void rhine_restart_tx(struct net_device *dev) {
iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
ioaddr + ChipCmd);
+
+ if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
+ /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+ BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
ioaddr + ChipCmd1);
IOSYNC;
@@ -1631,7 +1914,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
}
if (intr_status & IntrTxUnderrun) {
if (rp->tx_thresh < 0xE0)
- iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+ BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
if (debug > 1)
printk(KERN_INFO "%s: Transmitter underrun, Tx "
"threshold now %2.2x.\n",
@@ -1646,7 +1929,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
(intr_status & (IntrTxAborted |
IntrTxUnderrun | IntrTxDescRace)) == 0) {
if (rp->tx_thresh < 0xE0) {
- iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+ BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
}
if (debug > 1)
printk(KERN_INFO "%s: Unspecified error. Tx "
@@ -1688,7 +1971,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
u32 mc_filter[2]; /* Multicast hash filter */
- u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
+ u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode = 0x1C;
@@ -1699,10 +1983,18 @@ static void rhine_set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- rx_mode = 0x0C;
+ } else if (rp->pdev->revision >= VT6105M) {
+ int i = 0;
+ u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i == MCAM_SIZE)
+ break;
+ rhine_set_cam(ioaddr, i, ha->addr);
+ mCAMmask |= 1 << i;
+ i++;
+ }
+ rhine_set_cam_mask(ioaddr, mCAMmask);
} else {
- struct netdev_hw_addr *ha;
-
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
@@ -1711,9 +2003,15 @@ static void rhine_set_rx_mode(struct net_device *dev)
}
iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
- rx_mode = 0x0C;
}
- iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
+ /* enable/disable VLAN receive filtering */
+ if (rp->pdev->revision >= VT6105M) {
+ if (dev->flags & IFF_PROMISC)
+ BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+ else
+ BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+ }
+ BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1966,7 +2264,7 @@ static int rhine_resume(struct pci_dev *pdev)
if (!netif_running(dev))
return 0;
- if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
+ if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
ret = pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb6b67f6b0cc..90a23e410d1b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -519,7 +519,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
+ hdr->hdr.csum_start = skb_checksum_start_offset(skb);
hdr->hdr.csum_offset = skb->csum_offset;
} else {
hdr->hdr.flags = 0;
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev)
goto unregister;
}
- vi->status = VIRTIO_NET_S_LINK_UP;
- virtnet_update_status(vi);
- netif_carrier_on(dev);
+ /* Assume link up if device can't report link status,
+ * otherwise get link status from config. */
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+ netif_carrier_off(dev);
+ virtnet_update_status(vi);
+ } else {
+ vi->status = VIRTIO_NET_S_LINK_UP;
+ netif_carrier_on(dev);
+ }
pr_debug("virtnet: registered device %s\n", dev->name);
return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 21314e06e6d7..d143e8b72b5b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -44,6 +44,9 @@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
static atomic_t devices_found;
+#define VMXNET3_MAX_DEVICES 10
+static int enable_mq = 1;
+static int irq_share_mode;
/*
* Enable/Disable the given intr
@@ -99,7 +102,7 @@ vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
- return netif_queue_stopped(adapter->netdev);
+ return tq->stopped;
}
@@ -107,7 +110,7 @@ static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
- netif_start_queue(adapter->netdev);
+ netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}
@@ -115,7 +118,7 @@ static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
- netif_wake_queue(adapter->netdev);
+ netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
@@ -124,7 +127,7 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = true;
tq->num_stop++;
- netif_stop_queue(adapter->netdev);
+ netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
@@ -135,6 +138,7 @@ static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
u32 ret;
+ int i;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -145,22 +149,28 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
if (!netif_carrier_ok(adapter->netdev))
netif_carrier_on(adapter->netdev);
- if (affectTxQueue)
- vmxnet3_tq_start(&adapter->tx_queue, adapter);
+ if (affectTxQueue) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_start(&adapter->tx_queue[i],
+ adapter);
+ }
} else {
printk(KERN_INFO "%s: NIC Link is Down\n",
adapter->netdev->name);
if (netif_carrier_ok(adapter->netdev))
netif_carrier_off(adapter->netdev);
- if (affectTxQueue)
- vmxnet3_tq_stop(&adapter->tx_queue, adapter);
+ if (affectTxQueue) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
+ }
}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
+ int i;
u32 events = le32_to_cpu(adapter->shared->ecr);
if (!events)
return;
@@ -176,16 +186,18 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_QUEUE_STATUS);
- if (adapter->tqd_start->status.stopped) {
- printk(KERN_ERR "%s: tq error 0x%x\n",
- adapter->netdev->name,
- le32_to_cpu(adapter->tqd_start->status.error));
- }
- if (adapter->rqd_start->status.stopped) {
- printk(KERN_ERR "%s: rq error 0x%x\n",
- adapter->netdev->name,
- adapter->rqd_start->status.error);
- }
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tqd_start[i].status.stopped)
+ dev_err(&adapter->netdev->dev,
+ "%s: tq[%d] error 0x%x\n",
+ adapter->netdev->name, i, le32_to_cpu(
+ adapter->tqd_start[i].status.error));
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rqd_start[i].status.stopped)
+ dev_err(&adapter->netdev->dev,
+ "%s: rq[%d] error 0x%x\n",
+ adapter->netdev->name, i,
+ adapter->rqd_start[i].status.error);
schedule_work(&adapter->work);
}
@@ -410,7 +422,7 @@ vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
}
-void
+static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
@@ -437,6 +449,17 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
}
+/* Destroy all tx queues */
+void
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
+}
+
+
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
@@ -518,6 +541,14 @@ err:
return -ENOMEM;
}
+static void
+vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
+}
/*
* starting from ring->next2fill, allocate rx buffers for the given ring
@@ -732,6 +763,17 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
}
+/* Init all tx queues */
+static void
+vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
+}
+
+
/*
* parse and copy relevant protocol headers:
* For a tso pkt, relevant headers are L2/3/4 including options
@@ -756,7 +798,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
{
struct Vmxnet3_TxDataDesc *tdd;
- if (ctx->mss) {
+ if (ctx->mss) { /* TSO */
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
ctx->l4_hdr_size = ((struct tcphdr *)
skb_transport_header(skb))->doff * 4;
@@ -765,7 +807,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
unsigned int pull_size;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- ctx->eth_ip_hdr_size = skb_transport_offset(skb);
+ ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
if (ctx->ipv4) {
struct iphdr *iph = (struct iphdr *)
@@ -903,6 +945,21 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
}
+ spin_lock_irqsave(&tq->tx_lock, flags);
+
+ if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+ tq->stats.tx_ring_full++;
+ dev_dbg(&adapter->netdev->dev,
+ "tx queue stopped on %s, next2comp %u"
+ " next2fill %u\n", adapter->netdev->name,
+ tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+ vmxnet3_tq_stop(tq, adapter);
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+
ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
if (ret >= 0) {
BUG_ON(ret <= 0 && ctx.copy_size != 0);
@@ -923,21 +980,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
} else {
tq->stats.drop_hdr_inspect_err++;
- goto drop_pkt;
- }
-
- spin_lock_irqsave(&tq->tx_lock, flags);
-
- if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
- tq->stats.tx_ring_full++;
- dev_dbg(&adapter->netdev->dev,
- "tx queue stopped on %s, next2comp %u"
- " next2fill %u\n", adapter->netdev->name,
- tq->tx_ring.next2comp, tq->tx_ring.next2fill);
-
- vmxnet3_tq_stop(tq, adapter);
- spin_unlock_irqrestore(&tq->tx_lock, flags);
- return NETDEV_TX_BUSY;
+ goto unlock_drop_pkt;
}
/* fill tx descs related to addr & len */
@@ -1000,7 +1043,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (le32_to_cpu(tq->shared->txNumDeferred) >=
le32_to_cpu(tq->shared->txThreshold)) {
tq->shared->txNumDeferred = 0;
- VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ VMXNET3_REG_TXPROD + tq->qid * 8,
tq->tx_ring.next2fill);
}
@@ -1008,6 +1052,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
hdr_too_big:
tq->stats.drop_oversized_hdr++;
+unlock_drop_pkt:
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
tq->stats.drop_total++;
dev_kfree_skb(skb);
@@ -1020,7 +1066,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
+ BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
+ return vmxnet3_tq_xmit(skb,
+ &adapter->tx_queue[skb->queue_mapping],
+ adapter, netdev);
}
@@ -1082,7 +1131,9 @@ static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter, int quota)
{
- static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
+ static const u32 rxprod_reg[2] = {
+ VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
+ };
u32 num_rxd = 0;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
@@ -1106,9 +1157,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
break;
}
num_rxd++;
-
+ BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
- ring_idx = rcd->rqID == rq->qid ? 0 : 1;
+ ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
rbi = rq->buf_info[ring_idx] + idx;
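
Each rx queue owns two rings, reachable under two ids (qid = i and qid2 = i + num_rx_queues, per the setup later in this patch), so a completion descriptor's rqID decodes to a (queue, ring) pair. A minimal sketch of that mapping:

#include <stdio.h>

static void decode_rqid(unsigned int rqid, unsigned int num_rx_queues,
                        unsigned int *queue, unsigned int *ring)
{
    *ring = rqid < num_rx_queues ? 0 : 1; /* first or second ring */
    *queue = rqid % num_rx_queues;        /* owning rx queue index */
}

int main(void)
{
    unsigned int q, r;

    decode_rqid(5, 4, &q, &r); /* with 4 rx queues: queue 1, ring 1 */
    printf("queue %u ring %u\n", q, r);
    return 0;
}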
@@ -1260,6 +1311,16 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
}
+static void
+vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
+}
+
+
void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
@@ -1351,6 +1412,25 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
static int
+vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
+ if (unlikely(err)) {
+ dev_err(&adapter->netdev->dev, "%s: failed to "
+ "initialize rx queue%i\n",
+ adapter->netdev->name, i);
+ break;
+ }
+ }
+ return err;
+
+}
+
+
+static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
int i;
@@ -1398,33 +1478,177 @@ err:
static int
+vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
+ if (unlikely(err)) {
+ dev_err(&adapter->netdev->dev,
+ "%s: failed to create rx queue%i\n",
+ adapter->netdev->name, i);
+ goto err_out;
+ }
+ }
+ return err;
+err_out:
+ vmxnet3_rq_destroy_all(adapter);
+ return err;
+
+}
+
+/* Multiple queue aware polling function for tx and rx */
+
+static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
+ int rcd_done = 0, i;
if (unlikely(adapter->shared->ecr))
vmxnet3_process_events(adapter);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
- vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
- return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
+ adapter, budget);
+ return rcd_done;
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
- struct vmxnet3_adapter *adapter = container_of(napi,
- struct vmxnet3_adapter, napi);
+ struct vmxnet3_rx_queue *rx_queue = container_of(napi,
+ struct vmxnet3_rx_queue, napi);
+ int rxd_done;
+
+ rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
+
+ if (rxd_done < budget) {
+ napi_complete(napi);
+ vmxnet3_enable_all_intrs(rx_queue->adapter);
+ }
+ return rxd_done;
+}
+
+/*
+ * NAPI polling function for MSI-X mode with multiple Rx queues
+ * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
+ */
+
+static int
+vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
+{
+ struct vmxnet3_rx_queue *rq = container_of(napi,
+ struct vmxnet3_rx_queue, napi);
+ struct vmxnet3_adapter *adapter = rq->adapter;
int rxd_done;
- rxd_done = vmxnet3_do_poll(adapter, budget);
+ /* When sharing interrupt with corresponding tx queue, process
+ * tx completions in that queue as well
+ */
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
+ struct vmxnet3_tx_queue *tq =
+ &adapter->tx_queue[rq - adapter->rx_queue];
+ vmxnet3_tq_tx_complete(tq, adapter);
+ }
+
+ rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
if (rxd_done < budget) {
napi_complete(napi);
- vmxnet3_enable_intr(adapter, 0);
+ vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
}
return rxd_done;
}
+#ifdef CONFIG_PCI_MSI
+
+/*
+ * Handle completion interrupts on tx queues
+ * Returns whether or not the intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_tx(int irq, void *data)
+{
+ struct vmxnet3_tx_queue *tq = data;
+ struct vmxnet3_adapter *adapter = tq->adapter;
+
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
+
+ /* Handle the case where only one irq is allocated for all tx queues */
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
+ int i;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
+ vmxnet3_tq_tx_complete(txq, adapter);
+ }
+ } else {
+ vmxnet3_tq_tx_complete(tq, adapter);
+ }
+ vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
+
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * Handle completion interrupts on rx queues. Returns whether or not the
+ * intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_rx(int irq, void *data)
+{
+ struct vmxnet3_rx_queue *rq = data;
+ struct vmxnet3_adapter *adapter = rq->adapter;
+
+ /* disable intr if needed */
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
+ napi_schedule(&rq->napi);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ *
+ * vmxnet3_msix_event --
+ *
+ * vmxnet3 msix event intr handler
+ *
+ * Result:
+ * whether or not the intr is handled
+ *
+ *----------------------------------------------------------------------------
+ */
+
+static irqreturn_t
+vmxnet3_msix_event(int irq, void *data)
+{
+ struct net_device *dev = data;
+ struct vmxnet3_adapter *adapter = netdev_priv(dev);
+
+ /* disable intr if needed */
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
+
+ if (adapter->shared->ecr)
+ vmxnet3_process_events(adapter);
+
+ vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
+
+ return IRQ_HANDLED;
+}
+
+#endif /* CONFIG_PCI_MSI */
+
+
/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
@@ -1432,7 +1656,7 @@ vmxnet3_intr(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct vmxnet3_adapter *adapter = netdev_priv(dev);
- if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
+ if (adapter->intr.type == VMXNET3_IT_INTX) {
u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
if (unlikely(icr == 0))
/* not ours */
@@ -1442,77 +1666,144 @@ vmxnet3_intr(int irq, void *dev_id)
/* disable intr if needed */
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
- vmxnet3_disable_intr(adapter, 0);
+ vmxnet3_disable_all_intrs(adapter);
- napi_schedule(&adapter->napi);
+ napi_schedule(&adapter->rx_queue[0].napi);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-
/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- int irq;
-#ifdef CONFIG_PCI_MSI
- if (adapter->intr.type == VMXNET3_IT_MSIX)
- irq = adapter->intr.msix_entries[0].vector;
- else
-#endif
- irq = adapter->pdev->irq;
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_all_intrs(adapter);
+
+ vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
+ vmxnet3_enable_all_intrs(adapter);
- disable_irq(irq);
- vmxnet3_intr(irq, netdev);
- enable_irq(irq);
}
-#endif
+#endif /* CONFIG_NET_POLL_CONTROLLER */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
- int err;
+ struct vmxnet3_intr *intr = &adapter->intr;
+ int err = 0, i;
+ int vector = 0;
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- /* we only use 1 MSI-X vector */
- err = request_irq(adapter->intr.msix_entries[0].vector,
- vmxnet3_intr, 0, adapter->netdev->name,
- adapter->netdev);
- } else if (adapter->intr.type == VMXNET3_IT_MSI) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
+ sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(
+ intr->msix_entries[vector].vector,
+ vmxnet3_msix_tx, 0,
+ adapter->tx_queue[i].name,
+ &adapter->tx_queue[i]);
+ } else {
+ sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
+ adapter->netdev->name, vector);
+ }
+ if (err) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to request irq for MSIX, %s, "
+ "error %d\n",
+ adapter->tx_queue[i].name, err);
+ return err;
+ }
+
+ /* Handle the case where only 1 MSI-X vector was
+ * allocated for all tx queues */
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
+ for (; i < adapter->num_tx_queues; i++)
+ adapter->tx_queue[i].comp_ring.intr_idx
+ = vector;
+ vector++;
+ break;
+ } else {
+ adapter->tx_queue[i].comp_ring.intr_idx
+ = vector++;
+ }
+ }
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
+ vector = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
+ sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
+ adapter->netdev->name, vector);
+ else
+ sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(intr->msix_entries[vector].vector,
+ vmxnet3_msix_rx, 0,
+ adapter->rx_queue[i].name,
+ &(adapter->rx_queue[i]));
+ if (err) {
+ printk(KERN_ERR "Failed to request irq for MSIX"
+ ", %s, error %d\n",
+ adapter->rx_queue[i].name, err);
+ return err;
+ }
+
+ adapter->rx_queue[i].comp_ring.intr_idx = vector++;
+ }
+
+ sprintf(intr->event_msi_vector_name, "%s-event-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(intr->msix_entries[vector].vector,
+ vmxnet3_msix_event, 0,
+ intr->event_msi_vector_name, adapter->netdev);
+ intr->event_intr_idx = vector;
+
+ } else if (intr->type == VMXNET3_IT_MSI) {
+ adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
adapter->netdev->name, adapter->netdev);
- } else
+ } else {
#endif
- {
+ adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr,
IRQF_SHARED, adapter->netdev->name,
adapter->netdev);
+#ifdef CONFIG_PCI_MSI
}
-
- if (err)
+#endif
+ intr->num_intrs = vector + 1;
+ if (err) {
printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
- ":%d\n", adapter->netdev->name, adapter->intr.type, err);
+ ":%d\n", adapter->netdev->name, intr->type, err);
+ } else {
+ /* Number of rx queues will not change after this */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ rq->qid = i;
+ rq->qid2 = i + adapter->num_rx_queues;
+ }
- if (!err) {
- int i;
- /* init our intr settings */
- for (i = 0; i < adapter->intr.num_intrs; i++)
- adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
- /* next setup intr index for all intr sources */
- adapter->tx_queue.comp_ring.intr_idx = 0;
- adapter->rx_queue.comp_ring.intr_idx = 0;
- adapter->intr.event_intr_idx = 0;
+ /* init our intr settings */
+ for (i = 0; i < intr->num_intrs; i++)
+ intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
+ if (adapter->intr.type != VMXNET3_IT_MSIX) {
+ adapter->intr.event_intr_idx = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_queue[i].comp_ring.intr_idx = 0;
+ adapter->rx_queue[0].comp_ring.intr_idx = 0;
+ }
printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
- "allocated\n", adapter->netdev->name, adapter->intr.type,
- adapter->intr.mask_mode, adapter->intr.num_intrs);
+ "allocated\n", adapter->netdev->name, intr->type,
+ intr->mask_mode, intr->num_intrs);
}
return err;
@@ -1522,18 +1813,32 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
- BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
- adapter->intr.num_intrs <= 0);
+ struct vmxnet3_intr *intr = &adapter->intr;
+ BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
- switch (adapter->intr.type) {
+ switch (intr->type) {
#ifdef CONFIG_PCI_MSI
case VMXNET3_IT_MSIX:
{
- int i;
+ int i, vector = 0;
+
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ free_irq(intr->msix_entries[vector++].vector,
+ &(adapter->tx_queue[i]));
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
+ break;
+ }
+ }
- for (i = 0; i < adapter->intr.num_intrs; i++)
- free_irq(adapter->intr.msix_entries[i].vector,
- adapter->netdev);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ free_irq(intr->msix_entries[vector++].vector,
+ &(adapter->rx_queue[i]));
+ }
+
+ free_irq(intr->msix_entries[vector].vector,
+ adapter->netdev);
+ BUG_ON(vector >= intr->num_intrs);
break;
}
#endif
@@ -1727,6 +2032,15 @@ vmxnet3_set_mc(struct net_device *netdev)
kfree(new_table);
}
+void
+vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
+}
+
/*
* Set up driver_shared based on settings in adapter.
@@ -1774,40 +2088,72 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
devRead->misc.queueDescLen = cpu_to_le32(
- sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc));
+ adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
+ adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
/* tx queue settings */
- BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
-
- devRead->misc.numTxQueues = 1;
- tqc = &adapter->tqd_start->conf;
- tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
- tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
- tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
- tqc->ddPA = cpu_to_le64(virt_to_phys(
- adapter->tx_queue.buf_info));
- tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size);
- tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size);
- tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size);
- tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
- tqc->txRingSize);
- tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
+ devRead->misc.numTxQueues = adapter->num_tx_queues;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
+ BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
+ tqc = &adapter->tqd_start[i].conf;
+ tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
+ tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
+ tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
+ tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
+ tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
+ tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
+ tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
+ tqc->ddLen = cpu_to_le32(
+ sizeof(struct vmxnet3_tx_buf_info) *
+ tqc->txRingSize);
+ tqc->intrIdx = tq->comp_ring.intr_idx;
+ }
/* rx queue settings */
- devRead->misc.numRxQueues = 1;
- rqc = &adapter->rqd_start->conf;
- rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
- rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
- rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
- rqc->ddPA = cpu_to_le64(virt_to_phys(
- adapter->rx_queue.buf_info));
- rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
- rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
- rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size);
- rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
- (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
- rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
+ devRead->misc.numRxQueues = adapter->num_rx_queues;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ rqc = &adapter->rqd_start[i].conf;
+ rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
+ rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
+ rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
+ rqc->ddPA = cpu_to_le64(virt_to_phys(
+ rq->buf_info));
+ rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
+ rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
+ rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
+ rqc->ddLen = cpu_to_le32(
+ sizeof(struct vmxnet3_rx_buf_info) *
+ (rqc->rxRingSize[0] +
+ rqc->rxRingSize[1]));
+ rqc->intrIdx = rq->comp_ring.intr_idx;
+ }
+
+#ifdef VMXNET3_RSS
+ memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
+
+ if (adapter->rss) {
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ devRead->misc.uptFeatures |= UPT1_F_RSS;
+ devRead->misc.numRxQueues = adapter->num_rx_queues;
+ rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
+ UPT1_RSS_HASH_TYPE_IPV4 |
+ UPT1_RSS_HASH_TYPE_TCP_IPV6 |
+ UPT1_RSS_HASH_TYPE_IPV6;
+ rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
+ rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
+ rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
+ get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
+ for (i = 0; i < rssConf->indTableSize; i++)
+ rssConf->indTable[i] = i % adapter->num_rx_queues;
+
+ devRead->rssConfDesc.confVer = 1;
+ devRead->rssConfDesc.confLen = sizeof(*rssConf);
+ devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
+ }
+
+#endif /* VMXNET3_RSS */
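
The table above is filled round-robin, so entry i maps to queue i %
num_rx_queues. How the device consumes it is an assumption here, but RSS
hardware typically indexes the table with the low bits of the Toeplitz
hash of the flow:

	/* Illustration of the assumed device-side lookup, not driver code;
	 * indTableSize is a power of 2 (VMXNET3_RSS_IND_TABLE_SIZE). */
	static inline u8 rss_queue_for_hash(const struct UPT1_RSSConf *rssConf,
					    u32 hash)
	{
		return rssConf->indTable[hash & (rssConf->indTableSize - 1)];
	}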
/* intr settings */
devRead->intrConf.autoMask = adapter->intr.mask_mode ==
@@ -1829,18 +2175,18 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
- int err;
+ int err, i;
u32 ret;
- dev_dbg(&adapter->netdev->dev,
- "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
- " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
- adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
- adapter->rx_queue.rx_ring[0].size,
- adapter->rx_queue.rx_ring[1].size);
-
- vmxnet3_tq_init(&adapter->tx_queue, adapter);
- err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
+ dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
+ " ring sizes %u %u %u\n", adapter->netdev->name,
+ adapter->skb_buf_size, adapter->rx_buf_per_pkt,
+ adapter->tx_queue[0].tx_ring.size,
+ adapter->rx_queue[0].rx_ring[0].size,
+ adapter->rx_queue[0].rx_ring[1].size);
+
+ vmxnet3_tq_init_all(adapter);
+ err = vmxnet3_rq_init_all(adapter);
if (err) {
printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
adapter->netdev->name, err);
@@ -1870,10 +2216,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
err = -EINVAL;
goto activate_err;
}
- VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
- adapter->rx_queue.rx_ring[0].next2fill);
- VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
- adapter->rx_queue.rx_ring[1].next2fill);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
+ adapter->rx_queue[i].rx_ring[0].next2fill);
+ VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
+ (i * VMXNET3_REG_ALIGN)),
+ adapter->rx_queue[i].rx_ring[1].next2fill);
+ }
/* Apply the rx filter settings last. */
vmxnet3_set_mc(adapter->netdev);
@@ -1883,8 +2234,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
* tx queue if the link is up.
*/
vmxnet3_check_link(adapter, true);
-
- napi_enable(&adapter->napi);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
return 0;
@@ -1896,7 +2247,7 @@ activate_err:
irq_err:
rq_err:
/* free up buffers we allocated */
- vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+ vmxnet3_rq_cleanup_all(adapter);
return err;
}
@@ -1911,6 +2262,7 @@ vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
+ int i;
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
return 0;
@@ -1919,13 +2271,14 @@ vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
VMXNET3_CMD_QUIESCE_DEV);
vmxnet3_disable_all_intrs(adapter);
- napi_disable(&adapter->napi);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_disable(&adapter->rx_queue[i].napi);
netif_tx_disable(adapter->netdev);
adapter->link_speed = 0;
netif_carrier_off(adapter->netdev);
- vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
- vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+ vmxnet3_tq_cleanup_all(adapter);
+ vmxnet3_rq_cleanup_all(adapter);
vmxnet3_free_irqs(adapter);
return 0;
}
@@ -2047,7 +2400,9 @@ vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
- size_t sz;
+ size_t sz, i, ring0_size, ring1_size, comp_size;
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
+
if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
VMXNET3_MAX_ETH_HDR_SIZE) {
@@ -2069,11 +2424,19 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
* rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
*/
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
- adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
- sz - 1) / sz * sz;
- adapter->rx_queue.rx_ring[0].size = min_t(u32,
- adapter->rx_queue.rx_ring[0].size,
- VMXNET3_RX_RING_MAX_SIZE / sz * sz);
+ ring0_size = adapter->rx_queue[0].rx_ring[0].size;
+ ring0_size = (ring0_size + sz - 1) / sz * sz;
+ ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
+ sz * sz);
+ ring1_size = adapter->rx_queue[0].rx_ring[1].size;
+ comp_size = ring0_size + ring1_size;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rq = &adapter->rx_queue[i];
+ rq->rx_ring[0].size = ring0_size;
+ rq->rx_ring[1].size = ring1_size;
+ rq->comp_ring.size = comp_size;
+ }
}
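
The sizing above is the usual round-up-to-multiple idiom followed by a
clamp to the largest aligned value that still fits. The same two steps in
isolation (the worked numbers in the comment are illustrative):

	/* e.g. sz = 3 * 32 = 96: a requested size of 256 rounds up to
	 * (256 + 95) / 96 * 96 = 288, then is clamped to the largest
	 * multiple of 96 not exceeding VMXNET3_RX_RING_MAX_SIZE. */
	static inline u32 align_ring_size(u32 n, u32 sz, u32 cap)
	{
		n = (n + sz - 1) / sz * sz;	/* round up */
		return min(n, cap / sz * sz);	/* clamp, stay aligned */
	}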
@@ -2081,29 +2444,53 @@ int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
u32 rx_ring_size, u32 rx_ring2_size)
{
- int err;
-
- adapter->tx_queue.tx_ring.size = tx_ring_size;
- adapter->tx_queue.data_ring.size = tx_ring_size;
- adapter->tx_queue.comp_ring.size = tx_ring_size;
- adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
- adapter->tx_queue.stopped = true;
- err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
- if (err)
- return err;
+ int err = 0, i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
+ tq->tx_ring.size = tx_ring_size;
+ tq->data_ring.size = tx_ring_size;
+ tq->comp_ring.size = tx_ring_size;
+ tq->shared = &adapter->tqd_start[i].ctrl;
+ tq->stopped = true;
+ tq->adapter = adapter;
+ tq->qid = i;
+ err = vmxnet3_tq_create(tq, adapter);
+ /*
+ * Too late to change num_tx_queues; we cannot make do with
+ * fewer queues than we asked for.
+ */
+ if (err)
+ goto queue_err;
+ }
- adapter->rx_queue.rx_ring[0].size = rx_ring_size;
- adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
+ adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
+ adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
vmxnet3_adjust_rx_ring_size(adapter);
- adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size +
- adapter->rx_queue.rx_ring[1].size;
- adapter->rx_queue.qid = 0;
- adapter->rx_queue.qid2 = 1;
- adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
- err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
- if (err)
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
-
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ /* qid and qid2 for rx queues will be assigned later, when the
+ * number of rx queues is finalized after allocating intrs */
+ rq->shared = &adapter->rqd_start[i].ctrl;
+ rq->adapter = adapter;
+ err = vmxnet3_rq_create(rq, adapter);
+ if (err) {
+ if (i == 0) {
+ printk(KERN_ERR "Could not allocate any rx"
+ "queues. Aborting.\n");
+ goto queue_err;
+ } else {
+ printk(KERN_INFO "Number of rx queues changed "
+ "to : %d.\n", i);
+ adapter->num_rx_queues = i;
+ err = 0;
+ break;
+ }
+ }
+ }
+ return err;
+queue_err:
+ vmxnet3_tq_destroy_all(adapter);
return err;
}
@@ -2111,11 +2498,12 @@ static int
vmxnet3_open(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter;
- int err;
+ int err, i;
adapter = netdev_priv(netdev);
- spin_lock_init(&adapter->tx_queue.tx_lock);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ spin_lock_init(&adapter->tx_queue[i].tx_lock);
err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE,
@@ -2130,8 +2518,8 @@ vmxnet3_open(struct net_device *netdev)
return 0;
activate_err:
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_tq_destroy_all(adapter);
queue_err:
return err;
}
@@ -2151,8 +2539,8 @@ vmxnet3_close(struct net_device *netdev)
vmxnet3_quiesce_dev(adapter);
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_tq_destroy_all(adapter);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
@@ -2164,6 +2552,8 @@ vmxnet3_close(struct net_device *netdev)
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
+ int i;
+
/*
* we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
* vmxnet3_close() will deadlock.
@@ -2171,7 +2561,8 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
/* we need to enable NAPI, otherwise dev_close will deadlock */
- napi_enable(&adapter->napi);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
dev_close(adapter->netdev);
}
@@ -2202,14 +2593,11 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
vmxnet3_reset_dev(adapter);
/* we need to re-create the rx queue based on the new mtu */
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+ vmxnet3_rq_destroy_all(adapter);
vmxnet3_adjust_rx_ring_size(adapter);
- adapter->rx_queue.comp_ring.size =
- adapter->rx_queue.rx_ring[0].size +
- adapter->rx_queue.rx_ring[1].size;
- err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
+ err = vmxnet3_rq_create_all(adapter);
if (err) {
- printk(KERN_ERR "%s: failed to re-create rx queue,"
+ printk(KERN_ERR "%s: failed to re-create rx queues,"
" error %d. Closing it.\n", netdev->name, err);
goto out;
}
@@ -2274,6 +2662,55 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
mac[5] = (tmp >> 8) & 0xff;
}
+#ifdef CONFIG_PCI_MSI
+
+/*
+ * Enable MSI-X vectors.
+ * Returns:
+ * 0 on successful enabling of required vectors,
+ * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors
+ * required could be enabled,
+ * number of vectors which can be enabled otherwise (this number is smaller
+ * than VMXNET3_LINUX_MIN_MSIX_VECT)
+ */
+
+static int
+vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
+ int vectors)
+{
+ int err = 0, vector_threshold;
+ vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
+
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
+ vectors);
+ if (!err) {
+ adapter->intr.num_intrs = vectors;
+ return 0;
+ } else if (err < 0) {
+ printk(KERN_ERR "Failed to enable MSI-X for %s, error"
+ " %d\n", adapter->netdev->name, err);
+ vectors = 0;
+ } else if (err < vector_threshold) {
+ break;
+ } else {
+ /* If we fail to enable the required number of MSI-X
+ * vectors, try enabling 3 of them; one each for rx,
+ * tx and event.
+ */
+ printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
+ " %d instead\n", vectors, adapter->netdev->name,
+ vector_threshold);
+ vectors = vector_threshold;
+ }
+ }
+
+ printk(KERN_INFO "Number of MSI-X interrupts which can be allocatedi"
+ " are lower than min threshold required.\n");
+ return err;
+}
+
+
+#endif /* CONFIG_PCI_MSI */
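
The retry loop relies on the tri-state contract of the old
pci_enable_msix() (later superseded by pci_enable_msix_range()): 0 means
all requested vectors were granted, a positive return means none were
enabled but that many are available, and a negative return is a hard
failure. A hedged sketch of a caller of the helper above (the wrapper
name is made up):

	/* Illustration only: ask for one vector per queue plus one for
	 * events, and treat a fallback to the minimum set as success. */
	static int msix_setup_sketch(struct vmxnet3_adapter *adapter)
	{
		int want = min_t(int, adapter->num_tx_queues +
				 adapter->num_rx_queues + 1,
				 VMXNET3_LINUX_MAX_MSIX_VECT);
		int i, ret;

		for (i = 0; i < want; i++)
			adapter->intr.msix_entries[i].entry = i;

		ret = vmxnet3_acquire_msix_vectors(adapter, want);
		if (ret == 0 || ret == VMXNET3_LINUX_MIN_MSIX_VECT)
			return 0;	/* full or minimal rx/tx/event set */
		return ret < 0 ? ret : -ENOSPC;
	}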
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
@@ -2293,16 +2730,47 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- int err;
-
- adapter->intr.msix_entries[0].entry = 0;
- err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
- VMXNET3_LINUX_MAX_MSIX_VECT);
- if (!err) {
- adapter->intr.num_intrs = 1;
- adapter->intr.type = VMXNET3_IT_MSIX;
+ int vector, err = 0;
+
+ adapter->intr.num_intrs = (adapter->share_intr ==
+ VMXNET3_INTR_TXSHARE) ? 1 :
+ adapter->num_tx_queues;
+ adapter->intr.num_intrs += (adapter->share_intr ==
+ VMXNET3_INTR_BUDDYSHARE) ? 0 :
+ adapter->num_rx_queues;
+ adapter->intr.num_intrs += 1; /* for link event */
+
+ adapter->intr.num_intrs = (adapter->intr.num_intrs >
+ VMXNET3_LINUX_MIN_MSIX_VECT
+ ? adapter->intr.num_intrs :
+ VMXNET3_LINUX_MIN_MSIX_VECT);
+
+ for (vector = 0; vector < adapter->intr.num_intrs; vector++)
+ adapter->intr.msix_entries[vector].entry = vector;
+
+ err = vmxnet3_acquire_msix_vectors(adapter,
+ adapter->intr.num_intrs);
+ /* If we cannot allocate one MSIx vector per queue
+ * then limit the number of rx queues to 1
+ */
+ if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
+ || adapter->num_rx_queues != 2) {
+ adapter->share_intr = VMXNET3_INTR_TXSHARE;
+ printk(KERN_ERR "Number of rx queues : 1\n");
+ adapter->num_rx_queues = 1;
+ adapter->intr.num_intrs =
+ VMXNET3_LINUX_MIN_MSIX_VECT;
+ }
return;
}
+ if (!err)
+ return;
+
+ /* If we cannot allocate MSIx vectors use only one rx queue */
+ printk(KERN_INFO "Failed to enable MSI-X for %s, error %d."
+ "#rx queues : 1, try MSI\n", adapter->netdev->name, err);
+
adapter->intr.type = VMXNET3_IT_MSI;
}
@@ -2310,12 +2778,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
int err;
err = pci_enable_msi(adapter->pdev);
if (!err) {
+ adapter->num_rx_queues = 1;
adapter->intr.num_intrs = 1;
return;
}
}
#endif /* CONFIG_PCI_MSI */
+ adapter->num_rx_queues = 1;
+ printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
adapter->intr.type = VMXNET3_IT_INTX;
/* INT-X related setting */
@@ -2343,6 +2814,7 @@ vmxnet3_tx_timeout(struct net_device *netdev)
printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
schedule_work(&adapter->work);
+ netif_wake_queue(adapter->netdev);
}
@@ -2399,8 +2871,29 @@ vmxnet3_probe_device(struct pci_dev *pdev,
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
u8 mac[ETH_ALEN];
+ int size;
+ int num_tx_queues;
+ int num_rx_queues;
+
+#ifdef VMXNET3_RSS
+ if (enable_mq)
+ num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
+ (int)num_online_cpus());
+ else
+#endif
+ num_rx_queues = 1;
+
+ if (enable_mq)
+ num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
+ (int)num_online_cpus());
+ else
+ num_tx_queues = 1;
+
+ netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
+ max(num_tx_queues, num_rx_queues));
+ printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
+ num_tx_queues, num_rx_queues);
- netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
if (!netdev) {
printk(KERN_ERR "Failed to alloc ethernet device for adapter "
"%s\n", pci_name(pdev));
@@ -2422,9 +2915,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_shared;
}
- adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
- sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc),
+ adapter->num_rx_queues = num_rx_queues;
+ adapter->num_tx_queues = num_tx_queues;
+
+ size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+ size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
+ adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
&adapter->queue_desc_pa);
if (!adapter->tqd_start) {
@@ -2433,8 +2929,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
err = -ENOMEM;
goto err_alloc_queue_desc;
}
- adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
- + 1);
+ adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
+ adapter->num_tx_queues);
adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
if (adapter->pm_conf == NULL) {
@@ -2444,6 +2940,17 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pm;
}
+#ifdef VMXNET3_RSS
+
+ adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
+ if (adapter->rss_conf == NULL) {
+ printk(KERN_ERR "Failed to allocate memory for %s\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto err_alloc_rss;
+ }
+#endif /* VMXNET3_RSS */
+
err = vmxnet3_alloc_pci_resources(adapter, &dma64);
if (err < 0)
goto err_alloc_pci;
@@ -2471,18 +2978,48 @@ vmxnet3_probe_device(struct pci_dev *pdev,
vmxnet3_declare_features(adapter, dma64);
adapter->dev_number = atomic_read(&devices_found);
+
+ adapter->share_intr = irq_share_mode;
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
+ adapter->num_tx_queues != adapter->num_rx_queues)
+ adapter->share_intr = VMXNET3_INTR_DONTSHARE;
+
vmxnet3_alloc_intr_resources(adapter);
+#ifdef VMXNET3_RSS
+ if (adapter->num_rx_queues > 1 &&
+ adapter->intr.type == VMXNET3_IT_MSIX) {
+ adapter->rss = true;
+ printk(KERN_INFO "RSS is enabled.\n");
+ } else {
+ adapter->rss = false;
+ }
+#endif
+
vmxnet3_read_mac_addr(adapter, mac);
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev->netdev_ops = &vmxnet3_netdev_ops;
- netdev->watchdog_timeo = 5 * HZ;
vmxnet3_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
INIT_WORK(&adapter->work, vmxnet3_reset_work);
- netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
+ if (adapter->intr.type == VMXNET3_IT_MSIX) {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ netif_napi_add(adapter->netdev,
+ &adapter->rx_queue[i].napi,
+ vmxnet3_poll_rx_only, 64);
+ }
+ } else {
+ netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
+ vmxnet3_poll, 64);
+ }
+
+ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+ netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
+
SET_NETDEV_DEV(netdev, &pdev->dev);
err = register_netdev(netdev);
@@ -2502,11 +3039,14 @@ err_register:
err_ver:
vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
+#ifdef VMXNET3_RSS
+ kfree(adapter->rss_conf);
+err_alloc_rss:
+#endif
kfree(adapter->pm_conf);
err_alloc_pm:
- pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc),
- adapter->tqd_start, adapter->queue_desc_pa);
+ pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
err_alloc_queue_desc:
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
@@ -2522,17 +3062,32 @@ vmxnet3_remove_device(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int size = 0;
+ int num_rx_queues;
- flush_scheduled_work();
+#ifdef VMXNET3_RSS
+ if (enable_mq)
+ num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
+ (int)num_online_cpus());
+ else
+#endif
+ num_rx_queues = 1;
+
+ cancel_work_sync(&adapter->work);
unregister_netdev(netdev);
vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter);
+#ifdef VMXNET3_RSS
+ kfree(adapter->rss_conf);
+#endif
kfree(adapter->pm_conf);
- pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc),
- adapter->tqd_start, adapter->queue_desc_pa);
+
+ size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+ size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
+ pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
free_netdev(netdev);
@@ -2563,7 +3118,7 @@ vmxnet3_suspend(struct device *device)
vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
/* Create wake-up filters. */
pmConf = adapter->pm_conf;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b79070bcc92e..8e17fc8a7fe7 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -151,44 +151,42 @@ vmxnet3_get_stats(struct net_device *netdev)
struct UPT1_TxStats *devTxStats;
struct UPT1_RxStats *devRxStats;
struct net_device_stats *net_stats = &netdev->stats;
+ int i;
adapter = netdev_priv(netdev);
/* Collect the dev stats into the shared area */
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
- /* Assuming that we have a single queue device */
- devTxStats = &adapter->tqd_start->stats;
- devRxStats = &adapter->rqd_start->stats;
-
- /* Get access to the driver stats per queue */
- drvTxStats = &adapter->tx_queue.stats;
- drvRxStats = &adapter->rx_queue.stats;
-
memset(net_stats, 0, sizeof(*net_stats));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ devTxStats = &adapter->tqd_start[i].stats;
+ drvTxStats = &adapter->tx_queue[i].stats;
+ net_stats->tx_packets += devTxStats->ucastPktsTxOK +
+ devTxStats->mcastPktsTxOK +
+ devTxStats->bcastPktsTxOK;
+ net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
+ devTxStats->mcastBytesTxOK +
+ devTxStats->bcastBytesTxOK;
+ net_stats->tx_errors += devTxStats->pktsTxError;
+ net_stats->tx_dropped += drvTxStats->drop_total;
+ }
- net_stats->rx_packets = devRxStats->ucastPktsRxOK +
- devRxStats->mcastPktsRxOK +
- devRxStats->bcastPktsRxOK;
-
- net_stats->tx_packets = devTxStats->ucastPktsTxOK +
- devTxStats->mcastPktsTxOK +
- devTxStats->bcastPktsTxOK;
-
- net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
- devRxStats->mcastBytesRxOK +
- devRxStats->bcastBytesRxOK;
-
- net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
- devTxStats->mcastBytesTxOK +
- devTxStats->bcastBytesTxOK;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ devRxStats = &adapter->rqd_start[i].stats;
+ drvRxStats = &adapter->rx_queue[i].stats;
+ net_stats->rx_packets += devRxStats->ucastPktsRxOK +
+ devRxStats->mcastPktsRxOK +
+ devRxStats->bcastPktsRxOK;
- net_stats->rx_errors = devRxStats->pktsRxError;
- net_stats->tx_errors = devTxStats->pktsTxError;
- net_stats->rx_dropped = drvRxStats->drop_total;
- net_stats->tx_dropped = drvTxStats->drop_total;
- net_stats->multicast = devRxStats->mcastPktsRxOK;
+ net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
+ devRxStats->mcastBytesRxOK +
+ devRxStats->bcastBytesRxOK;
+ net_stats->rx_errors += devRxStats->pktsRxError;
+ net_stats->rx_dropped += drvRxStats->drop_total;
+ net_stats->multicast += devRxStats->mcastPktsRxOK;
+ }
return net_stats;
}
@@ -307,24 +305,26 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u8 *base;
int i;
+ int j = 0;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
/* this does assume each counter is 64-bit wide */
+ /* TODO: change this for multiple queues */
- base = (u8 *)&adapter->tqd_start->stats;
+ base = (u8 *)&adapter->tqd_start[j].stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
- base = (u8 *)&adapter->tx_queue.stats;
+ base = (u8 *)&adapter->tx_queue[j].stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
- base = (u8 *)&adapter->rqd_start->stats;
+ base = (u8 *)&adapter->rqd_start[j].stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
- base = (u8 *)&adapter->rx_queue.stats;
+ base = (u8 *)&adapter->rx_queue[j].stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
@@ -339,6 +339,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *buf = p;
+ int i = 0;
memset(p, 0, vmxnet3_get_regs_len(netdev));
@@ -347,28 +348,29 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
/* Update vmxnet3_get_regs_len if we want to dump more registers */
/* make each ring use multiple of 16 bytes */
- buf[0] = adapter->tx_queue.tx_ring.next2fill;
- buf[1] = adapter->tx_queue.tx_ring.next2comp;
- buf[2] = adapter->tx_queue.tx_ring.gen;
+ /* TODO: change this for multiple queues */
+ buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
+ buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
+ buf[2] = adapter->tx_queue[i].tx_ring.gen;
buf[3] = 0;
- buf[4] = adapter->tx_queue.comp_ring.next2proc;
- buf[5] = adapter->tx_queue.comp_ring.gen;
- buf[6] = adapter->tx_queue.stopped;
+ buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
+ buf[5] = adapter->tx_queue[i].comp_ring.gen;
+ buf[6] = adapter->tx_queue[i].stopped;
buf[7] = 0;
- buf[8] = adapter->rx_queue.rx_ring[0].next2fill;
- buf[9] = adapter->rx_queue.rx_ring[0].next2comp;
- buf[10] = adapter->rx_queue.rx_ring[0].gen;
+ buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
+ buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
+ buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
buf[11] = 0;
- buf[12] = adapter->rx_queue.rx_ring[1].next2fill;
- buf[13] = adapter->rx_queue.rx_ring[1].next2comp;
- buf[14] = adapter->rx_queue.rx_ring[1].gen;
+ buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
+ buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
+ buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
buf[15] = 0;
- buf[16] = adapter->rx_queue.comp_ring.next2proc;
- buf[17] = adapter->rx_queue.comp_ring.gen;
+ buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
+ buf[17] = adapter->rx_queue[i].comp_ring.gen;
buf[18] = 0;
buf[19] = 0;
}
@@ -435,8 +437,10 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_mini_max_pending = 0;
param->rx_jumbo_max_pending = 0;
- param->rx_pending = adapter->rx_queue.rx_ring[0].size;
- param->tx_pending = adapter->tx_queue.tx_ring.size;
+ param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
+ adapter->num_rx_queues;
+ param->tx_pending = adapter->tx_queue[0].tx_ring.size *
+ adapter->num_tx_queues;
param->rx_mini_pending = 0;
param->rx_jumbo_pending = 0;
}
@@ -480,8 +484,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
sz) != 0)
return -EINVAL;
- if (new_tx_ring_size == adapter->tx_queue.tx_ring.size &&
- new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) {
+ if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
+ new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
return 0;
}
@@ -498,11 +502,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
/* recreate the rx queue and the tx queue based on the
* new sizes */
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+ vmxnet3_tq_destroy_all(adapter);
+ vmxnet3_rq_destroy_all(adapter);
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
+
if (err) {
/* failed, most likely because of OOM, try default
* size */
@@ -535,6 +540,66 @@ out:
}
+static int
+vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
+ void *rules)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = adapter->num_rx_queues;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
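
From userspace, this hook answers the ETHTOOL_GRXRINGS query that ethtool
issues before reading or writing the RSS indirection table. A minimal
sketch of that ioctl path (userspace C, purely illustrative):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Returns the rx ring count reported by vmxnet3_get_rxnfc(),
	 * or -1 on error; 'sock' is any open AF_INET socket. */
	static int get_rx_ring_count(int sock, const char *ifname)
	{
		struct ethtool_rxnfc cmd = { .cmd = ETHTOOL_GRXRINGS };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cmd;
		if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
			return -1;
		return (int)cmd.data;
	}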
+
+#ifdef VMXNET3_RSS
+static int
+vmxnet3_get_rss_indir(struct net_device *netdev,
+ struct ethtool_rxfh_indir *p)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
+
+ p->size = rssConf->indTableSize;
+ while (n--)
+ p->ring_index[n] = rssConf->indTable[n];
+ return 0;
+
+}
+
+static int
+vmxnet3_set_rss_indir(struct net_device *netdev,
+ const struct ethtool_rxfh_indir *p)
+{
+ unsigned int i;
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+ if (p->size != rssConf->indTableSize)
+ return -EINVAL;
+ for (i = 0; i < rssConf->indTableSize; i++) {
+ /*
+ * Return with error code if any of the queue indices
+ * is out of range
+ */
+ /* ring_index is unsigned, so only the upper bound needs checking */
+ if (p->ring_index[i] >= adapter->num_rx_queues)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rssConf->indTableSize; i++)
+ rssConf->indTable[i] = p->ring_index[i];
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_RSSIDT);
+
+ return 0;
+
+}
+#endif
+
static struct ethtool_ops vmxnet3_ethtool_ops = {
.get_settings = vmxnet3_get_settings,
.get_drvinfo = vmxnet3_get_drvinfo,
@@ -558,6 +623,11 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
.get_ringparam = vmxnet3_get_ringparam,
.set_ringparam = vmxnet3_set_ringparam,
+ .get_rxnfc = vmxnet3_get_rxnfc,
+#ifdef VMXNET3_RSS
+ .get_rxfh_indir = vmxnet3_get_rss_indir,
+ .set_rxfh_indir = vmxnet3_set_rss_indir,
+#endif
};
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index edf228843afc..7fadeed37f03 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,11 +68,15 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01000E00
+#define VMXNET3_DRIVER_VERSION_NUM 0x01001000
+#if defined(CONFIG_PCI_MSI)
+ /* RSS only makes sense if MSI-X is supported. */
+ #define VMXNET3_RSS
+#endif
/*
* Capabilities
@@ -218,16 +222,19 @@ struct vmxnet3_tx_ctx {
};
struct vmxnet3_tx_queue {
+ char name[IFNAMSIZ+8]; /* To identify interrupt */
+ struct vmxnet3_adapter *adapter;
spinlock_t tx_lock;
struct vmxnet3_cmd_ring tx_ring;
- struct vmxnet3_tx_buf_info *buf_info;
+ struct vmxnet3_tx_buf_info *buf_info;
struct vmxnet3_tx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
- struct Vmxnet3_TxQueueCtrl *shared;
+ struct Vmxnet3_TxQueueCtrl *shared;
struct vmxnet3_tq_driver_stats stats;
bool stopped;
int num_stop; /* # of times the queue is
* stopped */
+ int qid;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
enum vmxnet3_rx_buf_type {
@@ -259,6 +266,9 @@ struct vmxnet3_rq_driver_stats {
};
struct vmxnet3_rx_queue {
+ char name[IFNAMSIZ + 8]; /* To identify interrupt */
+ struct vmxnet3_adapter *adapter;
+ struct napi_struct napi;
struct vmxnet3_cmd_ring rx_ring[2];
struct vmxnet3_comp_ring comp_ring;
struct vmxnet3_rx_ctx rx_ctx;
@@ -271,7 +281,16 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rq_driver_stats stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-#define VMXNET3_LINUX_MAX_MSIX_VECT 1
+#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
+#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */
+
+/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
+#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
+
+#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
+ VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
+#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */
+
struct vmxnet3_intr {
enum vmxnet3_intr_mask_mode mask_mode;
@@ -279,27 +298,32 @@ struct vmxnet3_intr {
u8 num_intrs; /* # of intr vectors */
u8 event_intr_idx; /* idx of the intr vector for event */
u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
+ char event_msi_vector_name[IFNAMSIZ+11];
#ifdef CONFIG_PCI_MSI
struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};
+/* Interrupt sharing schemes, share_intr */
+#define VMXNET3_INTR_BUDDYSHARE 0 /* Corresponding tx,rx queues share irq */
+#define VMXNET3_INTR_TXSHARE 1 /* All tx queues share one irq */
+#define VMXNET3_INTR_DONTSHARE 2 /* each queue has its own irq */
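
The three schemes trade vector count for isolation. A small sketch of the
resulting arithmetic, mirroring the logic in
vmxnet3_alloc_intr_resources() (the helper name is made up):

	/* Vectors requested per scheme, +1 event vector in every case;
	 * e.g. 4 tx / 4 rx queues: DONTSHARE 9, TXSHARE 6, BUDDYSHARE 5. */
	static inline u32 msix_vectors_needed(u32 ntx, u32 nrx, int share_intr)
	{
		u32 n = (share_intr == VMXNET3_INTR_TXSHARE) ? 1 : ntx;

		n += (share_intr == VMXNET3_INTR_BUDDYSHARE) ? 0 : nrx;
		return n + 1;	/* event interrupt */
	}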
+
+
#define VMXNET3_STATE_BIT_RESETTING 0
#define VMXNET3_STATE_BIT_QUIESCED 1
struct vmxnet3_adapter {
- struct vmxnet3_tx_queue tx_queue;
- struct vmxnet3_rx_queue rx_queue;
- struct napi_struct napi;
- struct vlan_group *vlan_grp;
-
- struct vmxnet3_intr intr;
-
- struct Vmxnet3_DriverShared *shared;
- struct Vmxnet3_PMConf *pm_conf;
- struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */
- struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */
- struct net_device *netdev;
- struct pci_dev *pdev;
+ struct vmxnet3_tx_queue tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
+ struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
+ struct vlan_group *vlan_grp;
+ struct vmxnet3_intr intr;
+ struct Vmxnet3_DriverShared *shared;
+ struct Vmxnet3_PMConf *pm_conf;
+ struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
+ struct Vmxnet3_RxQueueDesc *rqd_start; /* all rx queue desc */
+ struct net_device *netdev;
+ struct net_device_stats net_stats;
+ struct pci_dev *pdev;
u8 __iomem *hw_addr0; /* for BAR 0 */
u8 __iomem *hw_addr1; /* for BAR 1 */
@@ -308,6 +332,12 @@ struct vmxnet3_adapter {
bool rxcsum;
bool lro;
bool jumbo_frame;
+#ifdef VMXNET3_RSS
+ struct UPT1_RSSConf *rss_conf;
+ bool rss;
+#endif
+ u32 num_rx_queues;
+ u32 num_tx_queues;
/* rx buffer related */
unsigned skb_buf_size;
@@ -327,6 +357,7 @@ struct vmxnet3_adapter {
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
int dev_number;
+ int share_intr;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -366,12 +397,10 @@ void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
void
-vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
- struct vmxnet3_adapter *adapter);
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
void
-vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
- struct vmxnet3_adapter *adapter);
+vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 906a3ca3676b..01c05f53e2f9 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -19,109 +19,128 @@
#include "vxge-traffic.h"
#include "vxge-config.h"
-
-static enum vxge_hw_status
-__vxge_hw_fifo_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_fifo_attr *attr);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_abort(
- struct __vxge_hw_fifo *fifoh);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_reset(
- struct __vxge_hw_fifo *ringh);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_delete(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-static struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
- u32 size);
+#include "vxge-main.h"
+
+#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
+ status = __vxge_hw_vpath_stats_access(vpath, \
+ VXGE_HW_STATS_OP_READ, \
+ offset, \
+ &val64); \
+ if (status != VXGE_HW_OK) \
+ return status; \
+}
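
Note that this macro expects 'vpath', 'status' and 'val64' to exist in
the caller's scope and returns from the *calling* function on error. A
sketch of the intended shape (this caller is hypothetical; the real ones
appear later in this file):

	static enum vxge_hw_status
	stats_read_sketch(struct __vxge_hw_virtualpath *vpath, u32 offset,
			  u64 *out)
	{
		enum vxge_hw_status status;
		u64 val64;

		VXGE_HW_VPATH_STATS_PIO_READ(offset);	/* may return early */
		*out = val64;
		return VXGE_HW_OK;
	}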
static void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool_entry *entry);
-
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle);
-
-static enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max);
+vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
+{
+ u64 val64;
-static void
-__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
+ val64 = readq(&vp_reg->rxmac_vcfg0);
+ val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+ writeq(val64, &vp_reg->rxmac_vcfg0);
+ val64 = readq(&vp_reg->rxmac_vcfg0);
+}
-static void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
+/*
+ * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
+ */
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct __vxge_hw_virtualpath *vpath;
+ u64 val64, rxd_count, rxd_spat;
+ int count = 0, total_count = 0;
-static void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
- void *memblock,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
+ vpath = &hldev->virtual_paths[vp_id];
+ vp_reg = vpath->vp_reg;
+ vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
-static struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type, u32 length,
- u32 per_dtr_space, void *userdata);
+ /* Check that the ring controller for this vpath has enough free RxDs
+ * to send frames to the host. This is done by reading the
+ * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
+ * RXD_SPAT value for the vpath.
+ */
+ val64 = readq(&vp_reg->prc_cfg6);
+ rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
+ /* Use a factor of 2 when comparing rxd_count against rxd_spat to allow
+ * some headroom.
+ */
+ rxd_spat *= 2;
-static void
-__vxge_hw_channel_free(
- struct __vxge_hw_channel *channel);
+ do {
+ mdelay(1);
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(
- struct __vxge_hw_channel *channel);
+ rxd_count = readq(&vp_reg->prc_rxd_doorbell);
-static enum vxge_hw_status
-__vxge_hw_channel_reset(
- struct __vxge_hw_channel *channel);
+ /* Check that the ring controller for this vpath does
+ * not have any frame in its pipeline.
+ */
+ val64 = readq(&vp_reg->frm_in_progress_cnt);
+ if ((rxd_count <= rxd_spat) || (val64 > 0))
+ count = 0;
+ else
+ count++;
+ total_count++;
+ } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
+ (total_count < VXGE_HW_MAX_POLLING_COUNT));
-static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
+ if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+ printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
+ __func__);
-static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
+ return total_count;
+}
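
The loop above debounces the idle check: the vpath only counts as idle
after VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT consecutive quiet 1ms samples,
and the streak resets whenever free RxDs run low or frames are still in
flight. The same pattern in generic form (a sketch, not driver code):

	/* Require n consecutive positive 1ms samples; give up after a
	 * total of max samples. Returns true if stability was reached. */
	static bool wait_condition_stable(bool (*sample)(void *ctx), void *ctx,
					  int n, int max)
	{
		int count = 0, total = 0;

		do {
			mdelay(1);
			count = sample(ctx) ? count + 1 : 0;
		} while (count < n && ++total < max);

		return count >= n;
	}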
-static enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
+/* vxge_hw_device_wait_receive_idle - This function waits until all frames
+ * stored in the frame buffer for each vpath assigned to the given
+ * function (hldev) have been sent to the host.
+ */
+void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
+{
+ int i, total_count = 0;
-static void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+ continue;
-static void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
+ total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
+ if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+ break;
+ }
+}
+/*
+ * __vxge_hw_device_register_poll
+ * Will poll certain register for specified amount of time.
+ * Will poll until masked bit is not cleared.
+ */
static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
+__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
+{
+ u64 val64;
+ u32 i = 0;
+ enum vxge_hw_status ret = VXGE_HW_FAIL;
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
+ udelay(10);
-static void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
+ do {
+ val64 = readq(reg);
+ if (!(val64 & mask))
+ return VXGE_HW_OK;
+ udelay(100);
+ } while (++i <= 9);
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
+ i = 0;
+ do {
+ val64 = readq(reg);
+ if (!(val64 & mask))
+ return VXGE_HW_OK;
+ mdelay(1);
+ } while (++i <= max_millis);
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(
- void __iomem *reg,
- u64 mask, u32 max_millis);
+ return ret;
+}
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
@@ -129,139 +148,258 @@ __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
{
__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
wmb();
-
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
wmb();
- return __vxge_hw_device_register_poll(addr, mask, max_millis);
+ return __vxge_hw_device_register_poll(addr, mask, max_millis);
}
-static struct vxge_hw_mempool*
-__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
- u32 item_size, u32 private_size, u32 items_initial,
- u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata);
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
-
static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats);
+vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
+ u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
+ u64 *steer_ctrl)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ enum vxge_hw_status status;
+ u64 val64;
+ u32 retry = 0, max_retry = 100;
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
+ vp_reg = vpath->vp_reg;
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
+ if (vpath->vp_open) {
+ max_retry = 3;
+ spin_lock(&vpath->lock);
+ }
-static u64
-__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
+ writeq(*data0, &vp_reg->rts_access_steer_data0);
+ writeq(*data1, &vp_reg->rts_access_steer_data1);
+ wmb();
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+ val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
+ *steer_ctrl;
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+ status = __vxge_hw_pio_mem_write64(val64,
+ &vp_reg->rts_access_steer_ctrl,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+ VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+
+ /* The __vxge_hw_device_register_poll can udelay for a significant
+ * amount of time, blocking other processes from the CPU. If it delays
+ * for ~5secs, an NMI error can occur. A way around this is to give up
+ * the processor via msleep, but this is not allowed while holding a
+ * lock. So, if the vpath is open (lock held), just retry the poll a
+ * few times without sleeping; otherwise sleep for 20ms between polls
+ * until the firmware operation has completed or timed out.
+ */
+ while ((status != VXGE_HW_OK) && retry++ < max_retry) {
+ if (!vpath->vp_open)
+ msleep(20);
+ status = __vxge_hw_device_register_poll(
+ &vp_reg->rts_access_steer_ctrl,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+ VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+ }
-static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+ if (status != VXGE_HW_OK)
+ goto out;
+ val64 = readq(&vp_reg->rts_access_steer_ctrl);
+ if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+ *data0 = readq(&vp_reg->rts_access_steer_data0);
+ *data1 = readq(&vp_reg->rts_access_steer_data1);
+ *steer_ctrl = val64;
+ } else
+ status = VXGE_HW_FAIL;
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+out:
+ if (vpath->vp_open)
+ spin_unlock(&vpath->lock);
+ return status;
+}
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+ u32 *minor, u32 *build)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_READ,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
- u32 operation, u32 offset, u64 *stat);
+ *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+ *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+ *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+ return status;
+}
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ u32 ret;
-/*
- * __vxge_hw_channel_allocate - Allocate memory for channel
- * This function allocates required memory for the channel and various arrays
- * in the channel
- */
-struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type,
- u32 length, u32 per_dtr_space, void *userdata)
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
+ goto exit;
+ }
+
+ ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
+ if (ret != 1) {
+ vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
+ __func__, ret);
+ status = VXGE_HW_FAIL;
+ }
+
+exit:
+ return status;
+}
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
{
- struct __vxge_hw_channel *channel;
- struct __vxge_hw_device *hldev;
- int size = 0;
- u32 vp_id;
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ int ret_code, sec_code;
- hldev = vph->vpath->hldev;
- vp_id = vph->vpath->vp_id;
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
- switch (type) {
- case VXGE_HW_CHANNEL_TYPE_FIFO:
- size = sizeof(struct __vxge_hw_fifo);
- break;
- case VXGE_HW_CHANNEL_TYPE_RING:
- size = sizeof(struct __vxge_hw_ring);
- break;
- default:
- break;
+ /* send upgrade start command */
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_START,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
+ __func__);
+ return status;
}
- channel = kzalloc(size, GFP_KERNEL);
- if (channel == NULL)
- goto exit0;
- INIT_LIST_HEAD(&channel->item);
+ /* Transfer fw image to adapter 16 bytes at a time */
+ for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
+ steer_ctrl = 0;
- channel->common_reg = hldev->common_reg;
- channel->first_vp_id = hldev->first_vp_id;
- channel->type = type;
- channel->devh = hldev;
- channel->vph = vph;
- channel->userdata = userdata;
- channel->per_dtr_space = per_dtr_space;
- channel->length = length;
- channel->vp_id = vp_id;
+ /* The next 128bits of fwdata to be loaded onto the adapter */
+ data0 = *((u64 *)fwdata);
+ data1 = *((u64 *)fwdata + 1);
- channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
- if (channel->work_arr == NULL)
- goto exit1;
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_SEND,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
+ __func__);
+ goto out;
+ }
- channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
- if (channel->free_arr == NULL)
- goto exit1;
- channel->free_ptr = length;
+ ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
+ switch (ret_code) {
+ case VXGE_HW_FW_UPGRADE_OK:
+ /* All OK, send next 16 bytes. */
+ break;
+ case VXGE_FW_UPGRADE_BYTES2SKIP:
+ /* skip bytes in the stream */
+ fwdata += (data0 >> 8) & 0xFFFFFFFF;
+ break;
+ case VXGE_HW_FW_UPGRADE_DONE:
+ goto out;
+ case VXGE_HW_FW_UPGRADE_ERR:
+ sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
+ switch (sec_code) {
+ case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
+ case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
+ printk(KERN_ERR
+ "corrupted data from .ncf file\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
+ printk(KERN_ERR "invalid .ncf file\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
+ printk(KERN_ERR "buffer overflow\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
+ printk(KERN_ERR "failed to flash the image\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
+ printk(KERN_ERR
+ "generic error. Unknown error type\n");
+ break;
+ default:
+ printk(KERN_ERR "Unknown error of type %d\n",
+ sec_code);
+ break;
+ }
+ status = VXGE_HW_FAIL;
+ goto out;
+ default:
+ printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
+ status = VXGE_HW_FAIL;
+ goto out;
+ }
+ /* point to next 16 bytes */
+ fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
+ }
+out:
+ return status;
+}
- channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
- if (channel->reserve_arr == NULL)
- goto exit1;
- channel->reserve_ptr = length;
- channel->reserve_top = 0;
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+ struct eprom_image *img)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ int i;
- channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
- if (channel->orig_arr == NULL)
- goto exit1;
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
- return channel;
-exit1:
- __vxge_hw_channel_free(channel);
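+	/* Read validity, index, type and revision for each ROM image slot */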
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+ data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
+ data1 = steer_ctrl = 0;
-exit0:
- return NULL;
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ VXGE_HW_FW_API_GET_EPROM_REV,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ break;
+
+ img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
+ img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
+ img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
+ img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
+ }
+
+ return status;
}
/*
@@ -269,7 +407,7 @@ exit0:
* This function deallocates memory from the channel and various arrays
* in the channel
*/
-void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
+static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
kfree(channel->work_arr);
kfree(channel->free_arr);
@@ -283,7 +421,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
* This function initializes a channel by properly setting the
* various references
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
u32 i;
@@ -318,7 +456,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
* __vxge_hw_channel_reset - Resets a channel
* This function resets a channel by properly setting the various references
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
u32 i;
@@ -345,8 +483,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
* Initialize certain PCI/PCI-X configuration registers
* with recommended values. Save config space for future hw resets.
*/
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
u16 cmd = 0;
@@ -358,39 +495,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
pci_save_state(hldev->pdev);
}
-/*
- * __vxge_hw_device_register_poll
- * Will poll certain register for specified amount of time.
- * Will poll until masked bit is not cleared.
- */
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
-{
- u64 val64;
- u32 i = 0;
- enum vxge_hw_status ret = VXGE_HW_FAIL;
-
- udelay(10);
-
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- udelay(100);
- } while (++i <= 9);
-
- i = 0;
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- mdelay(1);
- } while (++i <= max_millis);
-
- return ret;
-}
-
- /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
+/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
* in progress
* This routine checks the vpath reset in progress register is turned zero
*/
@@ -405,6 +510,60 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
}
/*
+ * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
+ * Set the swapper bits appropriately for the legacy section.
+ */
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ val64 = readq(&legacy_reg->toc_swapper_fb);
+
+ wmb();
+
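+	/* The feedback register shows how the host reads a fixed pattern;
+	 * program the PIF read/write swap and flip enables to compensate.
+	 */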
+ switch (val64) {
+ case VXGE_HW_SWAPPER_INITIAL_VALUE:
+ return status;
+
+ case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_rd_swap_en);
+ writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_rd_flip_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_wr_swap_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_wr_flip_en);
+ break;
+
+ case VXGE_HW_SWAPPER_BYTE_SWAPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_rd_swap_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_wr_swap_en);
+ break;
+
+ case VXGE_HW_SWAPPER_BIT_FLIPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_rd_flip_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_wr_flip_en);
+ break;
+ }
+
+ wmb();
+
+ val64 = readq(&legacy_reg->toc_swapper_fb);
+
+ if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
+ status = VXGE_HW_ERR_SWAPPER_CTRL;
+
+ return status;
+}
+
+/*
* __vxge_hw_device_toc_get
* This routine sets the swapper and reads the toc pointer and returns the
* memory mapped address of the toc
@@ -435,7 +594,7 @@ exit:
* register location pointers in the device object. It waits until the ric is
* completed initializing registers.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
u64 val64;
@@ -496,26 +655,6 @@ exit:
}
/*
- * __vxge_hw_device_id_get
- * This routine returns sets the device id and revision numbers into the device
- * structure
- */
-void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
-
- val64 = readq(&hldev->common_reg->titan_asic_id);
- hldev->device_id =
- (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
-
- hldev->major_revision =
- (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
-
- hldev->minor_revision =
- (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
-}
-
-/*
* __vxge_hw_device_access_rights_get: Get Access Rights of the driver
* This routine returns the Access Rights of the driver
*/
@@ -568,10 +707,25 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
}
/*
+ * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
+ * Returns the function number of the vpath.
+ */
+static u32
+__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
+{
+ u64 val64;
+
+ val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
+
+ return
+ (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
+}
+
+/*
* __vxge_hw_device_host_info_get
* This routine returns the host type assignments
*/
-void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
u64 val64;
u32 i;
@@ -584,16 +738,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!(hldev->vpath_assignments & vxge_mBIT(i)))
continue;
hldev->func_id =
- __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);
+ __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
hldev->access_rights = __vxge_hw_device_access_rights_get(
hldev->host_type, hldev->func_id);
+ hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
+ hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
+
hldev->first_vp_id = i;
break;
}
@@ -634,7 +790,8 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
* __vxge_hw_device_initialize
* Initialize Titan-V hardware.
*/
-enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -650,6 +807,196 @@ exit:
return status;
}
+/*
+ * __vxge_hw_vpath_fw_ver_get - Get the fw version
+ * Returns FW Version
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
+ struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
+ struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
+ struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
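+	/* data0 holds the firmware version and date, data1 the flash version and date */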
+ fw_date->day =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
+ fw_date->month =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
+ fw_date->year =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
+
+ snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+ fw_date->month, fw_date->day, fw_date->year);
+
+ fw_version->major =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+ fw_version->minor =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+ fw_version->build =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+ snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+ fw_version->major, fw_version->minor, fw_version->build);
+
+ flash_date->day =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
+ flash_date->month =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
+ flash_date->year =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
+
+ snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+ flash_date->month, flash_date->day, flash_date->year);
+
+ flash_version->major =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
+ flash_version->minor =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
+ flash_version->build =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
+
+ snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+ flash_version->major, flash_version->minor,
+ flash_version->build);
+
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_card_info_get - Get the serial numbers,
+ * part number and product description.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ enum vxge_hw_status status;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ u8 *serial_number = hw_info->serial_number;
+ u8 *part_number = hw_info->part_number;
+ u8 *product_desc = hw_info->product_desc;
+ u32 i, j = 0;
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
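+	/* The serial number is returned as two big-endian 64-bit words */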
+ ((u64 *)serial_number)[0] = be64_to_cpu(data0);
+ ((u64 *)serial_number)[1] = be64_to_cpu(data1);
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
+ data1 = steer_ctrl = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)part_number)[0] = be64_to_cpu(data0);
+ ((u64 *)part_number)[1] = be64_to_cpu(data1);
+
+ for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
+ i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
+ data0 = i;
+ data1 = steer_ctrl = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
+ ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
+ }
+
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
+ * Returns pci function mode
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+
+ data0 = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_API_GET_FUNC_MODE,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
+ * from MAC address table.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
+ u8 *macaddr, u8 *macaddr_mask)
+{
+ u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+ data0 = 0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+ int i;
+
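+	/* Walk the DA table from the first entry until a valid MAC address is found */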
+ do {
+ status = vxge_hw_vpath_fw_api(vpath, action,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
+ data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
+ data1);
+
+ for (i = ETH_ALEN; i > 0; i--) {
+ macaddr[i - 1] = (u8) (data0 & 0xFF);
+ data0 >>= 8;
+
+ macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
+ data1 >>= 8;
+ }
+
+ action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
+ data0 = 0, data1 = 0, steer_ctrl = 0;
+
+ } while (!is_valid_ether_addr(macaddr));
+exit:
+ return status;
+}
+
/**
* vxge_hw_device_hw_info_get - Get the hw information
* Returns the vpath mask that has the bits set for each vpath allocated
@@ -665,9 +1012,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
struct vxge_hw_toc_reg __iomem *toc;
struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_vpath_reg __iomem *vpath_reg;
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
enum vxge_hw_status status;
+ struct __vxge_hw_virtualpath vpath;
memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
@@ -693,7 +1040,6 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
(u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
continue;
@@ -702,7 +1048,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
(bar0 + val64);
- hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
+ hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
if (__vxge_hw_device_access_rights_get(hw_info->host_type,
hw_info->func_id) &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
@@ -718,16 +1064,19 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+ vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+ (bar0 + val64);
+ vpath.vp_open = 0;
- hw_info->function_mode =
- __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);
+ status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
+ if (status != VXGE_HW_OK)
+ goto exit;
- status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
+ status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
if (status != VXGE_HW_OK)
goto exit;
- status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
+ status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
if (status != VXGE_HW_OK)
goto exit;
@@ -735,14 +1084,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
}
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
continue;
val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+ vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+ (bar0 + val64);
+ vpath.vp_open = 0;
- status = __vxge_hw_vpath_addr_get(i, vpath_reg,
+ status = __vxge_hw_vpath_addr_get(&vpath,
hw_info->mac_addrs[i],
hw_info->mac_addr_masks[i]);
if (status != VXGE_HW_OK)
@@ -753,6 +1103,218 @@ exit:
}
/*
+ * __vxge_hw_blockpool_destroy - Deallocates the block pool
+ */
+static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
+{
+ struct __vxge_hw_device *hldev;
+ struct list_head *p, *n;
+ u16 ret;
+
+ if (blockpool == NULL) {
+ ret = 1;
+ goto exit;
+ }
+
+ hldev = blockpool->hldev;
+
+ list_for_each_safe(p, n, &blockpool->free_block_list) {
+ pci_unmap_single(hldev->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+ ((struct __vxge_hw_blockpool_entry *)p)->length,
+ PCI_DMA_BIDIRECTIONAL);
+
+ vxge_os_dma_free(hldev->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->memblock,
+ &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
+
+ list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+ kfree(p);
+ blockpool->pool_size--;
+ }
+
+ list_for_each_safe(p, n, &blockpool->free_entry_list) {
+ list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+ kfree((void *)p);
+ }
+ ret = 0;
+exit:
+ return;
+}
+
+/*
+ * __vxge_hw_blockpool_create - Create block pool
+ */
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool *blockpool,
+ u32 pool_size,
+ u32 pool_max)
+{
+ u32 i;
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ void *memblock;
+ dma_addr_t dma_addr;
+ struct pci_dev *dma_handle;
+ struct pci_dev *acc_handle;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (blockpool == NULL) {
+ status = VXGE_HW_FAIL;
+ goto blockpool_create_exit;
+ }
+
+ blockpool->hldev = hldev;
+ blockpool->block_size = VXGE_HW_BLOCK_SIZE;
+ blockpool->pool_size = 0;
+ blockpool->pool_max = pool_max;
+ blockpool->req_out = 0;
+
+ INIT_LIST_HEAD(&blockpool->free_block_list);
+ INIT_LIST_HEAD(&blockpool->free_entry_list);
+
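+	/* Pre-allocate list entries to cover the maximum pool occupancy */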
+ for (i = 0; i < pool_size + pool_max; i++) {
+ entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
+ GFP_KERNEL);
+ if (entry == NULL) {
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+ list_add(&entry->item, &blockpool->free_entry_list);
+ }
+
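+	/* Allocate, DMA-map and enqueue the initial pool_size blocks */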
+ for (i = 0; i < pool_size; i++) {
+ memblock = vxge_os_dma_malloc(
+ hldev->pdev,
+ VXGE_HW_BLOCK_SIZE,
+ &dma_handle,
+ &acc_handle);
+ if (memblock == NULL) {
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+
+ dma_addr = pci_map_single(hldev->pdev, memblock,
+ VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(pci_dma_mapping_error(hldev->pdev,
+ dma_addr))) {
+ vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+
+ if (!list_empty(&blockpool->free_entry_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_entry_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry == NULL)
+ entry =
+ kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
+ GFP_KERNEL);
+ if (entry != NULL) {
+ list_del(&entry->item);
+ entry->length = VXGE_HW_BLOCK_SIZE;
+ entry->memblock = memblock;
+ entry->dma_addr = dma_addr;
+ entry->acc_handle = acc_handle;
+ entry->dma_handle = dma_handle;
+ list_add(&entry->item,
+ &blockpool->free_block_list);
+ blockpool->pool_size++;
+ } else {
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+ }
+
+blockpool_create_exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_device_fifo_config_check - Check fifo configuration.
+ * Check the fifo configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
+{
+ if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
+ (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
+ return VXGE_HW_BADCFG_FIFO_BLOCKS;
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_vpath_config_check - Check vpath configuration.
+ * Check the vpath configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
+{
+ enum vxge_hw_status status;
+
+ if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
+ (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
+ return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
+
+ status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
+ ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
+ (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
+ return VXGE_HW_BADCFG_VPATH_MTU;
+
+ if ((vp_config->rpa_strip_vlan_tag !=
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
+ (vp_config->rpa_strip_vlan_tag !=
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
+ (vp_config->rpa_strip_vlan_tag !=
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
+ return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_config_check - Check device configuration.
+ * Check the device configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
+{
+ u32 i;
+ enum vxge_hw_status status;
+
+ if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
+ (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
+ (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
+ (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
+ return VXGE_HW_BADCFG_INTR_MODE;
+
+ if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
+ (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
+ return VXGE_HW_BADCFG_RTS_MAC_EN;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ status = __vxge_hw_device_vpath_config_check(
+ &new_config->vp_config[i]);
+ if (status != VXGE_HW_OK)
+ return status;
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
* vxge_hw_device_initialize - Initialize Titan device.
* Initialize Titan device. Note that all the arguments of this public API
* are 'IN', including @hldev. Driver cooperates with
@@ -776,14 +1338,12 @@ vxge_hw_device_initialize(
if (status != VXGE_HW_OK)
goto exit;
- hldev = (struct __vxge_hw_device *)
- vmalloc(sizeof(struct __vxge_hw_device));
+ hldev = vzalloc(sizeof(struct __vxge_hw_device));
if (hldev == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
- memset(hldev, 0, sizeof(struct __vxge_hw_device));
hldev->magic = VXGE_HW_DEVICE_MAGIC;
vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
@@ -806,7 +1366,6 @@ vxge_hw_device_initialize(
vfree(hldev);
goto exit;
}
- __vxge_hw_device_id_get(hldev);
__vxge_hw_device_host_info_get(hldev);
@@ -814,7 +1373,6 @@ vxge_hw_device_initialize(
nblocks++;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!(hldev->vpath_assignments & vxge_mBIT(i)))
continue;
@@ -839,7 +1397,6 @@ vxge_hw_device_initialize(
}
status = __vxge_hw_device_initialize(hldev);
-
if (status != VXGE_HW_OK) {
vxge_hw_device_terminate(hldev);
goto exit;
@@ -865,6 +1422,242 @@ vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
}
/*
+ * __vxge_hw_vpath_stats_access - Get the statistics from the given location
+ * and offset and perform an operation
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+ u32 operation, u32 offset, u64 *stat)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto vpath_stats_access_exit;
+ }
+
+ vp_reg = vpath->vp_reg;
+
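+	/* Write the command with the strobe bit set; the helper polls until the strobe clears */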
+ val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
+ VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
+ VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
+
+ status = __vxge_hw_pio_mem_write64(val64,
+ &vp_reg->xmac_stats_access_cmd,
+ VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
+ vpath->hldev->config.device_poll_millis);
+ if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
+ *stat = readq(&vp_reg->xmac_stats_access_data);
+ else
+ *stat = 0;
+
+vpath_stats_access_exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
+{
+ u64 *val64;
+ int i;
+ u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ val64 = (u64 *)vpath_tx_stats;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+
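+	/* Each TX counter is read as one 64-bit word at consecutive offsets */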
+ for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
+ status = __vxge_hw_vpath_stats_access(vpath,
+ VXGE_HW_STATS_OP_READ,
+ offset, val64);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ offset++;
+ val64++;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+{
+ u64 *val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ int i;
+ u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
+ val64 = (u64 *) vpath_rx_stats;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+ for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
+ status = __vxge_hw_vpath_stats_access(vpath,
+ VXGE_HW_STATS_OP_READ,
+ offset >> 3, val64);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ offset += 8;
+ val64++;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+ vp_reg = vpath->vp_reg;
+
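+	/* Read the vpath debug, genstats and XMAC counters into hw_stats */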
+ val64 = readq(&vp_reg->vpath_debug_stats0);
+ hw_stats->ini_num_mwr_sent =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats1);
+ hw_stats->ini_num_mrd_sent =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats2);
+ hw_stats->ini_num_cpl_rcvd =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats3);
+ hw_stats->ini_num_mwr_byte_sent =
+ VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats4);
+ hw_stats->ini_num_cpl_byte_rcvd =
+ VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats5);
+ hw_stats->wrcrdtarb_xoff =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats6);
+ hw_stats->rdcrdtarb_xoff =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count01);
+ hw_stats->vpath_genstats_count0 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count01);
+ hw_stats->vpath_genstats_count1 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count23);
+ hw_stats->vpath_genstats_count2 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count01);
+ hw_stats->vpath_genstats_count3 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count4);
+ hw_stats->vpath_genstats_count4 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count5);
+ hw_stats->vpath_genstats_count5 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
+ val64);
+
+ status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ VXGE_HW_VPATH_STATS_PIO_READ(
+ VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
+
+ hw_stats->prog_event_vnum0 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
+
+ hw_stats->prog_event_vnum1 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
+
+ VXGE_HW_VPATH_STATS_PIO_READ(
+ VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
+
+ hw_stats->prog_event_vnum2 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
+
+ hw_stats->prog_event_vnum3 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
+
+ val64 = readq(&vp_reg->rx_multi_cast_stats);
+ hw_stats->rx_multi_cast_frame_discard =
+ (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
+
+ val64 = readq(&vp_reg->rx_frm_transferred);
+ hw_stats->rx_frm_transferred =
+ (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
+
+ val64 = readq(&vp_reg->rxd_returned);
+ hw_stats->rxd_returned =
+ (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
+
+ val64 = readq(&vp_reg->dbg_stats_rx_mpa);
+ hw_stats->rx_mpa_len_fail_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
+ hw_stats->rx_mpa_mrk_fail_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
+ hw_stats->rx_mpa_crc_fail_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
+
+ val64 = readq(&vp_reg->dbg_stats_rx_fau);
+ hw_stats->rx_permitted_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
+ hw_stats->rx_vp_reset_discarded_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
+ hw_stats->rx_wol_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
+
+ val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
+ hw_stats->tx_vp_reset_discarded_frms =
+ (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
+ val64);
+exit:
+ return status;
+}
+
+/*
* vxge_hw_device_stats_get - Get the device hw statistics.
* Returns the vpath h/w stats for the device.
*/
@@ -876,7 +1669,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
enum vxge_hw_status status = VXGE_HW_OK;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
(hldev->virtual_paths[i].vp_open ==
VXGE_HW_VP_NOT_OPEN))
@@ -1031,7 +1823,6 @@ vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
status = vxge_hw_device_xmac_aggr_stats_get(hldev,
0, &xmac_stats->aggr_stats[0]);
-
if (status != VXGE_HW_OK)
goto exit;
@@ -1165,7 +1956,6 @@ exit:
* It can be used to set or reset Pause frame generation or reception
* support of the NIC.
*/
-
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
u32 port, u32 tx, u32 rx)
{
@@ -1407,190 +2197,359 @@ exit:
}
/*
- * __vxge_hw_ring_create - Create a Ring
- * This function creates Ring and initializes it.
- *
+ * __vxge_hw_channel_allocate - Allocate memory for channel
+ * This function allocates required memory for the channel and various arrays
+ * in the channel
*/
-static enum vxge_hw_status
-__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
- struct vxge_hw_ring_attr *attr)
+static struct __vxge_hw_channel *
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+ enum __vxge_hw_channel_type type,
+ u32 length, u32 per_dtr_space,
+ void *userdata)
{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_ring *ring;
- u32 ring_length;
- struct vxge_hw_ring_config *config;
+ struct __vxge_hw_channel *channel;
struct __vxge_hw_device *hldev;
+ int size = 0;
u32 vp_id;
- struct vxge_hw_mempool_cbs ring_mp_callback;
- if ((vp == NULL) || (attr == NULL)) {
+ hldev = vph->vpath->hldev;
+ vp_id = vph->vpath->vp_id;
+
+ switch (type) {
+ case VXGE_HW_CHANNEL_TYPE_FIFO:
+ size = sizeof(struct __vxge_hw_fifo);
+ break;
+ case VXGE_HW_CHANNEL_TYPE_RING:
+ size = sizeof(struct __vxge_hw_ring);
+ break;
+ default:
+ break;
+ }
+
+ channel = kzalloc(size, GFP_KERNEL);
+ if (channel == NULL)
+ goto exit0;
+ INIT_LIST_HEAD(&channel->item);
+
+ channel->common_reg = hldev->common_reg;
+ channel->first_vp_id = hldev->first_vp_id;
+ channel->type = type;
+ channel->devh = hldev;
+ channel->vph = vph;
+ channel->userdata = userdata;
+ channel->per_dtr_space = per_dtr_space;
+ channel->length = length;
+ channel->vp_id = vp_id;
+
+ channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+ if (channel->work_arr == NULL)
+ goto exit1;
+
+ channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+ if (channel->free_arr == NULL)
+ goto exit1;
+ channel->free_ptr = length;
+
+ channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+ if (channel->reserve_arr == NULL)
+ goto exit1;
+ channel->reserve_ptr = length;
+ channel->reserve_top = 0;
+
+ channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+ if (channel->orig_arr == NULL)
+ goto exit1;
+
+ return channel;
+exit1:
+ __vxge_hw_channel_free(channel);
+
+exit0:
+ return NULL;
+}
+
+/*
+ * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
+ * Adds a block to block pool
+ */
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle)
+{
+ struct __vxge_hw_blockpool *blockpool;
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ dma_addr_t dma_addr;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ u32 req_out;
+
+ blockpool = &devh->block_pool;
+
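+	/* A NULL block_addr means the async allocation failed; drop the outstanding request */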
+ if (block_addr == NULL) {
+ blockpool->req_out--;
status = VXGE_HW_FAIL;
goto exit;
}
- hldev = vp->vpath->hldev;
- vp_id = vp->vpath->vp_id;
+ dma_addr = pci_map_single(devh->pdev, block_addr, length,
+ PCI_DMA_BIDIRECTIONAL);
- config = &hldev->config.vp_config[vp_id].ring;
+ if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
+ vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
+ blockpool->req_out--;
+ status = VXGE_HW_FAIL;
+ goto exit;
+ }
- ring_length = config->ring_blocks *
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+ if (!list_empty(&blockpool->free_entry_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_entry_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
- ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
- VXGE_HW_CHANNEL_TYPE_RING,
- ring_length,
- attr->per_rxd_space,
- attr->userdata);
+ if (entry == NULL)
+ entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
+ else
+ list_del(&entry->item);
- if (ring == NULL) {
+ if (entry != NULL) {
+ entry->length = length;
+ entry->memblock = block_addr;
+ entry->dma_addr = dma_addr;
+ entry->acc_handle = acc_handle;
+ entry->dma_handle = dma_h;
+ list_add(&entry->item, &blockpool->free_block_list);
+ blockpool->pool_size++;
+ status = VXGE_HW_OK;
+ } else
status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
- vp->vpath->ringh = ring;
- ring->vp_id = vp_id;
- ring->vp_reg = vp->vpath->vp_reg;
- ring->common_reg = hldev->common_reg;
- ring->stats = &vp->vpath->sw_stats->ring_stats;
- ring->config = config;
- ring->callback = attr->callback;
- ring->rxd_init = attr->rxd_init;
- ring->rxd_term = attr->rxd_term;
- ring->buffer_mode = config->buffer_mode;
- ring->rxds_limit = config->rxds_limit;
+ blockpool->req_out--;
- ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
- ring->rxd_priv_size =
- sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
- ring->per_rxd_space = attr->per_rxd_space;
+ req_out = blockpool->req_out;
+exit:
+ return;
+}
- ring->rxd_priv_size =
- ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
- VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
+static inline void
+vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
+{
+ gfp_t flags;
+ void *vaddr;
- /* how many RxDs can fit into one block. Depends on configured
- * buffer_mode. */
- ring->rxds_per_block =
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
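+	/* Use GFP_ATOMIC in interrupt context; the buffer must be DMA-capable */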
+ if (in_interrupt())
+ flags = GFP_ATOMIC | GFP_DMA;
+ else
+ flags = GFP_KERNEL | GFP_DMA;
- /* calculate actual RxD block private size */
- ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
- ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
- ring->mempool = __vxge_hw_mempool_create(hldev,
- VXGE_HW_BLOCK_SIZE,
- VXGE_HW_BLOCK_SIZE,
- ring->rxdblock_priv_size,
- ring->config->ring_blocks,
- ring->config->ring_blocks,
- &ring_mp_callback,
- ring);
+	vaddr = kmalloc(size, flags);
- if (ring->mempool == NULL) {
- __vxge_hw_ring_delete(vp);
- return VXGE_HW_ERR_OUT_OF_MEMORY;
- }
+ vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
+}
- status = __vxge_hw_channel_initialize(&ring->channel);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
- goto exit;
+/*
+ * __vxge_hw_blockpool_blocks_add - Request additional blocks
+ */
+static
+void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
+{
+ u32 nreq = 0, i;
+
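+	/* Replenish when available plus outstanding blocks fall below the minimum pool size */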
+ if ((blockpool->pool_size + blockpool->req_out) <
+ VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
+ nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
+ blockpool->req_out += nreq;
}
- /* Note:
- * Specifying rxd_init callback means two things:
- * 1) rxds need to be initialized by driver at channel-open time;
- * 2) rxds need to be posted at channel-open time
- * (that's what the initial_replenish() below does)
- * Currently we don't have a case when the 1) is done without the 2).
- */
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
+ for (i = 0; i < nreq; i++)
+ vxge_os_dma_malloc_async(
+ ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ blockpool->hldev, VXGE_HW_BLOCK_SIZE);
+}
+
+/*
+ * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
+ * Allocates a block of memory of given size, either from block pool
+ * or by calling vxge_os_dma_malloc()
+ */
+static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
+ struct vxge_hw_mempool_dma *dma_object)
+{
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ struct __vxge_hw_blockpool *blockpool;
+ void *memblock = NULL;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ blockpool = &devh->block_pool;
+
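+	/* Requests that do not match the pool block size bypass the pool entirely */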
+ if (size != blockpool->block_size) {
+
+ memblock = vxge_os_dma_malloc(devh->pdev, size,
+ &dma_object->handle,
+ &dma_object->acc_handle);
+
+ if (memblock == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
- }
- /* initial replenish will increment the counter in its post() routine,
- * we have to reset it */
- ring->stats->common_stats.usage_cnt = 0;
+ dma_object->addr = pci_map_single(devh->pdev, memblock, size,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (unlikely(pci_dma_mapping_error(devh->pdev,
+ dma_object->addr))) {
+ vxge_os_dma_free(devh->pdev, memblock,
+ &dma_object->acc_handle);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ } else {
+
+ if (!list_empty(&blockpool->free_block_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_block_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry != NULL) {
+ list_del(&entry->item);
+ dma_object->addr = entry->dma_addr;
+ dma_object->handle = entry->dma_handle;
+ dma_object->acc_handle = entry->acc_handle;
+ memblock = entry->memblock;
+
+ list_add(&entry->item,
+ &blockpool->free_entry_list);
+ blockpool->pool_size--;
+ }
+
+ if (memblock != NULL)
+ __vxge_hw_blockpool_blocks_add(blockpool);
+ }
exit:
- return status;
+ return memblock;
}
/*
- * __vxge_hw_ring_abort - Returns the RxD
- * This function terminates the RxDs of ring
+ * __vxge_hw_blockpool_blocks_remove - Free additional blocks
*/
-static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+static void
+__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
- void *rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
+ struct list_head *p, *n;
- for (;;) {
- vxge_hw_channel_dtr_try_complete(channel, &rxdh);
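+	/* Unmap and free surplus blocks until the pool drops below pool_max */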
+ list_for_each_safe(p, n, &blockpool->free_block_list) {
- if (rxdh == NULL)
+ if (blockpool->pool_size < blockpool->pool_max)
break;
- vxge_hw_channel_dtr_complete(channel);
+ pci_unmap_single(
+ ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+ ((struct __vxge_hw_blockpool_entry *)p)->length,
+ PCI_DMA_BIDIRECTIONAL);
- if (ring->rxd_term)
- ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
- channel->userdata);
+ vxge_os_dma_free(
+ ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->memblock,
+ &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
- vxge_hw_channel_dtr_free(channel, rxdh);
- }
+ list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- return VXGE_HW_OK;
+ list_add(p, &blockpool->free_entry_list);
+
+ blockpool->pool_size--;
+
+ }
}
/*
- * __vxge_hw_ring_reset - Resets the ring
- * This function resets the ring during vpath reset operation
+ * __vxge_hw_blockpool_free - Frees the memory allocated with
+ * __vxge_hw_blockpool_malloc
*/
-static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
+ void *memblock, u32 size,
+ struct vxge_hw_mempool_dma *dma_object)
{
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ struct __vxge_hw_blockpool *blockpool;
enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_channel *channel;
- channel = &ring->channel;
+ blockpool = &devh->block_pool;
- __vxge_hw_ring_abort(ring);
+ if (size != blockpool->block_size) {
+ pci_unmap_single(devh->pdev, dma_object->addr, size,
+ PCI_DMA_BIDIRECTIONAL);
+ vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
+ } else {
- status = __vxge_hw_channel_reset(channel);
+ if (!list_empty(&blockpool->free_entry_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_entry_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
- if (status != VXGE_HW_OK)
- goto exit;
+ if (entry == NULL)
+ entry = vmalloc(sizeof(
+ struct __vxge_hw_blockpool_entry));
+ else
+ list_del(&entry->item);
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK)
- goto exit;
+ if (entry != NULL) {
+ entry->length = size;
+ entry->memblock = memblock;
+ entry->dma_addr = dma_object->addr;
+ entry->acc_handle = dma_object->acc_handle;
+ entry->dma_handle = dma_object->handle;
+ list_add(&entry->item,
+ &blockpool->free_block_list);
+ blockpool->pool_size++;
+ status = VXGE_HW_OK;
+ } else
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+
+ if (status == VXGE_HW_OK)
+ __vxge_hw_blockpool_blocks_remove(blockpool);
}
-exit:
- return status;
}
/*
- * __vxge_hw_ring_delete - Removes the ring
- * This function freeup the memory pool and removes the ring
+ * __vxge_hw_mempool_destroy
*/
-static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
- struct __vxge_hw_ring *ring = vp->vpath->ringh;
+ u32 i, j;
+ struct __vxge_hw_device *devh = mempool->devh;
- __vxge_hw_ring_abort(ring);
+ for (i = 0; i < mempool->memblocks_allocated; i++) {
+ struct vxge_hw_mempool_dma *dma_object;
- if (ring->mempool)
- __vxge_hw_mempool_destroy(ring->mempool);
+ vxge_assert(mempool->memblocks_arr[i]);
+ vxge_assert(mempool->memblocks_dma_arr + i);
- vp->vpath->ringh = NULL;
- __vxge_hw_channel_free(&ring->channel);
+ dma_object = mempool->memblocks_dma_arr + i;
- return VXGE_HW_OK;
+ for (j = 0; j < mempool->items_per_memblock; j++) {
+ u32 index = i * mempool->items_per_memblock + j;
+
+			/* to skip last partially filled (if any) memblock */
+ if (index >= mempool->items_current)
+ break;
+ }
+
+ vfree(mempool->memblocks_priv_arr[i]);
+
+ __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
+ mempool->memblock_size, dma_object);
+ }
+
+ vfree(mempool->items_arr);
+ vfree(mempool->memblocks_dma_arr);
+ vfree(mempool->memblocks_priv_arr);
+ vfree(mempool->memblocks_arr);
+ vfree(mempool);
}
/*
@@ -1627,15 +2586,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
* allocate new memblock and its private part at once.
* This helps to minimize memory usage a lot. */
mempool->memblocks_priv_arr[i] =
- vmalloc(mempool->items_priv_size * n_items);
+ vzalloc(mempool->items_priv_size * n_items);
if (mempool->memblocks_priv_arr[i] == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
- memset(mempool->memblocks_priv_arr[i], 0,
- mempool->items_priv_size * n_items);
-
/* allocate DMA-capable memblock */
mempool->memblocks_arr[i] =
__vxge_hw_blockpool_malloc(mempool->devh,
@@ -1686,16 +2642,15 @@ exit:
* with size enough to hold %items_initial number of items. Memory is
* DMA-able but client must map/unmap before interoperating with the device.
*/
-static struct vxge_hw_mempool*
-__vxge_hw_mempool_create(
- struct __vxge_hw_device *devh,
- u32 memblock_size,
- u32 item_size,
- u32 items_priv_size,
- u32 items_initial,
- u32 items_max,
- struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata)
+static struct vxge_hw_mempool *
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
+ u32 memblock_size,
+ u32 item_size,
+ u32 items_priv_size,
+ u32 items_initial,
+ u32 items_max,
+ struct vxge_hw_mempool_cbs *mp_callback,
+ void *userdata)
{
enum vxge_hw_status status = VXGE_HW_OK;
u32 memblocks_to_allocate;
@@ -1707,13 +2662,11 @@ __vxge_hw_mempool_create(
goto exit;
}
- mempool = (struct vxge_hw_mempool *)
- vmalloc(sizeof(struct vxge_hw_mempool));
+ mempool = vzalloc(sizeof(struct vxge_hw_mempool));
if (mempool == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
- memset(mempool, 0, sizeof(struct vxge_hw_mempool));
mempool->devh = devh;
mempool->memblock_size = memblock_size;
@@ -1733,53 +2686,43 @@ __vxge_hw_mempool_create(
/* allocate array of memblocks */
mempool->memblocks_arr =
- (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
+ vzalloc(sizeof(void *) * mempool->memblocks_max);
if (mempool->memblocks_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
- memset(mempool->memblocks_arr, 0,
- sizeof(void *) * mempool->memblocks_max);
/* allocate array of private parts of items per memblocks */
mempool->memblocks_priv_arr =
- (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
+ vzalloc(sizeof(void *) * mempool->memblocks_max);
if (mempool->memblocks_priv_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
- memset(mempool->memblocks_priv_arr, 0,
- sizeof(void *) * mempool->memblocks_max);
/* allocate array of memblocks DMA objects */
- mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
- vmalloc(sizeof(struct vxge_hw_mempool_dma) *
+ mempool->memblocks_dma_arr =
+ vzalloc(sizeof(struct vxge_hw_mempool_dma) *
mempool->memblocks_max);
-
if (mempool->memblocks_dma_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
- memset(mempool->memblocks_dma_arr, 0,
- sizeof(struct vxge_hw_mempool_dma) *
- mempool->memblocks_max);
/* allocate hash array of items */
- mempool->items_arr =
- (void **) vmalloc(sizeof(void *) * mempool->items_max);
+ mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
if (mempool->items_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
- memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
/* calculate initial number of memblocks */
memblocks_to_allocate = (mempool->items_initial +
@@ -1801,122 +2744,188 @@ exit:
}
/*
- * vxge_hw_mempool_destroy
+ * __vxge_hw_ring_abort - Returns the RxD
+ * This function terminates the RxDs of the ring
*/
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
- u32 i, j;
- struct __vxge_hw_device *devh = mempool->devh;
-
- for (i = 0; i < mempool->memblocks_allocated; i++) {
- struct vxge_hw_mempool_dma *dma_object;
+ void *rxdh;
+ struct __vxge_hw_channel *channel;
- vxge_assert(mempool->memblocks_arr[i]);
- vxge_assert(mempool->memblocks_dma_arr + i);
+ channel = &ring->channel;
- dma_object = mempool->memblocks_dma_arr + i;
+ for (;;) {
+ vxge_hw_channel_dtr_try_complete(channel, &rxdh);
- for (j = 0; j < mempool->items_per_memblock; j++) {
- u32 index = i * mempool->items_per_memblock + j;
+ if (rxdh == NULL)
+ break;
- /* to skip last partially filled(if any) memblock */
- if (index >= mempool->items_current)
- break;
- }
+ vxge_hw_channel_dtr_complete(channel);
- vfree(mempool->memblocks_priv_arr[i]);
+ if (ring->rxd_term)
+ ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
+ channel->userdata);
- __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
- mempool->memblock_size, dma_object);
+ vxge_hw_channel_dtr_free(channel, rxdh);
}
- vfree(mempool->items_arr);
+ return VXGE_HW_OK;
+}
- vfree(mempool->memblocks_dma_arr);
+/*
+ * __vxge_hw_ring_reset - Resets the ring
+ * This function resets the ring during vpath reset operation
+ */
+static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_channel *channel;
- vfree(mempool->memblocks_priv_arr);
+ channel = &ring->channel;
- vfree(mempool->memblocks_arr);
+ __vxge_hw_ring_abort(ring);
- vfree(mempool);
+ status = __vxge_hw_channel_reset(channel);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ if (ring->rxd_init) {
+ status = vxge_hw_ring_replenish(ring);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+exit:
+ return status;
}
/*
- * __vxge_hw_device_fifo_config_check - Check fifo configuration.
- * Check the fifo configuration
+ * __vxge_hw_ring_delete - Removes the ring
+ * This function frees up the memory pool and removes the ring
*/
-enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
+static enum vxge_hw_status
+__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
- if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
- (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
- return VXGE_HW_BADCFG_FIFO_BLOCKS;
+ struct __vxge_hw_ring *ring = vp->vpath->ringh;
+
+ __vxge_hw_ring_abort(ring);
+
+ if (ring->mempool)
+ __vxge_hw_mempool_destroy(ring->mempool);
+
+ vp->vpath->ringh = NULL;
+ __vxge_hw_channel_free(&ring->channel);
return VXGE_HW_OK;
}
/*
- * __vxge_hw_device_vpath_config_check - Check vpath configuration.
- * Check the vpath configuration
+ * __vxge_hw_ring_create - Create a Ring
+ * This function creates Ring and initializes it.
*/
static enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
+__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
+ struct vxge_hw_ring_attr *attr)
{
- enum vxge_hw_status status;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_ring *ring;
+ u32 ring_length;
+ struct vxge_hw_ring_config *config;
+ struct __vxge_hw_device *hldev;
+ u32 vp_id;
+ struct vxge_hw_mempool_cbs ring_mp_callback;
- if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
- (vp_config->min_bandwidth >
- VXGE_HW_VPATH_BANDWIDTH_MAX))
- return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
+ if ((vp == NULL) || (attr == NULL)) {
+ status = VXGE_HW_FAIL;
+ goto exit;
+ }
- status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
- if (status != VXGE_HW_OK)
- return status;
+ hldev = vp->vpath->hldev;
+ vp_id = vp->vpath->vp_id;
- if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
- ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
- (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
- return VXGE_HW_BADCFG_VPATH_MTU;
+ config = &hldev->config.vp_config[vp_id].ring;
- if ((vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
- return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
+ ring_length = config->ring_blocks *
+ vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
- return VXGE_HW_OK;
-}
+ ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
+ VXGE_HW_CHANNEL_TYPE_RING,
+ ring_length,
+ attr->per_rxd_space,
+ attr->userdata);
+ if (ring == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
-/*
- * __vxge_hw_device_config_check - Check device configuration.
- * Check the device configuration
- */
-enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
-{
- u32 i;
- enum vxge_hw_status status;
+ vp->vpath->ringh = ring;
+ ring->vp_id = vp_id;
+ ring->vp_reg = vp->vpath->vp_reg;
+ ring->common_reg = hldev->common_reg;
+ ring->stats = &vp->vpath->sw_stats->ring_stats;
+ ring->config = config;
+ ring->callback = attr->callback;
+ ring->rxd_init = attr->rxd_init;
+ ring->rxd_term = attr->rxd_term;
+ ring->buffer_mode = config->buffer_mode;
+ ring->rxds_limit = config->rxds_limit;
- if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
- return VXGE_HW_BADCFG_INTR_MODE;
+ ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
+ ring->rxd_priv_size =
+ sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
+ ring->per_rxd_space = attr->per_rxd_space;
- if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
- (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
- return VXGE_HW_BADCFG_RTS_MAC_EN;
+ ring->rxd_priv_size =
+ ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
+ VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- status = __vxge_hw_device_vpath_config_check(
- &new_config->vp_config[i]);
- if (status != VXGE_HW_OK)
- return status;
+ /* how many RxDs can fit into one block. Depends on configured
+ * buffer_mode. */
+ ring->rxds_per_block =
+ vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+
+ /* calculate actual RxD block private size */
+ ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
+ ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
+ ring->mempool = __vxge_hw_mempool_create(hldev,
+ VXGE_HW_BLOCK_SIZE,
+ VXGE_HW_BLOCK_SIZE,
+ ring->rxdblock_priv_size,
+ ring->config->ring_blocks,
+ ring->config->ring_blocks,
+ &ring_mp_callback,
+ ring);
+ if (ring->mempool == NULL) {
+ __vxge_hw_ring_delete(vp);
+ return VXGE_HW_ERR_OUT_OF_MEMORY;
}
- return VXGE_HW_OK;
+ status = __vxge_hw_channel_initialize(&ring->channel);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_ring_delete(vp);
+ goto exit;
+ }
+
+ /* Note:
+ * Specifying rxd_init callback means two things:
+ * 1) rxds need to be initialized by driver at channel-open time;
+ * 2) rxds need to be posted at channel-open time
+ * (that's what the initial_replenish() below does)
+ * Currently we don't have a case when the 1) is done without the 2).
+ */
+ if (ring->rxd_init) {
+ status = vxge_hw_ring_replenish(ring);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_ring_delete(vp);
+ goto exit;
+ }
+ }
+
+ /* initial replenish will increment the counter in its post() routine,
+ * we have to reset it */
+ ring->stats->common_stats.usage_cnt = 0;
+exit:
+ return status;
}
/*
@@ -1938,7 +2947,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
device_config->vp_config[i].vp_id = i;
device_config->vp_config[i].min_bandwidth =
@@ -2078,61 +3086,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
}
/*
- * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion.
- * Set the swapper bits appropriately for the lagacy section.
- */
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- wmb();
-
- switch (val64) {
-
- case VXGE_HW_SWAPPER_INITIAL_VALUE:
- return status;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- break;
-
- case VXGE_HW_SWAPPER_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
- }
-
- wmb();
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
- status = VXGE_HW_ERR_SWAPPER_CTRL;
-
- return status;
-}
-
-/*
* __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
* Set the swapper bits appropriately for the vpath.
*/
@@ -2156,9 +3109,8 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
* Set the swapper bits appropriately for the vpath.
*/
static enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(
- struct vxge_hw_legacy_reg __iomem *legacy_reg,
- struct vxge_hw_vpath_reg __iomem *vpath_reg)
+__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
u64 val64;
@@ -2408,6 +3360,69 @@ exit:
}
/*
+ * __vxge_hw_fifo_abort - Terminate the fifo's TxDs
+ * This function terminates the outstanding TxDs of the fifo
+ */
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+{
+ void *txdlh;
+
+ for (;;) {
+ vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
+
+ if (txdlh == NULL)
+ break;
+
+ vxge_hw_channel_dtr_complete(&fifo->channel);
+
+ if (fifo->txdl_term) {
+ fifo->txdl_term(txdlh,
+ VXGE_HW_TXDL_STATE_POSTED,
+ fifo->channel.userdata);
+ }
+
+ vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_fifo_reset - Resets the fifo
+ * This function resets the fifo during the vpath reset operation
+ */
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ __vxge_hw_fifo_abort(fifo);
+ status = __vxge_hw_channel_reset(&fifo->channel);
+
+ return status;
+}
+
+/*
+ * __vxge_hw_fifo_delete - Removes the FIFO
+ * This function frees up the memory pool and removes the FIFO
+ */
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
+{
+ struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
+
+ __vxge_hw_fifo_abort(fifo);
+
+ if (fifo->mempool)
+ __vxge_hw_mempool_destroy(fifo->mempool);
+
+ vp->vpath->fifoh = NULL;
+
+ __vxge_hw_channel_free(&fifo->channel);
+
+ return VXGE_HW_OK;
+}
+
+/*
* __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
* list callback
* This function is a callback passed to __vxge_hw_mempool_create to create memory
@@ -2453,7 +3468,7 @@ __vxge_hw_fifo_mempool_item_alloc(
* __vxge_hw_fifo_create - Create a FIFO
* This function creates FIFO and initializes it.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_fifo_attr *attr)
{
@@ -2572,68 +3587,6 @@ exit:
}
/*
- * __vxge_hw_fifo_abort - Returns the TxD
- * This function terminates the TxDs of fifo
- */
-static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
-{
- void *txdlh;
-
- for (;;) {
- vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
-
- if (txdlh == NULL)
- break;
-
- vxge_hw_channel_dtr_complete(&fifo->channel);
-
- if (fifo->txdl_term) {
- fifo->txdl_term(txdlh,
- VXGE_HW_TXDL_STATE_POSTED,
- fifo->channel.userdata);
- }
-
- vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_reset - Resets the fifo
- * This function resets the fifo during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_fifo_abort(fifo);
- status = __vxge_hw_channel_reset(&fifo->channel);
-
- return status;
-}
-
-/*
- * __vxge_hw_fifo_delete - Removes the FIFO
- * This function freeup the memory pool and removes the FIFO
- */
-enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
-
- __vxge_hw_fifo_abort(fifo);
-
- if (fifo->mempool)
- __vxge_hw_mempool_destroy(fifo->mempool);
-
- vp->vpath->fifoh = NULL;
-
- __vxge_hw_channel_free(&fifo->channel);
-
- return VXGE_HW_OK;
-}
-
-/*
* __vxge_hw_vpath_pci_read - Read the content of given address
* in pci config space.
* Read from the vpath pci config space.
@@ -2675,297 +3628,6 @@ exit:
return status;
}
-/*
- * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
- * Returns the function number of the vpath.
- */
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id,
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
-{
- u64 val64;
-
- val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
-
- return
- (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
-}
-
-/*
- * __vxge_hw_read_rts_ds - Program RTS steering critieria
- */
-static inline void
-__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u64 dta_struct_sel)
-{
- writeq(0, &vpath_reg->rts_access_steer_ctrl);
- wmb();
- writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
- writeq(0, &vpath_reg->rts_access_steer_data1);
- wmb();
-}
-
-
-/*
- * __vxge_hw_vpath_card_info_get - Get the serial numbers,
- * part number and product description.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u32 i, j;
- u64 val64;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
- u8 *serial_number = hw_info->serial_number;
- u8 *part_number = hw_info->part_number;
- u8 *product_desc = hw_info->product_desc;
-
- __vxge_hw_read_rts_ds(vpath_reg,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- return status;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- ((u64 *)serial_number)[0] = be64_to_cpu(data1);
-
- data2 = readq(&vpath_reg->rts_access_steer_data1);
- ((u64 *)serial_number)[1] = be64_to_cpu(data2);
- status = VXGE_HW_OK;
- } else
- *serial_number = 0;
-
- __vxge_hw_read_rts_ds(vpath_reg,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- return status;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- ((u64 *)part_number)[0] = be64_to_cpu(data1);
-
- data2 = readq(&vpath_reg->rts_access_steer_data1);
- ((u64 *)part_number)[1] = be64_to_cpu(data2);
-
- status = VXGE_HW_OK;
-
- } else
- *part_number = 0;
-
- j = 0;
-
- for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
- i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
-
- __vxge_hw_read_rts_ds(vpath_reg, i);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- return status;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
-
- data2 = readq(&vpath_reg->rts_access_steer_data1);
- ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
-
- status = VXGE_HW_OK;
- } else
- *product_desc = 0;
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_fw_ver_get - Get the fw version
- * Returns FW Version
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u64 val64;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
- struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
- struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
- struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- data2 = readq(&vpath_reg->rts_access_steer_data1);
-
- fw_date->day =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
- data1);
- fw_date->month =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
- data1);
- fw_date->year =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
- data1);
-
- snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- fw_date->month, fw_date->day, fw_date->year);
-
- fw_version->major =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
- fw_version->minor =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
- fw_version->build =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
-
- snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- fw_version->major, fw_version->minor, fw_version->build);
-
- flash_date->day =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
- flash_date->month =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
- flash_date->year =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
-
- snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
- "%2.2d/%2.2d/%4.4d",
- flash_date->month, flash_date->day, flash_date->year);
-
- flash_version->major =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
- flash_version->minor =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
- flash_version->build =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
-
- snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- flash_version->major, flash_version->minor,
- flash_version->build);
-
- status = VXGE_HW_OK;
-
- } else
- status = VXGE_HW_FAIL;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
- * Returns pci function mode
- */
-static u64
-__vxge_hw_vpath_pci_func_mode_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
- u64 val64;
- u64 data1 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_read_rts_ds(vpath_reg,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- status = VXGE_HW_OK;
- } else {
- data1 = 0;
- status = VXGE_HW_FAIL;
- }
-exit:
- return data1;
-}
-
/**
* vxge_hw_device_flick_link_led - Flick (blink) link LED.
* @hldev: HW device.
@@ -2974,37 +3636,24 @@ exit:
* Flicker the link LED.
*/
enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
- u64 on_off)
+vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct __vxge_hw_virtualpath *vpath;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
if (hldev == NULL) {
status = VXGE_HW_ERR_INVALID_DEVICE;
goto exit;
}
- vp_reg = hldev->vpath_reg[hldev->first_vp_id];
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
- writeq(0, &vp_reg->rts_access_steer_ctrl);
- wmb();
- writeq(on_off, &vp_reg->rts_access_steer_data0);
- writeq(0, &vp_reg->rts_access_steer_data1);
- wmb();
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+ data0 = on_off;
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
exit:
return status;
}
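The LED path above is the shape every converted caller now takes: one vxge_hw_vpath_fw_api() call instead of hand-rolled steering-register writes. A hedged sketch of another use of the same helper, given a struct __vxge_hw_virtualpath *vpath and assuming the argument order visible in this patch (vpath, action, data-struct select, offset, then in/out data0/data1/steer_ctrl):

	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	/* read the first FW memo entry; both constants appear in this diff */
	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		goto exit;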
@@ -3013,63 +3662,38 @@ exit:
* __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
*/
enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(
- struct __vxge_hw_vpath_handle *vp,
- u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
+__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
+ u32 action, u32 rts_table, u32 offset,
+ u64 *data0, u64 *data1)
{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- enum vxge_hw_status status = VXGE_HW_OK;
+ enum vxge_hw_status status;
+ u64 steer_ctrl = 0;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
- vpath = vp->vpath;
- vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
if ((rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
(rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
(rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
(rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
- val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
+ steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
}
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- vpath->hldev->config.device_poll_millis);
-
+ status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+ data0, data1, &steer_ctrl);
if (status != VXGE_HW_OK)
goto exit;
- val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- *data1 = readq(&vp_reg->rts_access_steer_data0);
-
- if ((rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
- *data2 = readq(&vp_reg->rts_access_steer_data1);
- }
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_FAIL;
+ if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
+ (rts_table !=
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+ *data1 = 0;
exit:
return status;
}
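With the corrected condition, *data1 is cleared for every table except DA and RTH_MULTI_IT, the only two that return a second data word. A hedged sketch of a caller that relies on that contract, reusing the DA-table constants from the code removed further down:

	u64 mac = 0, mac_mask = 0;
	enum vxge_hw_status status;

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &mac, &mac_mask);
	if (status == VXGE_HW_OK) {
		mac = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(mac);
		mac_mask =
		   VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(mac_mask);
	}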
@@ -3078,107 +3702,27 @@ exit:
* __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
*/
enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(
- struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
- u32 offset, u64 data1, u64 data2)
+__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
+ u32 rts_table, u32 offset, u64 steer_data0,
+ u64 steer_data1)
{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
- vpath = vp->vpath;
- vp_reg = vpath->vp_reg;
-
- writeq(data1, &vp_reg->rts_access_steer_data0);
- wmb();
+ data0 = steer_data0;
if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
(rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
- writeq(data2, &vp_reg->rts_access_steer_data1);
- wmb();
- }
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- vpath->hldev->config.device_poll_millis);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
- status = VXGE_HW_OK;
- else
- status = VXGE_HW_FAIL;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
- * from MAC address table.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
- u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
-{
- u32 i;
- u64 val64;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+ data1 = steer_data1;
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- data2 = readq(&vpath_reg->rts_access_steer_data1);
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
- data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_FAIL;
+ status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+ &data0, &data1, &steer_ctrl);
exit:
return status;
}
@@ -3204,6 +3748,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
0, &data0, &data1);
+ if (status != VXGE_HW_OK)
+ goto exit;
data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
@@ -3771,10 +4317,10 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
vp_reg = vpath->vp_reg;
config = vpath->vp_config;
- writeq((u64)0, &vp_reg->tim_dest_addr);
- writeq((u64)0, &vp_reg->tim_vpath_map);
- writeq((u64)0, &vp_reg->tim_bitmap);
- writeq((u64)0, &vp_reg->tim_remap);
+ writeq(0, &vp_reg->tim_dest_addr);
+ writeq(0, &vp_reg->tim_vpath_map);
+ writeq(0, &vp_reg->tim_bitmap);
+ writeq(0, &vp_reg->tim_remap);
if (config->ring.enable == VXGE_HW_RING_ENABLE)
writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
@@ -3876,8 +4422,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
- config->tti.util_sel);
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
}
if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -3981,8 +4526,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
- config->rti.util_sel);
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
}
if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4003,11 +4547,15 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
+ val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
+ val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
+ val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
+ writeq(val64, &vp_reg->tim_wrkld_clc);
+
return status;
}
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
{
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4018,17 +4566,15 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
vp_reg = vpath->vp_reg;
config = vpath->vp_config;
- if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
+ if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
+ config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+ config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
- config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- writeq(val64,
- &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- }
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
}
}
+
/*
* __vxge_hw_vpath_initialize
* This routine is the final phase of init which initializes the
@@ -4052,22 +4598,18 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
vp_reg = vpath->vp_reg;
status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
-
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
-
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
-
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
-
if (status != VXGE_HW_OK)
goto exit;
@@ -4075,7 +4617,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
/* Get MRRS value from device control */
status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
-
if (status == VXGE_HW_OK) {
val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
val64 &=
@@ -4099,6 +4640,28 @@ exit:
}
/*
+ * __vxge_hw_vp_terminate - Terminate Virtual Path structure
+ * This routine closes all channels it opened and frees up memory
+ */
+static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ struct __vxge_hw_virtualpath *vpath;
+
+ vpath = &hldev->virtual_paths[vp_id];
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
+ goto exit;
+
+ VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
+ vpath->hldev->tim_int_mask1, vpath->vp_id);
+ hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
+
+ memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+exit:
+ return;
+}
+
+/*
* __vxge_hw_vp_initialize - Initialize Virtual Path structure
* This routine is the initial phase of init which resets the vpath and
* initializes the software support structures.
@@ -4117,6 +4680,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
vpath = &hldev->virtual_paths[vp_id];
+ spin_lock_init(&hldev->virtual_paths[vp_id].lock);
vpath->vp_id = vp_id;
vpath->vp_open = VXGE_HW_VP_OPEN;
vpath->hldev = hldev;
@@ -4127,14 +4691,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
__vxge_hw_vpath_reset(hldev, vp_id);
status = __vxge_hw_vpath_reset_check(vpath);
-
if (status != VXGE_HW_OK) {
memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
goto exit;
}
status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
-
if (status != VXGE_HW_OK) {
memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
goto exit;
@@ -4148,7 +4710,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
hldev->tim_int_mask1, vp_id);
status = __vxge_hw_vpath_initialize(hldev, vp_id);
-
if (status != VXGE_HW_OK)
__vxge_hw_vp_terminate(hldev, vp_id);
exit:
@@ -4156,29 +4717,6 @@ exit:
}
/*
- * __vxge_hw_vp_terminate - Terminate Virtual Path structure
- * This routine closes all channels it opened and freeup memory
- */
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
- goto exit;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
- vpath->hldev->tim_int_mask1, vpath->vp_id);
- hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
-
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
-exit:
- return;
-}
-
-/*
* vxge_hw_vpath_mtu_set - Set MTU.
* Set new MTU value. Example, to use jumbo frames:
* vxge_hw_vpath_mtu_set(my_device, 9600);
@@ -4215,6 +4753,64 @@ exit:
}
/*
+ * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
+ * Enable the DMA vpath statistics. The function is called to re-enable
+ * the adapter to update stats into the host memory.
+ */
+static enum vxge_hw_status
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_virtualpath *vpath;
+
+ vpath = vp->vpath;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+
+ memcpy(vpath->hw_stats_sav, vpath->hw_stats,
+ sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+ status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_blockpool_block_allocate - Allocates a block from the block pool
+ * This function allocates a block from the block pool or from the system
+ */
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
+{
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ struct __vxge_hw_blockpool *blockpool;
+
+ blockpool = &devh->block_pool;
+
+ if (size == blockpool->block_size) {
+
+ if (!list_empty(&blockpool->free_block_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_block_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry != NULL) {
+ list_del(&entry->item);
+ blockpool->pool_size--;
+ }
+ }
+
+ if (entry != NULL)
+ __vxge_hw_blockpool_blocks_add(blockpool);
+
+ return entry;
+}
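Allocation and free are symmetric: __vxge_hw_blockpool_block_allocate() only honors requests matching the pool's fixed block size, pops a free entry, then tops the pool back up, while the matching free (re-added further down) pushes the entry back. A hedged usage sketch:

	struct __vxge_hw_blockpool_entry *blk;

	blk = __vxge_hw_blockpool_block_allocate(hldev, VXGE_HW_BLOCK_SIZE);
	if (blk == NULL)
		return VXGE_HW_ERR_OUT_OF_MEMORY;

	/* use blk->memblock (CPU address) / blk->dma_addr (bus address) here */
	__vxge_hw_blockpool_block_free(hldev, blk);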
+
+/*
* vxge_hw_vpath_open - Open a virtual path on a given adapter
* This function is used to open access to virtual path of an
* adapter for offload, GRO operations. This function returns
@@ -4238,19 +4834,15 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
&hldev->config.vp_config[attr->vp_id]);
-
if (status != VXGE_HW_OK)
goto vpath_open_exit1;
- vp = (struct __vxge_hw_vpath_handle *)
- vmalloc(sizeof(struct __vxge_hw_vpath_handle));
+ vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
if (vp == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto vpath_open_exit2;
}
- memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
-
vp->vpath = vpath;
if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
@@ -4273,7 +4865,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
VXGE_HW_BLOCK_SIZE);
-
if (vpath->stats_block == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto vpath_open_exit8;
@@ -4332,19 +4923,20 @@ vpath_open_exit1:
* This function is used to initialize the Rx doorbell of the
* virtual path opened earlier.
*/
-void
-vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
+void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
- struct __vxge_hw_virtualpath *vpath = NULL;
+ struct __vxge_hw_virtualpath *vpath = vp->vpath;
+ struct __vxge_hw_ring *ring = vpath->ringh;
+ struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
u64 new_count, val64, val164;
- struct __vxge_hw_ring *ring;
- vpath = vp->vpath;
- ring = vpath->ringh;
+ if (vdev->titan1) {
+ new_count = readq(&vpath->vp_reg->rxdmem_size);
+ new_count &= 0x1fff;
+ } else
+ new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
- new_count = readq(&vpath->vp_reg->rxdmem_size);
- new_count &= 0x1fff;
- val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
+ val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
&vpath->vp_reg->prc_rxd_doorbell);
@@ -4367,6 +4959,29 @@ vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
}
/*
+ * __vxge_hw_blockpool_block_free - Frees a block back to the block pool
+ * @devh: HAL device
+ * @entry: Entry of block to be freed
+ *
+ * This function frees a block back to the block pool
+ */
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
+ struct __vxge_hw_blockpool_entry *entry)
+{
+ struct __vxge_hw_blockpool *blockpool;
+
+ blockpool = &devh->block_pool;
+
+ if (entry->length == blockpool->block_size) {
+ list_add(&entry->item, &blockpool->free_block_list);
+ blockpool->pool_size++;
+ }
+
+ __vxge_hw_blockpool_blocks_remove(blockpool);
+}
+
+/*
* vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
* This function is used to close access to virtual path opened
* earlier.
@@ -4414,7 +5029,9 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_vp_terminate(devh, vp_id);
+ spin_lock(&vpath->lock);
vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
+ spin_unlock(&vpath->lock);
vpath_close_exit:
return status;
@@ -4515,730 +5132,3 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->cmn_rsthdlr_cfg1);
}
-
-/*
- * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics.
- * Enable the DMA vpath statistics. The function is to be called to re-enable
- * the adapter to update stats into the host memory
- */
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- memcpy(vpath->hw_stats_sav, vpath->hw_stats,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_stats_access - Get the statistics from the given location
- * and offset and perform an operation
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
- u32 operation, u32 offset, u64 *stat)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto vpath_stats_access_exit;
- }
-
- vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->xmac_stats_access_cmd,
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
- vpath->hldev->config.device_poll_millis);
-
- if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
- *stat = readq(&vp_reg->xmac_stats_access_data);
- else
- *stat = 0;
-
-vpath_stats_access_exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
-{
- u64 *val64;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = (u64 *) vpath_tx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset, val64);
- if (status != VXGE_HW_OK)
- goto exit;
- offset++;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
-{
- u64 *val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
- val64 = (u64 *) vpath_rx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset >> 3, val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- val64 = readq(&vp_reg->vpath_debug_stats0);
- hw_stats->ini_num_mwr_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats1);
- hw_stats->ini_num_mrd_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats2);
- hw_stats->ini_num_cpl_rcvd =
- (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats3);
- hw_stats->ini_num_mwr_byte_sent =
- VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats4);
- hw_stats->ini_num_cpl_byte_rcvd =
- VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats5);
- hw_stats->wrcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats6);
- hw_stats->rdcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count0 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count1 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count23);
- hw_stats->vpath_genstats_count2 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count3 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count4);
- hw_stats->vpath_genstats_count4 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count5);
- hw_stats->vpath_genstats_count5 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
- val64);
-
- status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
-
- hw_stats->prog_event_vnum0 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
-
- hw_stats->prog_event_vnum1 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
-
- hw_stats->prog_event_vnum2 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
-
- hw_stats->prog_event_vnum3 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
-
- val64 = readq(&vp_reg->rx_multi_cast_stats);
- hw_stats->rx_multi_cast_frame_discard =
- (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
-
- val64 = readq(&vp_reg->rx_frm_transferred);
- hw_stats->rx_frm_transferred =
- (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
-
- val64 = readq(&vp_reg->rxd_returned);
- hw_stats->rxd_returned =
- (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_mpa);
- hw_stats->rx_mpa_len_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
- hw_stats->rx_mpa_mrk_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
- hw_stats->rx_mpa_crc_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_fau);
- hw_stats->rx_permitted_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
- hw_stats->rx_vp_reset_discarded_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
- hw_stats->rx_wol_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
-
- val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
- hw_stats->tx_vp_reset_discarded_frms =
- (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
- val64);
-exit:
- return status;
-}
-
-
-static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
- unsigned long size)
-{
- gfp_t flags;
- void *vaddr;
-
- if (in_interrupt())
- flags = GFP_ATOMIC | GFP_DMA;
- else
- flags = GFP_KERNEL | GFP_DMA;
-
- vaddr = kmalloc((size), flags);
-
- vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
- struct pci_dev **p_dma_acch)
-{
- unsigned long misaligned = *(unsigned long *)p_dma_acch;
- u8 *tmp = (u8 *)vaddr;
- tmp -= misaligned;
- kfree((void *)tmp);
-}
-
-/*
- * __vxge_hw_blockpool_create - Create block pool
- */
-
-enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max)
-{
- u32 i;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- void *memblock;
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- struct pci_dev *acc_handle;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (blockpool == NULL) {
- status = VXGE_HW_FAIL;
- goto blockpool_create_exit;
- }
-
- blockpool->hldev = hldev;
- blockpool->block_size = VXGE_HW_BLOCK_SIZE;
- blockpool->pool_size = 0;
- blockpool->pool_max = pool_max;
- blockpool->req_out = 0;
-
- INIT_LIST_HEAD(&blockpool->free_block_list);
- INIT_LIST_HEAD(&blockpool->free_entry_list);
-
- for (i = 0; i < pool_size + pool_max; i++) {
- entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- list_add(&entry->item, &blockpool->free_entry_list);
- }
-
- for (i = 0; i < pool_size; i++) {
-
- memblock = vxge_os_dma_malloc(
- hldev->pdev,
- VXGE_HW_BLOCK_SIZE,
- &dma_handle,
- &acc_handle);
-
- if (memblock == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- dma_addr = pci_map_single(hldev->pdev, memblock,
- VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
-
- if (unlikely(pci_dma_mapping_error(hldev->pdev,
- dma_addr))) {
-
- vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry =
- kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry != NULL) {
- list_del(&entry->item);
- entry->length = VXGE_HW_BLOCK_SIZE;
- entry->memblock = memblock;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- } else {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- }
-
-blockpool_create_exit:
- return status;
-}
-
-/*
- * __vxge_hw_blockpool_destroy - Deallocates the block pool
- */
-
-void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
-{
-
- struct __vxge_hw_device *hldev;
- struct list_head *p, *n;
- u16 ret;
-
- if (blockpool == NULL) {
- ret = 1;
- goto exit;
- }
-
- hldev = blockpool->hldev;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
-
- pci_unmap_single(hldev->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- PCI_DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(hldev->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
-
- list_del(
- &((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree(p);
- blockpool->pool_size--;
- }
-
- list_for_each_safe(p, n, &blockpool->free_entry_list) {
- list_del(
- &((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree((void *)p);
- }
- ret = 0;
-exit:
- return;
-}
-
-/*
- * __vxge_hw_blockpool_blocks_add - Request additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
-{
- u32 nreq = 0, i;
-
- if ((blockpool->pool_size + blockpool->req_out) <
- VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
- nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
- blockpool->req_out += nreq;
- }
-
- for (i = 0; i < nreq; i++)
- vxge_os_dma_malloc_async(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
- blockpool->hldev, VXGE_HW_BLOCK_SIZE);
-}
-
-/*
- * __vxge_hw_blockpool_blocks_remove - Free additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
-{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
-
- if (blockpool->pool_size < blockpool->pool_max)
- break;
-
- pci_unmap_single(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- PCI_DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
-
- list_add(p, &blockpool->free_entry_list);
-
- blockpool->pool_size--;
-
- }
-}
-
-/*
- * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
- * Adds a block to block pool
- */
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle)
-{
- struct __vxge_hw_blockpool *blockpool;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- dma_addr_t dma_addr;
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 req_out;
-
- blockpool = &devh->block_pool;
-
- if (block_addr == NULL) {
- blockpool->req_out--;
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- dma_addr = pci_map_single(devh->pdev, block_addr, length,
- PCI_DMA_BIDIRECTIONAL);
-
- if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
-
- vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
- blockpool->req_out--;
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = (struct __vxge_hw_blockpool_entry *)
- vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry != NULL) {
- entry->length = length;
- entry->memblock = block_addr;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_h;
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
- blockpool->req_out--;
-
- req_out = blockpool->req_out;
-exit:
- return;
-}
-
-/*
- * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
- * Allocates a block of memory of given size, either from block pool
- * or by calling vxge_os_dma_malloc()
- */
-void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- void *memblock = NULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
-
- memblock = vxge_os_dma_malloc(devh->pdev, size,
- &dma_object->handle,
- &dma_object->acc_handle);
-
- if (memblock == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- dma_object->addr = pci_map_single(devh->pdev, memblock, size,
- PCI_DMA_BIDIRECTIONAL);
-
- if (unlikely(pci_dma_mapping_error(devh->pdev,
- dma_object->addr))) {
- vxge_os_dma_free(devh->pdev, memblock,
- &dma_object->acc_handle);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- } else {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- dma_object->addr = entry->dma_addr;
- dma_object->handle = entry->dma_handle;
- dma_object->acc_handle = entry->acc_handle;
- memblock = entry->memblock;
-
- list_add(&entry->item,
- &blockpool->free_entry_list);
- blockpool->pool_size--;
- }
-
- if (memblock != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
- }
-exit:
- return memblock;
-}
-
-/*
- * __vxge_hw_blockpool_free - Frees the memory allcoated with
- __vxge_hw_blockpool_malloc
- */
-void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
- void *memblock, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
- pci_unmap_single(devh->pdev, dma_object->addr, size,
- PCI_DMA_BIDIRECTIONAL);
- vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
- } else {
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = (struct __vxge_hw_blockpool_entry *)
- vmalloc(sizeof(
- struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry != NULL) {
- entry->length = size;
- entry->memblock = memblock;
- entry->dma_addr = dma_object->addr;
- entry->acc_handle = dma_object->acc_handle;
- entry->dma_handle = dma_object->handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
- if (status == VXGE_HW_OK)
- __vxge_hw_blockpool_blocks_remove(blockpool);
- }
-}
-
-/*
- * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
- * This function allocates a block from block pool or from the system
- */
-struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (size == blockpool->block_size) {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- blockpool->pool_size--;
- }
- }
-
- if (entry != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
-
- return entry;
-}
-
-/*
- * __vxge_hw_blockpool_block_free - Frees a block from block pool
- * @devh: Hal device
- * @entry: Entry of block to be freed
- *
- * This function frees a block from block pool
- */
-void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
- struct __vxge_hw_blockpool_entry *entry)
-{
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (entry->length == blockpool->block_size) {
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- }
-
- __vxge_hw_blockpool_blocks_remove(blockpool);
-}
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 5c00861b6c2c..e249e288d160 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -20,13 +20,6 @@
#define VXGE_CACHE_LINE_SIZE 128
#endif
-#define vxge_os_vaprintf(level, mask, fmt, ...) { \
- char buff[255]; \
- snprintf(buff, 255, fmt, __VA_ARGS__); \
- printk(buff); \
- printk("\n"); \
-}
-
#ifndef VXGE_ALIGN
#define VXGE_ALIGN(adrs, size) \
(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
@@ -36,8 +29,16 @@
#define VXGE_HW_MAX_MTU 9600
#define VXGE_HW_DEFAULT_MTU 1500
-#ifdef VXGE_DEBUG_ASSERT
+#define VXGE_HW_MAX_ROM_IMAGES 8
+struct eprom_image {
+ u8 is_valid:1;
+ u8 index;
+ u8 type;
+ u16 version;
+};
+
+#ifdef VXGE_DEBUG_ASSERT
/**
* vxge_assert
* @test: C-condition to check
@@ -48,16 +49,13 @@
* compilation
* time.
*/
-#define vxge_assert(test) { \
- if (!(test)) \
- vxge_os_bug("bad cond: "#test" at %s:%d\n", \
- __FILE__, __LINE__); }
+#define vxge_assert(test) BUG_ON(!(test))
#else
#define vxge_assert(test)
#endif /* end of VXGE_DEBUG_ASSERT */
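The BUG_ON() form keeps the file/line report (BUG() itself prints it) while dropping the custom wrapper. Usage is unchanged, and as before the whole check compiles away when VXGE_DEBUG_ASSERT is not defined, so the tested condition must stay side-effect free:

	vxge_assert(vpath != NULL);     /* fine: pure test */
	vxge_assert(readq(addr) != 0);  /* avoid: the read vanishes in non-debug builds */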
/**
- * enum enum vxge_debug_level
+ * enum vxge_debug_level
* @VXGE_NONE: debug disabled
* @VXGE_ERR: all errors going to be logged out
* @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
@@ -159,6 +157,47 @@ enum vxge_hw_device_link_state {
};
/**
+ * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
+ * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
+ * @VXGE_HW_FW_UPGRADE_DONE: upload completed
+ * @VXGE_HW_FW_UPGRADE_ERR: upload error
+ * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
+ *
+ */
+enum vxge_hw_fw_upgrade_code {
+ VXGE_HW_FW_UPGRADE_OK = 0,
+ VXGE_HW_FW_UPGRADE_DONE = 1,
+ VXGE_HW_FW_UPGRADE_ERR = 2,
+ VXGE_FW_UPGRADE_BYTES2SKIP = 3
+};
+
+/**
+ * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
+ * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image (image check failed)
+ */
+enum vxge_hw_fw_upgrade_err_code {
+ VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
+ VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
+ VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
+ VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
+ VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
+};
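Together the two enums describe a chunked upload handshake: the host streams the image 16 bytes at a time and the firmware answers with one of the codes above. A hedged sketch of the consuming loop; send_next_16() and skip_count() are hypothetical helpers, not driver API:

	for (;;) {
		u32 code = send_next_16(fw_image, &offset);  /* hypothetical */

		switch (code) {
		case VXGE_HW_FW_UPGRADE_OK:
			break;                  /* keep streaming */
		case VXGE_HW_FW_UPGRADE_DONE:
			return VXGE_HW_OK;
		case VXGE_FW_UPGRADE_BYTES2SKIP:
			offset += skip_count(fw_image, offset);  /* hypothetical */
			break;
		default:                        /* VXGE_HW_FW_UPGRADE_ERR */
			return VXGE_HW_FAIL;
		}
	}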
+
+/**
* struct vxge_hw_device_date - Date Format
* @day: Day
* @month: Month
@@ -275,9 +314,9 @@ struct vxge_hw_ring_config {
#define VXGE_HW_RING_DEFAULT 1
u32 ring_blocks;
-#define VXGE_HW_MIN_RING_BLOCKS 1
-#define VXGE_HW_MAX_RING_BLOCKS 128
-#define VXGE_HW_DEF_RING_BLOCKS 2
+#define VXGE_HW_MIN_RING_BLOCKS 1
+#define VXGE_HW_MAX_RING_BLOCKS 128
+#define VXGE_HW_DEF_RING_BLOCKS 2
u32 buffer_mode;
#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
@@ -465,7 +504,6 @@ struct vxge_hw_device_config {
* See also: vxge_hw_driver_initialize().
*/
struct vxge_hw_uld_cbs {
-
void (*link_up)(struct __vxge_hw_device *devh);
void (*link_down)(struct __vxge_hw_device *devh);
void (*crit_err)(struct __vxge_hw_device *devh,
@@ -652,6 +690,7 @@ struct __vxge_hw_virtualpath {
struct vxge_hw_vpath_stats_hw_info *hw_stats;
struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
struct vxge_hw_vpath_stats_sw_info *sw_stats;
+ spinlock_t lock;
};
/*
@@ -661,7 +700,7 @@ struct __vxge_hw_virtualpath {
*
* This structure is used to store the callback information.
*/
-struct __vxge_hw_vpath_handle{
+struct __vxge_hw_vpath_handle {
struct list_head item;
struct __vxge_hw_virtualpath *vpath;
};
@@ -674,9 +713,6 @@ struct __vxge_hw_vpath_handle{
/**
* struct __vxge_hw_device - Hal device object
* @magic: Magic Number
- * @device_id: PCI Device Id of the adapter
- * @major_revision: PCI Device major revision
- * @minor_revision: PCI Device minor revision
* @bar0: BAR0 virtual address.
* @pdev: Physical device handle
* @config: Configuration passed by the LL driver at initialization
@@ -688,9 +724,6 @@ struct __vxge_hw_device {
u32 magic;
#define VXGE_HW_DEVICE_MAGIC 0x12345678
#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
- u16 device_id;
- u8 major_revision;
- u8 minor_revision;
void __iomem *bar0;
struct pci_dev *pdev;
struct net_device *ndev;
@@ -731,6 +764,7 @@ struct __vxge_hw_device {
u32 debug_level;
u32 level_err;
u32 level_trace;
+ u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
};
#define VXGE_HW_INFO_LEN 64
@@ -781,8 +815,8 @@ struct vxge_hw_device_hw_info {
u8 serial_number[VXGE_HW_INFO_LEN];
u8 part_number[VXGE_HW_INFO_LEN];
u8 product_desc[VXGE_HW_INFO_LEN];
- u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
- u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+ u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+ u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};
/**
@@ -829,20 +863,10 @@ struct vxge_hw_device_attr {
loc, \
offset, \
&val64); \
- \
if (status != VXGE_HW_OK) \
return status; \
}
-#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
- status = __vxge_hw_vpath_stats_access(vpath, \
- VXGE_HW_STATS_OP_READ, \
- offset, \
- &val64); \
- if (status != VXGE_HW_OK) \
- return status; \
-}
-
/*
* struct __vxge_hw_ring - Ring channel.
* @channel: Channel "base" of this ring, the common part of all HW
@@ -1114,7 +1138,7 @@ struct __vxge_hw_non_offload_db_wrapper {
* lookup to determine the transmit port.
* 01: Send on physical Port1.
* 10: Send on physical Port0.
- * 11: Send on both ports.
+ * 11: Send on both ports.
* Bits 18 to 21 - Reserved
* Bits 22 to 23 - Gather_Code. This field is set by the host and
* is used to describe how individual buffers comprise a frame.
@@ -1413,12 +1437,12 @@ enum vxge_hw_rth_algoritms {
* See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
*/
struct vxge_hw_rth_hash_types {
- u8 hash_type_tcpipv4_en;
- u8 hash_type_ipv4_en;
- u8 hash_type_tcpipv6_en;
- u8 hash_type_ipv6_en;
- u8 hash_type_tcpipv6ex_en;
- u8 hash_type_ipv6ex_en;
+ u8 hash_type_tcpipv4_en:1,
+ hash_type_ipv4_en:1,
+ hash_type_tcpipv6_en:1,
+ hash_type_ipv6_en:1,
+ hash_type_tcpipv6ex_en:1,
+ hash_type_ipv6ex_en:1;
};
void vxge_hw_device_debug_set(
@@ -1893,6 +1917,15 @@ out:
return vaddr;
}
+static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+ struct pci_dev **p_dma_acch)
+{
+ unsigned long misaligned = *(unsigned long *)p_dma_acch;
+ u8 *tmp = (u8 *)vaddr;
+ tmp -= misaligned;
+ kfree((void *)tmp);
+}
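vxge_os_dma_free() assumes the allocation path padded the buffer, aligned the returned pointer, and stashed the misalignment offset behind p_dma_acch. A sketch of that matching allocator, with the name and the unsigned-long slot assumed from the free routine above (kernel context; <linux/slab.h> and <linux/kernel.h> provide kmalloc() and ALIGN()):

/* Sketch only: pad the request, align the returned pointer, and record
 * the offset so the free path can recover the original kmalloc() address.
 */
static inline void *vxge_os_dma_malloc_sketch(unsigned long size,
					      unsigned long align,
					      unsigned long *p_dma_acch)
{
	u8 *vaddr = kmalloc(size + align, GFP_KERNEL);
	unsigned long misaligned;

	if (!vaddr)
		return NULL;

	misaligned = ALIGN((unsigned long)vaddr, align) - (unsigned long)vaddr;
	*p_dma_acch = misaligned;	/* read back in vxge_os_dma_free() */
	return vaddr + misaligned;
}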
+
/*
* __vxge_hw_mempool_item_priv - will return pointer on per item private space
*/
@@ -1962,7 +1995,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
@@ -2000,7 +2032,7 @@ enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
/**
- * vxge_debug
+ * vxge_debug_ll
* @level: level of debug verbosity.
* @mask: mask for the debug
* @buf: Circular buffer for tracing
@@ -2012,26 +2044,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
* may be compiled out if DEBUG macro was never defined.
* See also: enum vxge_debug_level{}.
*/
-
-#define vxge_trace_aux(level, mask, fmt, ...) \
-{\
- vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
-}
-
-#define vxge_debug(module, level, mask, fmt, ...) { \
-if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
- (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
- if ((mask & VXGE_DEBUG_MASK) == mask)\
- vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
-} \
-}
-
#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
-#define vxge_debug_ll(level, mask, fmt, ...) \
-{\
- vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\
-}
-
+#define vxge_debug_ll(level, mask, fmt, ...) do { \
+ if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
+ (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
+ if ((mask & VXGE_DEBUG_MASK) == mask) \
+ printk(fmt "\n", __VA_ARGS__); \
+} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)
#endif
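The do { ... } while (0) rewrite makes the macro safe inside unbraced if/else chains, which the old bare-brace form was not, and drops the intermediate vxge_trace_aux()/vxge_debug() layers in favor of a direct printk(). An illustrative call site (the mask bit and variable are assumed for the example):

/* Emits a printk() only when VXGE_COMPONENT_LL is selected in the
 * trace mask and the mask bit fits VXGE_DEBUG_MASK.
 */
vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_LL_CONFIG,
	      "%s: device_poll_millis : %d", VXGE_DRIVER_NAME, millis);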
@@ -2051,4 +2070,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
+
+#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
+#define VXGE_HW_MAX_POLLING_COUNT 100
+
+void
+vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+ u32 *minor, u32 *build);
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
+ int size);
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+ struct eprom_image *eprom_image_data);
+
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
#endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index b67746eef923..1dd3a21b3a43 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -11,7 +11,7 @@
* Virtualized Server Adapter.
* Copyright(c) 2002-2010 Exar Corp.
******************************************************************************/
-#include<linux/ethtool.h>
+#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
@@ -29,7 +29,6 @@
* Return value:
* 0 on success.
*/
-
static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
{
/* We currently only support 10Gb/FULL */
@@ -79,10 +78,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
 * Returns driver-specific information such as name and version to ethtool.
*/
static void vxge_ethtool_gdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+ struct ethtool_drvinfo *info)
{
- struct vxgedev *vdev;
- vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
@@ -104,15 +102,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
* buffer area.
*/
static void vxge_ethtool_gregs(struct net_device *dev,
- struct ethtool_regs *regs, void *space)
+ struct ethtool_regs *regs, void *space)
{
int index, offset;
enum vxge_hw_status status;
u64 reg;
- u64 *reg_space = (u64 *) space;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ u64 *reg_space = (u64 *)space;
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
regs->version = vdev->pdev->subsystem_device;
@@ -147,9 +144,8 @@ static void vxge_ethtool_gregs(struct net_device *dev,
*/
static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
@@ -168,11 +164,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
* void
*/
static void vxge_ethtool_getpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
+ struct ethtool_pauseparam *ep)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
}
@@ -188,11 +183,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev,
* int, returns 0 on Success
*/
static int vxge_ethtool_setpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
+ struct ethtool_pauseparam *ep)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
@@ -209,9 +203,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
enum vxge_hw_status status;
enum vxge_hw_status swstatus;
struct vxge_vpath *vpath = NULL;
-
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
struct vxge_hw_xmac_stats *xmac_stats;
struct vxge_hw_device_stats_sw_info *sw_stats;
struct vxge_hw_device_stats_hw_info *hw_stats;
@@ -574,12 +567,12 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
kfree(hw_stats);
}
-static void vxge_ethtool_get_strings(struct net_device *dev,
- u32 stringset, u8 *data)
+static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
{
int stat_size = 0;
int i, j;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
switch (stringset) {
case ETH_SS_STATS:
vxge_add_string("VPATH STATISTICS%s\t\t\t",
@@ -1066,21 +1059,21 @@ static void vxge_ethtool_get_strings(struct net_device *dev,
static int vxge_ethtool_get_regs_len(struct net_device *dev)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
}
static u32 vxge_get_rx_csum(struct net_device *dev)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
return vdev->rx_csum;
}
static int vxge_set_rx_csum(struct net_device *dev, u32 data)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
if (data)
vdev->rx_csum = 1;
@@ -1102,7 +1095,7 @@ static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
@@ -1119,6 +1112,59 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
}
}
+static int vxge_set_flags(struct net_device *dev, u32 data)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ enum vxge_hw_status status;
+
+ if (data & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
+ return 0;
+
+ /* Enabling RTH requires some of the logic in vxge_device_register and a
+ * vpath reset. Due to these restrictions, only allow modification
+ * while the interface is down.
+ */
+ if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
+ return -EINVAL;
+
+ vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
+
+ status = vxge_reset_all_vpaths(vdev);
+ if (status != VXGE_HW_OK) {
+ vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
+ return -EFAULT;
+ }
+
+ if (vdev->devh->config.rth_en)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+
+ return 0;
+}
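This handler is reached via the ETHTOOL_SFLAGS ioctl (ethtool's rxhash flag). A minimal userspace sketch, assuming ETH_FLAG_RXHASH is available in the installed kernel headers:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Sketch: toggle receive hashing; vxge_set_flags() above returns
 * -EINVAL unless the interface is down and RTH steering is enabled.
 */
static int set_rxhash(int sock, const char *ifname, int on)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_SFLAGS };
	struct ifreq ifr;

	eval.data = on ? ETH_FLAG_RXHASH : 0;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&eval;

	return ioctl(sock, SIOCETHTOOL, &ifr);
}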
+
+static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+
+ if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
+ printk(KERN_INFO "Single Function Mode is required to flash the"
+ " firmware\n");
+ return -EINVAL;
+ }
+
+ if (netif_running(dev)) {
+ printk(KERN_INFO "Interface %s must be down to flash the "
+ "firmware\n", dev->name);
+ return -EBUSY;
+ }
+
+ return vxge_fw_upgrade(vdev, parms->data, 1);
+}
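Userspace reaches this handler through the ETHTOOL_FLASHDEV ioctl (what `ethtool -f` issues); parms->data carries the firmware file name that vxge_fw_upgrade() then resolves via request_firmware(). A minimal sketch using only standard uapi definitions:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Sketch: request a flash of "vxge/X3fw.ncf" on ifname over an open
 * AF_INET socket; the kernel routes this to vxge_fw_flash() above.
 */
static int flash_vxge_fw(int sock, const char *ifname)
{
	struct ethtool_flash eflash = { .cmd = ETHTOOL_FLASHDEV };
	struct ifreq ifr;

	strncpy(eflash.data, "vxge/X3fw.ncf", sizeof(eflash.data) - 1);
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&eflash;

	return ioctl(sock, SIOCETHTOOL, &ifr);	/* 0 on success */
}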
+
static const struct ethtool_ops vxge_ethtool_ops = {
.get_settings = vxge_ethtool_gset,
.set_settings = vxge_ethtool_sset,
@@ -1131,7 +1177,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.get_rx_csum = vxge_get_rx_csum,
.set_rx_csum = vxge_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
@@ -1140,6 +1186,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.phys_id = vxge_ethtool_idnic,
.get_sset_count = vxge_ethtool_get_sset_count,
.get_ethtool_stats = vxge_get_ethtool_stats,
+ .set_flags = vxge_set_flags,
+ .flash_device = vxge_fw_flash,
};
void vxge_initialize_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 813829f3d024..1ac9b568f1b0 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -50,6 +50,8 @@
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/net_tstamp.h>
#include "vxge-main.h"
#include "vxge-reg.h"
@@ -82,16 +84,6 @@ module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
-static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -148,11 +140,10 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
* This function is called during interrupt context to notify link up state
* change.
*/
-static void
-vxge_callback_link_up(struct __vxge_hw_device *hldev)
+static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
vdev->ndev->name, __func__, __LINE__);
@@ -172,11 +163,10 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
* This function is called during interrupt context to notify link down state
* change.
*/
-static void
-vxge_callback_link_down(struct __vxge_hw_device *hldev)
+static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
@@ -195,7 +185,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev)
*
* Allocate SKB.
*/
-static struct sk_buff*
+static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
struct net_device *dev;
@@ -369,7 +359,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata)
{
struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct net_device *dev = ring->ndev;
+ struct net_device *dev = ring->ndev;
unsigned int dma_sizes;
void *first_dtr = NULL;
int dtr_cnt = 0;
@@ -413,7 +403,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
prefetch((char *)skb + L1_CACHE_BYTES);
if (unlikely(t_code)) {
-
if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
VXGE_HW_OK) {
@@ -436,9 +425,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
}
if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
-
if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
-
if (!vxge_rx_map(dtr, ring)) {
skb_put(skb, pkt_length);
@@ -513,6 +500,23 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
else
skb_checksum_none_assert(skb);
+
+ if (ring->rx_hwts) {
+ struct skb_shared_hwtstamps *skb_hwts;
+ u32 ns = *(u32 *)(skb->head + pkt_length);
+
+ skb_hwts = skb_hwtstamps(skb);
+ skb_hwts->hwtstamp = ns_to_ktime(ns);
+ skb_hwts->syststamp.tv64 = 0;
+ }
+
+ /* rth_hash_type and rth_it_hit are non-zero regardless of
+ * whether rss is enabled. Only the rth_value is zero/non-zero
+ * if rss is disabled/enabled, so key off of that.
+ */
+ if (ext_info.rth_value)
+ skb->rxhash = ext_info.rth_value;
+
vxge_rx_complete(ring, skb, ext_info.vlan,
pkt_length, &ext_info);
@@ -660,6 +664,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list(
return FALSE;
}
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+ struct vxge_mac_addrs *new_mac_entry;
+ u8 *mac_address = NULL;
+
+ if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
+ return TRUE;
+
+ new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
+ if (!new_mac_entry) {
+ vxge_debug_mem(VXGE_ERR,
+ "%s: memory allocation failed",
+ VXGE_DRIVER_NAME);
+ return FALSE;
+ }
+
+ list_add(&new_mac_entry->item, &vpath->mac_addr_list);
+
+ /* Copy the new mac address to the list */
+ mac_address = (u8 *)&new_mac_entry->macaddr;
+ memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+ new_mac_entry->state = mac->state;
+ vpath->mac_addr_cnt++;
+
+ /* Is this a multicast address */
+ if (0x01 & mac->macaddr[0])
+ vpath->mcast_addr_cnt++;
+
+ return TRUE;
+}
+
+/* Add a mac address to DA table */
+static enum vxge_hw_status
+vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_vpath *vpath;
+ enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
+
+ if (0x01 & mac->macaddr[0]) /* multicast address */
+ duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
+ else
+ duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
+
+ vpath = &vdev->vpaths[mac->vpath_no];
+ status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
+ mac->macmask, duplicate_mode);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA config add entry failed for vpath:%d",
+ vpath->device_id);
+ } else
+ if (FALSE == vxge_mac_list_add(vpath, mac))
+ status = -EPERM;
+
+ return status;
+}
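The `0x01 & mac->macaddr[0]` test is the Ethernet I/G (multicast) bit; the generic helper from <linux/etherdevice.h> expresses the same check:

/* Equivalent test using the generic helper: */
if (is_multicast_ether_addr(mac->macaddr))
	duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
else
	duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;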
+
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
struct macInfo mac_info;
@@ -670,7 +733,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
struct vxge_vpath *vpath = NULL;
struct __vxge_hw_device *hldev;
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ hldev = pci_get_drvdata(vdev->pdev);
mac_address = (u8 *)&mac_addr;
memcpy(mac_address, mac_header, ETH_ALEN);
@@ -769,7 +832,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
if (unlikely(!is_vxge_card_up(vdev))) {
vxge_debug_tx(VXGE_ERR,
@@ -1005,6 +1068,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
"%s:%d Exiting...", __func__, __LINE__);
}
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+ struct list_head *entry, *next;
+ u64 del_mac = 0;
+ u8 *mac_address = (u8 *) (&del_mac);
+
+ /* Copy the mac address to delete from the list */
+ memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+ list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+ if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
+ list_del(entry);
+ kfree((struct vxge_mac_addrs *)entry);
+ vpath->mac_addr_cnt--;
+
+ /* Is this a multicast address */
+ if (0x01 & mac->macaddr[0])
+ vpath->mcast_addr_cnt--;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/* delete a mac address from DA table */
+static enum vxge_hw_status
+vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_vpath *vpath;
+
+ vpath = &vdev->vpaths[mac->vpath_no];
+ status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
+ mac->macmask);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA config delete entry failed for vpath:%d",
+ vpath->device_id);
+ } else
+ vxge_mac_list_del(vpath, mac);
+ return status;
+}
+
/**
* vxge_set_multicast
* @dev: pointer to the device structure
@@ -1034,7 +1141,7 @@ static void vxge_set_multicast(struct net_device *dev)
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
hldev = (struct __vxge_hw_device *)vdev->devh;
if (unlikely(!is_vxge_card_up(vdev)))
@@ -1094,7 +1201,7 @@ static void vxge_set_multicast(struct net_device *dev)
/* Delete previous MC's */
for (i = 0; i < mcast_cnt; i++) {
list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *) entry;
+ mac_entry = (struct vxge_mac_addrs *)entry;
/* Copy the mac address to delete */
mac_address = (u8 *)&mac_entry->macaddr;
memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1137,7 +1244,7 @@ _set_all_mcast:
/* Delete previous MC's */
for (i = 0; i < mcast_cnt; i++) {
list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *) entry;
+ mac_entry = (struct vxge_mac_addrs *)entry;
/* Copy the mac address to delete */
mac_address = (u8 *)&mac_entry->macaddr;
memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1184,14 +1291,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info_new, mac_info_old;
int vpath_idx = 0;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
hldev = vdev->devh;
if (!is_valid_ether_addr(addr->sa_data))
@@ -1292,8 +1399,13 @@ static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+ struct __vxge_hw_device *hldev;
int msix_id;
+ hldev = pci_get_drvdata(vdev->pdev);
+
+ vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
+
vxge_hw_vpath_intr_disable(vpath->handle);
if (vdev->config.intr_type == INTA)
@@ -1310,6 +1422,95 @@ static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
}
}
+/* list all mac addresses from DA table */
+static enum vxge_hw_status
+vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ unsigned char macmask[ETH_ALEN];
+ unsigned char macaddr[ETH_ALEN];
+
+ status = vxge_hw_vpath_mac_addr_get(vpath->handle,
+ macaddr, macmask);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA config list entry failed for vpath:%d",
+ vpath->device_id);
+ return status;
+ }
+
+ while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+ status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
+ macaddr, macmask);
+ if (status != VXGE_HW_OK)
+ break;
+ }
+
+ return status;
+}
+
+/* Store all mac addresses from the list to the DA table */
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct macInfo mac_info;
+ u8 *mac_address = NULL;
+ struct list_head *entry, *next;
+
+ memset(&mac_info, 0, sizeof(struct macInfo));
+
+ if (vpath->is_open) {
+ list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+ mac_address = (u8 *)&((struct vxge_mac_addrs *)entry)->macaddr;
+ memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
+ ((struct vxge_mac_addrs *)entry)->state =
+ VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+ /* does this mac address already exist in da table? */
+ status = vxge_search_mac_addr_in_da_table(vpath,
+ &mac_info);
+ if (status != VXGE_HW_OK) {
+ /* Add this mac address to the DA table */
+ status = vxge_hw_vpath_mac_addr_add(
+ vpath->handle, mac_info.macaddr,
+ mac_info.macmask,
+ VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA add entry failed for vpath:%d",
+ vpath->device_id);
+ ((struct vxge_mac_addrs *)entry)->state
+ = VXGE_LL_MAC_ADDR_IN_LIST;
+ }
+ }
+ }
+ }
+
+ return status;
+}
+
+/* Store all vlan ids from the list to the vid table */
+static enum vxge_hw_status
+vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxgedev *vdev = vpath->vdev;
+ u16 vid;
+
+ if (vdev->vlgrp && vpath->is_open) {
+
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ if (!vlan_group_get_device(vdev->vlgrp, vid))
+ continue;
+ /* Add these vlan to the vid table */
+ status = vxge_hw_vpath_vid_add(vpath->handle, vid);
+ }
+ }
+
+ return status;
+}
+
/*
* vxge_reset_vpath
* @vdev: pointer to vdev
@@ -1405,12 +1606,16 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
}
if (event == VXGE_LL_FULL_RESET) {
+ netif_carrier_off(vdev->ndev);
+
/* wait for all the vpath reset to complete */
for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
while (test_bit(vp_id, &vdev->vp_reset))
msleep(50);
}
+ netif_carrier_on(vdev->ndev);
+
/* if execution mode is set to debug, don't reset the adapter */
if (unlikely(vdev->exec_mode)) {
vxge_debug_init(VXGE_ERR,
@@ -1423,6 +1628,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
}
if (event == VXGE_LL_FULL_RESET) {
+ vxge_hw_device_wait_receive_idle(vdev->devh);
vxge_hw_device_intr_disable(vdev->devh);
switch (vdev->cric_err_event) {
@@ -1563,9 +1769,14 @@ out:
*
* driver may reset the chip on events of serr, eccerr, etc
*/
-static int vxge_reset(struct vxgedev *vdev)
+static void vxge_reset(struct work_struct *work)
{
- return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
+ struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
+
+ if (!netif_running(vdev->ndev))
+ return;
+
+ do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
/**
@@ -1608,8 +1819,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
int budget_org = budget;
struct vxge_ring *ring;
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
for (i = 0; i < vdev->no_of_vpath; i++) {
ring = &vdev->vpaths[i].ring;
@@ -1645,11 +1855,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
*/
static void vxge_netpoll(struct net_device *dev)
{
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
struct vxgedev *vdev;
- vdev = (struct vxgedev *)netdev_priv(dev);
- hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
@@ -1689,15 +1899,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
mtable[index] = index % vdev->no_of_vpath;
}
- /* Fill RTH hash types */
- hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
- hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
- hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
- hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
- hash_types.hash_type_tcpipv6ex_en =
- vdev->config.rth_hash_type_tcpipv6ex;
- hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
-
/* set indirection table, bucket-to-vpath mapping */
status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
vdev->no_of_vpath,
@@ -1710,19 +1911,27 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status;
}
+ /* Fill RTH hash types */
+ hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
+ hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
+ hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
+ hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
+ hash_types.hash_type_tcpipv6ex_en =
+ vdev->config.rth_hash_type_tcpipv6ex;
+ hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
+
/*
- * Because the itable_set() method uses the active_table field
- * for the target virtual path the RTH config should be updated
- * for all VPATHs. The h/w only uses the lowest numbered VPATH
- * when steering frames.
- */
+ * Because the itable_set() method uses the active_table field
+ * for the target virtual path the RTH config should be updated
+ * for all VPATHs. The h/w only uses the lowest numbered VPATH
+ * when steering frames.
+ */
for (index = 0; index < vdev->no_of_vpath; index++) {
status = vxge_hw_vpath_rts_rth_set(
vdev->vpaths[index].handle,
vdev->config.rth_algorithm,
&hash_types,
vdev->config.rth_bkt_sz);
-
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"RTH configuration failed for vpath:%d",
@@ -1734,201 +1943,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status;
}
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct vxge_mac_addrs *new_mac_entry;
- u8 *mac_address = NULL;
-
- if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
- return TRUE;
-
- new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
- if (!new_mac_entry) {
- vxge_debug_mem(VXGE_ERR,
- "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- return FALSE;
- }
-
- list_add(&new_mac_entry->item, &vpath->mac_addr_list);
-
- /* Copy the new mac address to the list */
- mac_address = (u8 *)&new_mac_entry->macaddr;
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- new_mac_entry->state = mac->state;
- vpath->mac_addr_cnt++;
-
- /* Is this a multicast address */
- if (0x01 & mac->macaddr[0])
- vpath->mcast_addr_cnt++;
-
- return TRUE;
-}
-
-/* Add a mac address to DA table */
-static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
-
- if (0x01 & mac->macaddr[0]) /* multicast address */
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
- else
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
- mac->macmask, duplicate_mode);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config add entry failed for vpath:%d",
- vpath->device_id);
- } else
- if (FALSE == vxge_mac_list_add(vpath, mac))
- status = -EPERM;
-
- return status;
-}
-
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct list_head *entry, *next;
- u64 del_mac = 0;
- u8 *mac_address = (u8 *) (&del_mac);
-
- /* Copy the mac address to delete from the list */
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
- list_del(entry);
- kfree((struct vxge_mac_addrs *)entry);
- vpath->mac_addr_cnt--;
-
- /* Is this a multicast address */
- if (0x01 & mac->macaddr[0])
- vpath->mcast_addr_cnt--;
- return TRUE;
- }
- }
-
- return FALSE;
-}
-/* delete a mac address from DA table */
-static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
- mac->macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config delete entry failed for vpath:%d",
- vpath->device_id);
- } else
- vxge_mac_list_del(vpath, mac);
- return status;
-}
-
-/* list all mac addresses from DA table */
-enum vxge_hw_status
-static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
- struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- unsigned char macmask[ETH_ALEN];
- unsigned char macaddr[ETH_ALEN];
-
- status = vxge_hw_vpath_mac_addr_get(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config list entry failed for vpath:%d",
- vpath->device_id);
- return status;
- }
-
- while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
-
- status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK)
- break;
- }
-
- return status;
-}
-
-/* Store all vlan ids from the list to the vid table */
-static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxgedev *vdev = vpath->vdev;
- u16 vid;
-
- if (vdev->vlgrp && vpath->is_open) {
-
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(vdev->vlgrp, vid))
- continue;
- /* Add these vlan to the vid table */
- status = vxge_hw_vpath_vid_add(vpath->handle, vid);
- }
- }
-
- return status;
-}
-
-/* Store all mac addresses from the list to the DA table */
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info;
- u8 *mac_address = NULL;
- struct list_head *entry, *next;
-
- memset(&mac_info, 0, sizeof(struct macInfo));
-
- if (vpath->is_open) {
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- mac_address =
- (u8 *)&
- ((struct vxge_mac_addrs *)entry)->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
- ((struct vxge_mac_addrs *)entry)->state =
- VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- /* does this mac address already exist in da table? */
- status = vxge_search_mac_addr_in_da_table(vpath,
- &mac_info);
- if (status != VXGE_HW_OK) {
- /* Add this mac address to the DA table */
- status = vxge_hw_vpath_mac_addr_add(
- vpath->handle, mac_info.macaddr,
- mac_info.macmask,
- VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA add entry failed for vpath:%d",
- vpath->device_id);
- ((struct vxge_mac_addrs *)entry)->state
- = VXGE_LL_MAC_ADDR_IN_LIST;
- }
- }
- }
- }
-
- return status;
-}
-
/* reset vpaths */
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1988,8 +2004,23 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
for (i = 0; i < vdev->no_of_vpath; i++) {
vpath = &vdev->vpaths[i];
-
vxge_assert(vpath->is_configured);
+
+ if (!vdev->titan1) {
+ struct vxge_hw_vp_config *vcfg;
+ vcfg = &vdev->devh->config.vp_config[vpath->device_id];
+
+ vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
+ vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
+ vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
+ vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
+ vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
+ vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
+ vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
+ vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
+ vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
+ }
+
attr.vp_id = vpath->device_id;
attr.fifo_attr.callback = vxge_xmit_compl;
attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -2004,6 +2035,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vpath->ring.ndev = vdev->ndev;
vpath->ring.pdev = vdev->pdev;
+
status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
if (status == VXGE_HW_OK) {
vpath->fifo.handle =
@@ -2024,6 +2056,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vdev->config.fifo_indicate_max_pkts;
vpath->ring.rx_vector_no = 0;
vpath->ring.rx_csum = vdev->rx_csum;
+ vpath->ring.rx_hwts = vdev->rx_hwts;
vpath->is_open = 1;
vdev->vp_handles[i] = vpath->handle;
vpath->ring.gro_enable = vdev->config.gro_enable;
@@ -2031,11 +2064,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vdev->stats.vpaths_open++;
} else {
vdev->stats.vpath_open_fail++;
- vxge_debug_init(VXGE_ERR,
- "%s: vpath: %d failed to open "
- "with status: %d",
- vdev->ndev->name, vpath->device_id,
- status);
+ vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
+ "open with status: %d",
+ vdev->ndev->name, vpath->device_id,
+ status);
vxge_close_vpaths(vdev, 0);
return -EPERM;
}
@@ -2043,6 +2075,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vp_id = vpath->handle->vpath->vp_id;
vdev->vpaths_deployed |= vxge_mBIT(vp_id);
}
+
return VXGE_HW_OK;
}
@@ -2062,21 +2095,20 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
struct __vxge_hw_device *hldev;
u64 reason;
enum vxge_hw_status status;
- struct vxgedev *vdev = (struct vxgedev *) dev_id;;
+ struct vxgedev *vdev = (struct vxgedev *)dev_id;
vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
dev = vdev->ndev;
- hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+ hldev = pci_get_drvdata(vdev->pdev);
if (pci_channel_offline(vdev->pdev))
return IRQ_NONE;
if (unlikely(!is_vxge_card_up(vdev)))
- return IRQ_NONE;
+ return IRQ_HANDLED;
- status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
- &reason);
+ status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
if (status == VXGE_HW_OK) {
vxge_hw_device_mask_all(hldev);
@@ -2301,8 +2333,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
static void vxge_rem_isr(struct vxgedev *vdev)
{
- struct __vxge_hw_device *hldev;
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ struct __vxge_hw_device *hldev;
+ hldev = pci_get_drvdata(vdev->pdev);
#ifdef CONFIG_PCI_MSI
if (vdev->config.intr_type == MSI_X) {
@@ -2529,8 +2561,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-static int
-vxge_open(struct net_device *dev)
+static int vxge_open(struct net_device *dev)
{
enum vxge_hw_status status;
struct vxgedev *vdev;
@@ -2539,11 +2570,12 @@ vxge_open(struct net_device *dev)
int ret = 0;
int i;
u64 val64, function_mode;
+
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d", dev->name, __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
function_mode = vdev->config.device_hw_info.function_mode;
/* make sure you have link off by default every time Nic is
@@ -2598,6 +2630,8 @@ vxge_open(struct net_device *dev)
goto out2;
}
}
+ printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
+ hldev->config.rth_en ? "enabled" : "disabled");
for (i = 0; i < vdev->no_of_vpath; i++) {
vpath = &vdev->vpaths[i];
@@ -2683,9 +2717,10 @@ vxge_open(struct net_device *dev)
vxge_os_timer(vdev->vp_reset_timer,
vxge_poll_vp_reset, vdev, (HZ/2));
- if (vdev->vp_lockup_timer.function == NULL)
- vxge_os_timer(vdev->vp_lockup_timer,
- vxge_poll_vp_lockup, vdev, (HZ/2));
+ /* There is no need to check for RxD leak and RxD lookup on Titan1A */
+ if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
+ vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+ HZ / 2);
set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -2767,8 +2802,8 @@ static int do_vxge_close(struct net_device *dev, int do_io)
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
dev->name, __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
if (unlikely(!is_vxge_card_up(vdev)))
return 0;
@@ -2778,7 +2813,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
msleep(50);
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
if (do_io) {
/* Put the vpath back in normal mode */
vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2789,7 +2823,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
struct vxge_hw_mrpcim_reg,
rts_mgr_cbasin_cfg),
&val64);
-
if (status == VXGE_HW_OK) {
val64 &= ~vpath_vector;
status = vxge_hw_mgmt_reg_write(vdev->devh,
@@ -2818,10 +2851,17 @@ static int do_vxge_close(struct net_device *dev, int do_io)
smp_wmb();
}
- del_timer_sync(&vdev->vp_lockup_timer);
+
+ if (vdev->titan1)
+ del_timer_sync(&vdev->vp_lockup_timer);
del_timer_sync(&vdev->vp_reset_timer);
+ if (do_io)
+ vxge_hw_device_wait_receive_idle(hldev);
+
+ clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
/* Disable napi */
if (vdev->config.intr_type != MSI_X)
napi_disable(&vdev->napi);
@@ -2838,8 +2878,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
if (do_io)
vxge_hw_device_intr_disable(vdev->devh);
- mdelay(1000);
-
vxge_rem_isr(vdev);
vxge_napi_del_all(vdev);
@@ -2868,8 +2906,7 @@ static int do_vxge_close(struct net_device *dev, int do_io)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-static int
-vxge_close(struct net_device *dev)
+static int vxge_close(struct net_device *dev)
{
do_vxge_close(dev, 1);
return 0;
@@ -2943,9 +2980,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
- net_stats->rx_dropped +=
- vdev->vpaths[k].ring.stats.rx_dropped;
-
+ net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
@@ -2954,6 +2989,101 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
return net_stats;
}
+static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
+ int enable)
+{
+ enum vxge_hw_status status;
+ u64 val64;
+
+ /* Timestamp is passed to the driver via the FCS, therefore we
+ * must disable the FCS stripping by the adapter. Since this is
+ * required for the driver to load (due to a hardware bug),
+ * there is no need to do anything special here.
+ */
+ if (enable)
+ val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
+ VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
+ VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
+ else
+ val64 = 0;
+
+ status = vxge_hw_mgmt_reg_write(vdev->devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ offsetof(struct vxge_hw_mrpcim_reg,
+ xmac_timestamp),
+ val64);
+ vxge_hw_device_flush_io(vdev->devh);
+ return status;
+}
+
+static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+{
+ struct hwtstamp_config config;
+ enum vxge_hw_status status;
+ int i;
+
+ if (copy_from_user(&config, data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* Transmit HW Timestamp not supported */
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ status = vxge_timestamp_config(vdev, 0);
+ if (status != VXGE_HW_OK)
+ return -EFAULT;
+
+ vdev->rx_hwts = 0;
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ status = vxge_timestamp_config(vdev, 1);
+ if (status != VXGE_HW_OK)
+ return -EFAULT;
+
+ vdev->rx_hwts = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
+
+ if (copy_to_user(data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
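Userspace enables this path with the SIOCSHWTSTAMP ioctl; since the adapter timestamps all received frames, any PTP filter is coerced to HWTSTAMP_FILTER_ALL and reported back. A minimal sketch:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Sketch: turn on RX hardware timestamps for ifname; TX stamping is
 * rejected by the driver, so it stays HWTSTAMP_TX_OFF.
 */
static int enable_rx_hwts(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}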
+
/**
* vxge_ioctl
* @dev: Device pointer.
@@ -2966,7 +3096,20 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
*/
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- return -EOPNOTSUPP;
+ struct vxgedev *vdev = netdev_priv(dev);
+ int ret;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
/**
@@ -2977,18 +3120,17 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
* This function is triggered if the Tx Queue is stopped
* for a pre-defined amount of time when the Interface is still up.
*/
-static void
-vxge_tx_watchdog(struct net_device *dev)
+static void vxge_tx_watchdog(struct net_device *dev)
{
struct vxgedev *vdev;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
- vxge_reset(vdev);
+ schedule_work(&vdev->reset_task);
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
}
@@ -3012,7 +3154,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
vpath = &vdev->vpaths[0];
if ((NULL == grp) && (vpath->is_open)) {
@@ -3061,7 +3203,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct vxge_vpath *vpath;
int vp_id;
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
/* Add these vlan to the vid table */
for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
@@ -3088,7 +3230,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
vlan_group_set_device(vdev->vlgrp, vid, NULL);
@@ -3110,21 +3252,31 @@ static const struct net_device_ops vxge_netdev_ops = {
.ndo_start_xmit = vxge_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = vxge_set_multicast,
-
.ndo_do_ioctl = vxge_ioctl,
-
.ndo_set_mac_address = vxge_set_mac_addr,
.ndo_change_mtu = vxge_change_mtu,
.ndo_vlan_rx_register = vxge_vlan_rx_register,
.ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
.ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
-
.ndo_tx_timeout = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vxge_netpoll,
#endif
};
+static int __devinit vxge_device_revision(struct vxgedev *vdev)
+{
+ int ret;
+ u8 revision;
+
+ ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
+ if (ret)
+ return -EIO;
+
+ vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
+ return 0;
+}
+
static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
struct vxge_config *config,
int high_dma, int no_of_vpath,
@@ -3163,6 +3315,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
vdev->pdev = hldev->pdev;
memcpy(&vdev->config, config, sizeof(struct vxge_config));
vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
+ vdev->rx_hwts = 0;
+
+ ret = vxge_device_revision(vdev);
+ if (ret < 0)
+ goto _out1;
SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
@@ -3175,9 +3332,15 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
ndev->netdev_ops = &vxge_netdev_ops;
ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
+ INIT_WORK(&vdev->reset_task, vxge_reset);
vxge_initialize_ethtool_ops(ndev);
+ if (vdev->config.rth_steering != NO_STEERING) {
+ ndev->features |= NETIF_F_RXHASH;
+ hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
+ }
+
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
no_of_vpath, GFP_KERNEL);
@@ -3191,7 +3354,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
ndev->features |= NETIF_F_SG;
- ndev->features |= NETIF_F_HW_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s : checksuming enabled", __func__);
@@ -3227,6 +3390,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
"%s: Ethernet device registered",
ndev->name);
+ hldev->ndev = ndev;
*vdev_out = vdev;
/* Resetting the Device stats */
@@ -3261,36 +3425,29 @@ _out0:
*
* This function will unregister and free network device
*/
-static void
-vxge_device_unregister(struct __vxge_hw_device *hldev)
+static void vxge_device_unregister(struct __vxge_hw_device *hldev)
{
struct vxgedev *vdev;
struct net_device *dev;
char buf[IFNAMSIZ];
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- u32 level_trace;
-#endif
dev = hldev->ndev;
vdev = netdev_priv(dev);
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- level_trace = vdev->level_trace;
-#endif
- vxge_debug_entryexit(level_trace,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
- memcpy(buf, vdev->ndev->name, IFNAMSIZ);
+ vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
+ __func__, __LINE__);
+
+ strncpy(buf, dev->name, IFNAMSIZ);
+
+ flush_work_sync(&vdev->reset_task);
/* in 2.6 will call stop() if device is up */
unregister_netdev(dev);
- flush_scheduled_work();
-
- vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
- vxge_debug_entryexit(level_trace,
- "%s: %s:%d Exiting...", buf, __func__, __LINE__);
+ vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
+ buf);
+ vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
+ __func__, __LINE__);
}
/*
@@ -3304,7 +3461,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
enum vxge_hw_event type, u64 vp_id)
{
struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
struct vxge_vpath *vpath = NULL;
int vpath_idx;
@@ -3527,9 +3684,9 @@ static int __devinit vxge_config_vpaths(
device_config->vp_config[i].tti.timer_ac_en =
VXGE_HW_TIM_TIMER_AC_ENABLE;
- /* For msi-x with napi (each vector
- has a handler of its own) -
- Set CI to OFF for all vpaths */
+ /* For msi-x with napi (each vector has a handler of its own) -
+ * Set CI to OFF for all vpaths
+ */
device_config->vp_config[i].tti.timer_ci_en =
VXGE_HW_TIM_TIMER_CI_DISABLE;
@@ -3559,10 +3716,13 @@ static int __devinit vxge_config_vpaths(
device_config->vp_config[i].ring.ring_blocks =
VXGE_HW_DEF_RING_BLOCKS;
+
device_config->vp_config[i].ring.buffer_mode =
VXGE_HW_RING_RXD_BUFFER_MODE_1;
+
device_config->vp_config[i].ring.rxds_limit =
VXGE_HW_DEF_RING_RXDS_LIMIT;
+
device_config->vp_config[i].ring.scatter_mode =
VXGE_HW_RING_SCATTER_MODE_A;
@@ -3642,6 +3802,7 @@ static void __devinit vxge_device_config_init(
device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
break;
}
+
/* Timer period between device poll */
device_config->device_poll_millis = VXGE_TIMER_DELAY;
@@ -3653,16 +3814,10 @@ static void __devinit vxge_device_config_init(
vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
__func__);
- vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
- device_config->dma_blockpool_initial);
- vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
- device_config->dma_blockpool_max);
vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
device_config->intr_mode);
vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
device_config->device_poll_millis);
- vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
- device_config->rts_mac_en);
vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
device_config->rth_en);
vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
@@ -3751,9 +3906,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
vxge_debug_init(VXGE_TRACE,
"%s: MAC Address learning enabled", vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Rx doorbell mode enabled", vdev->ndev->name);
-
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!vxge_bVALn(vpath_mask, i, 1))
continue;
@@ -3766,14 +3918,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
((struct __vxge_hw_device *)(vdev->devh))->
config.vp_config[i].rpa_strip_vlan_tag
? "Enabled" : "Disabled");
- vxge_debug_init(VXGE_TRACE,
- "%s: Ring blocks : %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
- config.vp_config[i].ring.ring_blocks);
- vxge_debug_init(VXGE_TRACE,
- "%s: Fifo blocks : %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
- config.vp_config[i].fifo.fifo_blocks);
vxge_debug_ll_config(VXGE_TRACE,
"%s: Max frags : %d", vdev->ndev->name,
((struct __vxge_hw_device *)(vdev->devh))->
@@ -3813,8 +3957,7 @@ static int vxge_pm_resume(struct pci_dev *pdev)
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct __vxge_hw_device *hldev =
- (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
struct net_device *netdev = hldev->ndev;
netif_device_detach(netdev);
@@ -3843,8 +3986,7 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
- struct __vxge_hw_device *hldev =
- (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
struct net_device *netdev = hldev->ndev;
struct vxgedev *vdev = netdev_priv(netdev);
@@ -3855,7 +3997,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
}
pci_set_master(pdev);
- vxge_reset(vdev);
+ do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -3869,8 +4011,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
*/
static void vxge_io_resume(struct pci_dev *pdev)
{
- struct __vxge_hw_device *hldev =
- (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
struct net_device *netdev = hldev->ndev;
if (netif_running(netdev)) {
@@ -3914,6 +4055,156 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
return num_functions;
}
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
+{
+ struct __vxge_hw_device *hldev = vdev->devh;
+ u32 maj, min, bld, cmaj, cmin, cbld;
+ enum vxge_hw_status status;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
+ if (ret) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
+ VXGE_DRIVER_NAME, fw_name);
+ goto out;
+ }
+
+ /* Load the new firmware onto the adapter */
+ status = vxge_update_fw_image(hldev, fw->data, fw->size);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: FW image download to adapter failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Read the version of the new firmware */
+ status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: Upgrade read version failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ cmaj = vdev->config.device_hw_info.fw_version.major;
+ cmin = vdev->config.device_hw_info.fw_version.minor;
+ cbld = vdev->config.device_hw_info.fw_version.build;
+ /* It's possible the version in /lib/firmware is not the latest version.
+ * If so, we could get into a loop of trying to upgrade to the latest
+ * and flashing the older version.
+ */
+ if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
+ !override) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
+ maj, min, bld);
+
+ /* Flash the adapter with the new firmware */
+ status = vxge_hw_flash_fw(hldev);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
+ "hard reset before using, thus requiring a system reboot or a "
+ "hotplug event.\n");
+
+out:
+ return ret;
+}
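VXGE_FW_VER() serves as a monotonic comparison key for major/minor/build triples. Its definition lives in the driver header rather than this hunk; a plausible shape, stated as an assumption:

/* Assumed shape of the version-packing macro used above: */
#define VXGE_FW_VER(maj, min, bld) \
	(((maj) << 16) + ((min) << 8) + (bld))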
+
+static int vxge_probe_fw_update(struct vxgedev *vdev)
+{
+ u32 maj, min, bld;
+ int ret, gpxe = 0;
+ char *fw_name;
+
+ maj = vdev->config.device_hw_info.fw_version.major;
+ min = vdev->config.device_hw_info.fw_version.minor;
+ bld = vdev->config.device_hw_info.fw_version.build;
+
+ if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
+ return 0;
+
+ /* Ignore the build number when determining if the current firmware is
+ * "too new" to load the driver
+ */
+ if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
+ "version, unable to load driver\n",
+ VXGE_DRIVER_NAME);
+ return -EINVAL;
+ }
+
+ /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
+ * work with this driver.
+ */
+ if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
+ "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
+ return -EINVAL;
+ }
+
+ /* If file not specified, determine gPXE or not */
+ if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
+ int i;
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
+ if (vdev->devh->eprom_versions[i]) {
+ gpxe = 1;
+ break;
+ }
+ }
+ if (gpxe)
+ fw_name = "vxge/X3fw-pxe.ncf";
+ else
+ fw_name = "vxge/X3fw.ncf";
+
+ ret = vxge_fw_upgrade(vdev, fw_name, 0);
+	/* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
+	 * probe, so ignore them. Anything else, including a successful flash,
+	 * aborts the probe, since the adapter needs a hard reset first.
+	 */
+ if (ret != -EINVAL && ret != -ENOENT)
+ return -EIO;
+ else
+ ret = 0;
+
+ if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
+ VXGE_FW_VER(maj, min, 0)) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
+ " be used with this driver.\n"
+ "Please get the latest version from "
+ "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+ VXGE_DRIVER_NAME, maj, min, bld);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int __devinit is_sriov_initialized(struct pci_dev *pdev)
+{
+ int pos;
+ u16 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
+ if (ctrl & PCI_SRIOV_CTRL_VFE)
+ return 1;
+ }
+ return 0;
+}
+
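
is_sriov_initialized() looks for the SR-IOV extended capability and tests its VF Enable bit, so the probe path later skips pci_enable_sriov() when virtual functions were already switched on (for example, left enabled across a soft restart). Extended capabilities start at config offset 0x100 and form a linked list of 32-bit headers. The toy program below walks a fabricated config space the same way pci_find_ext_capability() does; the header macros match linux/pci_regs.h, while the layout of the fake space is made up for the demo.

#include <stdio.h>
#include <stdint.h>

#define PCI_EXT_CAP_ID(hdr)     ((hdr) & 0x0000ffff)
#define PCI_EXT_CAP_NEXT(hdr)   (((hdr) >> 20) & 0xffc)
#define PCI_EXT_CAP_ID_SRIOV    0x10

/* Toy walk: the first header sits at 0x100 and each header chains to the
 * next via its top bits. A real walk also bounds the loop so a malformed
 * chain cannot spin forever. */
static int find_ext_cap(const uint32_t *cfg, int id)
{
        int off = 0x100;

        while (off) {
                uint32_t hdr = cfg[off / 4];

                if (PCI_EXT_CAP_ID(hdr) == id)
                        return off;
                off = PCI_EXT_CAP_NEXT(hdr);
        }
        return 0;
}

int main(void)
{
        static uint32_t cfg[1024 / 4];

        /* made-up layout: capability 0x0001 at 0x100, SR-IOV at 0x140 */
        cfg[0x100 / 4] = (0x140u << 20) | 0x0001;
        cfg[0x140 / 4] = PCI_EXT_CAP_ID_SRIOV;

        printf("SR-IOV capability at 0x%x\n",
               find_ext_cap(cfg, PCI_EXT_CAP_ID_SRIOV));
        return 0;
}
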
/**
* vxge_probe
* @pdev : structure containing the PCI related information of the device.
@@ -3928,7 +4219,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
static int __devinit
vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
{
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
enum vxge_hw_status status;
int ret;
int high_dma = 0;
@@ -3951,9 +4242,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
attr.pdev = pdev;
/* In SRIOV-17 mode, functions of the same adapter
- * can be deployed on different buses */
- if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
- (device != PCI_SLOT(pdev->devfn))))
+ * can be deployed on different buses
+ */
+ if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
+ !pdev->is_virtfn)
new_device = 1;
bus = pdev->bus->number;
@@ -3971,6 +4263,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
driver_config->config_dev_cnt = 0;
driver_config->total_dev_cnt = 0;
}
+
/* Make the CPU-based number-of-vpaths calculation
 * applicable to individual functions as well.
 */
@@ -3993,11 +4286,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0;
}
- ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
+ ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
if (!ll_config) {
ret = -ENOMEM;
vxge_debug_init(VXGE_ERR,
- "ll_config : malloc failed %s %d",
+ "device_config : malloc failed %s %d",
__FILE__, __LINE__);
goto _exit0;
}
@@ -4041,7 +4334,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit1;
}
- if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
+ if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
vxge_debug_init(VXGE_ERR,
"%s : request regions failed", __func__);
ret = -ENODEV;
@@ -4072,16 +4365,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit3;
}
- if (ll_config->device_hw_info.fw_version.major !=
- VXGE_DRIVER_FW_VERSION_MAJOR) {
- vxge_debug_init(VXGE_ERR,
- "%s: Incorrect firmware version."
- "Please upgrade the firmware to version 1.x.x",
- VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
vpath_mask = ll_config->device_hw_info.vpath_mask;
if (vpath_mask == 0) {
vxge_debug_ll_config(VXGE_TRACE,
@@ -4110,14 +4393,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
num_vfs = vxge_get_num_vfs(function_mode) - 1;
/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
- if (is_sriov(function_mode) && (max_config_dev > 1) &&
- (ll_config->intr_type != INTA) &&
- (is_privileged == VXGE_HW_OK)) {
- ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
- ? (max_config_dev - 1) : num_vfs);
+ if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
+ (ll_config->intr_type != INTA)) {
+ ret = pci_enable_sriov(pdev, num_vfs);
if (ret)
vxge_debug_ll_config(VXGE_ERR,
"Failed in enabling SRIOV mode: %d\n", ret);
+ /* No need to fail out, as an error here is non-fatal */
}
/*
@@ -4145,11 +4427,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit3;
}
+ if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
+ ll_config->device_hw_info.fw_version.minor,
+ ll_config->device_hw_info.fw_version.build) >=
+ VXGE_EPROM_FW_VER) {
+ struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
+
+ status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
+ VXGE_DRIVER_NAME);
+ /* This is a non-fatal error, continue */
+ }
+
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+ hldev->eprom_versions[i] = img[i].version;
+ if (!img[i].is_valid)
+ break;
+ vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
+ "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+ VXGE_EPROM_IMG_MAJOR(img[i].version),
+ VXGE_EPROM_IMG_MINOR(img[i].version),
+ VXGE_EPROM_IMG_FIX(img[i].version),
+ VXGE_EPROM_IMG_BUILD(img[i].version));
+ }
+ }
+
/* If FCS stripping is not disabled in the MAC, fail the driver load */
- if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: FCS stripping is not disabled in MAC"
- " failing driver load", VXGE_DRIVER_NAME);
+ status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
+ " failing driver load", VXGE_DRIVER_NAME);
ret = -EINVAL;
goto _exit4;
}
@@ -4163,28 +4471,32 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
ll_config->addr_learn_en = addr_learn_en;
ll_config->rth_algorithm = RTH_ALG_JENKINS;
- ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
- ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+ ll_config->rth_hash_type_tcpipv4 = 1;
+ ll_config->rth_hash_type_ipv4 = 0;
+ ll_config->rth_hash_type_tcpipv6 = 0;
+ ll_config->rth_hash_type_ipv6 = 0;
+ ll_config->rth_hash_type_tcpipv6ex = 0;
+ ll_config->rth_hash_type_ipv6ex = 0;
ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
- if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
- &vdev)) {
+ ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
+ &vdev);
+ if (ret) {
ret = -EINVAL;
goto _exit4;
}
+ ret = vxge_probe_fw_update(vdev);
+ if (ret)
+ goto _exit5;
+
vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
vxge_hw_device_trace_level_get(hldev));
/* set private HW device info */
- hldev->ndev = vdev->ndev;
vdev->mtu = VXGE_HW_DEFAULT_MTU;
vdev->bar0 = attr.bar0;
vdev->max_vpath_supported = max_vpath_supported;
@@ -4278,15 +4590,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Copy the station mac address to the list */
for (i = 0; i < vdev->no_of_vpath; i++) {
- entry = (struct vxge_mac_addrs *)
- kzalloc(sizeof(struct vxge_mac_addrs),
- GFP_KERNEL);
+ entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
if (NULL == entry) {
vxge_debug_init(VXGE_ERR,
"%s: mac_addr_list : memory allocation failed",
vdev->ndev->name);
ret = -EPERM;
- goto _exit5;
+ goto _exit6;
}
macaddr = (u8 *)&entry->macaddr;
memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4326,10 +4636,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
kfree(ll_config);
return 0;
-_exit5:
+_exit6:
for (i = 0; i < vdev->no_of_vpath; i++)
vxge_free_mac_add_list(&vdev->vpaths[i]);
-
+_exit5:
vxge_device_unregister(hldev);
_exit4:
pci_disable_sriov(pdev);
@@ -4337,7 +4647,7 @@ _exit4:
_exit3:
iounmap(attr.bar0);
_exit2:
- pci_release_regions(pdev);
+ pci_release_region(pdev, 0);
_exit1:
pci_disable_device(pdev);
_exit0:
@@ -4354,34 +4664,25 @@ _exit0:
* Description: This function is called by the PCI subsystem to release a
* PCI device and free all resources held by the device.
*/
-static void __devexit
-vxge_remove(struct pci_dev *pdev)
+static void __devexit vxge_remove(struct pci_dev *pdev)
{
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
struct vxgedev *vdev = NULL;
struct net_device *dev;
int i = 0;
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- u32 level_trace;
-#endif
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ hldev = pci_get_drvdata(pdev);
if (hldev == NULL)
return;
+
dev = hldev->ndev;
vdev = netdev_priv(dev);
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- level_trace = vdev->level_trace;
-#endif
- vxge_debug_entryexit(level_trace,
- "%s:%d", __func__, __LINE__);
+ vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
- vxge_debug_init(level_trace,
- "%s : removing PCI device...", __func__);
+ vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
+ __func__);
vxge_device_unregister(hldev);
for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -4394,21 +4695,19 @@ vxge_remove(struct pci_dev *pdev)
iounmap(vdev->bar0);
- pci_disable_sriov(pdev);
-
/* we are safe to free it now */
free_netdev(dev);
- vxge_debug_init(level_trace,
- "%s:%d Device unregistered", __func__, __LINE__);
+ vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
+ __func__, __LINE__);
vxge_hw_device_terminate(hldev);
pci_disable_device(pdev);
- pci_release_regions(pdev);
+ pci_release_region(pdev, 0);
pci_set_drvdata(pdev, NULL);
- vxge_debug_entryexit(level_trace,
- "%s:%d Exiting...", __func__, __LINE__);
+ vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
+ __LINE__);
}
static struct pci_error_handlers vxge_err_handler = {
@@ -4444,6 +4743,10 @@ vxge_starter(void)
return -ENOMEM;
ret = pci_register_driver(&vxge_driver);
+ if (ret) {
+ kfree(driver_config);
+ goto err;
+ }
if (driver_config->config_dev_cnt &&
(driver_config->config_dev_cnt != driver_config->total_dev_cnt))
@@ -4451,10 +4754,7 @@ vxge_starter(void)
"%s: Configured %d of %d devices",
VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
driver_config->total_dev_cnt);
-
- if (ret)
- kfree(driver_config);
-
+err:
return ret;
}
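
The vxge_starter() hunk above moves the cleanup next to the failure it undoes: if pci_register_driver() fails, driver_config is freed immediately and the device-count message is skipped. This is the usual shape for module init error handling, sketched here with hypothetical names (example_cfg, example_driver) rather than the real vxge structures; a real pci_driver would also fill in id_table, probe, and remove.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Hypothetical driver state, standing in for driver_config above. */
struct example_cfg {
        int config_dev_cnt;
};

static struct example_cfg *cfg;
static struct pci_driver example_driver;        /* sketch: fields elided */

static int __init example_init(void)
{
        int ret;

        cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
        if (!cfg)
                return -ENOMEM;

        ret = pci_register_driver(&example_driver);
        if (ret)
                goto err_free_cfg;      /* undo the allocation, nothing else */

        return 0;

err_free_cfg:
        kfree(cfg);
        return ret;
}
module_init(example_init);
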
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index de64536cb7d0..5746fedc356f 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -29,6 +29,9 @@
#define PCI_DEVICE_ID_TITAN_WIN 0x5733
#define PCI_DEVICE_ID_TITAN_UNI 0x5833
+#define VXGE_HW_TITAN1_PCI_REVISION 1
+#define VXGE_HW_TITAN1A_PCI_REVISION 2
+
#define VXGE_USE_DEFAULT 0xffffffff
#define VXGE_HW_VPATH_MSIX_ACTIVE 4
#define VXGE_ALARM_MSIX_ID 2
@@ -53,11 +56,13 @@
#define VXGE_TTI_BTIMER_VAL 250000
-#define VXGE_TTI_LTIMER_VAL 1000
-#define VXGE_TTI_RTIMER_VAL 0
-#define VXGE_RTI_BTIMER_VAL 250
-#define VXGE_RTI_LTIMER_VAL 100
-#define VXGE_RTI_RTIMER_VAL 0
+#define VXGE_TTI_LTIMER_VAL 1000
+#define VXGE_T1A_TTI_LTIMER_VAL 80
+#define VXGE_TTI_RTIMER_VAL 0
+#define VXGE_T1A_TTI_RTIMER_VAL 400
+#define VXGE_RTI_BTIMER_VAL 250
+#define VXGE_RTI_LTIMER_VAL 100
+#define VXGE_RTI_RTIMER_VAL 0
#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
#define VXGE_ISR_POLLING_CNT 8
#define VXGE_MAX_CONFIG_DEV 0xFF
@@ -76,14 +81,32 @@
#define TTI_TX_UFC_B 40
#define TTI_TX_UFC_C 60
#define TTI_TX_UFC_D 100
+#define TTI_T1A_TX_UFC_A 30
+#define TTI_T1A_TX_UFC_B 80
+/* UFC C scales linearly with the MTU: 60 at 9k MTU, 140 at 1.5k MTU.
+ * Slope = (max_mtu - min_mtu) / (min_mtu_ufc - max_mtu_ufc) = 7500 / 80 ~= 93
+ */
+#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
+
+/* UFC D scales the same way: 100 at 9k MTU, 300 at 1.5k MTU.
+ * Slope = 7500 / 200 ~= 37
+ */
+#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
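
The two macros above linearly interpolate the transmit utilization-based frame counts between the endpoints quoted in the comments. Below is a standalone check of the arithmetic; MAX_MTU is pinned at 9000 here purely to match the quoted 9k endpoint, while the driver itself uses its own VXGE_HW_MAX_MTU.

#include <stdio.h>

#define MAX_MTU 9000    /* stand-in for VXGE_HW_MAX_MTU, per the comments */
#define UFC_C(mtu)      (60 + ((MAX_MTU - (mtu)) / 93))
#define UFC_D(mtu)      (100 + ((MAX_MTU - (mtu)) / 37))

int main(void)
{
        /* expected: C is 60 at 9k and ~140 at 1.5k; D is 100 and ~300 */
        printf("UFC_C: %d @ 9000, %d @ 1500\n", UFC_C(9000), UFC_C(1500));
        printf("UFC_D: %d @ 9000, %d @ 1500\n", UFC_D(9000), UFC_D(1500));
        return 0;
}

Integer division makes the fit approximate: UFC_C lands exactly on 140 at a 1500-byte MTU, while UFC_D lands on 302 against the quoted 300.
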
+
+#define RTI_RX_URANGE_A 5
+#define RTI_RX_URANGE_B 15
+#define RTI_RX_URANGE_C 40
+#define RTI_T1A_RX_URANGE_A 1
+#define RTI_T1A_RX_URANGE_B 20
+#define RTI_T1A_RX_URANGE_C 50
+#define RTI_RX_UFC_A 1
+#define RTI_RX_UFC_B 5
+#define RTI_RX_UFC_C 10
+#define RTI_RX_UFC_D 15
+#define RTI_T1A_RX_UFC_B 20
+#define RTI_T1A_RX_UFC_C 50
+#define RTI_T1A_RX_UFC_D 60
-#define RTI_RX_URANGE_A 5
-#define RTI_RX_URANGE_B 15
-#define RTI_RX_URANGE_C 40
-#define RTI_RX_UFC_A 1
-#define RTI_RX_UFC_B 5
-#define RTI_RX_UFC_C 10
-#define RTI_RX_UFC_D 15
/* Timer period in milliseconds */
#define VXGE_TIMER_DELAY 10000
@@ -145,15 +168,15 @@ struct vxge_config {
int addr_learn_en;
- int rth_steering;
- int rth_algorithm;
- int rth_hash_type_tcpipv4;
- int rth_hash_type_ipv4;
- int rth_hash_type_tcpipv6;
- int rth_hash_type_ipv6;
- int rth_hash_type_tcpipv6ex;
- int rth_hash_type_ipv6ex;
- int rth_bkt_sz;
+ u32 rth_steering:2,
+ rth_algorithm:2,
+ rth_hash_type_tcpipv4:1,
+ rth_hash_type_ipv4:1,
+ rth_hash_type_tcpipv6:1,
+ rth_hash_type_ipv6:1,
+ rth_hash_type_tcpipv6ex:1,
+ rth_hash_type_ipv6ex:1,
+ rth_bkt_sz:8;
int rth_jhash_golden_ratio;
int tx_steering_type;
int fifo_indicate_max_pkts;
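
The vxge_config hunk above repacks eight RTH ints into a single u32 of bitfields, which matters because the structure is embedded in every device and copied around during probe. A standalone before/after comparison follows; the field names are shortened and the exact sizes are compiler-dependent (a typical LP64 GCC prints 36 versus 4).

#include <stdio.h>
#include <stdint.h>

struct rth_wide {       /* the old layout: nine full ints */
        int steering, algorithm, tcpipv4, ipv4;
        int tcpipv6, ipv6, tcpipv6ex, ipv6ex, bkt_sz;
};

struct rth_packed {     /* the new layout: one u32 of bitfields */
        uint32_t steering:2, algorithm:2, tcpipv4:1, ipv4:1,
                 tcpipv6:1, ipv6:1, tcpipv6ex:1, ipv6ex:1, bkt_sz:8;
};

int main(void)
{
        printf("wide=%zu bytes, packed=%zu bytes\n",
               sizeof(struct rth_wide), sizeof(struct rth_packed));
        return 0;
}
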
@@ -248,8 +271,9 @@ struct vxge_ring {
*/
int driver_id;
- /* copy of the flag indicating whether rx_csum is to be used */
- u32 rx_csum;
+ /* copy of the flag indicating whether rx_csum is to be used */
+ u32 rx_csum:1,
+ rx_hwts:1;
int pkts_processed;
int budget;
@@ -281,8 +305,8 @@ struct vxge_vpath {
int is_configured;
int is_open;
struct vxgedev *vdev;
- u8 (macaddr)[ETH_ALEN];
- u8 (macmask)[ETH_ALEN];
+ u8 macaddr[ETH_ALEN];
+ u8 macmask[ETH_ALEN];
#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
/* mac addresses currently programmed into NIC */
@@ -327,7 +351,9 @@ struct vxgedev {
u16 all_multi_flg;
/* A flag indicating whether rx_csum is to be used or not. */
- u32 rx_csum;
+ u32 rx_csum:1,
+ rx_hwts:1,
+ titan1:1;
struct vxge_msix_entry *vxge_entries;
struct msix_entry *entries;
@@ -369,6 +395,7 @@ struct vxgedev {
u32 level_err;
u32 level_trace;
char fw_version[VXGE_HW_FW_STRLEN];
+ struct work_struct reset_task;
};
struct vxge_rx_priv {
@@ -387,8 +414,6 @@ struct vxge_tx_priv {
static int p = val; \
module_param(p, int, 0)
-#define vxge_os_bug(fmt...) { printk(fmt); BUG(); }
-
#define vxge_os_timer(timer, handle, arg, exp) do { \
init_timer(&timer); \
timer.function = handle; \
@@ -396,7 +421,10 @@ struct vxge_tx_priv {
mod_timer(&timer, (jiffies + exp)); \
} while (0);
-extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
+void vxge_initialize_ethtool_ops(struct net_device *ndev);
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
+
/**
* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 3dd5c9615ef9..3e658b175947 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -49,6 +49,33 @@
#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
+#define VXGE_HW_FW_API_GET_EPROM_REV 31
+
+#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
+#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
+#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
+#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
+
+#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
+#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
+
+#define VXGE_HW_FW_API_GET_FUNC_MODE 29
+#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
+
+#define VXGE_HW_FW_UPGRADE_MEMO 13
+#define VXGE_HW_FW_UPGRADE_ACTION 16
+#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
+#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
+#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
+#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
+
+#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
+#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
+#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
+
#define VXGE_HW_ASIC_MODE_RESERVED 0
#define VXGE_HW_ASIC_MODE_NO_IOV 1
#define VXGE_HW_ASIC_MODE_SR_IOV 2
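
The new EPROM macros slice fields out of a 64-bit register with vxge_bVALn(val, loc, n), which in this driver numbers bits from the most-significant end. The small program below models that extraction; bvaln() mirrors what vxge_bVALn is assumed to expand to, and the sample register value is made up (an image version of 1.6.1.2 packed into bits 48-63).

#include <stdio.h>
#include <stdint.h>

/* MSB-first field extraction, modelled on this driver's vxge_bVALn(). */
static uint64_t bvaln(uint64_t bits, int loc, int n)
{
        return (bits >> (64 - loc - n)) & ((1ULL << n) - 1);
}

int main(void)
{
        /* made-up register: image version 1.6.1.2 packed into bits 48-63 */
        uint64_t val = (1ULL << 12) | (6ULL << 8) | (1ULL << 4) | 2ULL;

        printf("major=%llu minor=%llu fix=%llu build=%llu\n",
               (unsigned long long)bvaln(val, 48, 4),
               (unsigned long long)bvaln(val, 52, 4),
               (unsigned long long)bvaln(val, 56, 4),
               (unsigned long long)bvaln(val, 60, 4));
        return 0;
}
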
@@ -165,13 +192,13 @@
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
+#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
+#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
@@ -437,6 +464,7 @@
#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
vxge_bVALn(bits, 48, 16)
#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
vxge_bVALn(bits, 0, 18)
@@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg {
#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
+#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
/*0x00a78*/ u64 prc_cfg7;
#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4bdb611a6842..42cc29843ac7 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,13 +17,6 @@
#include "vxge-config.h"
#include "vxge-main.h"
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
- u32 vp_id, enum vxge_hw_event type);
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms);
-
/*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vp: Virtual Path handle.
@@ -419,6 +412,384 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
}
/**
+ * __vxge_hw_device_handle_error - Handle error
+ * @hldev: HW device
+ * @vp_id: Vpath Id
+ * @type: Error type. Please see enum vxge_hw_event{}
+ *
+ * Handle error.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
+ enum vxge_hw_event type)
+{
+ switch (type) {
+ case VXGE_HW_EVENT_UNKNOWN:
+ break;
+ case VXGE_HW_EVENT_RESET_START:
+ case VXGE_HW_EVENT_RESET_COMPLETE:
+ case VXGE_HW_EVENT_LINK_DOWN:
+ case VXGE_HW_EVENT_LINK_UP:
+ goto out;
+ case VXGE_HW_EVENT_ALARM_CLEARED:
+ goto out;
+ case VXGE_HW_EVENT_ECCERR:
+ case VXGE_HW_EVENT_MRPCIM_ECCERR:
+ goto out;
+ case VXGE_HW_EVENT_FIFO_ERR:
+ case VXGE_HW_EVENT_VPATH_ERR:
+ case VXGE_HW_EVENT_CRITICAL_ERR:
+ case VXGE_HW_EVENT_SERR:
+ break;
+ case VXGE_HW_EVENT_SRPCIM_SERR:
+ case VXGE_HW_EVENT_MRPCIM_SERR:
+ goto out;
+ case VXGE_HW_EVENT_SLOT_FREEZE:
+ break;
+ default:
+ vxge_assert(0);
+ goto out;
+ }
+
+ /* notify driver */
+ if (hldev->uld_callbacks.crit_err)
+ hldev->uld_callbacks.crit_err(
+ (struct __vxge_hw_device *)hldev,
+ type, vp_id);
+out:
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_down_ind
+ * @hldev: HW device handle.
+ *
+ * Link down indication handler. The function is invoked by HW when
+ * Titan indicates that the link is down.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the link is already marked down, there is nothing more to do.
+	 */
+ if (hldev->link_state == VXGE_HW_LINK_DOWN)
+ goto exit;
+
+ hldev->link_state = VXGE_HW_LINK_DOWN;
+
+ /* notify driver */
+ if (hldev->uld_callbacks.link_down)
+ hldev->uld_callbacks.link_down(hldev);
+exit:
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_up_ind
+ * @hldev: HW device handle.
+ *
+ * Link up indication handler. The function is invoked by HW when
+ * Titan indicates that the link has been up for a programmable amount of time.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the link is already marked up, there is nothing more to do.
+	 */
+ if (hldev->link_state == VXGE_HW_LINK_UP)
+ goto exit;
+
+ hldev->link_state = VXGE_HW_LINK_UP;
+
+ /* notify driver */
+ if (hldev->uld_callbacks.link_up)
+ hldev->uld_callbacks.link_up(hldev);
+exit:
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
+ *
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms)
+{
+ u64 val64;
+ u64 alarm_status;
+ u64 pic_status;
+ struct __vxge_hw_device *hldev = NULL;
+ enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+ u64 mask64;
+ struct vxge_hw_vpath_stats_sw_info *sw_stats;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ if (vpath == NULL) {
+ alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+ alarm_event);
+ goto out2;
+ }
+
+ hldev = vpath->hldev;
+ vp_reg = vpath->vp_reg;
+ alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+ if (alarm_status == VXGE_HW_ALL_FOXES) {
+ alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+ alarm_event);
+ goto out;
+ }
+
+ sw_stats = vpath->sw_stats;
+
+ if (alarm_status & ~(
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+ sw_stats->error_stats.unknown_alarms++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+ alarm_event);
+ goto out;
+ }
+
+ if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+ val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+ if (val64 &
+ VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+ val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+ if (((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+ ((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+ ))) {
+ sw_stats->error_stats.network_sustained_fault++;
+
+ writeq(
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+ &vp_reg->asic_ntwk_vp_err_mask);
+
+ __vxge_hw_device_handle_link_down_ind(hldev);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+ }
+
+ if (((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+ ((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+ ))) {
+
+ sw_stats->error_stats.network_sustained_ok++;
+
+ writeq(
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+ &vp_reg->asic_ntwk_vp_err_mask);
+
+ __vxge_hw_device_handle_link_up_ind(hldev);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_LINK_UP, alarm_event);
+ }
+
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->asic_ntwk_vp_err_reg);
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+ if (skip_alarms)
+ return VXGE_HW_OK;
+ }
+ }
+
+ if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+ pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+ if (pic_status &
+ VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+ val64 = readq(&vp_reg->general_errors_reg);
+ mask64 = readq(&vp_reg->general_errors_mask);
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+ ~mask64) {
+ sw_stats->error_stats.ini_serr_det++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_SERR, alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+ ~mask64) {
+ sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+ ~mask64)
+ sw_stats->error_stats.statsb_pif_chain_error++;
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+ ~mask64)
+ sw_stats->error_stats.statsb_drop_timeout++;
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+ ~mask64)
+ sw_stats->error_stats.target_illegal_access++;
+
+ if (!skip_alarms) {
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->general_errors_reg);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED,
+ alarm_event);
+ }
+ }
+
+ if (pic_status &
+ VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+ val64 = readq(&vp_reg->kdfcctl_errors_reg);
+ mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+ if ((val64 &
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+ ~mask64) {
+ sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR,
+ alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+ ~mask64) {
+ sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR,
+ alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+ ~mask64) {
+ sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR,
+ alarm_event);
+ }
+
+ if (!skip_alarms) {
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->kdfcctl_errors_reg);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED,
+ alarm_event);
+ }
+ }
+
+ }
+
+ if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+ val64 = readq(&vp_reg->wrdma_alarm_status);
+
+ if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+ val64 = readq(&vp_reg->prc_alarm_reg);
+ mask64 = readq(&vp_reg->prc_alarm_mask);
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
+ ~mask64)
+ sw_stats->error_stats.prc_ring_bumps++;
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+ ~mask64) {
+ sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_VPATH_ERR,
+ alarm_event);
+ }
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+ & ~mask64) {
+ sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_VPATH_ERR,
+ alarm_event);
+ }
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+ & ~mask64) {
+ sw_stats->error_stats.prc_quanta_size_err++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_VPATH_ERR,
+ alarm_event);
+ }
+
+ if (!skip_alarms) {
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->prc_alarm_reg);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED,
+ alarm_event);
+ }
+ }
+ }
+out:
+ hldev->stats.sw_dev_err_stats.vpath_alarms++;
+out2:
+ if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+ (alarm_event == VXGE_HW_EVENT_UNKNOWN))
+ return VXGE_HW_OK;
+
+ __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+ if (alarm_event == VXGE_HW_EVENT_SERR)
+ return VXGE_HW_ERR_CRITICAL;
+
+ return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+ VXGE_HW_ERR_SLOT_FREEZE :
+ (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+ VXGE_HW_ERR_VPATH;
+}
+
+/**
* vxge_hw_device_begin_irq - Begin IRQ processing.
* @hldev: HW device handle.
* @skip_alarms: Do not clear the alarms
@@ -513,108 +884,6 @@ exit:
return ret;
}
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
- /*
- * If the previous link state is not down, return.
- */
- if (hldev->link_state == VXGE_HW_LINK_UP)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_UP;
-
- /* notify driver */
- if (hldev->uld_callbacks.link_up)
- hldev->uld_callbacks.link_up(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
- /*
- * If the previous link state is not down, return.
- */
- if (hldev->link_state == VXGE_HW_LINK_DOWN)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_DOWN;
-
- /* notify driver */
- if (hldev->uld_callbacks.link_down)
- hldev->uld_callbacks.link_down(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/**
- * __vxge_hw_device_handle_error - Handle error
- * @hldev: HW device
- * @vp_id: Vpath Id
- * @type: Error type. Please see enum vxge_hw_event{}
- *
- * Handle error.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(
- struct __vxge_hw_device *hldev,
- u32 vp_id,
- enum vxge_hw_event type)
-{
- switch (type) {
- case VXGE_HW_EVENT_UNKNOWN:
- break;
- case VXGE_HW_EVENT_RESET_START:
- case VXGE_HW_EVENT_RESET_COMPLETE:
- case VXGE_HW_EVENT_LINK_DOWN:
- case VXGE_HW_EVENT_LINK_UP:
- goto out;
- case VXGE_HW_EVENT_ALARM_CLEARED:
- goto out;
- case VXGE_HW_EVENT_ECCERR:
- case VXGE_HW_EVENT_MRPCIM_ECCERR:
- goto out;
- case VXGE_HW_EVENT_FIFO_ERR:
- case VXGE_HW_EVENT_VPATH_ERR:
- case VXGE_HW_EVENT_CRITICAL_ERR:
- case VXGE_HW_EVENT_SERR:
- break;
- case VXGE_HW_EVENT_SRPCIM_SERR:
- case VXGE_HW_EVENT_MRPCIM_SERR:
- goto out;
- case VXGE_HW_EVENT_SLOT_FREEZE:
- break;
- default:
- vxge_assert(0);
- goto out;
- }
-
- /* notify driver */
- if (hldev->uld_callbacks.crit_err)
- hldev->uld_callbacks.crit_err(
- (struct __vxge_hw_device *)hldev,
- type, vp_id);
-out:
-
- return VXGE_HW_OK;
-}
-
/**
* vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
* condition that has caused the Tx and RX interrupt.
@@ -699,8 +968,8 @@ _alloc_after_swap:
* Posts a dtr to work array.
*
*/
-static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
- void *dtrh)
+static void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
vxge_assert(channel->work_arr[channel->post_index] == NULL);
@@ -911,10 +1180,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
*/
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
wmb();
vxge_hw_ring_rxd_post_post(ring, rxdh);
}
@@ -1868,284 +2133,6 @@ exit:
}
/*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms)
-{
- u64 val64;
- u64 alarm_status;
- u64 pic_status;
- struct __vxge_hw_device *hldev = NULL;
- enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
- u64 mask64;
- struct vxge_hw_vpath_stats_sw_info *sw_stats;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath == NULL) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out2;
- }
-
- hldev = vpath->hldev;
- vp_reg = vpath->vp_reg;
- alarm_status = readq(&vp_reg->vpath_general_int_status);
-
- if (alarm_status == VXGE_HW_ALL_FOXES) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
- alarm_event);
- goto out;
- }
-
- sw_stats = vpath->sw_stats;
-
- if (alarm_status & ~(
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
- sw_stats->error_stats.unknown_alarms++;
-
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out;
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
- val64 = readq(&vp_reg->xgmac_vp_int_status);
-
- if (val64 &
- VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
- val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
- ))) {
- sw_stats->error_stats.network_sustained_fault++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_down_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_DOWN, alarm_event);
- }
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
- ))) {
-
- sw_stats->error_stats.network_sustained_ok++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_up_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_UP, alarm_event);
- }
-
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_reg);
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
-
- if (skip_alarms)
- return VXGE_HW_OK;
- }
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
- pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
- val64 = readq(&vp_reg->general_errors_reg);
- mask64 = readq(&vp_reg->general_errors_mask);
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
- ~mask64) {
- sw_stats->error_stats.ini_serr_det++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_SERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
- ~mask64) {
- sw_stats->error_stats.dblgen_fifo0_overflow++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
- ~mask64)
- sw_stats->error_stats.statsb_pif_chain_error++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
- ~mask64)
- sw_stats->error_stats.statsb_drop_timeout++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
- ~mask64)
- sw_stats->error_stats.target_illegal_access++;
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
-
- val64 = readq(&vp_reg->kdfcctl_errors_reg);
- mask64 = readq(&vp_reg->kdfcctl_errors_mask);
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_poison++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->kdfcctl_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
-
- val64 = readq(&vp_reg->wrdma_alarm_status);
-
- if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
-
- val64 = readq(&vp_reg->prc_alarm_reg);
- mask64 = readq(&vp_reg->prc_alarm_mask);
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
- ~mask64)
- sw_stats->error_stats.prc_ring_bumps++;
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
- ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
- & ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_abort++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
- & ~mask64) {
- sw_stats->error_stats.prc_quanta_size_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
- }
-out:
- hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
- if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
- (alarm_event == VXGE_HW_EVENT_UNKNOWN))
- return VXGE_HW_OK;
-
- __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
-
- if (alarm_event == VXGE_HW_EVENT_SERR)
- return VXGE_HW_ERR_CRITICAL;
-
- return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
- VXGE_HW_ERR_SLOT_FREEZE :
- (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
- VXGE_HW_ERR_VPATH;
-}
-
-/*
* vxge_hw_vpath_alarm_process - Process Alarms.
* @vpath: Virtual Path.
* @skip_alarms: Do not clear the alarms
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 9890d4d596d0..8c3103fb6442 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1904,34 +1904,6 @@ enum vxge_hw_ring_tcode {
VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
};
-/**
- * enum enum vxge_hw_ring_hash_type - RTH hash types
- * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
- * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
- * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
- *
- * RTH hash types
- */
-enum vxge_hw_ring_hash_type {
- VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
- VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
- VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
- VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
-};
-
enum vxge_hw_status vxge_hw_ring_rxd_reserve(
struct __vxge_hw_ring *ring_handle,
void **rxdh);
@@ -2109,10 +2081,6 @@ struct __vxge_hw_ring_rxd_priv {
#endif
};
-/* ========================= FIFO PRIVATE API ============================= */
-
-struct vxge_hw_fifo_attr;
-
struct vxge_hw_mempool_cbs {
void (*item_func_alloc)(
struct vxge_hw_mempool *mempoolh,
@@ -2186,27 +2154,27 @@ enum vxge_hw_vpath_mac_addr_add_mode {
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
struct __vxge_hw_vpath_handle *vpath_handle,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN],
+ u8 *macaddr,
+ u8 *macaddr_mask,
enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
struct __vxge_hw_vpath_handle *vpath_handle,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN]);
+ u8 *macaddr,
+ u8 *macaddr_mask);
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
struct __vxge_hw_vpath_handle *vpath_handle,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN]);
+ u8 *macaddr,
+ u8 *macaddr_mask);
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN]);
+ u8 *macaddr,
+ u8 *macaddr_mask);
enum vxge_hw_status
vxge_hw_vpath_vid_add(
@@ -2313,6 +2281,7 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
int
vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
+
void
vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 53fefe137368..ad2f99b9bcf3 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -15,8 +15,35 @@
#define VXGE_VERSION_H
#define VXGE_VERSION_MAJOR "2"
-#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "9"
-#define VXGE_VERSION_BUILD "20840"
+#define VXGE_VERSION_MINOR "5"
+#define VXGE_VERSION_FIX "1"
+#define VXGE_VERSION_BUILD "22082"
#define VXGE_VERSION_FOR "k"
+
+#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
+
+#define VXGE_DEAD_FW_VER_MAJOR 1
+#define VXGE_DEAD_FW_VER_MINOR 4
+#define VXGE_DEAD_FW_VER_BUILD 4
+
+#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
+ VXGE_DEAD_FW_VER_MINOR, \
+ VXGE_DEAD_FW_VER_BUILD)
+
+#define VXGE_EPROM_FW_VER_MAJOR 1
+#define VXGE_EPROM_FW_VER_MINOR 6
+#define VXGE_EPROM_FW_VER_BUILD 1
+
+#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
+ VXGE_EPROM_FW_VER_MINOR, \
+ VXGE_EPROM_FW_VER_BUILD)
+
+#define VXGE_CERT_FW_VER_MAJOR 1
+#define VXGE_CERT_FW_VER_MINOR 8
+#define VXGE_CERT_FW_VER_BUILD 1
+
+#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
+ VXGE_CERT_FW_VER_MINOR, \
+ VXGE_CERT_FW_VER_BUILD)
+
#endif
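
VXGE_FW_VER() packs major/minor/build into one integer so the driver can gate behaviour with plain comparisons, as vxge_probe_fw_update() does against VXGE_FW_DEAD_VER, VXGE_EPROM_FW_VER, and VXGE_CERT_FW_VER. A quick standalone check of the ordering; FW_VER here is a local copy of the macro above.

#include <stdio.h>

/* local copy of the packing macro defined above */
#define FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))

int main(void)
{
        int dead = FW_VER(1, 4, 4);     /* 0x10404 */
        int eprom = FW_VER(1, 6, 1);    /* 0x10601 */
        int cert = FW_VER(1, 8, 1);     /* 0x10801 */

        /* ordinary integer comparisons now order firmware versions */
        printf("dead < eprom < cert: %d\n", dead < eprom && eprom < cert);

        /* caveat: a build number above 255 would spill into the minor field */
        return 0;
}
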
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index d45b08d1dbc9..34cff6ce6d27 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1358,7 +1358,7 @@ static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return ret;
}
-static int dscc4_match(struct thingie *p, int value)
+static int dscc4_match(const struct thingie *p, int value)
{
int i;
@@ -1403,7 +1403,7 @@ done:
static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
struct net_device *dev)
{
- struct thingie encoding[] = {
+ static const struct thingie encoding[] = {
{ ENCODING_NRZ, 0x00000000 },
{ ENCODING_NRZI, 0x00200000 },
{ ENCODING_FM_MARK, 0x00400000 },
@@ -1442,7 +1442,7 @@ static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
struct net_device *dev)
{
- struct thingie crc[] = {
+ static const struct thingie crc[] = {
{ PARITY_CRC16_PR0_CCITT, 0x00000010 },
{ PARITY_CRC16_PR1_CCITT, 0x00000000 },
{ PARITY_CRC32_PR0_CCITT, 0x00000011 },
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index ea476cbd38b5..e305274f83fb 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -293,6 +293,7 @@ static inline void sca_tx_done(port_t *port)
struct net_device *dev = port->netdev;
card_t* card = port->card;
u8 stat;
+ unsigned count = 0;
spin_lock(&port->lock);
@@ -316,10 +317,12 @@ static inline void sca_tx_done(port_t *port)
dev->stats.tx_bytes += readw(&desc->len);
}
writeb(0, &desc->stat); /* Free descriptor */
+ count++;
port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
}
- netif_wake_queue(dev);
+ if (count)
+ netif_wake_queue(dev);
spin_unlock(&port->lock);
}
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d81ad8397885..cf05504d9511 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -498,7 +498,6 @@ norbuff:
static int x25_asy_close(struct net_device *dev)
{
struct x25_asy *sl = netdev_priv(dev);
- int err;
spin_lock(&sl->lock);
if (sl->tty)
@@ -507,10 +506,6 @@ static int x25_asy_close(struct net_device *dev)
netif_stop_queue(dev);
sl->rcount = 0;
sl->xleft = 0;
- err = lapb_unregister(dev);
- if (err != LAPB_OK)
- printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
- err);
spin_unlock(&sl->lock);
return 0;
}
@@ -595,6 +590,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
static void x25_asy_close_tty(struct tty_struct *tty)
{
struct x25_asy *sl = tty->disc_data;
+ int err;
/* First make sure we're connected. */
if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -605,6 +601,11 @@ static void x25_asy_close_tty(struct tty_struct *tty)
dev_close(sl->dev);
rtnl_unlock();
+ err = lapb_unregister(sl->dev);
+ if (err != LAPB_OK)
+ printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
+ err);
+
tty->disc_data = NULL;
sl->tty = NULL;
x25_asy_free(sl);
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index f1549fff0edc..8831a3393ecf 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -275,7 +275,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
dev->base_addr = ioaddr+WD_NIC_OFFSET;
if (dev->irq < 2) {
- int irqmap[] = {9,3,5,7,10,11,15,4};
+ static const int irqmap[] = {9, 3, 5, 7, 10, 11, 15, 4};
int reg1 = inb(ioaddr+1);
int reg4 = inb(ioaddr+4);
if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index cdedab46ba21..f0603327aafa 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -92,54 +92,6 @@ MODULE_PARM_DESC(barkers,
"signal; values are appended to a list--setting one value "
"as zero cleans the existing list and starts a new one.");
-static
-struct i2400m_work *__i2400m_work_setup(
- struct i2400m *i2400m, void (*fn)(struct work_struct *),
- gfp_t gfp_flags, const void *pl, size_t pl_size)
-{
- struct i2400m_work *iw;
-
- iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
- if (iw == NULL)
- return NULL;
- iw->i2400m = i2400m_get(i2400m);
- iw->pl_size = pl_size;
- memcpy(iw->pl, pl, pl_size);
- INIT_WORK(&iw->ws, fn);
- return iw;
-}
-
-
-/*
- * Schedule i2400m's specific work on the system's queue.
- *
- * Used for a few cases where we really need it; otherwise, identical
- * to i2400m_queue_work().
- *
- * Returns < 0 errno code on error, 1 if ok.
- *
- * If it returns zero, something really bad happened, as it means the
- * works struct was already queued, but we have just allocated it, so
- * it should not happen.
- */
-static int i2400m_schedule_work(struct i2400m *i2400m,
- void (*fn)(struct work_struct *), gfp_t gfp_flags,
- const void *pl, size_t pl_size)
-{
- int result;
- struct i2400m_work *iw;
-
- result = -ENOMEM;
- iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
- if (iw != NULL) {
- result = schedule_work(&iw->ws);
- if (WARN_ON(result == 0))
- result = -ENXIO;
- }
- return result;
-}
-
-
/*
* WiMAX stack operation: relay a message from user space
*
@@ -648,17 +600,11 @@ EXPORT_SYMBOL_GPL(i2400m_post_reset);
static
void __i2400m_dev_reset_handle(struct work_struct *ws)
{
- int result;
- struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
- const char *reason;
- struct i2400m *i2400m = iw->i2400m;
+ struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws);
+ const char *reason = i2400m->reset_reason;
struct device *dev = i2400m_dev(i2400m);
struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
-
- if (WARN_ON(iw->pl_size != sizeof(reason)))
- reason = "SW BUG: reason n/a";
- else
- memcpy(&reason, iw->pl, sizeof(reason));
+ int result;
d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
@@ -733,8 +679,6 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
}
}
out:
- i2400m_put(i2400m);
- kfree(iw);
d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
ws, i2400m, reason);
}
@@ -754,8 +698,8 @@ out:
*/
int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
{
- return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
- GFP_ATOMIC, &reason, sizeof(reason));
+ i2400m->reset_reason = reason;
+ return schedule_work(&i2400m->reset_ws);
}
EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
@@ -768,14 +712,9 @@ EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
static
void __i2400m_error_recovery(struct work_struct *ws)
{
- struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
- struct i2400m *i2400m = iw->i2400m;
+ struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);
i2400m_reset(i2400m, I2400M_RT_BUS);
-
- i2400m_put(i2400m);
- kfree(iw);
- return;
}
/*
@@ -805,18 +744,10 @@ void __i2400m_error_recovery(struct work_struct *ws)
*/
void i2400m_error_recovery(struct i2400m *i2400m)
{
- struct device *dev = i2400m_dev(i2400m);
-
- if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
- if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
- GFP_ATOMIC, NULL, 0) < 0) {
- dev_err(dev, "run out of memory for "
- "scheduling an error recovery ?\n");
- atomic_dec(&i2400m->error_recovery);
- }
- } else
+ if (atomic_add_return(1, &i2400m->error_recovery) == 1)
+ schedule_work(&i2400m->recovery_ws);
+ else
atomic_dec(&i2400m->error_recovery);
- return;
}
EXPORT_SYMBOL_GPL(i2400m_error_recovery);
@@ -886,6 +817,10 @@ void i2400m_init(struct i2400m *i2400m)
mutex_init(&i2400m->init_mutex);
/* wake_tx_ws is initialized in i2400m_tx_setup() */
+
+ INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle);
+ INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery);
+
atomic_set(&i2400m->bus_reset_retries, 0);
i2400m->alive = 0;
@@ -1040,6 +975,9 @@ void i2400m_release(struct i2400m *i2400m)
i2400m_dev_stop(i2400m);
+ cancel_work_sync(&i2400m->reset_ws);
+ cancel_work_sync(&i2400m->recovery_ws);
+
i2400m_debugfs_rm(i2400m);
sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
&i2400m_dev_attr_group);
@@ -1083,8 +1021,6 @@ module_init(i2400m_driver_init);
static
void __exit i2400m_driver_exit(void)
{
- /* for scheds i2400m_dev_reset_handle() */
- flush_scheduled_work();
i2400m_barker_db_exit();
}
module_exit(i2400m_driver_exit);
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 59ac7705e76e..17ecaa41a807 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -632,6 +632,11 @@ struct i2400m {
struct work_struct wake_tx_ws;
struct sk_buff *wake_tx_skb;
+ struct work_struct reset_ws;
+ const char *reset_reason;
+
+ struct work_struct recovery_ws;
+
struct dentry *debugfs_dentry;
const char *fw_name; /* name of the current firmware image */
unsigned long fw_version; /* version of the firmware interface */
@@ -896,20 +901,6 @@ struct device *i2400m_dev(struct i2400m *i2400m)
return i2400m->wimax_dev.net_dev->dev.parent;
}
-/*
- * Helper for scheduling simple work functions
- *
- * This struct can get any kind of payload attached (normally in the
- * form of a struct where you pack the stuff you want to pass to the
- * _work function).
- */
-struct i2400m_work {
- struct work_struct ws;
- struct i2400m *i2400m;
- size_t pl_size;
- u8 pl[0];
-};
-
extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
char *, size_t);
extern int i2400m_msg_size_check(struct i2400m *,
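
The i2400m rework replaces heap-allocated struct i2400m_work wrappers with work_structs embedded in struct i2400m: the handler recovers its device with container_of(), triggering costs no allocation (so it remains safe in atomic context without GFP_ATOMIC), and teardown can cancel each item explicitly instead of calling flush_scheduled_work(). A compact sketch of the pattern; struct demo and its fields are hypothetical.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo {
        struct work_struct reset_ws;
        const char *reset_reason;
};

static void demo_reset_handler(struct work_struct *ws)
{
        struct demo *d = container_of(ws, struct demo, reset_ws);

        pr_info("resetting: %s\n", d->reset_reason);
}

static void demo_init(struct demo *d)
{
        INIT_WORK(&d->reset_ws, demo_reset_handler);
}

/* Callable from atomic context: nothing is allocated here. */
static int demo_trigger(struct demo *d, const char *why)
{
        d->reset_reason = why;                  /* ok if triggers cannot race */
        return schedule_work(&d->reset_ws);     /* 0 if already queued */
}

static void demo_release(struct demo *d)
{
        cancel_work_sync(&d->reset_ws); /* replaces flush_scheduled_work() */
}

One semantic shift to be aware of: with an embedded item, a trigger that arrives while the work is still queued coalesces into a single run, whereas the old wrapper queued one work per event. For reset and error recovery, that coalescing is the desired behaviour.
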
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 9bfc26e1bc6b..be428cae28d8 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -590,7 +590,6 @@ module_init(i2400ms_driver_init);
static
void __exit i2400ms_driver_exit(void)
{
- flush_scheduled_work(); /* for the stuff we schedule */
sdio_unregister_driver(&i2400m_sdio_driver);
}
module_exit(i2400ms_driver_exit);
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index d3365ac85dde..10e3ab352175 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -780,7 +780,6 @@ module_init(i2400mu_driver_init);
static
void __exit i2400mu_driver_exit(void)
{
- flush_scheduled_work(); /* for the stuff we schedule from sysfs.c */
usb_deregister(&i2400mu_driver);
}
module_exit(i2400mu_driver_exit);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d7deae85d980..466d2bf02eab 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -62,6 +62,7 @@
static int ar9003_hw_power_interpolate(int32_t x,
int32_t *px, int32_t *py, u_int16_t np);
+
static const struct ar9300_eeprom ar9300_default = {
.eepromVersion = 2,
.templateVersion = 2,
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index dbb986946e1a..18d63f57777d 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -858,7 +858,10 @@ void hostap_free_data(struct ap_data *ap)
return;
}
+ flush_work_sync(&ap->add_sta_proc_queue);
+
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+ flush_work_sync(&ap->wds_oper_queue);
if (ap->crypt)
ap->crypt->deinit(ap->crypt_priv);
ap->crypt = ap->crypt_priv = NULL;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index b7cb165d612b..a8bddd81b4d1 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3317,7 +3317,13 @@ static void prism2_free_local_data(struct net_device *dev)
unregister_netdev(local->dev);
- flush_scheduled_work();
+ flush_work_sync(&local->reset_queue);
+ flush_work_sync(&local->set_multicast_list_queue);
+ flush_work_sync(&local->set_tim_queue);
+#ifndef PRISM2_NO_STATION_MODES
+ flush_work_sync(&local->info_queue);
+#endif
+ flush_work_sync(&local->comms_qual_update);
lib80211_crypt_info_free(&local->crypt_info);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ade30251608e..6f383cd684b0 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -209,9 +209,6 @@ config RT2X00_LIB_LEDS
boolean
default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
-comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
- depends on RT2X00_LIB=y && LEDS_CLASS=m
-
config RT2X00_LIB_DEBUGFS
bool "Ralink debugfs support"
depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 30f8d404958b..6a9b66051cf7 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -117,6 +117,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
/* Allocate a single memory block for values and addresses. */
count16 = 2*count;
+ /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
GFP_KERNEL);
if (!a16) {
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 14f0955eca68..de6c3086d232 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -515,7 +515,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
*/
static int xemaclite_set_mac_address(struct net_device *dev, void *address)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
struct sockaddr *addr = address;
if (netif_running(dev))
@@ -534,7 +534,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
*/
static void xemaclite_tx_timeout(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
unsigned long flags;
dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
@@ -578,7 +578,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
*/
static void xemaclite_tx_handler(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
dev->stats.tx_packets++;
if (lp->deferred_skb) {
@@ -605,7 +605,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
*/
static void xemaclite_rx_handler(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
struct sk_buff *skb;
unsigned int align;
u32 len;
@@ -661,7 +661,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
bool tx_complete = 0;
struct net_device *dev = dev_id;
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
void __iomem *base_addr = lp->base_addr;
u32 tx_status;
@@ -918,7 +918,7 @@ void xemaclite_adjust_link(struct net_device *ndev)
*/
static int xemaclite_open(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
int retval;
/* Just to be safe, stop the device first */
@@ -987,7 +987,7 @@ static int xemaclite_open(struct net_device *dev)
*/
static int xemaclite_close(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
netif_stop_queue(dev);
xemaclite_disable_interrupts(lp);
@@ -1001,21 +1001,6 @@ static int xemaclite_close(struct net_device *dev)
}
/**
- * xemaclite_get_stats - Get the stats for the net_device
- * @dev: Pointer to the network device
- *
- * This function returns the address of the 'net_device_stats' structure for the
- * given network device. This structure holds usage statistics for the network
- * device.
- *
- * Return: Pointer to the net_device_stats structure.
- */
-static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
-{
- return &dev->stats;
-}
-
-/**
* xemaclite_send - Transmit a frame
* @orig_skb: Pointer to the socket buffer to be transmitted
* @dev: Pointer to the network device
@@ -1031,7 +1016,7 @@ static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
*/
static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
struct sk_buff *new_skb;
unsigned int len;
unsigned long flags;
@@ -1068,7 +1053,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
static void xemaclite_remove_ndev(struct net_device *ndev)
{
if (ndev) {
- struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+ struct net_local *lp = netdev_priv(ndev);
if (lp->base_addr)
iounmap((void __iomem __force *) (lp->base_addr));
@@ -1245,7 +1230,7 @@ static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
struct device *dev = &of_dev->dev;
struct net_device *ndev = dev_get_drvdata(dev);
- struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+ struct net_local *lp = netdev_priv(ndev);
/* Un-register the mii_bus, if configured */
if (lp->has_mdio) {
@@ -1285,7 +1270,6 @@ static struct net_device_ops xemaclite_netdev_ops = {
.ndo_start_xmit = xemaclite_send,
.ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
- .ndo_get_stats = xemaclite_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xemaclite_poll_controller,
#endif
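
Dropping xemaclite_get_stats() is safe because it only returned &dev->stats, which is what the networking core already falls back to when a driver registers no stats hook. The toy below models that fallback; struct dev and dev_get_stats() here are simplified stand-ins for the real net_device machinery.

#include <stdio.h>

struct stats {
        unsigned long tx_packets;
};

struct dev {
        struct stats stats;                             /* core-owned counters */
        struct stats *(*get_stats)(struct dev *);       /* optional driver hook */
};

/* models the core's fallback: use the driver hook if present, otherwise
 * the counters the core already keeps in dev->stats */
static struct stats *dev_get_stats(struct dev *d)
{
        return d->get_stats ? d->get_stats(d) : &d->stats;
}

int main(void)
{
        struct dev d = { .stats = { .tx_packets = 42 }, .get_stats = NULL };

        printf("tx_packets=%lu\n", dev_get_stats(&d)->tx_packets);
        return 0;
}
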
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index c3a329204511..ae07b3dfbcc1 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -124,7 +124,7 @@ MODULE_LICENSE("GPL");
#define TX_BUF_SIZE 8192
#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */
-#define TX_TIMEOUT 10
+#define TX_TIMEOUT (HZ/10)
struct znet_private {
int rx_dma, tx_dma;
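
The znet change above expresses the watchdog timeout relative to HZ: a bare 10 means 10 ticks, whose real duration depends on the kernel's configured tick rate, while HZ/10 is always roughly 100 ms. A standalone check of the arithmetic across common HZ values:

#include <stdio.h>

static void show(int hz)
{
        /* old constant: 10 jiffies; new constant: HZ/10 jiffies */
        printf("HZ=%4d: 10 jiffies = %3d ms, HZ/10 jiffies = %3d ms\n",
               hz, 10 * 1000 / hz, (hz / 10) * 1000 / hz);
}

int main(void)
{
        show(100);
        show(250);
        show(1000);
        return 0;
}
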