Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c515.c | 3
-rw-r--r--  drivers/net/3c59x.c | 166
-rw-r--r--  drivers/net/8139cp.c | 35
-rw-r--r--  drivers/net/8139too.c | 39
-rw-r--r--  drivers/net/82596.c | 9
-rw-r--r--  drivers/net/Kconfig | 43
-rw-r--r--  drivers/net/Makefile | 3
-rw-r--r--  drivers/net/ac3200.c | 3
-rw-r--r--  drivers/net/appletalk/Kconfig | 2
-rw-r--r--  drivers/net/appletalk/cops.c | 2
-rw-r--r--  drivers/net/at1700.c | 2
-rw-r--r--  drivers/net/b44.c | 27
-rw-r--r--  drivers/net/bnx2.c | 81
-rw-r--r--  drivers/net/bnx2.h | 12
-rw-r--r--  drivers/net/cassini.c | 25
-rw-r--r--  drivers/net/chelsio/sge.c | 2
-rw-r--r--  drivers/net/cs89x0.c | 3
-rw-r--r--  drivers/net/declance.c | 7
-rw-r--r--  drivers/net/dl2k.c | 43
-rw-r--r--  drivers/net/dm9000.c | 18
-rw-r--r--  drivers/net/dummy.c | 1
-rw-r--r--  drivers/net/e100.c | 6
-rw-r--r--  drivers/net/e1000/e1000.h | 13
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 141
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 1819
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 386
-rw-r--r--  drivers/net/e1000/e1000_main.c | 456
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 13
-rw-r--r--  drivers/net/e1000/e1000_param.c | 199
-rw-r--r--  drivers/net/e2100.c | 4
-rw-r--r--  drivers/net/eepro.c | 3
-rw-r--r--  drivers/net/eepro100.c | 6
-rw-r--r--  drivers/net/eexpress.c | 2
-rw-r--r--  drivers/net/epic100.c | 93
-rw-r--r--  drivers/net/es3210.c | 3
-rw-r--r--  drivers/net/eth16i.c | 2
-rw-r--r--  drivers/net/fealnx.c | 38
-rw-r--r--  drivers/net/forcedeth.c | 51
-rw-r--r--  drivers/net/fs_enet/Makefile | 6
-rw-r--r--  drivers/net/fs_enet/fec.h | 42
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 207
-rw-r--r--  drivers/net/fs_enet/fs_enet-mii.c | 505
-rw-r--r--  drivers/net/fs_enet/fs_enet.h | 40
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 32
-rw-r--r--  drivers/net/fs_enet/mac-fec.c | 142
-rw-r--r--  drivers/net/fs_enet/mac-scc.c | 4
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 448
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 243
-rw-r--r--  drivers/net/fs_enet/mii-fixed.c | 91
-rw-r--r--  drivers/net/gt96100eth.c | 3
-rw-r--r--  drivers/net/gt96100eth.h | 2
-rw-r--r--  drivers/net/hamachi.c | 16
-rw-r--r--  drivers/net/hamradio/bpqether.c | 7
-rw-r--r--  drivers/net/ifb.c | 1
-rw-r--r--  drivers/net/irda/ali-ircc.c | 3
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 2
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 6
-rw-r--r--  drivers/net/lance.c | 2
-rw-r--r--  drivers/net/lne390.c | 2
-rw-r--r--  drivers/net/loopback.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 60
-rw-r--r--  drivers/net/natsemi.c | 117
-rw-r--r--  drivers/net/ne2k-pci.c | 9
-rw-r--r--  drivers/net/ni5010.c | 52
-rw-r--r--  drivers/net/ni52.c | 2
-rw-r--r--  drivers/net/ni65.c | 2
-rw-r--r--  drivers/net/ns83820.c | 41
-rw-r--r--  drivers/net/pci-skeleton.c | 19
-rw-r--r--  drivers/net/pcmcia/xirc2ps_cs.c | 18
-rw-r--r--  drivers/net/pcnet32.c | 545
-rw-r--r--  drivers/net/phy/Kconfig | 17
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/cicada.c | 42
-rw-r--r--  drivers/net/phy/fixed.c | 358
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/phy/phy.c | 8
-rw-r--r--  drivers/net/phy/phy_device.c | 51
-rw-r--r--  drivers/net/ppp_generic.c | 30
-rw-r--r--  drivers/net/r8169.c | 40
-rw-r--r--  drivers/net/s2io.c | 672
-rw-r--r--  drivers/net/s2io.h | 15
-rw-r--r--  drivers/net/seeq8005.c | 2
-rw-r--r--  drivers/net/sk98lin/h/xmac_ii.h | 2
-rw-r--r--  drivers/net/skge.c | 7
-rw-r--r--  drivers/net/skge.h | 4
-rw-r--r--  drivers/net/sky2.c | 55
-rw-r--r--  drivers/net/sky2.h | 2
-rw-r--r--  drivers/net/smc911x.c | 1
-rw-r--r--  drivers/net/smc91x.c | 8
-rw-r--r--  drivers/net/smc91x.h | 47
-rw-r--r--  drivers/net/spider_net.c | 584
-rw-r--r--  drivers/net/spider_net.h | 76
-rw-r--r--  drivers/net/spider_net_ethtool.c | 13
-rw-r--r--  drivers/net/starfire.c | 123
-rw-r--r--  drivers/net/sundance.c | 108
-rw-r--r--  drivers/net/sunhme.c | 9
-rw-r--r--  drivers/net/sunlance.c | 35
-rw-r--r--  drivers/net/tg3.c | 171
-rw-r--r--  drivers/net/tg3.h | 8
-rw-r--r--  drivers/net/tokenring/ibmtr.c | 4
-rw-r--r--  drivers/net/tokenring/smctr.c | 5
-rw-r--r--  drivers/net/tulip/winbond-840.c | 31
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 3
-rw-r--r--  drivers/net/tulip/xircom_tulip_cb.c | 27
-rw-r--r--  drivers/net/typhoon.c | 4
-rw-r--r--  drivers/net/ucc_geth.c | 4278
-rw-r--r--  drivers/net/ucc_geth.h | 1339
-rw-r--r--  drivers/net/ucc_geth_phy.c | 801
-rw-r--r--  drivers/net/ucc_geth_phy.h | 217
-rw-r--r--  drivers/net/via-rhine.c | 207
-rw-r--r--  drivers/net/via-velocity.c | 119
-rw-r--r--  drivers/net/via-velocity.h | 4
-rw-r--r--  drivers/net/wan/Kconfig | 12
-rw-r--r--  drivers/net/wan/Makefile | 1
-rw-r--r--  drivers/net/wan/c101.c | 33
-rw-r--r--  drivers/net/wan/hd6457x.c | 26
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 14
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 14
-rw-r--r--  drivers/net/wan/hdlc_generic.c | 65
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 1
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 1
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 1
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 1
-rw-r--r--  drivers/net/wan/n2.c | 3
-rw-r--r--  drivers/net/wan/wanxl.c | 5
-rw-r--r--  drivers/net/wd.c | 4
-rw-r--r--  drivers/net/wireless/Kconfig | 2
-rw-r--r--  drivers/net/wireless/Makefile | 1
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 33
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.h | 24
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_radio.c | 7
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_xmit.c | 5
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 3
-rw-r--r--  drivers/net/wireless/hostap/hostap_plx.c | 2
-rw-r--r--  drivers/net/wireless/orinoco.c | 4
-rw-r--r--  drivers/net/wireless/spectrum_cs.c | 39
-rw-r--r--  drivers/net/wireless/strip.c | 6
-rw-r--r--  drivers/net/wireless/zd1201.c | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/Kconfig | 19
-rw-r--r--  drivers/net/wireless/zd1211rw/Makefile | 11
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.c | 1646
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.h | 827
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_def.h | 48
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_ieee80211.c | 191
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_ieee80211.h | 85
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 1084
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.h | 193
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_netdev.c | 267
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_netdev.h | 45
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf.c | 151
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf.h | 82
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf_al2230.c | 308
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf_rf2959.c | 279
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_types.h | 71
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 1309
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.h | 240
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_util.c | 82
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_util.h | 29
-rw-r--r--  drivers/net/yellowfin.c | 39
161 files changed, 19393 insertions, 3887 deletions
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 4532b17e40ea..aedfddf20cb3 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -1003,7 +1003,8 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
/* Calculate the next Tx descriptor entry. */
int entry = vp->cur_tx % TX_RING_SIZE;
struct boom_tx_desc *prev_entry;
- unsigned long flags, i;
+ unsigned long flags;
+ int i;
if (vp->tx_full) /* No room to transmit with */
return 1;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 2819de79442c..80e8ca013e44 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -17,172 +17,6 @@
410 Severn Ave., Suite 210
Annapolis MD 21403
- Linux Kernel Additions:
-
- 0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
- 0.99H+lk1.0 - Jeff Garzik <jgarzik@pobox.com>
- Remove compatibility defines for kernel versions < 2.2.x.
- Update for new 2.3.x module interface
- LK1.1.2 (March 19, 2000)
- * New PCI interface (jgarzik)
-
- LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
- - Merged with 3c575_cb.c
- - Don't set RxComplete in boomerang interrupt enable reg
- - spinlock in vortex_timer to protect mdio functions
- - disable local interrupts around call to vortex_interrupt in
- vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
- - Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
- - In vortex_start_xmit(), move the lock to _after_ we've altered
- vp->cur_tx and vp->tx_full. This defeats the race between
- vortex_start_xmit() and vortex_interrupt which was identified
- by Bogdan Costescu.
- - Merged back support for six new cards from various sources
- - Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
- insertion oops)
- - Tell it that 3c905C has NWAY for 100bT autoneg
- - Fix handling of SetStatusEnd in 'Too much work..' code, as
- per 2.3.99's 3c575_cb (Dave Hinds).
- - Split ISR into two for vortex & boomerang
- - Fix MOD_INC/DEC races
- - Handle resource allocation failures.
- - Fix 3CCFE575CT LED polarity
- - Make tx_interrupt_mitigation the default
-
- LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
- - Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
- - Put vortex_info_tbl into __devinitdata
- - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
- as in the hardware.
- - Increased the loop counter in issue_and_wait from 2,000 to 4,000.
-
- LK1.1.5 28 April 2000, andrewm
- - Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
- - Some extra diagnostics
- - In vortex_error(), reset the Tx on maxCollisions. Otherwise most
- chips usually get a Tx timeout.
- - Added extra_reset module parm
- - Replaced some inline timer manip with mod_timer
- (François romieu <Francois.Romieu@nic.fr>)
- - In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
- (this came across from 3c575_cb).
-
- LK1.1.6 06 Jun 2000, andrewm
- - Backed out the PPC defines.
- - Use del_timer_sync(), mod_timer().
- - Fix wrapped ulong comparison in boomerang_rx()
- - Add IS_TORNADO, use it to suppress 3c905C checksum error msg
- (Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>)
- - Replace union wn3_config with BFINS/BFEXT manipulation for
- sparc64 (Pete Zaitcev, Peter Jones)
- - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex):
- do a netif_wake_queue() to better recover from errors. (Anders Pedersen,
- Donald Becker)
- - Print a warning on out-of-memory (rate limited to 1 per 10 secs)
- - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland)
-
- LK1.1.7 2 Jul 2000 andrewm
- - Better handling of shared IRQs
- - Reset the transmitter on a Tx reclaim error
- - Fixed crash under OOM during vortex_open() (Mark Hemment)
- - Fix Rx cessation problem during OOM (help from Mark Hemment)
- - The spinlocks around the mdio access were blocking interrupts for 300uS.
- Fix all this to use spin_lock_bh() within mdio_read/write
- - Only write to TxFreeThreshold if it's a boomerang - other NICs don't
- have one.
- - Added 802.3x MAC-layer flow control support
-
- LK1.1.8 13 Aug 2000 andrewm
- - Ignore request_region() return value - already reserved if Cardbus.
- - Merged some additional Cardbus flags from Don's 0.99Qk
- - Some fixes for 3c556 (Fred Maciel)
- - Fix for EISA initialisation (Jan Rekorajski)
- - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers
- - Fixed MII_XCVR_PWR for 3CCFE575CT
- - Added INVERT_LED_PWR, used it.
- - Backed out the extra_reset stuff
-
- LK1.1.9 12 Sep 2000 andrewm
- - Backed out the tx_reset_resume flags. It was a no-op.
- - In vortex_error, don't reset the Tx on txReclaim errors
- - In vortex_error, don't reset the Tx on maxCollisions errors.
- Hence backed out all the DownListPtr logic here.
- - In vortex_error, give Tornado cards a partial TxReset on
- maxCollisions (David Hinds). Defined MAX_COLLISION_RESET for this.
- - Redid some driver flags and device names based on pcmcia_cs-3.1.20.
- - Fixed a bug where, if vp->tx_full is set when the interface
- is downed, it remains set when the interface is upped. Bad
- things happen.
-
- LK1.1.10 17 Sep 2000 andrewm
- - Added EEPROM_8BIT for 3c555 (Fred Maciel)
- - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg)
- - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
-
- LK1.1.11 13 Nov 2000 andrewm
- - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
-
- LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
- - Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
- - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
- - Added extended issue_and_wait for the 3c905CX.
- - Look for an MII on PHY index 24 first (3c905CX oddity).
- - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
- - Don't free skbs we don't own on oom path in vortex_open().
-
- LK1.1.13 27 Jan 2001
- - Added explicit `medialock' flag so we can truly
- lock the media type down with `options'.
- - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
- - Added and used EEPROM_NORESET for 3c556B PM resumes.
- - Fixed leakage of vp->rx_ring.
- - Break out separate HAS_HWCKSM device capability flag.
- - Kill vp->tx_full (ANK)
- - Merge zerocopy fragment handling (ANK?)
-
- LK1.1.14 15 Feb 2001
- - Enable WOL. Can be turned on with `enable_wol' module option.
- - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
- - If a device's internalconfig register reports it has NWAY,
- use it, even if autoselect is enabled.
-
- LK1.1.15 6 June 2001 akpm
- - Prevent double counting of received bytes (Lars Christensen)
- - Add ethtool support (jgarzik)
- - Add module parm descriptions (Andrzej M. Krzysztofowicz)
- - Implemented alloc_etherdev() API
- - Special-case the 'Tx error 82' message.
-
- LK1.1.16 18 July 2001 akpm
- - Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM
- - Lessen verbosity of bootup messages
- - Fix WOL - use new PM API functions.
- - Use netif_running() instead of vp->open in suspend/resume.
- - Don't reset the interface logic on open/close/rmmod. It upsets
- autonegotiation, and hence DHCP (from 0.99T).
- - Back out EEPROM_NORESET flag because of the above (we do it for all
- NICs).
- - Correct 3c982 identification string
- - Rename wait_for_completion() to issue_and_wait() to avoid completion.h
- clash.
-
- LK1.1.17 18Dec01 akpm
- - PCI ID 9805 is a Python-T, not a dual-port Cyclone. Apparently.
- And it has NWAY.
- - Mask our advertised modes (vp->advertising) with our capabilities
- (MII reg5) when deciding which duplex mode to use.
- - Add `global_options' as default for options[]. Ditto global_enable_wol,
- global_full_duplex.
-
- LK1.1.18 01Jul02 akpm
- - Fix for undocumented transceiver power-up bit on some 3c566B's
- (Donald Becker, Rahul Karnik)
-
- - See http://www.zip.com.au/~akpm/linux/#3c59x-2.3 for more details.
- - Also see Documentation/networking/vortex.txt
-
- LK1.1.19 10Nov02 Marc Zyngier <maz@wild-wind.fr.eu.org>
- - EISA sysfs integration.
*/
/*
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 1959654cbec8..1428bb7715af 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1836,9 +1836,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
- printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
- pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
- printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
+ dev_err(&pdev->dev,
+ "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
+ pdev->vendor, pdev->device, pci_rev);
+ dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
return -ENODEV;
}
@@ -1876,14 +1877,13 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
pciaddr = pci_resource_start(pdev, 1);
if (!pciaddr) {
rc = -EIO;
- printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "no MMIO resource\n");
goto err_out_res;
}
if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
rc = -EIO;
- printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
- (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
+ dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
+ (unsigned long long)pci_resource_len(pdev, 1));
goto err_out_res;
}
@@ -1897,14 +1897,15 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting.\n");
goto err_out_res;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {
- printk(KERN_ERR PFX "No usable consistent DMA configuration, "
- "aborting.\n");
+ dev_err(&pdev->dev,
+ "No usable consistent DMA configuration, "
+ "aborting.\n");
goto err_out_res;
}
}
@@ -1915,9 +1916,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
regs = ioremap(pciaddr, CP_REGS_SIZE);
if (!regs) {
rc = -EIO;
- printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%llx) on pci dev %s\n",
- (unsigned long long)pci_resource_len(pdev, 1),
- (unsigned long long)pciaddr, pci_name(pdev));
+ dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ (unsigned long long)pciaddr);
goto err_out_res;
}
dev->base_addr = (unsigned long) regs;
@@ -1986,7 +1987,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable busmastering and memory-write-invalidate */
pci_set_master(pdev);
- if (cp->wol_enabled) cp_set_d3_state (cp);
+ if (cp->wol_enabled)
+ cp_set_d3_state (cp);
return 0;
@@ -2011,7 +2013,8 @@ static void cp_remove_one (struct pci_dev *pdev)
BUG_ON(!dev);
unregister_netdev(dev);
iounmap(cp->regs);
- if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
+ if (cp->wol_enabled)
+ pci_set_power_state (pdev, PCI_D0);
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 717506b2b13a..e4f4eaff7679 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -768,7 +768,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
/* dev and priv zeroed in alloc_etherdev */
dev = alloc_etherdev (sizeof (*tp));
if (dev == NULL) {
- printk (KERN_ERR PFX "%s: Unable to alloc new net device\n", pci_name(pdev));
+ dev_err(&pdev->dev, "Unable to alloc new net device\n");
return -ENOMEM;
}
SET_MODULE_OWNER(dev);
@@ -800,31 +800,31 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
#ifdef USE_IO_OPS
/* make sure PCI base addr 0 is PIO */
if (!(pio_flags & IORESOURCE_IO)) {
- printk (KERN_ERR PFX "%s: region #0 not a PIO resource, aborting\n", pci_name(pdev));
+ dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
rc = -ENODEV;
goto err_out;
}
/* check for weird/broken PCI region reporting */
if (pio_len < RTL_MIN_IO_SIZE) {
- printk (KERN_ERR PFX "%s: Invalid PCI I/O region size(s), aborting\n", pci_name(pdev));
+ dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
rc = -ENODEV;
goto err_out;
}
#else
/* make sure PCI base addr 1 is MMIO */
if (!(mmio_flags & IORESOURCE_MEM)) {
- printk (KERN_ERR PFX "%s: region #1 not an MMIO resource, aborting\n", pci_name(pdev));
+ dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
rc = -ENODEV;
goto err_out;
}
if (mmio_len < RTL_MIN_IO_SIZE) {
- printk (KERN_ERR PFX "%s: Invalid PCI mem region size(s), aborting\n", pci_name(pdev));
+ dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
rc = -ENODEV;
goto err_out;
}
#endif
- rc = pci_request_regions (pdev, "8139too");
+ rc = pci_request_regions (pdev, DRV_NAME);
if (rc)
goto err_out;
disable_dev_on_err = 1;
@@ -835,7 +835,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
#ifdef USE_IO_OPS
ioaddr = ioport_map(pio_start, pio_len);
if (!ioaddr) {
- printk (KERN_ERR PFX "%s: cannot map PIO, aborting\n", pci_name(pdev));
+ dev_err(&pdev->dev, "cannot map PIO, aborting\n");
rc = -EIO;
goto err_out;
}
@@ -846,7 +846,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
/* ioremap MMIO region */
ioaddr = pci_iomap(pdev, 1, 0);
if (ioaddr == NULL) {
- printk (KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", pci_name(pdev));
+ dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
goto err_out;
}
@@ -860,8 +860,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
/* check for missing/broken hardware */
if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
- printk (KERN_ERR PFX "%s: Chip not responding, ignoring board\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Chip not responding, ignoring board\n");
rc = -EIO;
goto err_out;
}
@@ -875,9 +874,10 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
}
/* if unknown chip, assume array element #0, original RTL-8139 in this case */
- printk (KERN_DEBUG PFX "%s: unknown chip version, assuming RTL-8139\n",
- pci_name(pdev));
- printk (KERN_DEBUG PFX "%s: TxConfig = 0x%lx\n", pci_name(pdev), RTL_R32 (TxConfig));
+ dev_printk (KERN_DEBUG, &pdev->dev,
+ "unknown chip version, assuming RTL-8139\n");
+ dev_printk (KERN_DEBUG, &pdev->dev,
+ "TxConfig = 0x%lx\n", RTL_R32 (TxConfig));
tp->chipset = 0;
match:
@@ -954,9 +954,11 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev >= 0x20) {
- printk(KERN_INFO PFX "pci dev %s (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
- pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
- printk(KERN_INFO PFX "Use the \"8139cp\" driver for improved performance and stability.\n");
+ dev_info(&pdev->dev,
+ "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
+ pdev->vendor, pdev->device, pci_rev);
+ dev_info(&pdev->dev,
+ "Use the \"8139cp\" driver for improved performance and stability.\n");
}
i = rtl8139_init_board (pdev, &dev);
@@ -1707,6 +1709,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
void __iomem *ioaddr = tp->mmio_addr;
unsigned int entry;
unsigned int len = skb->len;
+ unsigned long flags;
/* Calculate the next Tx descriptor entry. */
entry = tp->cur_tx % NUM_TX_DESC;
@@ -1723,7 +1726,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
return 0;
}
- spin_lock_irq(&tp->lock);
+ spin_lock_irqsave(&tp->lock, flags);
RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
@@ -1734,7 +1737,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
netif_stop_queue (dev);
- spin_unlock_irq(&tp->lock);
+ spin_unlock_irqrestore(&tp->lock, flags);
if (netif_msg_tx_queued(tp))
printk (KERN_DEBUG "%s: Queued Tx packet size %u to slot %d.\n",
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 7e2ca9571467..257d3bce3993 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -899,7 +899,7 @@ memory_squeeze:
}
-static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
struct i596_cmd *ptr;
@@ -932,7 +932,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
lp->scb.cmd = I596_NULL;
}
-static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr)
+static void i596_reset(struct net_device *dev, struct i596_private *lp,
+ int ioaddr)
{
unsigned long flags;
@@ -1578,7 +1579,7 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");
-int init_module(void)
+int __init init_module(void)
{
if (debug >= 0)
i596_debug = debug;
@@ -1588,7 +1589,7 @@ int init_module(void)
return 0;
}
-void cleanup_module(void)
+void __exit cleanup_module(void)
{
unregister_netdev(dev_82596);
#ifdef __mc68000__
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 39189903e355..a2bd8119270e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1724,6 +1724,20 @@ config VIA_RHINE_MMIO
If unsure, say Y.
+config VIA_RHINE_NAPI
+ bool "Use Rx Polling (NAPI)"
+ depends on VIA_RHINE
+ help
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+ information.
+
config LAN_SAA9730
bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)"
depends on NET_PCI && EXPERIMENTAL && MIPS
@@ -2219,6 +2233,33 @@ config GFAR_NAPI
bool "NAPI Support"
depends on GIANFAR
+config UCC_GETH
+ tristate "Freescale QE UCC GETH"
+ depends on QUICC_ENGINE && UCC_FAST
+ help
+ This driver supports the Gigabit Ethernet mode of QE UCC.
+ QE can be found on MPC836x CPUs.
+
+config UGETH_NAPI
+ bool "NAPI Support"
+ depends on UCC_GETH
+
+config UGETH_MAGIC_PACKET
+ bool "Magic Packet detection support"
+ depends on UCC_GETH
+
+config UGETH_FILTERING
+ bool "Mac address filtering support"
+ depends on UCC_GETH
+
+config UGETH_TX_ON_DEMOND
+ bool "Transmit on Demond support"
+ depends on UCC_GETH
+
+config UGETH_HAS_GIGA
+ bool
+ depends on UCC_GETH && MPC836x
+
config MV643XX_ETH
tristate "MV-643XX Ethernet support"
depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM
@@ -2352,7 +2393,7 @@ config MYRI10GE
you will need a newer firmware image.
You may get this image or more information, at:
- <http://www.myri.com/Myri-10G/>
+ <http://www.myri.com/scs/download-Myri10GE.html>
To compile this driver as a module, choose M here and read
<file:Documentation/networking/net-modules.txt>. The module
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c91e95126f78..8427bf9dec9d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -18,6 +18,9 @@ gianfar_driver-objs := gianfar.o \
gianfar_mii.o \
gianfar_sysfs.o
+obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ucc_geth_driver-objs := ucc_geth.o ucc_geth_phy.o
+
#
# link order important here
#
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 7952dc6d77e3..0fbbcb75af69 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -370,8 +370,7 @@ MODULE_PARM_DESC(mem, "Memory base address(es)");
MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
MODULE_LICENSE("GPL");
-int
-init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index b14e89004c3a..0a0e0cd81a23 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -29,7 +29,7 @@ config ATALK
even politically correct people are allowed to say Y here.
config DEV_APPLETALK
- bool "Appletalk interfaces support"
+ tristate "Appletalk interfaces support"
depends on ATALK
help
AppleTalk is the protocol that Apple computers can use to communicate
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1d01ac0000e4..ae7f828344d9 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -1030,7 +1030,7 @@ module_param(io, int, 0);
module_param(irq, int, 0);
module_param(board_type, int, 0);
-int init_module(void)
+int __init init_module(void)
{
if (io == 0)
printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 5d7929c79bce..4ca061c2d5b2 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -901,7 +901,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
-int init_module(void)
+int __init init_module(void)
{
if (io == 0)
printk("at1700: You should not use auto-probing with insmod!\n");
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index cd98d31dee8c..bea0fc0ede2f 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2120,13 +2120,14 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device, "
+ dev_err(&pdev->dev, "Cannot enable PCI device, "
"aborting.\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find proper PCI device "
+ dev_err(&pdev->dev,
+ "Cannot find proper PCI device "
"base address, aborting.\n");
err = -ENODEV;
goto err_out_disable_pdev;
@@ -2134,8 +2135,8 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ dev_err(&pdev->dev,
+ "Cannot obtain PCI resources, aborting.\n");
goto err_out_disable_pdev;
}
@@ -2143,15 +2144,13 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
goto err_out_free_res;
}
err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
goto err_out_free_res;
}
@@ -2160,7 +2159,7 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(*bp));
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -2181,8 +2180,7 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
bp->regs = ioremap(b44reg_base, b44reg_len);
if (bp->regs == 0UL) {
- printk(KERN_ERR PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
err = -ENOMEM;
goto err_out_free_dev;
}
@@ -2212,8 +2210,8 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
err = b44_get_invariants(bp);
if (err) {
- printk(KERN_ERR PFX "Problem fetching invariants of chip, "
- "aborting.\n");
+ dev_err(&pdev->dev,
+ "Problem fetching invariants of chip, aborting.\n");
goto err_out_iounmap;
}
@@ -2233,8 +2231,7 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
goto err_out_iounmap;
}
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4f4db5ae503b..652eb05a6c2d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.4.43"
-#define DRV_MODULE_RELDATE "June 28, 2006"
+#define DRV_MODULE_VERSION "1.4.44"
+#define DRV_MODULE_RELDATE "August 10, 2006"
#define RUN_AT(x) (jiffies + (x))
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
- u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+ u32 diff;
+ smp_mb();
+ diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
if (diff > MAX_TX_DESC_CNT)
diff = (diff & MAX_TX_DESC_CNT) - 1;
return (bp->tx_ring_size - diff);
@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
unsigned long align;
- skb = dev_alloc_skb(bp->rx_buf_size);
+ skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
if (skb == NULL) {
return -ENOMEM;
}
@@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
skb_reserve(skb, 8 - align);
}
- skb->dev = bp->dev;
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
@@ -1639,7 +1640,7 @@ bnx2_tx_int(struct bnx2 *bp)
skb = tx_buf->skb;
#ifdef BCM_TSO
/* partial BD completions possible with TSO packets */
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
u16 last_idx, last_ring_idx;
last_idx = sw_cons +
@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
}
bp->tx_cons = sw_cons;
+ /* Need to make the tx_cons update visible to bnx2_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that bnx2_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
- if (unlikely(netif_queue_stopped(bp->dev))) {
- spin_lock(&bp->tx_lock);
+ if (unlikely(netif_queue_stopped(bp->dev)) &&
+ (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+ netif_tx_lock(bp->dev);
if ((netif_queue_stopped(bp->dev)) &&
- (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+ (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
netif_wake_queue(bp->dev);
- }
- spin_unlock(&bp->tx_lock);
+ netif_tx_unlock(bp->dev);
}
}
@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
struct sk_buff *new_skb;
- new_skb = dev_alloc_skb(len + 2);
+ new_skb = netdev_alloc_skb(bp->dev, len + 2);
if (new_skb == NULL)
goto reuse_rx;
@@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
skb_reserve(new_skb, 2);
skb_put(new_skb, len);
- new_skb->dev = bp->dev;
bnx2_reuse_rx_skb(bp, skb,
sw_ring_cons, sw_ring_prod);
@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
struct tx_bd *txbd;
u32 val;
+ bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
return -EINVAL;
pkt_size = 1514;
- skb = dev_alloc_skb(pkt_size);
+ skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
packet = skb_put(skb, pkt_size);
@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
#endif
/* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full. This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
*/
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
- spin_lock(&bp->tx_lock);
netif_stop_queue(dev);
-
- if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+ if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
netif_wake_queue(dev);
- spin_unlock(&bp->tx_lock);
}
return NETDEV_TX_OK;
@@ -5575,20 +5577,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
/* enable device (incl. PCI PM wakeup), and bus-mastering */
rc = pci_enable_device(pdev);
if (rc) {
- printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
goto err_out;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find PCI device base address, "
- "aborting.\n");
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting.\n");
rc = -ENODEV;
goto err_out_disable;
}
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
if (rc) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
goto err_out_disable;
}
@@ -5596,15 +5598,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
- printk(KERN_ERR PFX "Cannot find power management capability, "
- "aborting.\n");
+ dev_err(&pdev->dev,
+ "Cannot find power management capability, aborting.\n");
rc = -EIO;
goto err_out_release;
}
bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
if (bp->pcix_cap == 0) {
- printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
+ dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
rc = -EIO;
goto err_out_release;
}
@@ -5612,14 +5614,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
bp->flags |= USING_DAC_FLAG;
if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
- printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
- "failed, aborting.\n");
+ dev_err(&pdev->dev,
+ "pci_set_consistent_dma_mask failed, aborting.\n");
rc = -EIO;
goto err_out_release;
}
}
else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
- printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
+ dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
rc = -EIO;
goto err_out_release;
}
@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->pdev = pdev;
spin_lock_init(&bp->phy_lock);
- spin_lock_init(&bp->tx_lock);
INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5639,7 +5640,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->regview = ioremap_nocache(dev->base_addr, mem_len);
if (!bp->regview) {
- printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
+ dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
rc = -ENOMEM;
goto err_out_release;
}
@@ -5711,8 +5712,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
!(bp->flags & PCIX_FLAG)) {
- printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
- "aborting.\n");
+ dev_err(&pdev->dev,
+ "5706 A1 can only be used in a PCIX bus, aborting.\n");
goto err_out_unmap;
}
@@ -5733,7 +5734,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
BNX2_DEV_INFO_SIGNATURE_MAGIC) {
- printk(KERN_ERR PFX "Firmware not running, aborting.\n");
+ dev_err(&pdev->dev, "Firmware not running, aborting.\n");
rc = -ENODEV;
goto err_out_unmap;
}
@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->mac_addr[5] = (u8) reg;
bp->tx_ring_size = MAX_TX_DESC_CNT;
- bnx2_set_rx_ring_size(bp, 100);
+ bnx2_set_rx_ring_size(bp, 255);
bp->rx_csum = 1;
@@ -5895,7 +5896,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
if ((rc = register_netdev(dev))) {
- printk(KERN_ERR PFX "Cannot register net device\n");
+ dev_err(&pdev->dev, "Cannot register net device\n");
if (bp->regview)
iounmap(bp->regview);
pci_release_regions(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 658c5ee95c73..fe804763c607 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3890,10 +3890,6 @@ struct bnx2 {
u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
u16 tx_prod;
- struct tx_bd *tx_desc_ring;
- struct sw_bd *tx_buf_ring;
- int tx_ring_size;
-
u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
u16 hw_tx_cons;
@@ -3916,9 +3912,11 @@ struct bnx2 {
struct sw_bd *rx_buf_ring;
struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
- /* Only used to synchronize netif_stop_queue/wake_queue when tx */
- /* ring is full */
- spinlock_t tx_lock;
+ /* TX constants */
+ struct tx_bd *tx_desc_ring;
+ struct sw_bd *tx_buf_ring;
+ int tx_ring_size;
+ u32 tx_wake_thresh;
/* End of fields used in the performance code paths. */
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index d33130f64700..a31544ccb3c4 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4887,13 +4887,12 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find proper PCI device "
+ dev_err(&pdev->dev, "Cannot find proper PCI device "
"base address, aborting.\n");
err = -ENODEV;
goto err_out_disable_pdev;
@@ -4901,7 +4900,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(*cp));
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
err = -ENOMEM;
goto err_out_disable_pdev;
}
@@ -4910,8 +4909,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
err = pci_request_regions(pdev, dev->name);
if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
goto err_out_free_netdev;
}
pci_set_master(pdev);
@@ -4941,7 +4939,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
if (pci_write_config_byte(pdev,
PCI_CACHE_LINE_SIZE,
cas_cacheline_size)) {
- printk(KERN_ERR PFX "Could not set PCI cache "
+ dev_err(&pdev->dev, "Could not set PCI cache "
"line size\n");
goto err_write_cacheline;
}
@@ -4955,7 +4953,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
err = pci_set_consistent_dma_mask(pdev,
DMA_64BIT_MASK);
if (err < 0) {
- printk(KERN_ERR PFX "Unable to obtain 64-bit DMA "
+ dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
"for consistent allocations\n");
goto err_out_free_res;
}
@@ -4963,7 +4961,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
} else {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
+ dev_err(&pdev->dev, "No usable DMA configuration, "
"aborting.\n");
goto err_out_free_res;
}
@@ -5023,8 +5021,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
/* give us access to cassini registers */
cp->regs = pci_iomap(pdev, 0, casreg_len);
if (cp->regs == 0UL) {
- printk(KERN_ERR PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
goto err_out_free_res;
}
cp->casreg_len = casreg_len;
@@ -5040,8 +5037,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
&cp->block_dvma);
if (!cp->init_block) {
- printk(KERN_ERR PFX "Cannot allocate init block, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
goto err_out_iounmap;
}
@@ -5085,8 +5081,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
dev->features |= NETIF_F_HIGHDMA;
if (register_netdev(dev)) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
goto err_out_free_consistent;
}
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 87f94d939ff8..61b3754f50ff 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1417,7 +1417,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt *cpl;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
int eth_type;
struct cpl_tx_pkt_lso *hdr;
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 47eecce35fa4..2dcca79b1f6a 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1905,8 +1905,7 @@ MODULE_LICENSE("GPL");
*/
-int
-init_module(void)
+int __init init_module(void)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
struct net_local *lp;
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 2038ca7e49ce..6ad5796121c8 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -703,8 +703,8 @@ static irqreturn_t lance_dma_merr_int(const int irq, void *dev_id,
return IRQ_HANDLED;
}
-static irqreturn_t
-lance_interrupt(const int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t lance_interrupt(const int irq, void *dev_id,
+ struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
struct lance_private *lp = netdev_priv(dev);
@@ -1253,7 +1253,7 @@ static int __init dec_lance_init(const int type, const int slot)
return 0;
err_out_free_dev:
- kfree(dev);
+ free_netdev(dev);
err_out:
return ret;
@@ -1299,6 +1299,7 @@ static void __exit dec_lance_cleanup(void)
while (root_lance_dev) {
struct net_device *dev = root_lance_dev;
struct lance_private *lp = netdev_priv(dev);
+
unregister_netdev(dev);
#ifdef CONFIG_TC
if (lp->slot >= 0)
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 4b6ddb70f921..402961e68c89 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -9,49 +9,10 @@
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
*/
-/*
- Rev Date Description
- ==========================================================================
- 0.01 2001/05/03 Created DL2000-based linux driver
- 0.02 2001/05/21 Added VLAN and hardware checksum support.
- 1.00 2001/06/26 Added jumbo frame support.
- 1.01 2001/08/21 Added two parameters, rx_coalesce and rx_timeout.
- 1.02 2001/10/08 Supported fiber media.
- Added flow control parameters.
- 1.03 2001/10/12 Changed the default media to 1000mbps_fd for
- the fiber devices.
- 1.04 2001/11/08 Fixed Tx stopped when tx very busy.
- 1.05 2001/11/22 Fixed Tx stopped when unidirectional tx busy.
- 1.06 2001/12/13 Fixed disconnect bug at 10Mbps mode.
- Fixed tx_full flag incorrect.
- Added tx_coalesce paramter.
- 1.07 2002/01/03 Fixed miscount of RX frame error.
- 1.08 2002/01/17 Fixed the multicast bug.
- 1.09 2002/03/07 Move rx-poll-now to re-fill loop.
- Added rio_timer() to watch rx buffers.
- 1.10 2002/04/16 Fixed miscount of carrier error.
- 1.11 2002/05/23 Added ISR schedule scheme
- Fixed miscount of rx frame error for DGE-550SX.
- Fixed VLAN bug.
- 1.12 2002/06/13 Lock tx_coalesce=1 on 10/100Mbps mode.
- 1.13 2002/08/13 1. Fix disconnection (many tx:carrier/rx:frame
- errs) with some mainboards.
- 2. Use definition "DRV_NAME" "DRV_VERSION"
- "DRV_RELDATE" for flexibility.
- 1.14 2002/08/14 Support ethtool.
- 1.15 2002/08/27 Changed the default media to Auto-Negotiation
- for the fiber devices.
- 1.16 2002/09/04 More power down time for fiber devices auto-
- negotiation.
- Fix disconnect bug after ifup and ifdown.
- 1.17 2002/10/03 Fix RMON statistics overflow.
- Always use I/O mapping to access eeprom,
- avoid system freezing with some chipsets.
-*/
#define DRV_NAME "D-Link DL2000-based linux driver"
-#define DRV_VERSION "v1.17b"
-#define DRV_RELDATE "2006/03/10"
+#define DRV_VERSION "v1.18"
+#define DRV_RELDATE "2006/06/27"
#include "dl2k.h"
#include <linux/dma-mapping.h>
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 1b758b707134..a860ebbbf815 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -339,6 +339,17 @@ static void dm9000_timeout(struct net_device *dev)
spin_unlock_irqrestore(&db->lock,flags);
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ *Used by netconsole
+ */
+static void dm9000_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ dm9000_interrupt(dev->irq,dev,NULL);
+ enable_irq(dev->irq);
+}
+#endif
/* dm9000_release_board
*
@@ -366,8 +377,8 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db)
kfree(db->data_req);
}
- if (db->addr_res != NULL) {
- release_resource(db->addr_res);
+ if (db->addr_req != NULL) {
+ release_resource(db->addr_req);
kfree(db->addr_req);
}
}
@@ -538,6 +549,9 @@ dm9000_probe(struct platform_device *pdev)
ndev->stop = &dm9000_stop;
ndev->get_stats = &dm9000_get_stats;
ndev->set_multicast_list = &dm9000_hash_table;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ ndev->poll_controller = &dm9000_poll_controller;
+#endif
#ifdef DM9000_PROGRAM_EEPROM
program_eeprom(db);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 36d511729f71..2146cf74425e 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -132,6 +132,7 @@ static int __init dummy_init_module(void)
for (i = 0; i < numdummies && !err; i++)
err = dummy_init_one(i);
if (err) {
+ i--;
while (--i >= 0)
dummy_free_one(i);
}
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 91ef5f2fd768..ce850f1078b5 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -173,8 +173,11 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static int debug = 3;
+static int eeprom_bad_csum_allow = 0;
module_param(debug, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
#define DPRINTK(nlevel, klevel, fmt, args...) \
(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
@@ -756,7 +759,8 @@ static int e100_eeprom_load(struct nic *nic)
checksum = le16_to_cpu(0xBABA - checksum);
if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
- return -EAGAIN;
+ if (!eeprom_bad_csum_allow)
+ return -EAGAIN;
}
return 0;
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 3042d33e2d4d..d304297c496c 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -68,7 +68,6 @@
#ifdef NETIF_F_TSO
#include <net/checksum.h>
#endif
-#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
@@ -111,6 +110,9 @@ struct e1000_adapter;
#define E1000_MIN_RXD 80
#define E1000_MAX_82544_RXD 4096
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */
@@ -143,6 +145,7 @@ struct e1000_adapter;
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_ICH8_APME 0x0004
#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
@@ -254,7 +257,6 @@ struct e1000_adapter {
spinlock_t tx_queue_lock;
#endif
atomic_t irq_sem;
- struct work_struct watchdog_task;
struct work_struct reset_task;
uint8_t fc_autoneg;
@@ -339,8 +341,14 @@ struct e1000_adapter {
#ifdef NETIF_F_TSO
boolean_t tso_force;
#endif
+ boolean_t smart_power_down; /* phy smart power down */
+ unsigned long flags;
};
+enum e1000_state_t {
+ __E1000_DRIVER_TESTING,
+ __E1000_RESETTING,
+};
/* e1000_main.c */
extern char e1000_driver_name[];
@@ -348,6 +356,7 @@ extern char e1000_driver_version[];
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d19664891768..88a82ba88f57 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -109,7 +109,8 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
SUPPORTED_TP);
-
+ if (hw->phy_type == e1000_phy_ife)
+ ecmd->supported &= ~SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
@@ -203,11 +204,9 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
/* reset the link */
- if (netif_running(adapter->netdev)) {
- e1000_down(adapter);
- e1000_reset(adapter);
- e1000_up(adapter);
- } else
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
e1000_reset(adapter);
return 0;
@@ -254,10 +253,9 @@ e1000_set_pauseparam(struct net_device *netdev,
hw->original_fc = hw->fc;
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
- if (netif_running(adapter->netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- } else
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
e1000_reset(adapter);
} else
return ((hw->media_type == e1000_media_type_fiber) ?
@@ -279,10 +277,9 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
struct e1000_adapter *adapter = netdev_priv(netdev);
adapter->rx_csum = data;
- if (netif_running(netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- } else
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+ else
e1000_reset(adapter);
return 0;
}
@@ -577,6 +574,7 @@ e1000_get_drvinfo(struct net_device *netdev,
case e1000_82572:
case e1000_82573:
case e1000_80003es2lan:
+ case e1000_ich8lan:
sprintf(firmware_version, "%d.%d-%d",
(eeprom_data & 0xF000) >> 12,
(eeprom_data & 0x0FF0) >> 4,
@@ -631,6 +629,9 @@ e1000_set_ringparam(struct net_device *netdev,
tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
if (netif_running(adapter->netdev))
e1000_down(adapter);
@@ -691,9 +692,11 @@ e1000_set_ringparam(struct net_device *netdev,
adapter->rx_ring = rx_new;
adapter->tx_ring = tx_new;
if ((err = e1000_up(adapter)))
- return err;
+ goto err_setup;
}
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+
return 0;
err_setup_tx:
e1000_free_all_rx_resources(adapter);
@@ -701,6 +704,8 @@ err_setup_rx:
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
e1000_up(adapter);
+err_setup:
+ clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
}
@@ -754,6 +759,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
toggle = 0x7FFFF3FF;
break;
case e1000_82573:
+ case e1000_ich8lan:
toggle = 0x7FFFF033;
break;
default:
@@ -773,11 +779,12 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
}
/* restore previous status */
E1000_WRITE_REG(&adapter->hw, STATUS, before);
-
- REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+ if (adapter->hw.mac_type != e1000_ich8lan) {
+ REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+ }
REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
@@ -790,20 +797,22 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
- REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
+ before = (adapter->hw.mac_type == e1000_ich8lan ?
+ 0x06C3B33E : 0x06DFB3FE);
+ REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
if (adapter->hw.mac_type >= e1000_82543) {
- REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
+ REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
- REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+ if (adapter->hw.mac_type != e1000_ich8lan)
+ REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
-
- for (i = 0; i < E1000_RAR_ENTRIES; i++) {
- REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
- 0xFFFFFFFF);
+ value = (adapter->hw.mac_type == e1000_ich8lan ?
+ E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+ for (i = 0; i < value; i++) {
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
0xFFFFFFFF);
}
@@ -817,7 +826,9 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
}
- for (i = 0; i < E1000_MC_TBL_SIZE; i++)
+ value = (adapter->hw.mac_type == e1000_ich8lan ?
+ E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
+ for (i = 0; i < value; i++)
REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
*data = 0;
@@ -889,6 +900,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* Test each interrupt */
for (; i < 10; i++) {
+ if (adapter->hw.mac_type == e1000_ich8lan && i == 8)
+ continue;
/* Interrupt to test */
mask = 1 << i;
@@ -1246,18 +1259,33 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
} else if (adapter->hw.phy_type == e1000_phy_gg82563) {
e1000_write_phy_reg(&adapter->hw,
GG82563_PHY_KMRN_MODE_CTRL,
- 0x1CE);
+ 0x1CC);
}
- /* force 1000, set loopback */
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
- /* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
- ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
- ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
- E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
- E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ if (adapter->hw.phy_type == e1000_phy_ife) {
+ /* force 100, set loopback */
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x6100);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+ } else {
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+ }
if (adapter->hw.media_type == e1000_media_type_copper &&
adapter->hw.phy_type == e1000_phy_m88) {
@@ -1317,6 +1345,7 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
case e1000_82572:
case e1000_82573:
case e1000_80003es2lan:
+ case e1000_ich8lan:
return e1000_integrated_phy_loopback(adapter);
break;
@@ -1568,6 +1597,7 @@ e1000_diag_test(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
boolean_t if_running = netif_running(netdev);
+ set_bit(__E1000_DRIVER_TESTING, &adapter->flags);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -1582,7 +1612,8 @@ e1000_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
if (if_running)
- e1000_down(adapter);
+ /* indicate we're in test mode */
+ dev_close(netdev);
else
e1000_reset(adapter);
@@ -1607,8 +1638,9 @@ e1000_diag_test(struct net_device *netdev,
adapter->hw.autoneg = autoneg;
e1000_reset(adapter);
+ clear_bit(__E1000_DRIVER_TESTING, &adapter->flags);
if (if_running)
- e1000_up(adapter);
+ dev_open(netdev);
} else {
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
@@ -1619,6 +1651,8 @@ e1000_diag_test(struct net_device *netdev,
data[1] = 0;
data[2] = 0;
data[3] = 0;
+
+ clear_bit(__E1000_DRIVER_TESTING, &adapter->flags);
}
msleep_interruptible(4 * 1000);
}
@@ -1778,21 +1812,18 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
mod_timer(&adapter->blink_timer, jiffies);
msleep_interruptible(data * 1000);
del_timer_sync(&adapter->blink_timer);
- } else if (adapter->hw.mac_type < e1000_82573) {
- E1000_WRITE_REG(&adapter->hw, LEDCTL,
- (E1000_LEDCTL_LED2_BLINK_RATE |
- E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
- (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
+ } else if (adapter->hw.phy_type == e1000_phy_ife) {
+ if (!adapter->blink_timer.function) {
+ init_timer(&adapter->blink_timer);
+ adapter->blink_timer.function = e1000_led_blink_callback;
+ adapter->blink_timer.data = (unsigned long) adapter;
+ }
+ mod_timer(&adapter->blink_timer, jiffies);
msleep_interruptible(data * 1000);
+ del_timer_sync(&adapter->blink_timer);
+ e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
} else {
- E1000_WRITE_REG(&adapter->hw, LEDCTL,
- (E1000_LEDCTL_LED2_BLINK_RATE |
- E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
- (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
+ e1000_blink_led_start(&adapter->hw);
msleep_interruptible(data * 1000);
}
@@ -1807,10 +1838,8 @@ static int
e1000_nway_reset(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- }
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
return 0;
}
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 3959039b16ec..b3b919116e0f 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -101,9 +101,37 @@ static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset,
#define E1000_WRITE_REG_IO(a, reg, val) \
e1000_write_reg_io((a), E1000_##reg, val)
-static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw);
+static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
+ uint16_t duplex);
static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw,
+ uint32_t segment);
+static int32_t e1000_get_software_flag(struct e1000_hw *hw);
+static int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
+static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+ uint16_t words, uint16_t *data);
+static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
+ uint8_t* data);
+static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
+ uint16_t *data);
+static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr,
+ uint16_t *data);
+static void e1000_release_software_flag(struct e1000_hw *hw);
+static void e1000_release_software_semaphore(struct e1000_hw *hw);
+static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw,
+ uint32_t no_snoop);
+static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw,
+ uint32_t index, uint8_t byte);
+static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+ uint16_t words, uint16_t *data);
+static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
+ uint8_t data);
+static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr,
+ uint16_t data);
+
/* IGP cable length table */
static const
uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
@@ -156,6 +184,14 @@ e1000_set_phy_type(struct e1000_hw *hw)
hw->phy_type = e1000_phy_igp;
break;
}
+ case IGP03E1000_E_PHY_ID:
+ hw->phy_type = e1000_phy_igp_3;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ hw->phy_type = e1000_phy_ife;
+ break;
case GG82563_E_PHY_ID:
if (hw->mac_type == e1000_80003es2lan) {
hw->phy_type = e1000_phy_gg82563;
@@ -332,6 +368,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
break;
case E1000_DEV_ID_82541EI:
case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER_LOM:
hw->mac_type = e1000_82541;
break;
case E1000_DEV_ID_82541ER:
@@ -341,6 +378,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
hw->mac_type = e1000_82541_rev_2;
break;
case E1000_DEV_ID_82547EI:
+ case E1000_DEV_ID_82547EI_MOBILE:
hw->mac_type = e1000_82547;
break;
case E1000_DEV_ID_82547GI:
@@ -354,6 +392,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82572EI_COPPER:
case E1000_DEV_ID_82572EI_FIBER:
case E1000_DEV_ID_82572EI_SERDES:
+ case E1000_DEV_ID_82572EI:
hw->mac_type = e1000_82572;
break;
case E1000_DEV_ID_82573E:
@@ -361,16 +400,29 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82573L:
hw->mac_type = e1000_82573;
break;
+ case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
hw->mac_type = e1000_80003es2lan;
break;
+ case E1000_DEV_ID_ICH8_IGP_M_AMT:
+ case E1000_DEV_ID_ICH8_IGP_AMT:
+ case E1000_DEV_ID_ICH8_IGP_C:
+ case E1000_DEV_ID_ICH8_IFE:
+ case E1000_DEV_ID_ICH8_IGP_M:
+ hw->mac_type = e1000_ich8lan;
+ break;
default:
/* Should never have loaded on this device */
return -E1000_ERR_MAC_TYPE;
}
switch(hw->mac_type) {
+ case e1000_ich8lan:
+ hw->swfwhw_semaphore_present = TRUE;
+ hw->asf_firmware_present = TRUE;
+ break;
case e1000_80003es2lan:
hw->swfw_sync_present = TRUE;
/* fall through */
@@ -423,6 +475,7 @@ e1000_set_media_type(struct e1000_hw *hw)
case e1000_82542_rev2_1:
hw->media_type = e1000_media_type_fiber;
break;
+ case e1000_ich8lan:
case e1000_82573:
/* The STATUS_TBIMODE bit is reserved or reused for this
* device.
@@ -527,6 +580,14 @@ e1000_reset_hw(struct e1000_hw *hw)
} while(timeout);
}
+ /* Workaround for ICH8 bit corruption issue in FIFO memory */
+ if (hw->mac_type == e1000_ich8lan) {
+ /* Set Tx and Rx buffer allocation to 8k apiece. */
+ E1000_WRITE_REG(hw, PBA, E1000_PBA_8K);
+ /* Set Packet Buffer Size to 16k. */
+ E1000_WRITE_REG(hw, PBS, E1000_PBS_16K);
+ }
+
/* Issue a global reset to the MAC. This will reset the chip's
* transmit, receive, DMA, and link units. It will not affect
* the current PCI configuration. The global reset bit is self-
@@ -550,6 +611,20 @@ e1000_reset_hw(struct e1000_hw *hw)
/* Reset is performed on a shadow of the control register */
E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST));
break;
+ case e1000_ich8lan:
+ if (!hw->phy_reset_disable &&
+ e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
+ /* e1000_ich8lan PHY HW reset requires MAC CORE reset
+ * at the same time to make sure the interface between
+ * MAC and the external PHY is reset.
+ */
+ ctrl |= E1000_CTRL_PHY_RST;
+ }
+
+ e1000_get_software_flag(hw);
+ E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ msec_delay(5);
+ break;
default:
E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
break;
@@ -591,6 +666,7 @@ e1000_reset_hw(struct e1000_hw *hw)
/* fall through */
case e1000_82571:
case e1000_82572:
+ case e1000_ich8lan:
case e1000_80003es2lan:
ret_val = e1000_get_auto_rd_done(hw);
if(ret_val)
@@ -633,6 +709,12 @@ e1000_reset_hw(struct e1000_hw *hw)
e1000_pci_set_mwi(hw);
}
+ if (hw->mac_type == e1000_ich8lan) {
+ uint32_t kab = E1000_READ_REG(hw, KABGTXD);
+ kab |= E1000_KABGTXD_BGSQLBIAS;
+ E1000_WRITE_REG(hw, KABGTXD, kab);
+ }
+
return E1000_SUCCESS;
}
@@ -675,9 +757,12 @@ e1000_init_hw(struct e1000_hw *hw)
/* Disabling VLAN filtering. */
DEBUGOUT("Initializing the IEEE VLAN\n");
- if (hw->mac_type < e1000_82545_rev_3)
- E1000_WRITE_REG(hw, VET, 0);
- e1000_clear_vfta(hw);
+ /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
+ if (hw->mac_type != e1000_ich8lan) {
+ if (hw->mac_type < e1000_82545_rev_3)
+ E1000_WRITE_REG(hw, VET, 0);
+ e1000_clear_vfta(hw);
+ }
/* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
if(hw->mac_type == e1000_82542_rev2_0) {
@@ -705,8 +790,14 @@ e1000_init_hw(struct e1000_hw *hw)
/* Zero out the Multicast HASH table */
DEBUGOUT("Zeroing the MTA\n");
mta_size = E1000_MC_TBL_SIZE;
- for(i = 0; i < mta_size; i++)
+ if (hw->mac_type == e1000_ich8lan)
+ mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
+ for(i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ /* use write flush to prevent Memory Write Block (MWB) from
+ * occurring when accessing our register space */
+ E1000_WRITE_FLUSH(hw);
+ }
/* Set the PCI priority bit correctly in the CTRL register. This
* determines if the adapter gives priority to receives, or if it
@@ -744,6 +835,10 @@ e1000_init_hw(struct e1000_hw *hw)
break;
}
+ /* More time needed for PHY to initialize */
+ if (hw->mac_type == e1000_ich8lan)
+ msec_delay(15);
+
/* Call a subroutine to configure the link and setup flow control. */
ret_val = e1000_setup_link(hw);
@@ -757,6 +852,7 @@ e1000_init_hw(struct e1000_hw *hw)
case e1000_82571:
case e1000_82572:
case e1000_82573:
+ case e1000_ich8lan:
case e1000_80003es2lan:
ctrl |= E1000_TXDCTL_COUNT_DESC;
break;
@@ -795,6 +891,7 @@ e1000_init_hw(struct e1000_hw *hw)
/* Fall through */
case e1000_82571:
case e1000_82572:
+ case e1000_ich8lan:
ctrl = E1000_READ_REG(hw, TXDCTL1);
ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
if(hw->mac_type >= e1000_82571)
@@ -818,6 +915,11 @@ e1000_init_hw(struct e1000_hw *hw)
*/
e1000_clear_hw_cntrs(hw);
+ /* ICH8 No-snoop bits are opposite polarity.
+ * Set to snoop by default after reset. */
+ if (hw->mac_type == e1000_ich8lan)
+ e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
+
if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
@@ -905,6 +1007,7 @@ e1000_setup_link(struct e1000_hw *hw)
*/
if (hw->fc == e1000_fc_default) {
switch (hw->mac_type) {
+ case e1000_ich8lan:
case e1000_82573:
hw->fc = e1000_fc_full;
break;
@@ -971,9 +1074,12 @@ e1000_setup_link(struct e1000_hw *hw)
*/
DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
- E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
- E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
- E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
+ /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
+ if (hw->mac_type != e1000_ich8lan) {
+ E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
+ E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
+ }
E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time);
@@ -1237,12 +1343,13 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* Wait 10ms for MAC to configure PHY from eeprom settings */
msec_delay(15);
-
+ if (hw->mac_type != e1000_ich8lan) {
/* Configure activity LED after PHY reset */
led_ctrl = E1000_READ_REG(hw, LEDCTL);
led_ctrl &= IGP_ACTIVITY_LED_MASK;
led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+ }
/* disable lplu d3 during driver init */
ret_val = e1000_set_d3_lplu_state(hw, FALSE);
@@ -1478,8 +1585,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Enable Pass False Carrier on the PHY */
- phy_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
phy_data);
@@ -1561,28 +1667,40 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
if(hw->disable_polarity_correction == 1)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if(ret_val)
- return ret_val;
-
- /* Force TX_CLK in the Extended PHY Specific Control Register
- * to 25MHz clock.
- */
- ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
- if(ret_val)
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
return ret_val;
- phy_data |= M88E1000_EPSCR_TX_CLK_25;
-
if (hw->phy_revision < M88E1011_I_REV_4) {
- /* Configure Master and Slave downshift values */
- phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((hw->phy_revision == E1000_REVISION_2) &&
+ (hw->phy_id == M88E1111_I_PHY_ID)) {
+ /* Vidalia Phy, set the downshift counter to 5x */
+ phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
- phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
- ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
- if(ret_val)
- return ret_val;
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
}
/* SW Reset the PHY so all changes take effect */
@@ -1620,6 +1738,10 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
if(hw->autoneg_advertised == 0)
hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ /* IFE phy only supports 10/100 */
+ if (hw->phy_type == e1000_phy_ife)
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
ret_val = e1000_phy_setup_autoneg(hw);
if(ret_val) {
@@ -1717,6 +1839,26 @@ e1000_setup_copper_link(struct e1000_hw *hw)
DEBUGFUNC("e1000_setup_copper_link");
+ switch (hw->mac_type) {
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ /* Set the mac to wait the maximum time between each
+ * iteration and increase the max iterations when
+ * polling the phy; this fixes erroneous timeouts at 10Mbps. */
+ ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+ if (ret_val)
+ return ret_val;
+ default:
+ break;
+ }
+
/* Check if it is a valid PHY and set PHY mode if necessary. */
ret_val = e1000_copper_link_preconfig(hw);
if(ret_val)
@@ -1724,10 +1866,8 @@ e1000_setup_copper_link(struct e1000_hw *hw)
switch (hw->mac_type) {
case e1000_80003es2lan:
- ret_val = e1000_read_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
- &reg_data);
- if (ret_val)
- return ret_val;
+ /* Kumeran registers are written-only */
+ reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
reg_data);
@@ -1739,6 +1879,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
}
if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) {
ret_val = e1000_copper_link_igp_setup(hw);
if(ret_val)
@@ -1803,7 +1944,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
* hw - Struct containing variables accessed by shared code
******************************************************************************/
static int32_t
-e1000_configure_kmrn_for_10_100(struct e1000_hw *hw)
+e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex)
{
int32_t ret_val = E1000_SUCCESS;
uint32_t tipg;
@@ -1823,6 +1964,18 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw)
tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
E1000_WRITE_REG(hw, TIPG, tipg);
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+ if (ret_val)
+ return ret_val;
+
+ if (duplex == HALF_DUPLEX)
+ reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ else
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
return ret_val;
}
@@ -1847,6 +2000,14 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
E1000_WRITE_REG(hw, TIPG, tipg);
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+ if (ret_val)
+ return ret_val;
+
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
return ret_val;
}
@@ -1869,10 +2030,13 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
if(ret_val)
return ret_val;
- /* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
- if(ret_val)
- return ret_val;
+ if (hw->phy_type != e1000_phy_ife) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ } else
+ mii_1000t_ctrl_reg = 0;
/* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
@@ -1923,6 +2087,9 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
if(hw->autoneg_advertised & ADVERTISE_1000_FULL) {
DEBUGOUT("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ if (hw->phy_type == e1000_phy_ife) {
+ DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
+ }
}
/* Check for a software override of the flow control settings, and
@@ -1984,9 +2151,11 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
- ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
- if(ret_val)
- return ret_val;
+ if (hw->phy_type != e1000_phy_ife) {
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
return E1000_SUCCESS;
}
@@ -2089,6 +2258,18 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Need to reset the PHY or these changes will be ignored */
mii_ctrl_reg |= MII_CR_RESET;
+ /* Disable MDI-X support for 10/100 */
+ } else if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IFE_PMC_AUTO_MDIX;
+ phy_data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
} else {
/* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed or duplex are forced.
@@ -2721,8 +2902,12 @@ e1000_check_for_link(struct e1000_hw *hw)
*/
if(hw->tbi_compatibility_en) {
uint16_t speed, duplex;
- e1000_get_speed_and_duplex(hw, &speed, &duplex);
- if(speed != SPEED_1000) {
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
/* If link speed is not set to gigabit speed, we do not need
* to enable TBI compatibility.
*/
@@ -2889,7 +3074,13 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
if (*speed == SPEED_1000)
ret_val = e1000_configure_kmrn_for_1000(hw);
else
- ret_val = e1000_configure_kmrn_for_10_100(hw);
+ ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
+ ret_val = e1000_kumeran_lock_loss_workaround(hw);
if (ret_val)
return ret_val;
}
@@ -3069,7 +3260,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
return data;
}
-int32_t
+static int32_t
e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
{
uint32_t swfw_sync = 0;
@@ -3079,6 +3270,9 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
DEBUGFUNC("e1000_swfw_sync_acquire");
+ if (hw->swfwhw_semaphore_present)
+ return e1000_get_software_flag(hw);
+
if (!hw->swfw_sync_present)
return e1000_get_hw_eeprom_semaphore(hw);
@@ -3110,7 +3304,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
return E1000_SUCCESS;
}
-void
+static void
e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
{
uint32_t swfw_sync;
@@ -3118,6 +3312,11 @@ e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
DEBUGFUNC("e1000_swfw_sync_release");
+ if (hw->swfwhw_semaphore_present) {
+ e1000_release_software_flag(hw);
+ return;
+ }
+
if (!hw->swfw_sync_present) {
e1000_put_hw_eeprom_semaphore(hw);
return;
@@ -3160,7 +3359,8 @@ e1000_read_phy_reg(struct e1000_hw *hw,
if (e1000_swfw_sync_acquire(hw, swfw))
return -E1000_ERR_SWFW_SYNC;
- if((hw->phy_type == e1000_phy_igp ||
+ if ((hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3299,7 +3499,8 @@ e1000_write_phy_reg(struct e1000_hw *hw,
if (e1000_swfw_sync_acquire(hw, swfw))
return -E1000_ERR_SWFW_SYNC;
- if((hw->phy_type == e1000_phy_igp ||
+ if ((hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3401,7 +3602,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
return E1000_SUCCESS;
}
-int32_t
+static int32_t
e1000_read_kmrn_reg(struct e1000_hw *hw,
uint32_t reg_addr,
uint16_t *data)
@@ -3434,7 +3635,7 @@ e1000_read_kmrn_reg(struct e1000_hw *hw,
return E1000_SUCCESS;
}
-int32_t
+static int32_t
e1000_write_kmrn_reg(struct e1000_hw *hw,
uint32_t reg_addr,
uint16_t data)
@@ -3514,7 +3715,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
E1000_WRITE_FLUSH(hw);
if (hw->mac_type >= e1000_82571)
- msec_delay(10);
+ msec_delay_irq(10);
e1000_swfw_sync_release(hw, swfw);
} else {
/* Read the Extended Device Control Register, assert the PHY_RESET_DIR
@@ -3544,6 +3745,12 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
ret_val = e1000_get_phy_cfg_done(hw);
e1000_release_software_semaphore(hw);
+ if ((hw->mac_type == e1000_ich8lan) &&
+ (hw->phy_type == e1000_phy_igp_3)) {
+ ret_val = e1000_init_lcd_from_nvm(hw);
+ if (ret_val)
+ return ret_val;
+ }
return ret_val;
}
@@ -3572,9 +3779,11 @@ e1000_phy_reset(struct e1000_hw *hw)
case e1000_82541_rev_2:
case e1000_82571:
case e1000_82572:
+ case e1000_ich8lan:
ret_val = e1000_phy_hw_reset(hw);
if(ret_val)
return ret_val;
+
break;
default:
ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
@@ -3597,11 +3806,120 @@ e1000_phy_reset(struct e1000_hw *hw)
}
/******************************************************************************
+* Work-around for 82566 power-down: on D3 entry-
+* 1) disable gigabit link
+* 2) write VR power-down enable
+* 3) read it back
+* if successful continue, else issue LCD reset and repeat
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+void
+e1000_phy_powerdown_workaround(struct e1000_hw *hw)
+{
+ int32_t reg;
+ uint16_t phy_data;
+ int32_t retry = 0;
+
+ DEBUGFUNC("e1000_phy_powerdown_workaround");
+
+ if (hw->phy_type != e1000_phy_igp_3)
+ return;
+
+ do {
+ /* Disable link */
+ reg = E1000_READ_REG(hw, PHY_CTRL);
+ E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+ /* Write VR power-down enable */
+ e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+ e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data |
+ IGP3_VR_CTRL_MODE_SHUT);
+
+ /* Read it back and test */
+ e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+ if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry)
+ break;
+
+ /* Issue PHY reset and repeat at most one more time */
+ reg = E1000_READ_REG(hw, CTRL);
+ E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST);
+ retry++;
+ } while (retry);
+
+ return;
+
+}
+
+/******************************************************************************
+* Work-around for 82566 Kumeran PCS lock loss:
+* On link status change (i.e. PCI reset, speed change) and link is up and
+* speed is gigabit-
+* 0) if workaround is optionally disabled do nothing
+* 1) wait 1ms for Kumeran link to come up
+* 2) check Kumeran Diagnostic register PCS lock loss bit
+* 3) if not set the link is locked (all is good), otherwise...
+* 4) reset the PHY
+* 5) repeat up to 10 times
+* Note: this is only called for IGP3 copper when speed is 1gb.
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ int32_t reg;
+ int32_t cnt;
+ uint16_t phy_data;
+
+ if (hw->kmrn_lock_loss_workaround_disabled)
+ return E1000_SUCCESS;
+
+ /* Make sure link is up before proceeding. If not, just return.
+ * Attempting this while link is negotiating fouls up link
+ * stability */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ for (cnt = 0; cnt < 10; cnt++) {
+ /* read once to clear */
+ ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+ if (ret_val)
+ return ret_val;
+ /* and again to get new status */
+ ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* check for PCS lock */
+ if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+ return E1000_SUCCESS;
+
+ /* Issue PHY reset */
+ e1000_phy_hw_reset(hw);
+ msec_delay_irq(5);
+ }
+ /* Disable GigE link negotiation */
+ reg = E1000_READ_REG(hw, PHY_CTRL);
+ E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+ /* unable to acquire PCS lock */
+ return E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
* Probes the expected PHY address for known PHY IDs
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-static int32_t
+int32_t
e1000_detect_gig_phy(struct e1000_hw *hw)
{
int32_t phy_init_status, ret_val;
@@ -3613,8 +3931,8 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
/* The 82571 firmware may still be configuring the PHY. In this
* case, we cannot access the PHY until the configuration is done. So
* we explicitly set the PHY values. */
- if(hw->mac_type == e1000_82571 ||
- hw->mac_type == e1000_82572) {
+ if (hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572) {
hw->phy_id = IGP01E1000_I_PHY_ID;
hw->phy_type = e1000_phy_igp_2;
return E1000_SUCCESS;
@@ -3631,7 +3949,7 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
/* Read the PHY ID Registers to identify which PHY is onboard. */
ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
- if(ret_val)
+ if (ret_val)
return ret_val;
hw->phy_id = (uint32_t) (phy_id_high << 16);
@@ -3669,6 +3987,12 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
case e1000_80003es2lan:
if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE;
break;
+ case e1000_ich8lan:
+ if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE;
+ if (hw->phy_id == IFE_E_PHY_ID) match = TRUE;
+ if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE;
+ if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE;
+ break;
default:
DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
return -E1000_ERR_CONFIG;
@@ -3784,6 +4108,53 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
}
/******************************************************************************
+* Get PHY information from various PHY registers for ife PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static int32_t
+e1000_phy_ife_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ int32_t ret_val;
+ uint16_t phy_data, polarity;
+
+ DEBUGFUNC("e1000_phy_ife_get_info");
+
+ phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_info->polarity_correction =
+ (phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
+ IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT;
+
+ if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Polarity is forced. */
+ polarity = (phy_data & IFE_PSC_FORCE_POLARITY) >>
+ IFE_PSC_FORCE_POLARITY_SHIFT;
+ }
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode =
+ (phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
+ IFE_PMC_MDIX_MODE_SHIFT;
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
* Get PHY information from various PHY registers for m88 PHY only.
*
* hw - Struct containing variables accessed by shared code
@@ -3898,9 +4269,12 @@ e1000_phy_get_info(struct e1000_hw *hw,
return -E1000_ERR_CONFIG;
}
- if(hw->phy_type == e1000_phy_igp ||
+ if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2)
return e1000_phy_igp_get_info(hw, phy_info);
+ else if (hw->phy_type == e1000_phy_ife)
+ return e1000_phy_ife_get_info(hw, phy_info);
else
return e1000_phy_m88_get_info(hw, phy_info);
}
@@ -4049,6 +4423,35 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
eeprom->use_eerd = TRUE;
eeprom->use_eewr = FALSE;
break;
+ case e1000_ich8lan:
+ {
+ int32_t i = 0;
+ uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG);
+
+ eeprom->type = e1000_eeprom_ich8;
+ eeprom->use_eerd = FALSE;
+ eeprom->use_eewr = FALSE;
+ eeprom->word_size = E1000_SHADOW_RAM_WORDS;
+
+ /* Zero the shadow RAM structure. But don't load it from NVM
+ * so as to save time for driver init */
+ if (hw->eeprom_shadow_ram != NULL) {
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ hw->eeprom_shadow_ram[i].modified = FALSE;
+ hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+ }
+ }
+
+ hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) *
+ ICH8_FLASH_SECTOR_SIZE;
+
+ hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1;
+ hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK);
+ hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE;
+ hw->flash_bank_size /= 2 * sizeof(uint16_t);
+
+ break;
+ }
default:
break;
}
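
The flash geometry arithmetic in the e1000_ich8lan case above is easier to follow with concrete numbers. Below is a minimal standalone sketch, not driver code: the GFPREG value, the 4 KB sector size and the 0x1FFF base/limit mask are assumptions chosen for illustration.

#include <stdint.h>
#include <stdio.h>

#define ICH8_FLASH_SECTOR_SIZE	4096	/* assumed 4 KB flash sectors */
#define ICH8_GFPREG_BASE_MASK	0x1FFF	/* assumed 13-bit base/limit fields */

int main(void)
{
	/* hypothetical GFPREG: limit field 0x0020, base field 0x0001 */
	uint32_t gfpreg = 0x00200001;
	uint32_t base  = gfpreg & ICH8_GFPREG_BASE_MASK;
	uint32_t limit = (gfpreg >> 16) & ICH8_GFPREG_BASE_MASK;

	/* same steps as the ich8lan case: byte offset of the region, then
	 * the span in sectors, split into two banks and counted in words */
	uint32_t flash_base_addr = base * ICH8_FLASH_SECTOR_SIZE;
	uint32_t flash_bank_size = (limit + 1 - base) * ICH8_FLASH_SECTOR_SIZE /
				   (2 * sizeof(uint16_t));

	printf("flash_base_addr=0x%x flash_bank_size=%u words\n",
	       (unsigned)flash_base_addr, (unsigned)flash_bank_size);
	return 0;
}

With these assumed values the region spans 32 sectors, so each of the two NVM banks comes out as 32768 sixteen-bit words.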
@@ -4469,7 +4872,10 @@ e1000_read_eeprom(struct e1000_hw *hw,
return ret_val;
}
- if(eeprom->type == e1000_eeprom_spi) {
+ if (eeprom->type == e1000_eeprom_ich8)
+ return e1000_read_eeprom_ich8(hw, offset, words, data);
+
+ if (eeprom->type == e1000_eeprom_spi) {
uint16_t word_in;
uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;
@@ -4636,7 +5042,10 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
- if(hw->mac_type == e1000_82573) {
+ if (hw->mac_type == e1000_ich8lan)
+ return FALSE;
+
+ if (hw->mac_type == e1000_82573) {
eecd = E1000_READ_REG(hw, EECD);
/* Isolate bits 15 & 16 */
@@ -4686,8 +5095,22 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
}
}
- for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
- if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ if (hw->mac_type == e1000_ich8lan) {
+ /* Drivers must allocate the shadow ram structure for the
+ * EEPROM checksum to be updated. Otherwise, this bit as well
+ * as the checksum must both be set correctly for this
+ * validation to pass.
+ */
+ e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
+ if ((eeprom_data & 0x40) == 0) {
+ eeprom_data |= 0x40;
+ e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
+ e1000_update_eeprom_checksum(hw);
+ }
+ }
+
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
DEBUGOUT("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
@@ -4713,6 +5136,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
int32_t
e1000_update_eeprom_checksum(struct e1000_hw *hw)
{
+ uint32_t ctrl_ext;
uint16_t checksum = 0;
uint16_t i, eeprom_data;
@@ -4731,6 +5155,14 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
return -E1000_ERR_EEPROM;
} else if (hw->eeprom.type == e1000_eeprom_flash) {
e1000_commit_shadow_ram(hw);
+ } else if (hw->eeprom.type == e1000_eeprom_ich8) {
+ e1000_commit_shadow_ram(hw);
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset. */
+ ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ msec_delay(10);
}
return E1000_SUCCESS;
}
@@ -4770,6 +5202,9 @@ e1000_write_eeprom(struct e1000_hw *hw,
if(eeprom->use_eewr == TRUE)
return e1000_write_eeprom_eewr(hw, offset, words, data);
+ if (eeprom->type == e1000_eeprom_ich8)
+ return e1000_write_eeprom_ich8(hw, offset, words, data);
+
/* Prepare the EEPROM for writing */
if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
return -E1000_ERR_EEPROM;
@@ -4957,11 +5392,17 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
uint32_t flop = 0;
uint32_t i = 0;
int32_t error = E1000_SUCCESS;
-
- /* The flop register will be used to determine if flash type is STM */
- flop = E1000_READ_REG(hw, FLOP);
+ uint32_t old_bank_offset = 0;
+ uint32_t new_bank_offset = 0;
+ uint32_t sector_retries = 0;
+ uint8_t low_byte = 0;
+ uint8_t high_byte = 0;
+ uint8_t temp_byte = 0;
+ boolean_t sector_write_failed = FALSE;
if (hw->mac_type == e1000_82573) {
+ /* The flop register will be used to determine if flash type is STM */
+ flop = E1000_READ_REG(hw, FLOP);
for (i=0; i < attempts; i++) {
eecd = E1000_READ_REG(hw, EECD);
if ((eecd & E1000_EECD_FLUPD) == 0) {
@@ -4995,6 +5436,106 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
}
}
+ if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written */
+ if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) {
+ new_bank_offset = hw->flash_bank_size * 2;
+ old_bank_offset = 0;
+ e1000_erase_ich8_4k_segment(hw, 1);
+ } else {
+ old_bank_offset = hw->flash_bank_size * 2;
+ new_bank_offset = 0;
+ e1000_erase_ich8_4k_segment(hw, 0);
+ }
+
+ do {
+ sector_write_failed = FALSE;
+ /* Loop for every byte in the shadow RAM,
+ * which is in units of words. */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ /* Determine whether to write the value stored
+ * in the other NVM bank or a modified value stored
+ * in the shadow RAM */
+ if (hw->eeprom_shadow_ram[i].modified == TRUE) {
+ low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word;
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+ &temp_byte);
+ udelay(100);
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset,
+ low_byte);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = TRUE;
+ high_byte =
+ (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+ &temp_byte);
+ udelay(100);
+ } else {
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+ &low_byte);
+ udelay(100);
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset, low_byte);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = TRUE;
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+ &high_byte);
+ }
+
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress */
+ if (i == E1000_ICH8_NVM_SIG_WORD)
+ high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte;
+
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset + 1, high_byte);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = TRUE;
+
+ if (sector_write_failed == FALSE) {
+ /* Clear the now not used entry in the cache */
+ hw->eeprom_shadow_ram[i].modified = FALSE;
+ hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+ }
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed. */
+ if (sector_write_failed == FALSE) {
+ /* Finally validate the new segment by setting bit 15:14
+ * to 10b in word 0x13 , this can be done without an
+ * erase as well since these bits are 11 to start with
+ * and we need to change bit 14 to 0b */
+ e1000_read_ich8_byte(hw,
+ E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+ &high_byte);
+ high_byte &= 0xBF;
+ error = e1000_verify_write_ich8_byte(hw,
+ E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+ high_byte);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = TRUE;
+
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high_byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's. We can write 1's to 0's without an erase */
+ error = e1000_verify_write_ich8_byte(hw,
+ E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset,
+ 0);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = TRUE;
+ }
+ } while (++sector_retries < 10 && sector_write_failed == TRUE);
+ }
+
return error;
}
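
The bank-swap logic above hinges on the property that flash writes can only clear bits: the signature word (0x13) reads 11b in bits 15:14 after an erase, is taken to 10b to validate the freshly written bank, and the old bank is invalidated by clearing its signature byte outright. A self-contained sketch of just that sequence, using an in-memory stand-in for the flash and hypothetical helper names rather than the driver's e1000_verify_write_ich8_byte():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NVM_SIG_WORD	0x13			/* signature word offset */

static uint8_t flash[2][8192];			/* two pretend 8 KB banks */

/* flash semantics: a write can clear bits but never set them */
static void flash_write_byte(uint8_t *bank, uint32_t index, uint8_t byte)
{
	bank[index] &= byte;
}

int main(void)
{
	uint32_t sig = NVM_SIG_WORD * 2 + 1;	/* high byte of word 0x13 */

	memset(flash, 0xFF, sizeof(flash));	/* erased: all bits set, 11b */

	/* validate the new bank (1): clear bit 14 only, 11b -> 10b */
	flash_write_byte(flash[1], sig, flash[1][sig] & 0xBF);
	/* invalidate the old bank (0): clear its signature byte entirely */
	flash_write_byte(flash[0], sig, 0);

	printf("old bank sig byte 0x%02x, new bank sig byte 0x%02x\n",
	       flash[0][sig], flash[1][sig]);
	return 0;
}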
@@ -5102,15 +5643,19 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
* the other port. */
if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
rar_num -= 1;
+ if (hw->mac_type == e1000_ich8lan)
+ rar_num = E1000_RAR_ENTRIES_ICH8LAN;
+
/* Zero out the other 15 receive addresses. */
DEBUGOUT("Clearing RAR[1-15]\n");
for(i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH(hw);
}
}
-#if 0
/******************************************************************************
* Updates the MAC's list of multicast addresses.
*
@@ -5125,6 +5670,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
* for the first 15 multicast addresses, and hashes the rest into the
* multicast table.
*****************************************************************************/
+#if 0
void
e1000_mc_addr_list_update(struct e1000_hw *hw,
uint8_t *mc_addr_list,
@@ -5145,6 +5691,8 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
/* Clear RAR[1-15] */
DEBUGOUT(" Clearing RAR[1-15]\n");
num_rar_entry = E1000_RAR_ENTRIES;
+ if (hw->mac_type == e1000_ich8lan)
+ num_rar_entry = E1000_RAR_ENTRIES_ICH8LAN;
/* Reserve a spot for the Locally Administered Address to work around
* an 82571 issue in which a reset on one port will reload the MAC on
* the other port. */
@@ -5153,14 +5701,19 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
for(i = rar_used_count; i < num_rar_entry; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH(hw);
}
/* Clear the MTA */
DEBUGOUT(" Clearing MTA\n");
num_mta_entry = E1000_NUM_MTA_REGISTERS;
+ if (hw->mac_type == e1000_ich8lan)
+ num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN;
for(i = 0; i < num_mta_entry; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ E1000_WRITE_FLUSH(hw);
}
/* Add the new addresses */
@@ -5217,24 +5770,46 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
* LSB MSB
*/
case 0:
- /* [47:36] i.e. 0x563 for above example address */
- hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [47:38] i.e. 0x158 for above example address */
+ hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2));
+ } else {
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+ }
break;
case 1:
- /* [46:35] i.e. 0xAC6 for above example address */
- hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [46:37] i.e. 0x2B1 for above example address */
+ hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3));
+ } else {
+ /* [46:35] i.e. 0xAC6 for above example address */
+ hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
+ }
break;
case 2:
- /* [45:34] i.e. 0x5D8 for above example address */
- hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [45:36] i.e. 0x163 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+ } else {
+ /* [45:34] i.e. 0x5D8 for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+ }
break;
case 3:
- /* [43:32] i.e. 0x634 for above example address */
- hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [43:34] i.e. 0x18D for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+ } else {
+ /* [43:32] i.e. 0x634 for above example address */
+ hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+ }
break;
}
hash_value &= 0xFFF;
+ if (hw->mac_type == e1000_ich8lan)
+ hash_value &= 0x3FF;
return hash_value;
}
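
To make the bit selections concrete, here is a minimal standalone sketch, not driver code, of the filter-type-0 hash for both register layouts. The multicast address is a hypothetical one, picked so the results line up with the 0x563 and 0x158 values quoted in the comments, and the register/bit split at the end mirrors what e1000_mta_set() does.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical multicast address, byte [0] first on the wire */
	uint8_t mc[6] = { 0x01, 0xaa, 0x00, 0x12, 0x34, 0x56 };

	/* default parts: bits [47:36] of the address, 12-bit hash */
	uint16_t hash = ((mc[4] >> 4) | ((uint16_t)mc[5] << 4)) & 0xFFF;
	/* ICH8: bits [47:38], only 10 bits because the MTA is 32 registers */
	uint16_t hash_ich8 = ((mc[4] >> 6) | ((uint16_t)mc[5] << 2)) & 0x3FF;

	printf("hash 0x%03x -> MTA reg %u, bit %u\n", (unsigned)hash,
	       (unsigned)((hash >> 5) & 0x7F), (unsigned)(hash & 0x1F));
	printf("ich8 hash 0x%03x -> MTA reg %u, bit %u\n", (unsigned)hash_ich8,
	       (unsigned)((hash_ich8 >> 5) & 0x1F), (unsigned)(hash_ich8 & 0x1F));
	return 0;
}

For this hypothetical address the default layout lands in MTA register 43, bit 3, while the ICH8 layout lands in register 10, bit 24.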
@@ -5262,6 +5837,8 @@ e1000_mta_set(struct e1000_hw *hw,
* register are determined by the lower 5 bits of the value.
*/
hash_reg = (hash_value >> 5) & 0x7F;
+ if (hw->mac_type == e1000_ich8lan)
+ hash_reg &= 0x1F;
hash_bit = hash_value & 0x1F;
mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
@@ -5275,9 +5852,12 @@ e1000_mta_set(struct e1000_hw *hw,
if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+ E1000_WRITE_FLUSH(hw);
} else {
E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH(hw);
}
}
@@ -5334,7 +5914,9 @@ e1000_rar_set(struct e1000_hw *hw,
}
E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ E1000_WRITE_FLUSH(hw);
}
/******************************************************************************
@@ -5351,12 +5933,18 @@ e1000_write_vfta(struct e1000_hw *hw,
{
uint32_t temp;
- if((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+ if (hw->mac_type == e1000_ich8lan)
+ return;
+
+ if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+ E1000_WRITE_FLUSH(hw);
} else {
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
}
}
@@ -5373,6 +5961,9 @@ e1000_clear_vfta(struct e1000_hw *hw)
uint32_t vfta_offset = 0;
uint32_t vfta_bit_in_reg = 0;
+ if (hw->mac_type == e1000_ich8lan)
+ return;
+
if (hw->mac_type == e1000_82573) {
if (hw->mng_cookie.vlan_id != 0) {
/* The VFTA is a 4096b bit-field, each identifying a single VLAN
@@ -5392,6 +5983,7 @@ e1000_clear_vfta(struct e1000_hw *hw)
* manageability unit */
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH(hw);
}
}
@@ -5421,9 +6013,18 @@ e1000_id_led_init(struct e1000_hw * hw)
DEBUGOUT("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
- if((eeprom_data== ID_LED_RESERVED_0000) ||
- (eeprom_data == ID_LED_RESERVED_FFFF)) eeprom_data = ID_LED_DEFAULT;
- for(i = 0; i < 4; i++) {
+
+ if ((hw->mac_type == e1000_82573) &&
+ (eeprom_data == ID_LED_RESERVED_82573))
+ eeprom_data = ID_LED_DEFAULT_82573;
+ else if ((eeprom_data == ID_LED_RESERVED_0000) ||
+ (eeprom_data == ID_LED_RESERVED_FFFF)) {
+ if (hw->mac_type == e1000_ich8lan)
+ eeprom_data = ID_LED_DEFAULT_ICH8LAN;
+ else
+ eeprom_data = ID_LED_DEFAULT;
+ }
+ for (i = 0; i < 4; i++) {
temp = (eeprom_data >> (i << 2)) & led_mask;
switch(temp) {
case ID_LED_ON1_DEF2:
@@ -5519,6 +6120,44 @@ e1000_setup_led(struct e1000_hw *hw)
}
/******************************************************************************
+ * Used on 82571 and later Si that has LED blink bits.
+ * Callers must use their own timer and should have already called
+ * e1000_id_led_init()
+ * Call e1000_cleanup_led() to stop blinking
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_blink_led_start(struct e1000_hw *hw)
+{
+ int16_t i;
+ uint32_t ledctl_blink = 0;
+
+ DEBUGFUNC("e1000_id_led_blink_on");
+
+ if (hw->mac_type < e1000_82571) {
+ /* Nothing to do */
+ return E1000_SUCCESS;
+ }
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ } else {
+ /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
+ ledctl_blink = hw->ledctl_mode2;
+ for (i=0; i < 4; i++)
+ if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
+ E1000_LEDCTL_MODE_LED_ON)
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
+ }
+
+ E1000_WRITE_REG(hw, LEDCTL, ledctl_blink);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
* Restores the saved state of the SW controllable LED.
*
* hw - Struct containing variables accessed by shared code
@@ -5548,6 +6187,10 @@ e1000_cleanup_led(struct e1000_hw *hw)
return ret_val;
/* Fall Through */
default:
+ if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+ break;
+ }
/* Restore LEDCTL settings */
E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default);
break;
@@ -5592,7 +6235,10 @@ e1000_led_on(struct e1000_hw *hw)
/* Clear SW Definable Pin 0 to turn on the LED */
ctrl &= ~E1000_CTRL_SWDPIN0;
ctrl |= E1000_CTRL_SWDPIO0;
- } else if(hw->media_type == e1000_media_type_copper) {
+ } else if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+ } else if (hw->media_type == e1000_media_type_copper) {
E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2);
return E1000_SUCCESS;
}
@@ -5640,7 +6286,10 @@ e1000_led_off(struct e1000_hw *hw)
/* Set SW Definable Pin 0 to turn off the LED */
ctrl |= E1000_CTRL_SWDPIN0;
ctrl |= E1000_CTRL_SWDPIO0;
- } else if(hw->media_type == e1000_media_type_copper) {
+ } else if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+ } else if (hw->media_type == e1000_media_type_copper) {
E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
return E1000_SUCCESS;
}
@@ -5678,12 +6327,16 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
temp = E1000_READ_REG(hw, XOFFRXC);
temp = E1000_READ_REG(hw, XOFFTXC);
temp = E1000_READ_REG(hw, FCRUC);
+
+ if (hw->mac_type != e1000_ich8lan) {
temp = E1000_READ_REG(hw, PRC64);
temp = E1000_READ_REG(hw, PRC127);
temp = E1000_READ_REG(hw, PRC255);
temp = E1000_READ_REG(hw, PRC511);
temp = E1000_READ_REG(hw, PRC1023);
temp = E1000_READ_REG(hw, PRC1522);
+ }
+
temp = E1000_READ_REG(hw, GPRC);
temp = E1000_READ_REG(hw, BPRC);
temp = E1000_READ_REG(hw, MPRC);
@@ -5703,12 +6356,16 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
temp = E1000_READ_REG(hw, TOTH);
temp = E1000_READ_REG(hw, TPR);
temp = E1000_READ_REG(hw, TPT);
+
+ if (hw->mac_type != e1000_ich8lan) {
temp = E1000_READ_REG(hw, PTC64);
temp = E1000_READ_REG(hw, PTC127);
temp = E1000_READ_REG(hw, PTC255);
temp = E1000_READ_REG(hw, PTC511);
temp = E1000_READ_REG(hw, PTC1023);
temp = E1000_READ_REG(hw, PTC1522);
+ }
+
temp = E1000_READ_REG(hw, MPTC);
temp = E1000_READ_REG(hw, BPTC);
@@ -5731,6 +6388,9 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
temp = E1000_READ_REG(hw, IAC);
temp = E1000_READ_REG(hw, ICRXOC);
+
+ if (hw->mac_type == e1000_ich8lan) return;
+
temp = E1000_READ_REG(hw, ICRXPTC);
temp = E1000_READ_REG(hw, ICRXATC);
temp = E1000_READ_REG(hw, ICTXPTC);
@@ -5911,6 +6571,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
hw->bus_width = e1000_bus_width_pciex_1;
break;
case e1000_82571:
+ case e1000_ich8lan:
case e1000_80003es2lan:
hw->bus_type = e1000_bus_type_pci_express;
hw->bus_speed = e1000_bus_speed_2500;
@@ -5948,8 +6609,6 @@ e1000_get_bus_info(struct e1000_hw *hw)
break;
}
}
-
-#if 0
/******************************************************************************
* Reads a value from one of the devices registers using port I/O (as opposed
* memory mapped I/O). Only 82544 and newer devices support port I/O.
@@ -5957,6 +6616,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
* hw - Struct containing variables accessed by shared code
* offset - offset to read from
*****************************************************************************/
+#if 0
uint32_t
e1000_read_reg_io(struct e1000_hw *hw,
uint32_t offset)
@@ -6012,8 +6672,6 @@ e1000_get_cable_length(struct e1000_hw *hw,
{
int32_t ret_val;
uint16_t agc_value = 0;
- uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
- uint16_t max_agc = 0;
uint16_t i, phy_data;
uint16_t cable_length;
@@ -6086,6 +6744,8 @@ e1000_get_cable_length(struct e1000_hw *hw,
break;
}
} else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+ uint16_t cur_agc_value;
+ uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
{IGP01E1000_PHY_AGC_A,
IGP01E1000_PHY_AGC_B,
@@ -6098,23 +6758,23 @@ e1000_get_cable_length(struct e1000_hw *hw,
if(ret_val)
return ret_val;
- cur_agc = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+ cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
- /* Array bound check. */
- if((cur_agc >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
- (cur_agc == 0))
+ /* Value bound check. */
+ if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+ (cur_agc_value == 0))
return -E1000_ERR_PHY;
- agc_value += cur_agc;
+ agc_value += cur_agc_value;
/* Update minimal AGC value. */
- if(min_agc > cur_agc)
- min_agc = cur_agc;
+ if (min_agc_value > cur_agc_value)
+ min_agc_value = cur_agc_value;
}
/* Remove the minimal AGC result for length < 50m */
- if(agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
- agc_value -= min_agc;
+ if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+ agc_value -= min_agc_value;
/* Get the average length of the remaining 3 channels */
agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
@@ -6130,7 +6790,10 @@ e1000_get_cable_length(struct e1000_hw *hw,
IGP01E1000_AGC_RANGE) : 0;
*max_length = e1000_igp_cable_length_table[agc_value] +
IGP01E1000_AGC_RANGE;
- } else if (hw->phy_type == e1000_phy_igp_2) {
+ } else if (hw->phy_type == e1000_phy_igp_2 ||
+ hw->phy_type == e1000_phy_igp_3) {
+ uint16_t cur_agc_index, max_agc_index = 0;
+ uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
{IGP02E1000_PHY_AGC_A,
IGP02E1000_PHY_AGC_B,
@@ -6145,19 +6808,27 @@ e1000_get_cable_length(struct e1000_hw *hw,
/* Getting bits 15:9, which represent the combination of coarse and
* fine gain values. The result is a number that can be put into
* the lookup table to obtain the approximate cable length. */
- cur_agc = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
- IGP02E1000_AGC_LENGTH_MASK;
+ cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK;
- /* Remove min & max AGC values from calculation. */
- if (e1000_igp_2_cable_length_table[min_agc] > e1000_igp_2_cable_length_table[cur_agc])
- min_agc = cur_agc;
- if (e1000_igp_2_cable_length_table[max_agc] < e1000_igp_2_cable_length_table[cur_agc])
- max_agc = cur_agc;
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
+ (cur_agc_index == 0))
+ return -E1000_ERR_PHY;
- agc_value += e1000_igp_2_cable_length_table[cur_agc];
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ min_agc_index = cur_agc_index;
+ if (e1000_igp_2_cable_length_table[max_agc_index] <
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ max_agc_index = cur_agc_index;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
}
- agc_value -= (e1000_igp_2_cable_length_table[min_agc] + e1000_igp_2_cable_length_table[max_agc]);
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+ e1000_igp_2_cable_length_table[max_agc_index]);
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
/* Calculate cable length with the error range of +/- 10 meters. */
@@ -6203,7 +6874,8 @@ e1000_check_polarity(struct e1000_hw *hw,
return ret_val;
*polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
M88E1000_PSSR_REV_POLARITY_SHIFT;
- } else if(hw->phy_type == e1000_phy_igp ||
+ } else if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) {
/* Read the Status register to check the speed */
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
@@ -6229,6 +6901,13 @@ e1000_check_polarity(struct e1000_hw *hw,
* 100 Mbps this bit is always 0) */
*polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED;
}
+ } else if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ *polarity = (phy_data & IFE_PESC_POLARITY_REVERSED) >>
+ IFE_PESC_POLARITY_REVERSED_SHIFT;
}
return E1000_SUCCESS;
}
@@ -6256,7 +6935,8 @@ e1000_check_downshift(struct e1000_hw *hw)
DEBUGFUNC("e1000_check_downshift");
- if(hw->phy_type == e1000_phy_igp ||
+ if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
&phy_data);
@@ -6273,6 +6953,9 @@ e1000_check_downshift(struct e1000_hw *hw)
hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ } else if (hw->phy_type == e1000_phy_ife) {
+ /* e1000_phy_ife supports 10/100 speed only */
+ hw->speed_downgraded = FALSE;
}
return E1000_SUCCESS;
@@ -6317,7 +7000,9 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
if(speed == SPEED_1000) {
- e1000_get_cable_length(hw, &min_length, &max_length);
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if (ret_val)
+ return ret_val;
if((hw->dsp_config_state == e1000_dsp_config_enabled) &&
min_length >= e1000_igp_cable_length_50) {
@@ -6525,20 +7210,27 @@ static int32_t
e1000_set_d3_lplu_state(struct e1000_hw *hw,
boolean_t active)
{
+ uint32_t phy_ctrl = 0;
int32_t ret_val;
uint16_t phy_data;
DEBUGFUNC("e1000_set_d3_lplu_state");
- if(hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2)
+ if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
+ && hw->phy_type != e1000_phy_igp_3)
return E1000_SUCCESS;
/* During driver activity LPLU should not be used or it will attain link
* from the lowest speeds starting from 10Mbps. The capability is used for
* Dx transitions and states */
- if(hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
+ if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
+ } else if (hw->mac_type == e1000_ich8lan) {
+ /* MAC writes into PHY register based on the state transition
+ * and starts auto-negotiation. SW driver can overwrite the settings
+ * in CSR PHY power control E1000_PHY_CTRL register. */
+ phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
} else {
ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
if(ret_val)
@@ -6553,11 +7245,16 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
if(ret_val)
return ret_val;
} else {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ } else {
phy_data &= ~IGP02E1000_PM_D3_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
phy_data);
if (ret_val)
return ret_val;
+ }
}
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
@@ -6593,17 +7290,22 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
(hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
if(hw->mac_type == e1000_82541_rev_2 ||
- hw->mac_type == e1000_82547_rev_2) {
+ hw->mac_type == e1000_82547_rev_2) {
phy_data |= IGP01E1000_GMII_FLEX_SPD;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
if(ret_val)
return ret_val;
} else {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+ E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ } else {
phy_data |= IGP02E1000_PM_D3_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
phy_data);
if (ret_val)
return ret_val;
+ }
}
/* When LPLU is enabled we should disable SmartSpeed */
@@ -6638,6 +7340,7 @@ static int32_t
e1000_set_d0_lplu_state(struct e1000_hw *hw,
boolean_t active)
{
+ uint32_t phy_ctrl = 0;
int32_t ret_val;
uint16_t phy_data;
DEBUGFUNC("e1000_set_d0_lplu_state");
@@ -6645,15 +7348,24 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
if(hw->mac_type <= e1000_82547_rev_2)
return E1000_SUCCESS;
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
+ } else {
ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
if(ret_val)
return ret_val;
+ }
if (!active) {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+ E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ } else {
phy_data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
if (ret_val)
return ret_val;
+ }
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
* Dx states where the power conservation is most important. During
@@ -6686,10 +7398,15 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
} else {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+ E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ } else {
phy_data |= IGP02E1000_PM_D0_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
if (ret_val)
return ret_val;
+ }
/* When LPLU is enabled we should disable SmartSpeed */
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
@@ -6928,8 +7645,10 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
length >>= 2;
/* The device driver writes the relevant command block into the ram area. */
- for (i = 0; i < length; i++)
+ for (i = 0; i < length; i++) {
E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i));
+ E1000_WRITE_FLUSH(hw);
+ }
return E1000_SUCCESS;
}
@@ -6961,15 +7680,18 @@ e1000_mng_write_commit(
* returns - TRUE when the mode is IAMT, FALSE otherwise.
****************************************************************************/
boolean_t
-e1000_check_mng_mode(
- struct e1000_hw *hw)
+e1000_check_mng_mode(struct e1000_hw *hw)
{
uint32_t fwsm;
fwsm = E1000_READ_REG(hw, FWSM);
- if((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+ if (hw->mac_type == e1000_ich8lan) {
+ if ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+ return TRUE;
+ } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
return TRUE;
return FALSE;
@@ -7209,7 +7931,6 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
E1000_WRITE_REG(hw, CTRL, ctrl);
}
-#if 0
/***************************************************************************
*
* Enables PCI-Express master access.
@@ -7219,6 +7940,7 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
* returns: - none.
*
***************************************************************************/
+#if 0
void
e1000_enable_pciex_master(struct e1000_hw *hw)
{
@@ -7299,8 +8021,10 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
case e1000_82572:
case e1000_82573:
case e1000_80003es2lan:
- while(timeout) {
- if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break;
+ case e1000_ich8lan:
+ while (timeout) {
+ if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD)
+ break;
else msec_delay(1);
timeout--;
}
@@ -7340,7 +8064,7 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
switch (hw->mac_type) {
default:
- msec_delay(10);
+ msec_delay_irq(10);
break;
case e1000_80003es2lan:
/* Separate *_CFG_DONE_* bit for each port */
@@ -7457,7 +8181,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
* E1000_SUCCESS at any other case.
*
***************************************************************************/
-int32_t
+static int32_t
e1000_get_software_semaphore(struct e1000_hw *hw)
{
int32_t timeout = hw->eeprom.word_size + 1;
@@ -7492,7 +8216,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
* hw: Struct containing variables accessed by shared code
*
***************************************************************************/
-void
+static void
e1000_release_software_semaphore(struct e1000_hw *hw)
{
uint32_t swsm;
@@ -7523,6 +8247,13 @@ int32_t
e1000_check_phy_reset_block(struct e1000_hw *hw)
{
uint32_t manc = 0;
+ uint32_t fwsm = 0;
+
+ if (hw->mac_type == e1000_ich8lan) {
+ fwsm = E1000_READ_REG(hw, FWSM);
+ return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
+ : E1000_BLK_PHY_RESET;
+ }
if (hw->mac_type > e1000_82547_rev_2)
manc = E1000_READ_REG(hw, MANC);
@@ -7549,6 +8280,8 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
if((fwsm & E1000_FWSM_MODE_MASK) != 0)
return TRUE;
break;
+ case e1000_ich8lan:
+ return TRUE;
default:
break;
}
@@ -7556,4 +8289,854 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
}
+/******************************************************************************
+ * Configure PCI-Ex no-snoop
+ *
+ * hw - Struct containing variables accessed by shared code.
+ * no_snoop - Bitmap of no-snoop events.
+ *
+ * returns: E1000_SUCCESS
+ *
+ *****************************************************************************/
+static int32_t
+e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
+{
+ uint32_t gcr_reg = 0;
+
+ DEBUGFUNC("e1000_set_pci_ex_no_snoop");
+
+ if (hw->bus_type == e1000_bus_type_unknown)
+ e1000_get_bus_info(hw);
+
+ if (hw->bus_type != e1000_bus_type_pci_express)
+ return E1000_SUCCESS;
+
+ if (no_snoop) {
+ gcr_reg = E1000_READ_REG(hw, GCR);
+ gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
+ gcr_reg |= no_snoop;
+ E1000_WRITE_REG(hw, GCR, gcr_reg);
+ }
+ if (hw->mac_type == e1000_ich8lan) {
+ uint32_t ctrl_ext;
+
+ E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL);
+
+ ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Get software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize the access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static int32_t
+e1000_get_software_flag(struct e1000_hw *hw)
+{
+ int32_t timeout = PHY_CFG_TIMEOUT;
+ uint32_t extcnf_ctrl;
+
+ DEBUGFUNC("e1000_get_software_flag");
+
+ if (hw->mac_type == e1000_ich8lan) {
+ while (timeout) {
+ extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+
+ extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+ msec_delay_irq(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("FW or HW locks the resource too long.\n");
+ return -E1000_ERR_CONFIG;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize the access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static void
+e1000_release_software_flag(struct e1000_hw *hw)
+{
+ uint32_t extcnf_ctrl;
+
+ DEBUGFUNC("e1000_release_software_flag");
+
+ if (hw->mac_type == e1000_ich8lan) {
+ extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+ }
+
+ return;
+}
+
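Taken together, the two helpers above bracket every ICH8 flash and PHY access: acquire SWFLAG, perform the operation, release SWFLAG, exactly as e1000_read_eeprom_ich8() and e1000_init_lcd_from_nvm_config_region() do later in this patch. A hedged sketch of that calling pattern in the driver's own idiom (the wrapper name and the specific operation are illustrative, and the fragment is not compilable on its own):

/* Sketch only: serialize one flash access against FW/HW via SWFLAG. */
static int32_t e1000_ich8_read_one_word(struct e1000_hw *hw, uint16_t *word)
{
	int32_t ret_val;

	ret_val = e1000_get_software_flag(hw);	/* block FW/HW access */
	if (ret_val != E1000_SUCCESS)
		return ret_val;

	ret_val = e1000_read_ich8_word(hw, 0, word);	/* placeholder flash op */

	e1000_release_software_flag(hw);	/* always release the flag */
	return ret_val;
}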
+/***************************************************************************
+ *
+ * Disable dynamic power down mode in the ife PHY.
+ * It can be used to work around the band-gap problem.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+#if 0
+int32_t
+e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
+{
+ uint16_t phy_data;
+ int32_t ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_ife_disable_dynamic_power_down");
+
+ if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN;
+ ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data);
+ }
+
+ return ret_val;
+}
+#endif /* 0 */
+
+/***************************************************************************
+ *
+ * Enable dynamic power down mode in the ife PHY.
+ * It can be used to work around the band-gap problem.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+#if 0
+int32_t
+e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
+{
+ uint16_t phy_data;
+ int32_t ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_ife_enable_dynamic_power_down");
+
+ if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN;
+ ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data);
+ }
+
+ return ret_val;
+}
+#endif /* 0 */
+
+/******************************************************************************
+ * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
+ * register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+static int32_t
+e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
+ uint16_t *data)
+{
+ int32_t error = E1000_SUCCESS;
+ uint32_t flash_bank = 0;
+ uint32_t act_offset = 0;
+ uint32_t bank_offset = 0;
+ uint16_t word = 0;
+ uint16_t i = 0;
+
+ /* We need to know which flash bank is valid. In the event
+ * that we didn't allocate eeprom_shadow_ram, we may not be
+ * managing flash_bank. So it cannot be trusted and needs
+ * to be updated with each read.
+ */
+ /* Value of bit 22 corresponds to the flash bank we're on. */
+ flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
+
+ /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
+ bank_offset = flash_bank * (hw->flash_bank_size * 2);
+
+ error = e1000_get_software_flag(hw);
+ if (error != E1000_SUCCESS)
+ return error;
+
+ for (i = 0; i < words; i++) {
+ if (hw->eeprom_shadow_ram != NULL &&
+ hw->eeprom_shadow_ram[offset+i].modified == TRUE) {
+ data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
+ } else {
+ /* The NVM part needs a byte offset, hence * 2 */
+ act_offset = bank_offset + ((offset + i) * 2);
+ error = e1000_read_ich8_word(hw, act_offset, &word);
+ if (error != E1000_SUCCESS)
+ break;
+ data[i] = word;
+ }
+ }
+
+ e1000_release_software_flag(hw);
+
+ return error;
+}
+
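The offset arithmetic in e1000_read_eeprom_ich8() boils down to: select the active bank from the EECD SEC1VAL bit, convert the bank size from words to bytes, then convert each word offset to a byte offset before handing it to e1000_read_ich8_word(). A small stand-alone sketch of that address computation (the 2048-word bank size is a made-up example):

#include <stdint.h>
#include <stdio.h>

/* Byte address of EEPROM word 'word_offset' inside the active flash bank.
 * flash_bank_size is in words, as in struct e1000_hw. */
static uint32_t ich8_word_to_flash_offset(uint32_t flash_bank,
					  uint32_t flash_bank_size,
					  uint32_t word_offset)
{
	uint32_t bank_offset = flash_bank * (flash_bank_size * 2); /* words -> bytes */

	return bank_offset + word_offset * 2;	/* each word is two bytes */
}

int main(void)
{
	/* Reading word 0x13 from bank 1 of a hypothetical 2048-word bank. */
	printf("0x%x\n", ich8_word_to_flash_offset(1, 2048, 0x13));
	return 0;
}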
+/******************************************************************************
+ * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
+ * register. Actually, writes are written to the shadow ram cache in the hw
+ * structure hw->eeprom_shadow_ram. e1000_commit_shadow_ram flushes this to
+ * the NVM, which occurs when the NVM checksum is updated.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write
+ * words - number of words to write
+ * data - words to write to the EEPROM
+ *****************************************************************************/
+static int32_t
+e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
+ uint16_t *data)
+{
+ uint32_t i = 0;
+ int32_t error = E1000_SUCCESS;
+
+ error = e1000_get_software_flag(hw);
+ if (error != E1000_SUCCESS)
+ return error;
+
+ /* A driver can write to the NVM only if it has eeprom_shadow_ram
+ * allocated. Subsequent reads to the modified words are read from
+ * this cached structure as well. Writes will only go into this
+ * cached structure unless it's followed by a call to
+ * e1000_update_eeprom_checksum() where it will commit the changes
+ * and clear the "modified" field.
+ */
+ if (hw->eeprom_shadow_ram != NULL) {
+ for (i = 0; i < words; i++) {
+ if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
+ hw->eeprom_shadow_ram[offset+i].modified = TRUE;
+ hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
+ } else {
+ error = -E1000_ERR_EEPROM;
+ break;
+ }
+ }
+ } else {
+ /* Drivers have the option to not allocate eeprom_shadow_ram as long
+ * as they don't perform any NVM writes. An attempt to do so
+ * will result in this error.
+ */
+ error = -E1000_ERR_EEPROM;
+ }
+
+ e1000_release_software_flag(hw);
+
+ return error;
+}
+
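As the comment block above spells out, e1000_write_eeprom_ich8() only stages words in hw->eeprom_shadow_ram; nothing reaches the flash until the checksum update commits the shadow RAM. A hedged sketch of the calling sequence, using e1000_write_eeprom() and e1000_update_eeprom_checksum() as declared in e1000_hw.h (the offset and value are arbitrary, and the wrapper itself is illustrative):

/* Sketch only: stage one word in the shadow RAM, then commit it. */
static int32_t e1000_update_one_nvm_word(struct e1000_hw *hw,
					 uint16_t offset, uint16_t value)
{
	int32_t ret_val;

	ret_val = e1000_write_eeprom(hw, offset, 1, &value);	/* cached write */
	if (ret_val)
		return ret_val;

	/* Recomputes the checksum and flushes modified shadow words to flash. */
	return e1000_update_eeprom_checksum(hw);
}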
+/******************************************************************************
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+static int32_t
+e1000_ich8_cycle_init(struct e1000_hw *hw)
+{
+ union ich8_hws_flash_status hsfsts;
+ int32_t error = E1000_ERR_EEPROM;
+ int32_t i = 0;
+
+ DEBUGFUNC("e1000_ich8_cycle_init");
+
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+
+ /* Check the Flash Descriptor Valid bit in Hw status */
+ if (hsfsts.hsf_status.fldesvalid == 0) {
+ DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.");
+ return error;
+ }
+
+ /* Clear FCERR and DAEL in Hw status by writing a 1 to each */
+ hsfsts.hsf_status.flcerr = 1;
+ hsfsts.hsf_status.dael = 1;
+
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+
+ /* Either we should have a hardware SPI cycle-in-progress bit to check
+ * against in order to start a new cycle, or the FDONE bit should be
+ * changed in the hardware so that it is 1 after hardware reset, which
+ * can then be used as an indication of whether a cycle is in progress
+ * or has been completed. We should also have some software semaphore
+ * mechanism to guard FDONE or the cycle-in-progress bit so that access
+ * to those bits by two threads is serialized, or some other way to keep
+ * two threads from starting the cycle at the same time */
+
+ if (hsfsts.hsf_status.flcinprog == 0) {
+ /* There is no cycle running at present, so we can start a cycle */
+ /* Begin by setting Flash Cycle Done. */
+ hsfsts.hsf_status.flcdone = 1;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+ error = E1000_SUCCESS;
+ } else {
+ /* otherwise poll for some time so the current cycle has a chance
+ * to end before giving up. */
+ for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) {
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcinprog == 0) {
+ error = E1000_SUCCESS;
+ break;
+ }
+ udelay(1);
+ }
+ if (error == E1000_SUCCESS) {
+ /* Successfully waited for the previous cycle to finish,
+ * now set the Flash Cycle Done. */
+ hsfsts.hsf_status.flcdone = 1;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+ } else {
+ DEBUGOUT("Flash controller busy, cannot get access");
+ }
+ }
+ return error;
+}
+
+/******************************************************************************
+ * This function starts a flash cycle and waits for its completion
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+static int32_t
+e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
+{
+ union ich8_hws_flash_ctrl hsflctl;
+ union ich8_hws_flash_status hsfsts;
+ int32_t error = E1000_ERR_EEPROM;
+ uint32_t i = 0;
+
+ /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+ hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcgo = 1;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+ /* wait till FDONE bit is set to 1 */
+ do {
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcdone == 1)
+ break;
+ udelay(1);
+ i++;
+ } while (i < timeout);
+ if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
+ error = E1000_SUCCESS;
+ }
+ return error;
+}
+
+/******************************************************************************
+ * Reads a byte or word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte or word to read.
+ * size - Size of data to read, 1=byte 2=word
+ * data - Pointer to the word to store the value read.
+ *****************************************************************************/
+static int32_t
+e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
+ uint32_t size, uint16_t* data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ uint32_t flash_linear_address;
+ uint32_t flash_data = 0;
+ int32_t error = -E1000_ERR_EEPROM;
+ int32_t count = 0;
+
+ DEBUGFUNC("e1000_read_ich8_data");
+
+ if (size < 1 || size > 2 || data == 0x0 ||
+ index > ICH8_FLASH_LINEAR_ADDR_MASK)
+ return error;
+
+ flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+ hw->flash_base_addr;
+
+ do {
+ udelay(1);
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS)
+ break;
+
+ hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of index into Flash Linear address field in
+ * Flash Address */
+ /* TODO: TBD maybe check the index against the size of flash */
+
+ E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+ error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1; if so, clear it and try the whole
+ * sequence a few more times, else read in (shift in) the Flash Data0;
+ * the data comes back least significant byte first. */
+ if (error == E1000_SUCCESS) {
+ flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0);
+ if (size == 1) {
+ *data = (uint8_t)(flash_data & 0x000000FF);
+ } else if (size == 2) {
+ *data = (uint16_t)(flash_data & 0x0000FFFF);
+ }
+ break;
+ } else {
+ /* If we've gotten here, then things are probably completely hosed,
+ * but if the error condition is detected, it won't hurt to give
+ * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.");
+ break;
+ }
+ }
+ } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes one or two bytes to the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte/word to write.
+ * size - Size of data to write, 1=byte 2=word
+ * data - The byte(s) to write to the NVM.
+ *****************************************************************************/
+static int32_t
+e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
+ uint16_t data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ uint32_t flash_linear_address;
+ uint32_t flash_data = 0;
+ int32_t error = -E1000_ERR_EEPROM;
+ int32_t count = 0;
+
+ DEBUGFUNC("e1000_write_ich8_data");
+
+ if (size < 1 || size > 2 || data > size * 0xff ||
+ index > ICH8_FLASH_LINEAR_ADDR_MASK)
+ return error;
+
+ flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+ hw->flash_base_addr;
+
+ do {
+ udelay(1);
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS)
+ break;
+
+ hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of index into Flash Linear address field in
+ * Flash Address */
+ E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+ if (size == 1)
+ flash_data = (uint32_t)data & 0x00FF;
+ else
+ flash_data = (uint32_t)data;
+
+ E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data);
+
+ /* check if FCERR is set to 1; if so, clear it and try the whole
+ * sequence a few more times, else we are done */
+ error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+ if (error == E1000_SUCCESS) {
+ break;
+ } else {
+ /* If we're here, then things are most likely completely hosed,
+ * but if the error condition is detected, it won't hurt to give
+ * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.");
+ break;
+ }
+ }
+ } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+
+ return error;
+}
+
+/******************************************************************************
+ * Reads a single byte from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to read.
+ * data - Pointer to a byte to store the value read.
+ *****************************************************************************/
+static int32_t
+e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
+{
+ int32_t status = E1000_SUCCESS;
+ uint16_t word = 0;
+
+ status = e1000_read_ich8_data(hw, index, 1, &word);
+ if (status == E1000_SUCCESS) {
+ *data = (uint8_t)word;
+ }
+
+ return status;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ * Performs verification by reading back the value and then going through
+ * a retry algorithm before giving up.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * byte - The byte to write to the NVM.
+ *****************************************************************************/
+static int32_t
+e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
+{
+ int32_t error = E1000_SUCCESS;
+ int32_t program_retries;
+ uint8_t temp_byte;
+
+ e1000_write_ich8_byte(hw, index, byte);
+ udelay(100);
+
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ e1000_read_ich8_byte(hw, index, &temp_byte);
+ if (temp_byte == byte)
+ break;
+ udelay(10);
+ e1000_write_ich8_byte(hw, index, byte);
+ udelay(100);
+ }
+ if (program_retries == 100)
+ error = E1000_ERR_EEPROM;
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * data - The byte to write to the NVM.
+ *****************************************************************************/
+static int32_t
+e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
+{
+ int32_t status = E1000_SUCCESS;
+ uint16_t word = (uint16_t)data;
+
+ status = e1000_write_ich8_data(hw, index, 1, word);
+
+ return status;
+}
+
+/******************************************************************************
+ * Reads a word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to read.
+ * data - Pointer to a word to store the value read.
+ *****************************************************************************/
+static int32_t
+e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
+{
+ int32_t status = E1000_SUCCESS;
+ status = e1000_read_ich8_data(hw, index, 2, data);
+ return status;
+}
+
+/******************************************************************************
+ * Writes a word to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to write.
+ * data - The word to write to the NVM.
+ *****************************************************************************/
+#if 0
+int32_t
+e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
+{
+ int32_t status = E1000_SUCCESS;
+ status = e1000_write_ich8_data(hw, index, 2, data);
+ return status;
+}
+#endif /* 0 */
+
+/******************************************************************************
+ * Erases the bank specified. Each bank is a 4k block. Segments are 0 based.
+ * segment N is 4096 * N + flash_reg_addr.
+ *
+ * hw - pointer to e1000_hw structure
+ * segment - 0 for first segment, 1 for second segment, etc.
+ *****************************************************************************/
+static int32_t
+e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ uint32_t flash_linear_address;
+ int32_t count = 0;
+ int32_t error = E1000_ERR_EEPROM;
+ int32_t iteration, seg_size;
+ int32_t sector_size;
+ int32_t j = 0;
+ int32_t error_flag = 0;
+
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+
+ /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
+ /* 00: The Hw sector is 256 bytes, hence we need to erase 16
+ * consecutive sectors. The start index for the nth Hw sector can be
+ * calculated as = segment * 4096 + n * 256
+ * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+ * The start index for the nth Hw sector can be calculated
+ * as = segment * 4096
+ * 10: Error condition
+ * 11: The Hw sector is 64K bytes; the segment is covered by
+ *     a single 64K erase cycle */
+ if (hsfsts.hsf_status.berasesz == 0x0) {
+ /* Hw sector size 256 */
+ sector_size = seg_size = ICH8_FLASH_SEG_SIZE_256;
+ iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256;
+ } else if (hsfsts.hsf_status.berasesz == 0x1) {
+ sector_size = seg_size = ICH8_FLASH_SEG_SIZE_4K;
+ iteration = 1;
+ } else if (hsfsts.hsf_status.berasesz == 0x3) {
+ sector_size = seg_size = ICH8_FLASH_SEG_SIZE_64K;
+ iteration = 1;
+ } else {
+ return error;
+ }
+
+ for (j = 0; j < iteration ; j++) {
+ do {
+ count++;
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS) {
+ error_flag = 1;
+ break;
+ }
+
+ /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
+ * Control */
+ hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of an index within the block into Flash
+ * Linear address field in Flash Address. This probably needs to
+ * be calculated here based on the on-chip segment size and the
+ * software segment size assumed (4K) */
+ /* TBD */
+ flash_linear_address = segment * sector_size + j * seg_size;
+ flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK;
+ flash_linear_address += hw->flash_base_addr;
+
+ E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+ error = e1000_ich8_flash_cycle(hw, 1000000);
+ /* Check if FCERR is set to 1. If so, clear it and try the whole
+ * sequence a few more times, else we are done */
+ if (error == E1000_SUCCESS) {
+ break;
+ } else {
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* repeat for some time before giving up */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ error_flag = 1;
+ break;
+ }
+ }
+ } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+ if (error_flag == 1)
+ break;
+ }
+ if (error_flag != 1)
+ error = E1000_SUCCESS;
+ return error;
+}
+
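The BERASE decoding in e1000_erase_ich8_4k_segment() maps two status bits to the hardware sector geometry and then walks the 4K software segment in sector-sized steps. A stand-alone sketch of that mapping, mirroring the 256-byte, 4K and 64K cases the code handles (the constants are local stand-ins for the ICH8_FLASH_SEG_SIZE_* defines):

#include <stdio.h>

#define SEG_SIZE_256	256
#define SEG_SIZE_4K	4096
#define SEG_SIZE_64K	65536

/* How many erase cycles does a 4K software segment need for a given
 * BERASE encoding?  Returns 0 for the unsupported encoding. */
static int berase_iterations(unsigned int berasesz, int *seg_size)
{
	switch (berasesz) {
	case 0x0:
		*seg_size = SEG_SIZE_256;
		return SEG_SIZE_4K / SEG_SIZE_256;	/* 16 small sectors */
	case 0x1:
		*seg_size = SEG_SIZE_4K;
		return 1;
	case 0x3:
		*seg_size = SEG_SIZE_64K;
		return 1;
	default:
		return 0;				/* error condition */
	}
}

int main(void)
{
	int seg_size = 0;
	int n = berase_iterations(0x0, &seg_size);

	printf("%d erase cycle(s) of %d bytes each\n", n, seg_size);
	return 0;
}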
+/******************************************************************************
+ *
+ * Reverse duplex setting without breaking the link.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ *****************************************************************************/
+#if 0
+int32_t
+e1000_duplex_reversal(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ uint16_t phy_data;
+
+ if (hw->phy_type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data ^= MII_CR_FULL_DUPLEX;
+
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IGP3_PHY_MISC_DUPLEX_MANUAL_SET;
+ ret_val = e1000_write_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, phy_data);
+
+ return ret_val;
+}
+#endif /* 0 */
+
+static int32_t
+e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+ uint32_t cnf_base_addr, uint32_t cnf_size)
+{
+ uint32_t ret_val = E1000_SUCCESS;
+ uint16_t word_addr, reg_data, reg_addr;
+ uint16_t i;
+
+ /* cnf_base_addr is in DWORD */
+ word_addr = (uint16_t)(cnf_base_addr << 1);
+
+ /* cnf_size is returned in size of dwords */
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_get_software_flag(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data);
+
+ e1000_release_software_flag(hw);
+ }
+
+ return ret_val;
+}
+
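e1000_init_lcd_from_nvm_config_region() treats the extended configuration region as a flat array of word pairs: word 2*i carries the data and word 2*i+1 the PHY register address, with the base pointer doubled because cnf_base_addr is given in DWORDs. A small stand-alone sketch of that layout against a fake NVM image (all values made up):

#include <stdint.h>
#include <stdio.h>

/* Walk (data, addr) word pairs from a fake NVM image, mirroring the loop
 * in e1000_init_lcd_from_nvm_config_region(). */
static void apply_config_region(const uint16_t *nvm, uint32_t cnf_base_addr,
				uint32_t cnf_size_dwords)
{
	uint16_t word_addr = (uint16_t)(cnf_base_addr << 1);	/* DWORDs -> words */
	uint32_t i;

	for (i = 0; i < cnf_size_dwords; i++) {
		uint16_t reg_data = nvm[word_addr + i * 2];
		uint16_t reg_addr = nvm[word_addr + i * 2 + 1];

		printf("write PHY reg 0x%04x = 0x%04x\n", reg_addr, reg_data);
	}
}

int main(void)
{
	/* Two config dwords at base 1: 0x1234 -> reg 0x10, 0xBEEF -> reg 0x1B. */
	uint16_t nvm[] = { 0, 0, 0x1234, 0x0010, 0xBEEF, 0x001B };

	apply_config_region(nvm, 1, 2);
	return 0;
}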
+
+static int32_t
+e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+{
+ uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
+
+ if (hw->phy_type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* Check if SW needs to configure the PHY */
+ reg_data = E1000_READ_REG(hw, FEXTNVM);
+ if (!(reg_data & FEXTNVM_SW_CONFIG))
+ return E1000_SUCCESS;
+
+ /* Wait for basic configuration to complete before proceeding */
+ loop = 0;
+ do {
+ reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE;
+ udelay(100);
+ loop++;
+ } while ((!reg_data) && (loop < 50));
+
+ /* Clear the Init Done bit for the next init event */
+ reg_data = E1000_READ_REG(hw, STATUS);
+ reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
+ E1000_WRITE_REG(hw, STATUS, reg_data);
+
+ /* Make sure HW does not configure LCD from PHY extended configuration
+ before SW configuration */
+ reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+ if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
+ reg_data = E1000_READ_REG(hw, EXTCNF_SIZE);
+ cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
+ cnf_size >>= 16;
+ if (cnf_size) {
+ reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+ cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
+ /* cnf_base_addr is in DWORD */
+ cnf_base_addr >>= 16;
+
+ /* Configure LCD from extended configuration region. */
+ ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
+ cnf_size);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 467c9ed944f8..375b95518c31 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -62,6 +62,7 @@ typedef enum {
e1000_82572,
e1000_82573,
e1000_80003es2lan,
+ e1000_ich8lan,
e1000_num_macs
} e1000_mac_type;
@@ -70,6 +71,7 @@ typedef enum {
e1000_eeprom_spi,
e1000_eeprom_microwire,
e1000_eeprom_flash,
+ e1000_eeprom_ich8,
e1000_eeprom_none, /* No NVM support */
e1000_num_eeprom_types
} e1000_eeprom_type;
@@ -98,6 +100,11 @@ typedef enum {
e1000_fc_default = 0xFF
} e1000_fc_type;
+struct e1000_shadow_ram {
+ uint16_t eeprom_word;
+ boolean_t modified;
+};
+
/* PCI bus types */
typedef enum {
e1000_bus_type_unknown = 0,
@@ -218,6 +225,8 @@ typedef enum {
e1000_phy_igp,
e1000_phy_igp_2,
e1000_phy_gg82563,
+ e1000_phy_igp_3,
+ e1000_phy_ife,
e1000_phy_undefined = 0xFF
} e1000_phy_type;
@@ -313,10 +322,9 @@ int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy
int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
int32_t e1000_phy_reset(struct e1000_hw *hw);
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
-int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
-int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
/* EEPROM Functions */
int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
@@ -331,6 +339,7 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_ICH_IAMT_MODE 0x2
#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
@@ -386,11 +395,8 @@ int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
int32_t e1000_read_mac_addr(struct e1000_hw * hw);
-int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
-void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
/* Filters (multicast, vlan, receive) */
-void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -401,6 +407,7 @@ int32_t e1000_setup_led(struct e1000_hw *hw);
int32_t e1000_cleanup_led(struct e1000_hw *hw);
int32_t e1000_led_on(struct e1000_hw *hw);
int32_t e1000_led_off(struct e1000_hw *hw);
+int32_t e1000_blink_led_start(struct e1000_hw *hw);
/* Adaptive IFS Functions */
@@ -414,15 +421,16 @@ void e1000_pci_clear_mwi(struct e1000_hw *hw);
void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
/* Port I/O is only supported on 82544 and newer */
-uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
-uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
-void e1000_enable_pciex_master(struct e1000_hw *hw);
int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
-int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
-void e1000_release_software_semaphore(struct e1000_hw *hw);
int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
+
+#define E1000_READ_REG_IO(a, reg) \
+ e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+ e1000_write_reg_io((a), E1000_##reg, val)
+
/* PCI Device IDs */
#define E1000_DEV_ID_82542 0x1000
#define E1000_DEV_ID_82543GC_FIBER 0x1001
@@ -446,6 +454,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
#define E1000_DEV_ID_82541EI 0x1013
#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
#define E1000_DEV_ID_82541ER 0x1078
#define E1000_DEV_ID_82547GI 0x1075
#define E1000_DEV_ID_82541GI 0x1076
@@ -457,18 +466,28 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82546GB_PCIE 0x108A
#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
#define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F
#define E1000_DEV_ID_82571EB_SERDES 0x1060
#define E1000_DEV_ID_82572EI_COPPER 0x107D
#define E1000_DEV_ID_82572EI_FIBER 0x107E
#define E1000_DEV_ID_82572EI_SERDES 0x107F
+#define E1000_DEV_ID_82572EI 0x10B9
#define E1000_DEV_ID_82573E 0x108B
#define E1000_DEV_ID_82573E_IAMT 0x108C
#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+
+#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+#define E1000_DEV_ID_ICH8_IGP_C 0x104B
+#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IGP_M 0x104D
#define NODE_ADDRESS_SIZE 6
@@ -539,6 +558,14 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
E1000_IMS_RXSEQ | \
E1000_IMS_LSC)
+/* Additional interrupts need to be handled for e1000_ich8lan:
+ DSW = The FW changed the status of the DISSW bit in FWSM
+ PHYINT = The LAN connected device generates an interrupt
+ EPRST = Manageability reset event */
+#define IMS_ICH8LAN_ENABLE_MASK (\
+ E1000_IMS_DSW | \
+ E1000_IMS_PHYINT | \
+ E1000_IMS_EPRST)
/* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor. We
@@ -546,6 +573,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
* E1000_RAR_ENTRIES - 1 multicast addresses.
*/
#define E1000_RAR_ENTRIES 15
+#define E1000_RAR_ENTRIES_ICH8LAN 7
#define MIN_NUMBER_OF_DESCRIPTORS 8
#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
@@ -767,6 +795,9 @@ struct e1000_data_desc {
#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+#define E1000_NUM_UNICAST_ICH8LAN 7
+#define E1000_MC_TBL_SIZE_ICH8LAN 32
+
/* Receive Address Register */
struct e1000_rar {
@@ -776,6 +807,7 @@ struct e1000_rar {
/* Number of entries in the Multicast Table Array (MTA). */
#define E1000_NUM_MTA_REGISTERS 128
+#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
/* IPv4 Address Table Entry */
struct e1000_ipv4_at_entry {
@@ -786,6 +818,7 @@ struct e1000_ipv4_at_entry {
/* Four wakeup IP addresses are supported */
#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
+#define E1000_IP4AT_SIZE_ICH8LAN 3
#define E1000_IP6AT_SIZE 1
/* IPv6 Address Table Entry */
@@ -844,6 +877,7 @@ struct e1000_ffvt_entry {
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
@@ -872,6 +906,8 @@ struct e1000_ffvt_entry {
#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define FEXTNVM_SW_CONFIG 0x0001
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
@@ -899,11 +935,13 @@ struct e1000_ffvt_entry {
#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
-#define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */
+#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
@@ -1050,6 +1088,7 @@ struct e1000_ffvt_entry {
#define E1000_82542_FLA E1000_FLA
#define E1000_82542_MDIC E1000_MDIC
#define E1000_82542_SCTL E1000_SCTL
+#define E1000_82542_FEXTNVM E1000_FEXTNVM
#define E1000_82542_FCAL E1000_FCAL
#define E1000_82542_FCAH E1000_FCAH
#define E1000_82542_FCT E1000_FCT
@@ -1073,6 +1112,19 @@ struct e1000_ffvt_entry {
#define E1000_82542_RDLEN0 E1000_82542_RDLEN
#define E1000_82542_RDH0 E1000_82542_RDH
#define E1000_82542_RDT0 E1000_82542_RDT
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
+ * RX Control - RW */
+#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */
+#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */
#define E1000_82542_RDTR1 0x00130
#define E1000_82542_RDBAL1 0x00138
#define E1000_82542_RDBAH1 0x0013C
@@ -1110,11 +1162,14 @@ struct e1000_ffvt_entry {
#define E1000_82542_FLOP E1000_FLOP
#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL
#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE
+#define E1000_82542_PHY_CTRL E1000_PHY_CTRL
#define E1000_82542_ERT E1000_ERT
#define E1000_82542_RXDCTL E1000_RXDCTL
+#define E1000_82542_RXDCTL1 E1000_RXDCTL1
#define E1000_82542_RADV E1000_RADV
#define E1000_82542_RSRPD E1000_RSRPD
#define E1000_82542_TXDMAC E1000_TXDMAC
+#define E1000_82542_KABGTXD E1000_KABGTXD
#define E1000_82542_TDFHS E1000_TDFHS
#define E1000_82542_TDFTS E1000_TDFTS
#define E1000_82542_TDFPC E1000_TDFPC
@@ -1310,13 +1365,16 @@ struct e1000_hw_stats {
/* Structure containing variables used by the shared code (e1000_hw.c) */
struct e1000_hw {
- uint8_t __iomem *hw_addr;
+ uint8_t *hw_addr;
uint8_t *flash_address;
e1000_mac_type mac_type;
e1000_phy_type phy_type;
uint32_t phy_init_script;
e1000_media_type media_type;
void *back;
+ struct e1000_shadow_ram *eeprom_shadow_ram;
+ uint32_t flash_bank_size;
+ uint32_t flash_base_addr;
e1000_fc_type fc;
e1000_bus_speed bus_speed;
e1000_bus_width bus_width;
@@ -1328,6 +1386,7 @@ struct e1000_hw {
uint32_t asf_firmware_present;
uint32_t eeprom_semaphore_present;
uint32_t swfw_sync_present;
+ uint32_t swfwhw_semaphore_present;
unsigned long io_base;
uint32_t phy_id;
uint32_t phy_revision;
@@ -1387,6 +1446,7 @@ struct e1000_hw {
boolean_t in_ifs_mode;
boolean_t mng_reg_access_disabled;
boolean_t leave_av_bit_off;
+ boolean_t kmrn_lock_loss_workaround_disabled;
};
@@ -1435,6 +1495,7 @@ struct e1000_hw {
#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
@@ -1449,6 +1510,8 @@ struct e1000_hw {
#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
+ by EEPROM/Flash */
#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
@@ -1506,6 +1569,10 @@ struct e1000_hw {
#define E1000_STM_OPCODE 0xDB00
#define E1000_HICR_FW_RESET 0xC0
+#define E1000_SHADOW_RAM_WORDS 2048
+#define E1000_ICH8_NVM_SIG_WORD 0x13
+#define E1000_ICH8_NVM_SIG_MASK 0xC0
+
/* EEPROM Read */
#define E1000_EERD_START 0x00000001 /* Start Read */
#define E1000_EERD_DONE 0x00000010 /* Read Done */
@@ -1551,7 +1618,6 @@ struct e1000_hw {
#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
-#define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
@@ -1591,12 +1657,31 @@ struct e1000_hw {
#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800
/* In-Band Control */
+#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500
#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010
/* Half-Duplex Control */
#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000
+#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E
+
+#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000
+#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000
+
+#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000
+#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000
+#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+#define E1000_PHY_CTRL_SPD_EN 0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+#define E1000_PHY_CTRL_B2B_EN 0x00000080
+
/* LED Control */
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
@@ -1666,6 +1751,9 @@ struct e1000_hw {
#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
/* Interrupt Cause Set */
#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1692,6 +1780,9 @@ struct e1000_hw {
#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW E1000_ICR_DSW
+#define E1000_ICS_PHYINT E1000_ICR_PHYINT
+#define E1000_ICS_EPRST E1000_ICR_EPRST
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1718,6 +1809,9 @@ struct e1000_hw {
#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW E1000_ICR_DSW
+#define E1000_IMS_PHYINT E1000_ICR_PHYINT
+#define E1000_IMS_EPRST E1000_ICR_EPRST
/* Interrupt Mask Clear */
#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1744,6 +1838,9 @@ struct e1000_hw {
#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_DSW E1000_ICR_DSW
+#define E1000_IMC_PHYINT E1000_ICR_PHYINT
+#define E1000_IMC_EPRST E1000_ICR_EPRST
/* Receive Control */
#define E1000_RCTL_RST 0x00000001 /* Software reset */
@@ -1918,9 +2015,10 @@ struct e1000_hw {
#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
-#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
@@ -2010,6 +2108,15 @@ struct e1000_hw {
#define E1000_FWSM_MODE_SHIFT 1
#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
+#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
+#define E1000_FWSM_SKUEL_SHIFT 29
+#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
+
/* FFLT Debug Register */
#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
@@ -2082,6 +2189,8 @@ struct e1000_host_command_info {
E1000_GCR_TXDSCW_NO_SNOOP | \
E1000_GCR_TXDSCR_NO_SNOOP)
+#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
+
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
/* Function Active and Power State to MNG */
#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
@@ -2140,8 +2249,10 @@ struct e1000_host_command_info {
#define EEPROM_PHY_CLASS_WORD 0x0007
#define EEPROM_INIT_CONTROL1_REG 0x000A
#define EEPROM_INIT_CONTROL2_REG 0x000F
+#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
#define EEPROM_INIT_3GIO_3 0x001A
+#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
#define EEPROM_CFG 0x0012
#define EEPROM_FLASH_VERSION 0x0032
@@ -2153,10 +2264,16 @@ struct e1000_host_command_info {
/* Word definitions for ID LED Settings */
#define ID_LED_RESERVED_0000 0x0000
#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_RESERVED_82573 0xF746
+#define ID_LED_DEFAULT_82573 0x1811
#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_OFF2 << 8) | \
+ (ID_LED_DEF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
#define ID_LED_DEF1_DEF2 0x1
#define ID_LED_DEF1_ON2 0x2
#define ID_LED_DEF1_OFF2 0x3
@@ -2191,6 +2308,11 @@ struct e1000_host_command_info {
#define EEPROM_WORD0F_ASM_DIR 0x2000
#define EEPROM_WORD0F_ANE 0x0800
#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
+#define EEPROM_WORD0F_LPLU 0x0001
+
+/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */
+#define EEPROM_WORD1020_GIGA_DISABLE 0x0010
+#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008
/* Mask bits for fields in Word 0x1a of the EEPROM */
#define EEPROM_WORD1A_ASPM_MASK 0x000C
@@ -2265,23 +2387,29 @@ struct e1000_host_command_info {
#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010
#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
-#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000
#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
/* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
#define E1000_PBA_22K 0x0016
#define E1000_PBA_24K 0x0018
#define E1000_PBA_30K 0x001E
#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
#define E1000_PBA_38K 0x0026
#define E1000_PBA_40K 0x0028
#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
+#define E1000_PBS_16K E1000_PBA_16K
+
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
@@ -2336,7 +2464,7 @@ struct e1000_host_command_info {
/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */
#define AUTO_READ_DONE_TIMEOUT 10
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
-#define PHY_CFG_TIMEOUT 40
+#define PHY_CFG_TIMEOUT 100
#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
@@ -2764,6 +2892,17 @@ struct e1000_host_command_info {
#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00
+
/* IGP01E1000 Specific Port Config Register - R/W */
#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010
#define IGP01E1000_PSCFR_PRE_EN 0x0020
@@ -2990,6 +3129,221 @@ struct e1000_host_command_info {
#define L1LXT971A_PHY_ID 0x001378E0
#define GG82563_E_PHY_ID 0x01410CA0
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) \
+ (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+
+#define IGP3_PHY_PORT_CTRL \
+ PHY_REG(769, 17) /* Port General Configuration */
+#define IGP3_PHY_RATE_ADAPT_CTRL \
+ PHY_REG(769, 25) /* Rate Adapter Control Register */
+
+#define IGP3_KMRN_FIFO_CTRL_STATS \
+ PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+#define IGP3_KMRN_POWER_MNG_CTRL \
+ PHY_REG(770, 17) /* KMRN Power Management Control Register */
+#define IGP3_KMRN_INBAND_CTRL \
+ PHY_REG(770, 18) /* KMRN Inband Control Register */
+#define IGP3_KMRN_DIAG \
+ PHY_REG(770, 19) /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+#define IGP3_KMRN_ACK_TIMEOUT \
+ PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+
+#define IGP3_VR_CTRL \
+ PHY_REG(776, 18) /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
+
+#define IGP3_CAPABILITY \
+ PHY_REG(776, 19) /* IGP3 Capability Register */
+
+/* Capabilities for SKU Control */
+#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */
+#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */
+#define IGP3_CAP_ASF 0x0004 /* Support ASF */
+#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS 0x0080 /* Support RSS */
+#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */
+
+#define IGP3_PPC_JORDAN_EN 0x0001
+#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002
+
+#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001
+#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040
+
+#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */
+
+#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18)
+#define IGP3_KMRN_EC_DIS_INBAND 0x0080
+
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbps, 0=10Mbps */
+#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PESC_POLARITY_REVERSED_SHIFT 8
+
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_FORCE_POLARITY_SHIFT 5
+#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4
+
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */
+#define IFE_PMC_MDIX_MODE_SHIFT 6
+#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask indicating the type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+#define ICH8_FLASH_COMMAND_TIMEOUT 500 /* 500 ms, should be adjusted */
+#define ICH8_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles, should be adjusted */
+#define ICH8_FLASH_SEG_SIZE_256 256
+#define ICH8_FLASH_SEG_SIZE_4K 4096
+#define ICH8_FLASH_SEG_SIZE_64K 65536
+
+#define ICH8_CYCLE_READ 0x0
+#define ICH8_CYCLE_RESERVED 0x1
+#define ICH8_CYCLE_WRITE 0x2
+#define ICH8_CYCLE_ERASE 0x3
+
+#define ICH8_FLASH_GFPREG 0x0000
+#define ICH8_FLASH_HSFSTS 0x0004
+#define ICH8_FLASH_HSFCTL 0x0006
+#define ICH8_FLASH_FADDR 0x0008
+#define ICH8_FLASH_FDATA0 0x0010
+#define ICH8_FLASH_FRACC 0x0050
+#define ICH8_FLASH_FREG0 0x0054
+#define ICH8_FLASH_FREG1 0x0058
+#define ICH8_FLASH_FREG2 0x005C
+#define ICH8_FLASH_FREG3 0x0060
+#define ICH8_FLASH_FPR0 0x0074
+#define ICH8_FLASH_FPR1 0x0078
+#define ICH8_FLASH_SSFSTS 0x0090
+#define ICH8_FLASH_SSFCTL 0x0092
+#define ICH8_FLASH_PREOP 0x0094
+#define ICH8_FLASH_OPTYPE 0x0096
+#define ICH8_FLASH_OPMENU 0x0098
+
+#define ICH8_FLASH_REG_MAPSIZE 0x00A0
+#define ICH8_FLASH_SECTOR_SIZE 4096
+#define ICH8_GFPREG_BASE_MASK 0x1FFF
+#define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+
+/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+ struct ich8_hsfsts {
+#ifdef E1000_BIG_ENDIAN
+ uint16_t reserved2 :6;
+ uint16_t fldesvalid :1;
+ uint16_t flockdn :1;
+ uint16_t flcdone :1;
+ uint16_t flcerr :1;
+ uint16_t dael :1;
+ uint16_t berasesz :2;
+ uint16_t flcinprog :1;
+ uint16_t reserved1 :2;
+#else
+ uint16_t flcdone :1; /* bit 0 Flash Cycle Done */
+ uint16_t flcerr :1; /* bit 1 Flash Cycle Error */
+ uint16_t dael :1; /* bit 2 Direct Access error Log */
+ uint16_t berasesz :2; /* bit 4:3 Block/Sector Erase Size */
+ uint16_t flcinprog :1; /* bit 5 flash SPI cycle in Progress */
+ uint16_t reserved1 :2; /* bit 7:6 Reserved */
+ uint16_t reserved2 :6; /* bit 13:8 Reserved */
+ uint16_t fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+ uint16_t flockdn :1; /* bit 15 Flash Configuration Lock-Down */
+#endif
+ } hsf_status;
+ uint16_t regval;
+};
+
+/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+ struct ich8_hsflctl {
+#ifdef E1000_BIG_ENDIAN
+ uint16_t fldbcount :2;
+ uint16_t flockdn :6;
+ uint16_t flcgo :1;
+ uint16_t flcycle :2;
+ uint16_t reserved :5;
+#else
+ uint16_t flcgo :1; /* 0 Flash Cycle Go */
+ uint16_t flcycle :2; /* 2:1 Flash Cycle */
+ uint16_t reserved :5; /* 7:3 Reserved */
+ uint16_t fldbcount :2; /* 9:8 Flash Data Byte Count */
+ uint16_t flockdn :6; /* 15:10 Reserved */
+#endif
+ } hsf_ctrl;
+ uint16_t regval;
+};
+
+/* ICH8 Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+ struct ich8_flracc {
+#ifdef E1000_BIG_ENDIAN
+ uint32_t gmwag :8;
+ uint32_t gmrag :8;
+ uint32_t grwa :8;
+ uint32_t grra :8;
+#else
+ uint32_t grra :8; /* 0:7 GbE region Read Access */
+ uint32_t grwa :8; /* 8:15 GbE region Write Access */
+ uint32_t gmrag :8; /* 23:16 GbE Master Read Access Grant */
+ uint32_t gmwag :8; /* 31:24 GbE Master Write Access Grant */
+#endif
+ } hsf_flregacc;
+ uint16_t regval;
+};
+
/* Miscellaneous PHY bit definitions. */
#define PHY_PREAMBLE 0xFFFFFFFF
#define PHY_SOF 0x01
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index f77624f5f17b..98ef9f85482f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "7.0.38-k4"DRIVERNAPI
+#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -73,6 +73,11 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x1026),
INTEL_E1000_ETHERNET_DEVICE(0x1027),
INTEL_E1000_ETHERNET_DEVICE(0x1028),
+ INTEL_E1000_ETHERNET_DEVICE(0x1049),
+ INTEL_E1000_ETHERNET_DEVICE(0x104A),
+ INTEL_E1000_ETHERNET_DEVICE(0x104B),
+ INTEL_E1000_ETHERNET_DEVICE(0x104C),
+ INTEL_E1000_ETHERNET_DEVICE(0x104D),
INTEL_E1000_ETHERNET_DEVICE(0x105E),
INTEL_E1000_ETHERNET_DEVICE(0x105F),
INTEL_E1000_ETHERNET_DEVICE(0x1060),
@@ -96,6 +101,8 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x109A),
INTEL_E1000_ETHERNET_DEVICE(0x10B5),
INTEL_E1000_ETHERNET_DEVICE(0x10B9),
+ INTEL_E1000_ETHERNET_DEVICE(0x10BA),
+ INTEL_E1000_ETHERNET_DEVICE(0x10BB),
/* required last entry */
{0,}
};
@@ -133,7 +140,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
static void e1000_set_multi(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
-static void e1000_watchdog_task(struct e1000_adapter *adapter);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
@@ -178,8 +184,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
-#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
+#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);
@@ -206,8 +212,8 @@ static struct pci_driver e1000_driver = {
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
/* Power Management Hooks */
-#ifdef CONFIG_PM
.suspend = e1000_suspend,
+#ifdef CONFIG_PM
.resume = e1000_resume,
#endif
.shutdown = e1000_shutdown,
@@ -261,6 +267,44 @@ e1000_exit_module(void)
module_exit(e1000_exit_module);
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int flags, err = 0;
+
+ flags = IRQF_SHARED;
+#ifdef CONFIG_PCI_MSI
+ if (adapter->hw.mac_type > e1000_82547_rev_2) {
+ adapter->have_msi = TRUE;
+ if ((err = pci_enable_msi(adapter->pdev))) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
+ adapter->have_msi = FALSE;
+ }
+ }
+ if (adapter->have_msi)
+ flags &= ~IRQF_SHARED;
+#endif
+ if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
+ netdev->name, netdev)))
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate interrupt Error: %d\n", err);
+
+ return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+
+#ifdef CONFIG_PCI_MSI
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+#endif
+}
+
/**
* e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
@@ -329,6 +373,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
uint32_t swsm;
+ uint32_t extcnf;
/* Let firmware take over control of h/w */
switch (adapter->hw.mac_type) {
@@ -343,6 +388,11 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
swsm = E1000_READ_REG(&adapter->hw, SWSM);
E1000_WRITE_REG(&adapter->hw, SWSM,
swsm & ~E1000_SWSM_DRV_LOAD);
+ case e1000_ich8lan:
+ extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+ extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
+ break;
default:
break;
}
@@ -364,6 +414,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
uint32_t swsm;
+ uint32_t extcnf;
/* Let firmware know the driver has taken over */
switch (adapter->hw.mac_type) {
case e1000_82571:
@@ -378,6 +429,11 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
E1000_WRITE_REG(&adapter->hw, SWSM,
swsm | E1000_SWSM_DRV_LOAD);
break;
+ case e1000_ich8lan:
+ extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
+ E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
+ extcnf | E1000_EXTCNF_CTRL_SWFLAG);
+ break;
default:
break;
}
@@ -387,18 +443,10 @@ int
e1000_up(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int i, err;
+ int i;
/* hardware has been reset, we need to reload some things */
- /* Reset the PHY if it was previously powered down */
- if (adapter->hw.media_type == e1000_media_type_copper) {
- uint16_t mii_reg;
- e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
- if (mii_reg & MII_CR_POWER_DOWN)
- e1000_phy_hw_reset(&adapter->hw);
- }
-
e1000_set_multi(netdev);
e1000_restore_vlan(adapter);
@@ -415,24 +463,6 @@ e1000_up(struct e1000_adapter *adapter)
E1000_DESC_UNUSED(ring));
}
-#ifdef CONFIG_PCI_MSI
- if (adapter->hw.mac_type > e1000_82547_rev_2) {
- adapter->have_msi = TRUE;
- if ((err = pci_enable_msi(adapter->pdev))) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate MSI interrupt Error: %d\n", err);
- adapter->have_msi = FALSE;
- }
- }
-#endif
- if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- netdev->name, netdev))) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate interrupt Error: %d\n", err);
- return err;
- }
-
adapter->tx_queue_len = netdev->tx_queue_len;
mod_timer(&adapter->watchdog_timer, jiffies);
@@ -445,21 +475,60 @@ e1000_up(struct e1000_adapter *adapter)
return 0;
}
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+static void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+ uint16_t mii_reg = 0;
+
+ /* Just clear the power down bit to wake the phy back up */
+ if (adapter->hw.media_type == e1000_media_type_copper) {
+ /* according to the manual, the phy will retain its
+ * settings across a power-down/up cycle */
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+ }
+}
+
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+ boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+ e1000_check_mng_mode(&adapter->hw);
+ /* Power down the PHY so no link is implied when interface is down
+ * The PHY cannot be powered down if any of the following is TRUE
+ * (a) WoL is enabled
+ * (b) AMT is active
+ * (c) SoL/IDER session is active */
+ if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type != e1000_ich8lan &&
+ adapter->hw.media_type == e1000_media_type_copper &&
+ !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+ !mng_mode_enabled &&
+ !e1000_check_phy_reset_block(&adapter->hw)) {
+ uint16_t mii_reg = 0;
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+ mdelay(1);
+ }
+}
+
void
e1000_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
- e1000_check_mng_mode(&adapter->hw);
e1000_irq_disable(adapter);
- free_irq(adapter->pdev->irq, netdev);
-#ifdef CONFIG_PCI_MSI
- if (adapter->hw.mac_type > e1000_82547_rev_2 &&
- adapter->have_msi == TRUE)
- pci_disable_msi(adapter->pdev);
-#endif
del_timer_sync(&adapter->tx_fifo_stall_timer);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
@@ -476,23 +545,17 @@ e1000_down(struct e1000_adapter *adapter)
e1000_reset(adapter);
e1000_clean_all_tx_rings(adapter);
e1000_clean_all_rx_rings(adapter);
+}
- /* Power down the PHY so no link is implied when interface is down *
- * The PHY cannot be powered down if any of the following is TRUE *
- * (a) WoL is enabled
- * (b) AMT is active
- * (c) SoL/IDER session is active */
- if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
- adapter->hw.media_type == e1000_media_type_copper &&
- !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
- !mng_mode_enabled &&
- !e1000_check_phy_reset_block(&adapter->hw)) {
- uint16_t mii_reg;
- e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
- mii_reg |= MII_CR_POWER_DOWN;
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
- mdelay(1);
- }
+void
+e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+ e1000_down(adapter);
+ e1000_up(adapter);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
}
void
@@ -518,6 +581,9 @@ e1000_reset(struct e1000_adapter *adapter)
case e1000_82573:
pba = E1000_PBA_12K;
break;
+ case e1000_ich8lan:
+ pba = E1000_PBA_8K;
+ break;
default:
pba = E1000_PBA_48K;
break;
@@ -542,6 +608,12 @@ e1000_reset(struct e1000_adapter *adapter)
/* Set the FC high water mark to 90% of the FIFO size.
* Required to clear last 3 LSB */
fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+ /* We can't use 90% on small FIFOs because the remainder
+ * would be less than 1 full frame. In this case, we size
+ * it to allow at least a full frame above the high water
+ * mark. */
+ if (pba < E1000_PBA_16K)
+ fc_high_water_mark = (pba * 1024) - 1600;
adapter->hw.fc_high_water = fc_high_water_mark;
adapter->hw.fc_low_water = fc_high_water_mark - 8;
@@ -564,6 +636,23 @@ e1000_reset(struct e1000_adapter *adapter)
e1000_reset_adaptive(&adapter->hw);
e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+
+ if (!adapter->smart_power_down &&
+ (adapter->hw.mac_type == e1000_82571 ||
+ adapter->hw.mac_type == e1000_82572)) {
+ uint16_t phy_data = 0;
+ /* speed up time to link by disabling smart power down, ignore
+ * the return value of this function because there is nothing
+ * different we would do if it failed */
+ e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+ &phy_data);
+ phy_data &= ~IGP02E1000_PM_SPD;
+ e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+ phy_data);
+ }
+
+ if (adapter->hw.mac_type < e1000_ich8lan)
+ /* FIXME: this code is duplicate and wrong for PCI Express */
if (adapter->en_mng_pt) {
manc = E1000_READ_REG(&adapter->hw, MANC);
manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
@@ -590,6 +679,7 @@ e1000_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct e1000_adapter *adapter;
unsigned long mmio_start, mmio_len;
+ unsigned long flash_start, flash_len;
static int cards_found = 0;
static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
@@ -599,10 +689,12 @@ e1000_probe(struct pci_dev *pdev,
if ((err = pci_enable_device(pdev)))
return err;
- if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+ if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
+ !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+ if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
+ (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
E1000_ERR("No usable DMA configuration, aborting\n");
return err;
}
@@ -682,6 +774,19 @@ e1000_probe(struct pci_dev *pdev,
if ((err = e1000_sw_init(adapter)))
goto err_sw_init;
+ /* Flash BAR mapping must happen after e1000_sw_init
+ * because it depends on mac_type */
+ if ((adapter->hw.mac_type == e1000_ich8lan) &&
+ (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ flash_start = pci_resource_start(pdev, 1);
+ flash_len = pci_resource_len(pdev, 1);
+ adapter->hw.flash_address = ioremap(flash_start, flash_len);
+ if (!adapter->hw.flash_address) {
+ err = -EIO;
+ goto err_flashmap;
+ }
+ }
+
if ((err = e1000_check_phy_reset_block(&adapter->hw)))
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
@@ -700,6 +805,8 @@ e1000_probe(struct pci_dev *pdev,
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ if (adapter->hw.mac_type == e1000_ich8lan)
+ netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
}
#ifdef NETIF_F_TSO
@@ -715,11 +822,17 @@ e1000_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- /* hard_start_xmit is safe against parallel locking */
netdev->features |= NETIF_F_LLTX;
adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
+ /* initialize eeprom parameters */
+
+ if (e1000_init_eeprom_params(&adapter->hw)) {
+ E1000_ERR("EEPROM initialization failed\n");
+ return -EIO;
+ }
+
/* before reading the EEPROM, reset the controller to
* put the device in a known good starting state */
@@ -758,9 +871,6 @@ e1000_probe(struct pci_dev *pdev,
adapter->watchdog_timer.function = &e1000_watchdog;
adapter->watchdog_timer.data = (unsigned long) adapter;
- INIT_WORK(&adapter->watchdog_task,
- (void (*)(void *))e1000_watchdog_task, adapter);
-
init_timer(&adapter->phy_info_timer);
adapter->phy_info_timer.function = &e1000_update_phy_info;
adapter->phy_info_timer.data = (unsigned long) adapter;
@@ -790,6 +900,11 @@ e1000_probe(struct pci_dev *pdev,
EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
eeprom_apme_mask = E1000_EEPROM_82544_APM;
break;
+ case e1000_ich8lan:
+ e1000_read_eeprom(&adapter->hw,
+ EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
+ eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
+ break;
case e1000_82546:
case e1000_82546_rev_3:
case e1000_82571:
@@ -849,6 +964,9 @@ e1000_probe(struct pci_dev *pdev,
return 0;
err_register:
+ if (adapter->hw.flash_address)
+ iounmap(adapter->hw.flash_address);
+err_flashmap:
err_sw_init:
err_eeprom:
iounmap(adapter->hw.hw_addr);
@@ -882,6 +1000,7 @@ e1000_remove(struct pci_dev *pdev)
flush_scheduled_work();
if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type != e1000_ich8lan &&
adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
if (manc & E1000_MANC_SMBUS_EN) {
@@ -910,6 +1029,8 @@ e1000_remove(struct pci_dev *pdev)
#endif
iounmap(adapter->hw.hw_addr);
+ if (adapter->hw.flash_address)
+ iounmap(adapter->hw.flash_address);
pci_release_regions(pdev);
free_netdev(netdev);
@@ -947,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
- adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
hw->max_frame_size = netdev->mtu +
ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
@@ -960,13 +1081,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
return -EIO;
}
- /* initialize eeprom parameters */
-
- if (e1000_init_eeprom_params(hw)) {
- E1000_ERR("EEPROM initialization failed\n");
- return -EIO;
- }
-
switch (hw->mac_type) {
default:
break;
@@ -1078,6 +1192,10 @@ e1000_open(struct net_device *netdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
int err;
+ /* disallow open during test */
+ if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags))
+ return -EBUSY;
+
/* allocate transmit descriptors */
if ((err = e1000_setup_all_tx_resources(adapter)))
@@ -1088,6 +1206,12 @@ e1000_open(struct net_device *netdev)
if ((err = e1000_setup_all_rx_resources(adapter)))
goto err_setup_rx;
+ err = e1000_request_irq(adapter);
+ if (err)
+ goto err_up;
+
+ e1000_power_up_phy(adapter);
+
if ((err = e1000_up(adapter)))
goto err_up;
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -1131,7 +1255,10 @@ e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
+ e1000_power_down_phy(adapter);
+ e1000_free_irq(adapter);
e1000_free_all_tx_resources(adapter);
e1000_free_all_rx_resources(adapter);
@@ -1189,8 +1316,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
int size;
size = sizeof(struct e1000_buffer) * txdr->count;
-
- txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
+ txdr->buffer_info = vmalloc(size);
if (!txdr->buffer_info) {
DPRINTK(PROBE, ERR,
"Unable to allocate memory for the transmit descriptor ring\n");
@@ -1302,13 +1428,13 @@ e1000_configure_tx(struct e1000_adapter *adapter)
tdba = adapter->tx_ring[0].dma;
tdlen = adapter->tx_ring[0].count *
sizeof(struct e1000_tx_desc);
- E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
- E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
E1000_WRITE_REG(hw, TDLEN, tdlen);
- E1000_WRITE_REG(hw, TDH, 0);
+ E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
+ E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(hw, TDT, 0);
- adapter->tx_ring[0].tdh = E1000_TDH;
- adapter->tx_ring[0].tdt = E1000_TDT;
+ E1000_WRITE_REG(hw, TDH, 0);
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
break;
}
@@ -1418,7 +1544,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
int size, desc_len;
size = sizeof(struct e1000_buffer) * rxdr->count;
- rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
+ rxdr->buffer_info = vmalloc(size);
if (!rxdr->buffer_info) {
DPRINTK(PROBE, ERR,
"Unable to allocate memory for the receive descriptor ring\n");
@@ -1560,9 +1686,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
- if (adapter->hw.mac_type > e1000_82543)
- rctl |= E1000_RCTL_SECRC;
-
if (adapter->hw.tbi_compatibility_on == 1)
rctl |= E1000_RCTL_SBP;
else
@@ -1628,7 +1751,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
rfctl |= E1000_RFCTL_IPV6_DIS;
E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
- rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
+ rctl |= E1000_RCTL_DTYP_PS;
psrctl |= adapter->rx_ps_bsize0 >>
E1000_PSRCTL_BSIZE0_SHIFT;
@@ -1712,13 +1835,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
case 1:
default:
rdba = adapter->rx_ring[0].dma;
- E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
- E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
E1000_WRITE_REG(hw, RDLEN, rdlen);
- E1000_WRITE_REG(hw, RDH, 0);
+ E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
+ E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(hw, RDT, 0);
- adapter->rx_ring[0].rdh = E1000_RDH;
- adapter->rx_ring[0].rdt = E1000_RDT;
+ E1000_WRITE_REG(hw, RDH, 0);
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
break;
}
@@ -1741,9 +1864,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
E1000_WRITE_REG(hw, RXCSUM, rxcsum);
}
- if (hw->mac_type == e1000_82573)
- E1000_WRITE_REG(hw, ERT, 0x0100);
-
/* Enable Receives */
E1000_WRITE_REG(hw, RCTL, rctl);
}
@@ -2083,6 +2203,12 @@ e1000_set_multi(struct net_device *netdev)
uint32_t rctl;
uint32_t hash_value;
int i, rar_entries = E1000_RAR_ENTRIES;
+ int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
+ E1000_NUM_MTA_REGISTERS_ICH8LAN :
+ E1000_NUM_MTA_REGISTERS;
+
+ if (adapter->hw.mac_type == e1000_ich8lan)
+ rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
/* reserve RAR[14] for LAA over-write work-around */
if (adapter->hw.mac_type == e1000_82571)
@@ -2121,14 +2247,18 @@ e1000_set_multi(struct net_device *netdev)
mc_ptr = mc_ptr->next;
} else {
E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+ E1000_WRITE_FLUSH(hw);
}
}
/* clear the old settings from the multicast hash table */
- for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
+ for (i = 0; i < mta_reg_count; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ E1000_WRITE_FLUSH(hw);
+ }
/* load any remaining addresses into the hash table */
@@ -2201,19 +2331,19 @@ static void
e1000_watchdog(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
-
- /* Do the rest outside of interrupt context */
- schedule_work(&adapter->watchdog_task);
-}
-
-static void
-e1000_watchdog_task(struct e1000_adapter *adapter)
-{
struct net_device *netdev = adapter->netdev;
struct e1000_tx_ring *txdr = adapter->tx_ring;
uint32_t link, tctl;
-
- e1000_check_for_link(&adapter->hw);
+ int32_t ret_val;
+
+ ret_val = e1000_check_for_link(&adapter->hw);
+ if ((ret_val == E1000_ERR_PHY) &&
+ (adapter->hw.phy_type == e1000_phy_igp_3) &&
+ (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+ /* See e1000_kumeran_lock_loss_workaround() */
+ DPRINTK(LINK, INFO,
+ "Gigabit has been disabled, downgrading speed\n");
+ }
if (adapter->hw.mac_type == e1000_82573) {
e1000_enable_tx_pkt_filtering(&adapter->hw);
if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
@@ -2394,7 +2524,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
int err;
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
@@ -2519,7 +2649,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
* tso gets written back prematurely before the data is fully
* DMA'd to the controller */
if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_shinfo(skb)->gso_size) {
+ !skb_is_gso(skb)) {
tx_ring->last_tx_tso = 0;
size -= 4;
}
@@ -2779,9 +2909,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
case e1000_82571:
case e1000_82572:
case e1000_82573:
+ case e1000_ich8lan:
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- printk(KERN_ERR
+ DPRINTK(DRV, ERR,
"__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2806,8 +2937,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#ifdef NETIF_F_TSO
/* Controller Erratum workaround */
- if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_shinfo(skb)->gso_size)
+ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
count++;
#endif
@@ -2919,8 +3049,7 @@ e1000_reset_task(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- e1000_down(adapter);
- e1000_up(adapter);
+ e1000_reinit_locked(adapter);
}
/**
@@ -2964,6 +3093,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* Adapter-specific max frame size limits. */
switch (adapter->hw.mac_type) {
case e1000_undefined ... e1000_82542_rev2_1:
+ case e1000_ich8lan:
if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
return -EINVAL;
@@ -2997,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
break;
}
- /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size
* i.e. RXBUFFER_2048 --> size-4096 slab */
@@ -3018,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = E1000_RXBUFFER_16384;
/* adjust allocation if LPE protects us, and we aren't using SBP */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
if (!adapter->hw.tbi_compatibility_on &&
((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
@@ -3026,10 +3155,8 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
- if (netif_running(netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- }
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
adapter->hw.max_frame_size = max_frame;
@@ -3074,12 +3201,15 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
adapter->stats.roc += E1000_READ_REG(hw, ROC);
+
+ if (adapter->hw.mac_type != e1000_ich8lan) {
adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+ }
adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
adapter->stats.mpc += E1000_READ_REG(hw, MPC);
@@ -3107,12 +3237,16 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.totl += E1000_READ_REG(hw, TOTL);
adapter->stats.toth += E1000_READ_REG(hw, TOTH);
adapter->stats.tpr += E1000_READ_REG(hw, TPR);
+
+ if (adapter->hw.mac_type != e1000_ich8lan) {
adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+ }
+
adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
@@ -3134,6 +3268,8 @@ e1000_update_stats(struct e1000_adapter *adapter)
if (hw->mac_type > e1000_82547_rev_2) {
adapter->stats.iac += E1000_READ_REG(hw, IAC);
adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
+
+ if (adapter->hw.mac_type != e1000_ich8lan) {
adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
@@ -3141,6 +3277,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+ }
}
/* Fill out the OS statistics structure */
@@ -3249,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
E1000_WRITE_REG(hw, IMC, ~0);
E1000_WRITE_FLUSH(hw);
}
- if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
- __netif_rx_schedule(&adapter->polling_netdev[0]);
+ if (likely(netif_rx_schedule_prep(netdev)))
+ __netif_rx_schedule(netdev);
else
e1000_irq_enable(adapter);
#else
@@ -3293,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
{
struct e1000_adapter *adapter;
int work_to_do = min(*budget, poll_dev->quota);
- int tx_cleaned = 0, i = 0, work_done = 0;
+ int tx_cleaned = 0, work_done = 0;
/* Must NOT use netdev_priv macro here. */
adapter = poll_dev->priv;
/* Keep link state information with original netdev */
- if (!netif_carrier_ok(adapter->netdev))
+ if (!netif_carrier_ok(poll_dev))
goto quit_polling;
- while (poll_dev != &adapter->polling_netdev[i]) {
- i++;
- BUG_ON(i == adapter->num_rx_queues);
+ /* e1000_clean is called per-cpu. This lock protects
+ * tx_ring[0] from being cleaned by multiple cpus
+ * simultaneously. A failure obtaining the lock means
+ * tx_ring[0] is currently being cleaned anyway. */
+ if (spin_trylock(&adapter->tx_queue_lock)) {
+ tx_cleaned = e1000_clean_tx_irq(adapter,
+ &adapter->tx_ring[0]);
+ spin_unlock(&adapter->tx_queue_lock);
}
- if (likely(adapter->num_tx_queues == 1)) {
- /* e1000_clean is called per-cpu. This lock protects
- * tx_ring[0] from being cleaned by multiple cpus
- * simultaneously. A failure obtaining the lock means
- * tx_ring[0] is currently being cleaned anyway. */
- if (spin_trylock(&adapter->tx_queue_lock)) {
- tx_cleaned = e1000_clean_tx_irq(adapter,
- &adapter->tx_ring[0]);
- spin_unlock(&adapter->tx_queue_lock);
- }
- } else
- tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
-
- adapter->clean_rx(adapter, &adapter->rx_ring[i],
+ adapter->clean_rx(adapter, &adapter->rx_ring[0],
&work_done, work_to_do);
*budget -= work_done;
@@ -3328,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
/* If no Tx and not enough Rx work done, exit the polling mode */
if ((!tx_cleaned && (work_done == 0)) ||
- !netif_running(adapter->netdev)) {
+ !netif_running(poll_dev)) {
quit_polling:
netif_rx_complete(poll_dev);
e1000_irq_enable(adapter);
@@ -3543,11 +3672,15 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
+ /* adjust length to remove Ethernet CRC */
+ length -= 4;
+
if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
- dev_kfree_skb_irq(skb);
+ /* recycle */
+ buffer_info->skb = skb;
goto next_desc;
}
@@ -3575,7 +3708,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
#define E1000_CB_LENGTH 256
if (length < E1000_CB_LENGTH) {
struct sk_buff *new_skb =
- dev_alloc_skb(length + NET_IP_ALIGN);
+ netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
if (new_skb) {
skb_reserve(new_skb, NET_IP_ALIGN);
new_skb->dev = netdev;
@@ -3675,7 +3808,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
while (staterr & E1000_RXD_STAT_DD) {
- buffer_info = &rx_ring->buffer_info[i];
ps_page = &rx_ring->ps_page[i];
ps_page_dma = &rx_ring->ps_page_dma[i];
#ifdef CONFIG_E1000_NAPI
@@ -3747,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
pci_dma_sync_single_for_device(pdev,
ps_page_dma->ps_page_dma[0],
PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ /* remove the CRC */
+ l1 -= 4;
skb_put(skb, l1);
- length += l1;
goto copydone;
} /* if */
}
@@ -3767,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
skb->truesize += length;
}
+ /* strip the Ethernet CRC; since we're using pages now, this whole
+ * operation can get a little CPU intensive */
+ pskb_trim(skb, skb->len - 4);
+
copydone:
e1000_rx_checksum(adapter, staterr,
le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
@@ -3842,7 +3979,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
while (cleaned_count--) {
if (!(skb = buffer_info->skb))
- skb = dev_alloc_skb(bufsz);
+ skb = netdev_alloc_skb(netdev, bufsz);
else {
skb_trim(skb, 0);
goto map_skb;
@@ -3860,7 +3997,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
"at %p\n", bufsz, skb->data);
/* Try again, without freeing the previous */
- skb = dev_alloc_skb(bufsz);
+ skb = netdev_alloc_skb(netdev, bufsz);
/* Failed allocation, critical failure */
if (!skb) {
dev_kfree_skb(oldskb);
@@ -3984,7 +4121,8 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc->read.buffer_addr[j+1] = ~0;
}
- skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(netdev,
+ adapter->rx_ps_bsize0 + NET_IP_ALIGN);
if (unlikely(!skb)) {
adapter->alloc_rx_buff_failed++;
@@ -4180,10 +4318,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return retval;
}
}
- if (netif_running(adapter->netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- } else
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
e1000_reset(adapter);
break;
case M88E1000_PHY_SPEC_CTRL:
@@ -4200,10 +4337,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case PHY_CTRL:
if (mii_reg & MII_CR_POWER_DOWN)
break;
- if (netif_running(adapter->netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- } else
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
e1000_reset(adapter);
break;
}
@@ -4250,11 +4386,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
pci_write_config_word(adapter->pdev, reg, *value);
}
+#if 0
uint32_t
e1000_io_read(struct e1000_hw *hw, unsigned long port)
{
return inl(port);
}
+#endif /* 0 */
void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
@@ -4277,18 +4415,21 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
ctrl |= E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+ if (adapter->hw.mac_type != e1000_ich8lan) {
/* enable VLAN receive filtering */
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl |= E1000_RCTL_VFE;
rctl &= ~E1000_RCTL_CFIEN;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
e1000_update_mng_vlan(adapter);
+ }
} else {
/* disable VLAN tag insert/strip */
ctrl = E1000_READ_REG(&adapter->hw, CTRL);
ctrl &= ~E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+ if (adapter->hw.mac_type != e1000_ich8lan) {
/* disable VLAN filtering */
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl &= ~E1000_RCTL_VFE;
@@ -4297,6 +4438,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
+ }
}
e1000_irq_enable(adapter);
@@ -4458,12 +4600,16 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t ctrl, ctrl_ext, rctl, manc, status;
uint32_t wufc = adapter->wol;
+#ifdef CONFIG_PM
int retval = 0;
+#endif
netif_device_detach(netdev);
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
+ }
#ifdef CONFIG_PM
/* Implement our own version of pci_save_state(pdev) because pci-
@@ -4521,7 +4667,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
pci_enable_wake(pdev, PCI_D3cold, 0);
}
+ /* FIXME: this code is incorrect for PCI Express */
if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type != e1000_ich8lan &&
adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
if (manc & E1000_MANC_SMBUS_EN) {
@@ -4532,6 +4680,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
}
}
+ if (adapter->hw.phy_type == e1000_phy_igp_3)
+ e1000_phy_powerdown_workaround(&adapter->hw);
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant. */
e1000_release_hw_control(adapter);
@@ -4567,7 +4718,9 @@ e1000_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
+ /* FIXME: this code is incorrect for PCI Express */
if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type != e1000_ich8lan &&
adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
manc &= ~(E1000_MANC_ARP_EN);
@@ -4601,6 +4754,7 @@ static void
e1000_netpoll(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev, NULL);
e1000_clean_tx_irq(adapter, adapter->tx_ring);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 048d052be29d..2d3e8b06cab0 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -127,4 +127,17 @@ typedef enum {
#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
+#define E1000_WRITE_ICH8_REG(a, reg, value) ( \
+ writel((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH8_REG(a, reg) ( \
+ readl((a)->flash_address + reg))
+
+#define E1000_WRITE_ICH8_REG16(a, reg, value) ( \
+ writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH8_REG16(a, reg) ( \
+ readw((a)->flash_address + reg))
+
+
#endif /* _E1000_OSDEP_H_ */
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index e55f8969a0fb..0ef413172c68 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -45,6 +45,16 @@
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user-specified value and the
+ * user asking for the default value.
+ * The true default values are loaded in when e1000_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
#define E1000_PARAM(X, desc) \
static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static int num_##X = 0; \
@@ -183,6 +193,24 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
#define AUTONEG_ADV_DEFAULT 0x2F
#define AUTONEG_ADV_MASK 0x2F
#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
@@ -296,6 +324,7 @@ e1000_check_options(struct e1000_adapter *adapter)
DPRINTK(PROBE, NOTICE,
"Warning: no configuration for board #%i\n", bd);
DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+ bd = E1000_MAX_NIC;
}
{ /* Transmit Descriptor Count */
@@ -313,14 +342,9 @@ e1000_check_options(struct e1000_adapter *adapter)
opt.arg.r.max = mac_type < e1000_82544 ?
E1000_MAX_TXD : E1000_MAX_82544_TXD;
- if (num_TxDescriptors > bd) {
- tx_ring->count = TxDescriptors[bd];
- e1000_validate_option(&tx_ring->count, &opt, adapter);
- E1000_ROUNDUP(tx_ring->count,
- REQ_TX_DESCRIPTOR_MULTIPLE);
- } else {
- tx_ring->count = opt.def;
- }
+ tx_ring->count = TxDescriptors[bd];
+ e1000_validate_option(&tx_ring->count, &opt, adapter);
+ E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
for (i = 0; i < adapter->num_tx_queues; i++)
tx_ring[i].count = tx_ring->count;
}
@@ -339,14 +363,9 @@ e1000_check_options(struct e1000_adapter *adapter)
opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
E1000_MAX_82544_RXD;
- if (num_RxDescriptors > bd) {
- rx_ring->count = RxDescriptors[bd];
- e1000_validate_option(&rx_ring->count, &opt, adapter);
- E1000_ROUNDUP(rx_ring->count,
- REQ_RX_DESCRIPTOR_MULTIPLE);
- } else {
- rx_ring->count = opt.def;
- }
+ rx_ring->count = RxDescriptors[bd];
+ e1000_validate_option(&rx_ring->count, &opt, adapter);
+ E1000_ROUNDUP(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
for (i = 0; i < adapter->num_rx_queues; i++)
rx_ring[i].count = rx_ring->count;
}
@@ -358,13 +377,9 @@ e1000_check_options(struct e1000_adapter *adapter)
.def = OPTION_ENABLED
};
- if (num_XsumRX > bd) {
- int rx_csum = XsumRX[bd];
- e1000_validate_option(&rx_csum, &opt, adapter);
- adapter->rx_csum = rx_csum;
- } else {
- adapter->rx_csum = opt.def;
- }
+ int rx_csum = XsumRX[bd];
+ e1000_validate_option(&rx_csum, &opt, adapter);
+ adapter->rx_csum = rx_csum;
}
{ /* Flow Control */
@@ -384,13 +399,9 @@ e1000_check_options(struct e1000_adapter *adapter)
.p = fc_list }}
};
- if (num_FlowControl > bd) {
- int fc = FlowControl[bd];
- e1000_validate_option(&fc, &opt, adapter);
- adapter->hw.fc = adapter->hw.original_fc = fc;
- } else {
- adapter->hw.fc = adapter->hw.original_fc = opt.def;
- }
+ int fc = FlowControl[bd];
+ e1000_validate_option(&fc, &opt, adapter);
+ adapter->hw.fc = adapter->hw.original_fc = fc;
}
{ /* Transmit Interrupt Delay */
struct e1000_option opt = {
@@ -402,13 +413,8 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_TXDELAY }}
};
- if (num_TxIntDelay > bd) {
- adapter->tx_int_delay = TxIntDelay[bd];
- e1000_validate_option(&adapter->tx_int_delay, &opt,
- adapter);
- } else {
- adapter->tx_int_delay = opt.def;
- }
+ adapter->tx_int_delay = TxIntDelay[bd];
+ e1000_validate_option(&adapter->tx_int_delay, &opt, adapter);
}
{ /* Transmit Absolute Interrupt Delay */
struct e1000_option opt = {
@@ -420,13 +426,9 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_TXABSDELAY }}
};
- if (num_TxAbsIntDelay > bd) {
- adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
- e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
- adapter);
- } else {
- adapter->tx_abs_int_delay = opt.def;
- }
+ adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+ adapter);
}
{ /* Receive Interrupt Delay */
struct e1000_option opt = {
@@ -438,13 +440,8 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_RXDELAY }}
};
- if (num_RxIntDelay > bd) {
- adapter->rx_int_delay = RxIntDelay[bd];
- e1000_validate_option(&adapter->rx_int_delay, &opt,
- adapter);
- } else {
- adapter->rx_int_delay = opt.def;
- }
+ adapter->rx_int_delay = RxIntDelay[bd];
+ e1000_validate_option(&adapter->rx_int_delay, &opt, adapter);
}
{ /* Receive Absolute Interrupt Delay */
struct e1000_option opt = {
@@ -456,13 +453,9 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_RXABSDELAY }}
};
- if (num_RxAbsIntDelay > bd) {
- adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
- e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
- adapter);
- } else {
- adapter->rx_abs_int_delay = opt.def;
- }
+ adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+ adapter);
}
{ /* Interrupt Throttling Rate */
struct e1000_option opt = {
@@ -474,26 +467,44 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_ITR }}
};
- if (num_InterruptThrottleRate > bd) {
- adapter->itr = InterruptThrottleRate[bd];
- switch (adapter->itr) {
- case 0:
- DPRINTK(PROBE, INFO, "%s turned off\n",
- opt.name);
- break;
- case 1:
- DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
- opt.name);
- break;
- default:
- e1000_validate_option(&adapter->itr, &opt,
- adapter);
- break;
- }
- } else {
- adapter->itr = opt.def;
+ adapter->itr = InterruptThrottleRate[bd];
+ switch (adapter->itr) {
+ case 0:
+ DPRINTK(PROBE, INFO, "%s turned off\n", opt.name);
+ break;
+ case 1:
+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+ opt.name);
+ break;
+ default:
+ e1000_validate_option(&adapter->itr, &opt, adapter);
+ break;
}
}
+ { /* Smart Power Down */
+ struct e1000_option opt = {
+ .type = enable_option,
+ .name = "PHY Smart Power Down",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+
+ int spd = SmartPowerDownEnable[bd];
+ e1000_validate_option(&spd, &opt, adapter);
+ adapter->smart_power_down = spd;
+ }
+ { /* Kumeran Lock Loss Workaround */
+ struct e1000_option opt = {
+ .type = enable_option,
+ .name = "Kumeran Lock Loss Workaround",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ int kmrn_lock_loss = KumeranLockLoss[bd];
+ e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+ adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+ }
switch (adapter->hw.media_type) {
case e1000_media_type_fiber:
@@ -519,17 +530,18 @@ static void __devinit
e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
- if (num_Speed > bd) {
+ bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
+ if ((Speed[bd] != OPTION_UNSET)) {
DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
"parameter ignored\n");
}
- if (num_Duplex > bd) {
+ if ((Duplex[bd] != OPTION_UNSET)) {
DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
"parameter ignored\n");
}
- if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+ if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
"not valid for fiber adapters, "
"parameter ignored\n");
@@ -548,6 +560,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
{
int speed, dplx, an;
int bd = adapter->bd_number;
+ bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
{ /* Speed */
struct e1000_opt_list speed_list[] = {{ 0, "" },
@@ -564,12 +577,8 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = speed_list }}
};
- if (num_Speed > bd) {
- speed = Speed[bd];
- e1000_validate_option(&speed, &opt, adapter);
- } else {
- speed = opt.def;
- }
+ speed = Speed[bd];
+ e1000_validate_option(&speed, &opt, adapter);
}
{ /* Duplex */
struct e1000_opt_list dplx_list[] = {{ 0, "" },
@@ -591,15 +600,11 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
"Speed/Duplex/AutoNeg parameter ignored.\n");
return;
}
- if (num_Duplex > bd) {
- dplx = Duplex[bd];
- e1000_validate_option(&dplx, &opt, adapter);
- } else {
- dplx = opt.def;
- }
+ dplx = Duplex[bd];
+ e1000_validate_option(&dplx, &opt, adapter);
}
- if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+ if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
DPRINTK(PROBE, INFO,
"AutoNeg specified along with Speed or Duplex, "
"parameter ignored\n");
@@ -648,19 +653,15 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = an_list }}
};
- if (num_AutoNeg > bd) {
- an = AutoNeg[bd];
- e1000_validate_option(&an, &opt, adapter);
- } else {
- an = opt.def;
- }
+ an = AutoNeg[bd];
+ e1000_validate_option(&an, &opt, adapter);
adapter->hw.autoneg_advertised = an;
}
switch (speed + dplx) {
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
- if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+ if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
DPRINTK(PROBE, INFO,
"Speed and duplex autonegotiation enabled\n");
break;
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index e5c5cd2a2712..e4e733a380e3 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -425,8 +425,8 @@ MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-int
-init_module(void)
+
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 20d31430c74f..8dc61d65dd23 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1807,8 +1807,7 @@ MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
-int
-init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int i;
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 2ad327542927..e445988c92ee 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -555,12 +555,12 @@ static int __devinit eepro100_init_one (struct pci_dev *pdev,
if (!request_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1), "eepro100")) {
- printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
+ dev_err(&pdev->dev, "eepro100: cannot reserve I/O ports\n");
goto err_out_none;
}
if (!request_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0), "eepro100")) {
- printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
+ dev_err(&pdev->dev, "eepro100: cannot reserve MMIO region\n");
goto err_out_free_pio_region;
}
@@ -573,7 +573,7 @@ static int __devinit eepro100_init_one (struct pci_dev *pdev,
ioaddr = pci_iomap(pdev, pci_bar, 0);
if (!ioaddr) {
- printk (KERN_ERR "eepro100: cannot remap IO\n");
+ dev_err(&pdev->dev, "eepro100: cannot remap IO\n");
goto err_out_free_mmio_region;
}
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 33291bcf6d4c..0701c1d810ca 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1698,7 +1698,7 @@ MODULE_LICENSE("GPL");
* are specified, we verify and then use them. If no parameters are given, we
* autoprobe for one card only.
*/
-int init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 9f3e09a3d88c..a67650ccf084 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -19,62 +19,15 @@
Information and updates available at
http://www.scyld.com/network/epic100.html
+ [this link no longer provides anything useful -jgarzik]
---------------------------------------------------------------------
- Linux kernel-specific changes:
-
- LK1.1.2 (jgarzik):
- * Merge becker version 1.09 (4/08/2000)
-
- LK1.1.3:
- * Major bugfix to 1.09 driver (Francis Romieu)
-
- LK1.1.4 (jgarzik):
- * Merge becker test version 1.09 (5/29/2000)
-
- LK1.1.5:
- * Fix locking (jgarzik)
- * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
-
- LK1.1.6:
- * Merge becker version 1.11
- * Move pci_enable_device before any PCI BAR len checks
-
- LK1.1.7:
- * { fill me in }
-
- LK1.1.8:
- * ethtool driver info support (jgarzik)
-
- LK1.1.9:
- * ethtool media get/set support (jgarzik)
-
- LK1.1.10:
- * revert MII transceiver init change (jgarzik)
-
- LK1.1.11:
- * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
- * replace some MII-related magic numbers with constants
-
- LK1.1.12:
- * fix power-up sequence
-
- LK1.1.13:
- * revert version 1.1.12, power-up sequence "fix"
-
- LK1.1.14 (Kryzsztof Halasa):
- * fix spurious bad initializations
- * pound phy a la SMSC's app note on the subject
-
- AC1.1.14ac
- * fix power up/down for ethtool that broke in 1.11
-
*/
#define DRV_NAME "epic100"
-#define DRV_VERSION "1.11+LK1.1.14+AC1.1.14"
-#define DRV_RELDATE "June 2, 2004"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "June 27, 2006"
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
@@ -204,19 +157,15 @@ typedef enum {
struct epic_chip_info {
const char *name;
- int io_size; /* Needed for I/O region check or ioremap(). */
int drv_flags; /* Driver use, intended as capability flags. */
};
/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
- { "SMSC EPIC/100 83c170",
- EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
- { "SMSC EPIC/100 83c170",
- EPIC_TOTAL_SIZE, TYPE2_INTR },
- { "SMSC EPIC/C 83c175",
- EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
+ { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN },
+ { "SMSC EPIC/100 83c170", TYPE2_INTR },
+ { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN },
};
@@ -385,8 +334,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
goto out;
irq = pdev->irq;
- if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
- printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
+ if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
+ dev_err(&pdev->dev, "no PCI region space\n");
ret = -ENODEV;
goto err_out_disable;
}
@@ -401,7 +350,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
dev = alloc_etherdev(sizeof (*ep));
if (!dev) {
- printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
+ dev_err(&pdev->dev, "no memory for eth device\n");
goto err_out_free_res;
}
SET_MODULE_OWNER(dev);
@@ -413,7 +362,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ioaddr = pci_resource_start (pdev, 1);
ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
if (!ioaddr) {
- printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
+ dev_err(&pdev->dev, "ioremap failed\n");
goto err_out_free_netdev;
}
#endif
@@ -473,8 +422,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
if (debug > 2) {
- printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
- pci_name(pdev));
+ dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
for (i = 0; i < 64; i++)
printk(" %4.4x%s", read_eeprom(ioaddr, i),
i % 16 == 15 ? "\n" : "");
@@ -496,21 +444,23 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
int mii_status = mdio_read(dev, phy, MII_BMSR);
if (mii_status != 0xffff && mii_status != 0x0000) {
ep->phys[phy_idx++] = phy;
- printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
- "%4.4x status %4.4x.\n",
- pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status);
+ dev_info(&pdev->dev,
+ "MII transceiver #%d control "
+ "%4.4x status %4.4x.\n",
+ phy, mdio_read(dev, phy, 0), mii_status);
}
}
ep->mii_phy_cnt = phy_idx;
if (phy_idx != 0) {
phy = ep->phys[0];
ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
- printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
+ dev_info(&pdev->dev,
+ "Autonegotiation advertising %4.4x link "
"partner %4.4x.\n",
- pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5));
+ ep->mii.advertising, mdio_read(dev, phy, 5));
} else if ( ! (ep->chip_flags & NO_MII)) {
- printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
- pci_name(pdev));
+ dev_warn(&pdev->dev,
+ "***WARNING***: No MII transceiver found!\n");
/* Use the known PHY address of the EPII. */
ep->phys[0] = 3;
}
@@ -525,8 +475,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
/* The lower four bits are the media type. */
if (duplex) {
ep->mii.force_media = ep->mii.full_duplex = 1;
- printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
- pci_name(pdev));
+ dev_info(&pdev->dev, "Forced full duplex requested.\n");
}
dev->if_port = ep->default_port = option;
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 6b0ab1eac3fb..fd7b32a24ea4 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -421,8 +421,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)");
MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
MODULE_LICENSE("GPL");
-int
-init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 4bf76f86d8e9..ca42efa9143c 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1434,7 +1434,7 @@ MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
-int init_module(void)
+int __init init_module(void)
{
int this_dev, found = 0;
struct net_device *dev;
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index c701951dcd6f..567e27413cfd 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
#include <asm/uaccess.h>
/* These identify the driver base version and may not be removed. */
-static char version[] __devinitdata =
+static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
@@ -124,7 +124,9 @@ MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered mult
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
-#define MIN_REGION_SIZE 136
+enum {
+ MIN_REGION_SIZE = 136,
+};
/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {
@@ -146,14 +148,13 @@ enum phy_type_flags {
struct chip_info {
char *chip_name;
- int io_size;
int flags;
};
-static const struct chip_info skel_netdrv_tbl[] = {
- {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR},
- {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR},
- {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR},
+static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
+ { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+ { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
+ { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
};
/* Offsets to the Command and Status Registers. */
@@ -504,13 +505,14 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
len = pci_resource_len(pdev, bar);
if (len < MIN_REGION_SIZE) {
- printk(KERN_ERR "%s: region size %ld too small, aborting\n",
- boardname, len);
+ dev_err(&pdev->dev,
+ "region size %ld too small, aborting\n", len);
return -ENODEV;
}
i = pci_request_regions(pdev, boardname);
- if (i) return i;
+ if (i)
+ return i;
irq = pdev->irq;
@@ -576,9 +578,9 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
if (mii_status != 0xffff && mii_status != 0x0000) {
np->phys[phy_idx++] = phy;
- printk(KERN_INFO
- "%s: MII PHY found at address %d, status "
- "0x%4.4x.\n", dev->name, phy, mii_status);
+ dev_info(&pdev->dev,
+ "MII PHY found at address %d, status "
+ "0x%4.4x.\n", phy, mii_status);
/* get phy type */
{
unsigned int data;
@@ -601,10 +603,10 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
}
np->mii_cnt = phy_idx;
- if (phy_idx == 0) {
- printk(KERN_WARNING "%s: MII PHY not found -- this device may "
- "not operate correctly.\n", dev->name);
- }
+ if (phy_idx == 0)
+ dev_warn(&pdev->dev,
+ "MII PHY not found -- this device may "
+ "not operate correctly.\n");
} else {
np->phys[0] = 32;
/* 89/6/23 add, (begin) */
@@ -630,7 +632,7 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
np->mii.full_duplex = full_duplex[card_idx];
if (np->mii.full_duplex) {
- printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
+ dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
/* 89/6/13 add, (begin) */
// if (np->PHYType==MarvellPHY)
if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 037d870712ff..11b8f1b43dd5 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -240,10 +240,12 @@ enum {
#define NVREG_RNDSEED_FORCE2 0x2d00
#define NVREG_RNDSEED_FORCE3 0x7400
- NvRegUnknownSetupReg1 = 0xA0,
-#define NVREG_UNKSETUP1_VAL 0x16070f
- NvRegUnknownSetupReg2 = 0xA4,
-#define NVREG_UNKSETUP2_VAL 0x16
+ NvRegTxDeferral = 0xA0,
+#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
+#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
+#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
+ NvRegRxDeferral = 0xA4,
+#define NVREG_RX_DEFERRAL_DEFAULT 0x16
NvRegMacAddrA = 0xA8,
NvRegMacAddrB = 0xAC,
NvRegMulticastAddrA = 0xB0,
@@ -269,8 +271,10 @@ enum {
#define NVREG_LINKSPEED_MASK (0xFFF)
NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
- NvRegUnknownSetupReg3 = 0x13c,
-#define NVREG_UNKSETUP3_VAL1 0x200010
+ NvRegTxWatermark = 0x13c,
+#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
+#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
+#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
@@ -658,7 +662,7 @@ static const struct register_test nv_registers_test[] = {
{ NvRegMisc1, 0x03c },
{ NvRegOffloadConfig, 0x03ff },
{ NvRegMulticastAddrA, 0xffffffff },
- { NvRegUnknownSetupReg3, 0x0ff },
+ { NvRegTxWatermark, 0x0ff },
{ NvRegWakeUpFlags, 0x07777 },
{ 0,0 }
};
@@ -1495,7 +1499,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->tx_skbuff[nr] = skb;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
#endif
@@ -2127,7 +2131,7 @@ static int nv_update_linkspeed(struct net_device *dev)
int newdup = np->duplex;
int mii_status;
int retval = 0;
- u32 control_1000, status_1000, phyreg, pause_flags;
+ u32 control_1000, status_1000, phyreg, pause_flags, txreg;
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
@@ -2245,6 +2249,26 @@ set_speed:
phyreg |= PHY_1000;
writel(phyreg, base + NvRegPhyInterface);
+ if (phyreg & PHY_RGMII) {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+ else
+ txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+ } else {
+ txreg = NVREG_TX_DEFERRAL_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxDeferral);
+
+ if (np->desc_ver == DESC_VER_1) {
+ txreg = NVREG_TX_WM_DESC1_DEFAULT;
+ } else {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_WM_DESC2_3_1000;
+ else
+ txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxWatermark);
+
writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
base + NvRegMisc1);
pci_push(base);
@@ -3910,7 +3934,10 @@ static int nv_open(struct net_device *dev)
/* 5) continue setup */
writel(np->linkspeed, base + NvRegLinkSpeed);
- writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
+ if (np->desc_ver == DESC_VER_1)
+ writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
+ else
+ writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
writel(np->txrxctl_bits, base + NvRegTxRxControl);
writel(np->vlanctl_bits, base + NvRegVlanControl);
pci_push(base);
@@ -3932,8 +3959,8 @@ static int nv_open(struct net_device *dev)
writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
get_random_bytes(&i, sizeof(i));
writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
- writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
- writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
+ writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
+ writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
if (poll_interval == -1) {
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
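
The forcedeth hunks give the formerly "unknown" setup registers their real names (TX/RX deferral, TX watermark) and program them per PHY interface, link speed and descriptor version; the TSO test also switches to the skb_is_gso() helper. Restating the deferral selection from nv_update_linkspeed() as a helper (the helper itself is illustrative; the constants and masks are the ones defined in the hunks above):

#include <linux/types.h>

static u32 nv_pick_tx_deferral(u32 phyreg, u32 linkspeed)
{
        if (!(phyreg & PHY_RGMII))
                return NVREG_TX_DEFERRAL_DEFAULT;

        if ((linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
                return NVREG_TX_DEFERRAL_RGMII_1000;

        return NVREG_TX_DEFERRAL_RGMII_10_100;
}
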
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
index d6dd3f2fb43e..02d4dc18ba69 100644
--- a/drivers/net/fs_enet/Makefile
+++ b/drivers/net/fs_enet/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_FS_ENET) += fs_enet.o
-obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o
-obj-$(CONFIG_8260) += mac-fcc.o
+obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o
+obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o
-fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o
+fs_enet-objs := fs_enet-main.o
diff --git a/drivers/net/fs_enet/fec.h b/drivers/net/fs_enet/fec.h
new file mode 100644
index 000000000000..e980527e2b99
--- /dev/null
+++ b/drivers/net/fs_enet/fec.h
@@ -0,0 +1,42 @@
+#ifndef FS_ENET_FEC_H
+#define FS_ENET_FEC_H
+
+/* CRC polynomial used by the FEC for the multicast group filtering */
+#define FEC_CRC_POLY 0x04C11DB7
+
+#define FEC_MAX_MULTICAST_ADDRS 64
+
+/* Interrupt events/masks.
+*/
+#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
+#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
+#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
+#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
+#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
+#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
+#define FEC_ENET_RXF 0x02000000U /* Full frame received */
+#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
+#define FEC_ENET_MII 0x00800000U /* MII interrupt */
+#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
+
+#define FEC_ECNTRL_PINMUX 0x00000004
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+#define FEC_ECNTRL_RESET 0x00000001
+
+#define FEC_RCNTRL_BC_REJ 0x00000010
+#define FEC_RCNTRL_PROM 0x00000008
+#define FEC_RCNTRL_MII_MODE 0x00000004
+#define FEC_RCNTRL_DRT 0x00000002
+#define FEC_RCNTRL_LOOP 0x00000001
+
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_GTS 0x00000001
+
+
+
+/*
+ * Delay to wait for FEC reset command to complete (in us)
+ */
+#define FEC_RESET_DELAY 50
+#endif
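
fec.h is new: the FEC interrupt-event and control bits move out of mac-fec.c so that both the MAC driver and the new mii-fec MDIO driver can share them. A hedged usage sketch (the mask name and helper below are assumptions, not part of the patch):

#include <linux/types.h>

#define FEC_EVENT_MSK   (FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)

/* Test a latched ievent value against the completion sources. */
static inline int fec_event_pending(u32 ievent)
{
        return (ievent & FEC_EVENT_MSK) != 0;
}
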
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index f6abff5846b3..df62506a1787 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -37,6 +37,7 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/phy.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
@@ -682,35 +683,6 @@ static void fs_free_irq(struct net_device *dev, int irq)
(*fep->ops->post_free_irq)(dev, irq);
}
-/**********************************************************************************/
-
-/* This interrupt occurs when the PHY detects a link change. */
-static irqreturn_t
-fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct net_device *dev = dev_id;
- struct fs_enet_private *fep;
- const struct fs_platform_info *fpi;
-
- fep = netdev_priv(dev);
- fpi = fep->fpi;
-
- /*
- * Acknowledge the interrupt if possible. If we have not
- * found the PHY yet we can't process or acknowledge the
- * interrupt now. Instead we ignore this interrupt for now,
- * which we can do since it is edge triggered. It will be
- * acknowledged later by fs_enet_open().
- */
- if (!fep->phy)
- return IRQ_NONE;
-
- fs_mii_ack_int(dev);
- fs_mii_link_status_change_check(dev, 0);
-
- return IRQ_HANDLED;
-}
-
static void fs_timeout(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
@@ -722,10 +694,13 @@ static void fs_timeout(struct net_device *dev)
spin_lock_irqsave(&fep->lock, flags);
if (dev->flags & IFF_UP) {
+ phy_stop(fep->phydev);
(*fep->ops->stop)(dev);
(*fep->ops->restart)(dev);
+ phy_start(fep->phydev);
}
+ phy_start(fep->phydev);
wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
@@ -733,35 +708,112 @@ static void fs_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+/*-----------------------------------------------------------------------------
+ * generic link-change handler - should be sufficient for most cases
+ *-----------------------------------------------------------------------------*/
+static void generic_adjust_link(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phydev;
+ int new_state = 0;
+
+ if (phydev->link) {
+
+ /* adjust to duplex mode */
+ if (phydev->duplex != fep->oldduplex){
+ new_state = 1;
+ fep->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != fep->oldspeed) {
+ new_state = 1;
+ fep->oldspeed = phydev->speed;
+ }
+
+ if (!fep->oldlink) {
+ new_state = 1;
+ fep->oldlink = 1;
+ netif_schedule(dev);
+ netif_carrier_on(dev);
+ netif_start_queue(dev);
+ }
+
+ if (new_state)
+ fep->ops->restart(dev);
+
+ } else if (fep->oldlink) {
+ new_state = 1;
+ fep->oldlink = 0;
+ fep->oldspeed = 0;
+ fep->oldduplex = -1;
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+ }
+
+ if (new_state && netif_msg_link(fep))
+ phy_print_status(phydev);
+}
+
+
+static void fs_adjust_link(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ if(fep->ops->adjust_link)
+ fep->ops->adjust_link(dev);
+ else
+ generic_adjust_link(dev);
+
+ spin_unlock_irqrestore(&fep->lock, flags);
+}
+
+static int fs_init_phy(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev;
+
+ fep->oldlink = 0;
+ fep->oldspeed = 0;
+ fep->oldduplex = -1;
+ if(fep->fpi->bus_id)
+ phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0);
+ else {
+ printk("No phy bus ID specified in BSP code\n");
+ return -EINVAL;
+ }
+ if (IS_ERR(phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ fep->phydev = phydev;
+
+ return 0;
+}
+
+
static int fs_enet_open(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
int r;
+ int err;
/* Install our interrupt handler. */
r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
if (r != 0) {
printk(KERN_ERR DRV_MODULE_NAME
- ": %s Could not allocate FEC IRQ!", dev->name);
+ ": %s Could not allocate FS_ENET IRQ!", dev->name);
return -EINVAL;
}
- /* Install our phy interrupt handler */
- if (fpi->phy_irq != -1) {
-
- r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
- if (r != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s Could not allocate PHY IRQ!", dev->name);
- fs_free_irq(dev, fep->interrupt);
- return -EINVAL;
- }
- }
+ err = fs_init_phy(dev);
+ if(err)
+ return err;
- fs_mii_startup(dev);
- netif_carrier_off(dev);
- fs_mii_link_status_change_check(dev, 1);
+ phy_start(fep->phydev);
return 0;
}
@@ -769,20 +821,19 @@ static int fs_enet_open(struct net_device *dev)
static int fs_enet_close(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
unsigned long flags;
netif_stop_queue(dev);
netif_carrier_off(dev);
- fs_mii_shutdown(dev);
+ phy_stop(fep->phydev);
spin_lock_irqsave(&fep->lock, flags);
(*fep->ops->stop)(dev);
spin_unlock_irqrestore(&fep->lock, flags);
/* release any irqs */
- if (fpi->phy_irq != -1)
- fs_free_irq(dev, fpi->phy_irq);
+ phy_disconnect(fep->phydev);
+ fep->phydev = NULL;
fs_free_irq(dev, fep->interrupt);
return 0;
@@ -830,33 +881,19 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct fs_enet_private *fep = netdev_priv(dev);
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&fep->lock, flags);
- rc = mii_ethtool_gset(&fep->mii_if, cmd);
- spin_unlock_irqrestore(&fep->lock, flags);
-
- return rc;
+ return phy_ethtool_gset(fep->phydev, cmd);
}
static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct fs_enet_private *fep = netdev_priv(dev);
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&fep->lock, flags);
- rc = mii_ethtool_sset(&fep->mii_if, cmd);
- spin_unlock_irqrestore(&fep->lock, flags);
-
- return rc;
+ phy_ethtool_sset(fep->phydev, cmd);
+ return 0;
}
static int fs_nway_reset(struct net_device *dev)
{
- struct fs_enet_private *fep = netdev_priv(dev);
- return mii_nway_restart(&fep->mii_if);
+ return 0;
}
static u32 fs_get_msglevel(struct net_device *dev)
@@ -898,7 +935,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EINVAL;
spin_lock_irqsave(&fep->lock, flags);
- rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
+ rc = phy_mii_ioctl(fep->phydev, mii, cmd);
spin_unlock_irqrestore(&fep->lock, flags);
return rc;
}
@@ -1030,12 +1067,6 @@ static struct net_device *fs_init_instance(struct device *dev,
}
registered = 1;
- err = fs_mii_connect(ndev);
- if (err != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s fs_mii_connect failed.\n", ndev->name);
- goto err;
- }
return ndev;
@@ -1073,8 +1104,6 @@ static int fs_cleanup_instance(struct net_device *ndev)
fpi = fep->fpi;
- fs_mii_disconnect(ndev);
-
unregister_netdev(ndev);
dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
@@ -1196,17 +1225,39 @@ static int __init fs_init(void)
r = setup_immap();
if (r != 0)
return r;
- r = driver_register(&fs_enet_fec_driver);
+
+#ifdef CONFIG_FS_ENET_HAS_FCC
+ /* let's insert mii stuff */
+ r = fs_enet_mdio_bb_init();
+
+ if (r != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ "BB PHY init failed.\n");
+ return r;
+ }
+ r = driver_register(&fs_enet_fcc_driver);
if (r != 0)
goto err;
+#endif
- r = driver_register(&fs_enet_fcc_driver);
+#ifdef CONFIG_FS_ENET_HAS_FEC
+ r = fs_enet_mdio_fec_init();
+ if (r != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ "FEC PHY init failed.\n");
+ return r;
+ }
+
+ r = driver_register(&fs_enet_fec_driver);
if (r != 0)
goto err;
+#endif
+#ifdef CONFIG_FS_ENET_HAS_SCC
r = driver_register(&fs_enet_scc_driver);
if (r != 0)
goto err;
+#endif
return 0;
err:
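
The fs_enet-main.c changes replace the driver's private MII layer with phylib: the PHY is attached with phy_connect() at open time, started and stopped around use, and fs_adjust_link() (or a MAC-specific adjust_link hook) reprograms the MAC whenever link, speed or duplex changes. A condensed sketch of that lifecycle, mirroring the calls visible in the hunks above; the function bodies themselves are illustrative and error paths are abbreviated:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include "fs_enet.h"

static int example_open(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct phy_device *phydev;

        phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0);
        if (IS_ERR(phydev))
                return PTR_ERR(phydev);

        fep->phydev = phydev;
        phy_start(phydev);              /* kick the PHY state machine */
        return 0;
}

static int example_close(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        phy_stop(fep->phydev);          /* stop the state machine first */
        phy_disconnect(fep->phydev);    /* then detach from the PHY */
        fep->phydev = NULL;
        return 0;
}
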
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
deleted file mode 100644
index b7e6e21725cb..000000000000
--- a/drivers/net/fs_enet/fs_enet-mii.c
+++ /dev/null
@@ -1,505 +0,0 @@
-/*
- * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
- *
- * Copyright (c) 2003 Intracom S.A.
- * by Pantelis Antoniou <panto@intracom.gr>
- *
- * 2005 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
- * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/bitops.h>
-
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-
-#include "fs_enet.h"
-
-/*************************************************/
-
-/*
- * Generic PHY support.
- * Should work for all PHYs, but link change is detected by polling
- */
-
-static void generic_timer_callback(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct fs_enet_private *fep = netdev_priv(dev);
-
- fep->phy_timer_list.expires = jiffies + HZ / 2;
-
- add_timer(&fep->phy_timer_list);
-
- fs_mii_link_status_change_check(dev, 0);
-}
-
-static void generic_startup(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */
- fep->phy_timer_list.data = (unsigned long)dev;
- fep->phy_timer_list.function = generic_timer_callback;
- add_timer(&fep->phy_timer_list);
-}
-
-static void generic_shutdown(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- del_timer_sync(&fep->phy_timer_list);
-}
-
-/* ------------------------------------------------------------------------- */
-/* The Davicom DM9161 is used on the NETTA board */
-
-/* register definitions */
-
-#define MII_DM9161_ANAR 4 /* Aux. Config Register */
-#define MII_DM9161_ACR 16 /* Aux. Config Register */
-#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
-#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
-#define MII_DM9161_INTR 21 /* Interrupt Register */
-#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
-#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
-
-static void dm9161_startup(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
- /* Start autonegotiation */
- fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200);
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ*8);
-}
-
-static void dm9161_ack_int(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
-}
-
-static void dm9161_shutdown(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
-}
-
-/**********************************************************************************/
-
-static const struct phy_info phy_info[] = {
- {
- .id = 0x00181b88,
- .name = "DM9161",
- .startup = dm9161_startup,
- .ack_int = dm9161_ack_int,
- .shutdown = dm9161_shutdown,
- }, {
- .id = 0,
- .name = "GENERIC",
- .startup = generic_startup,
- .shutdown = generic_shutdown,
- },
-};
-
-/**********************************************************************************/
-
-static int phy_id_detect(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
- struct fs_enet_mii_bus *bus = fep->mii_bus;
- int i, r, start, end, phytype, physubtype;
- const struct phy_info *phy;
- int phy_hwid, phy_id;
-
- phy_hwid = -1;
- fep->phy = NULL;
-
- /* auto-detect? */
- if (fpi->phy_addr == -1) {
- start = 1;
- end = 32;
- } else { /* direct */
- start = fpi->phy_addr;
- end = start + 1;
- }
-
- for (phy_id = start; phy_id < end; phy_id++) {
- /* skip already used phy addresses on this bus */
- if (bus->usage_map & (1 << phy_id))
- continue;
- r = fs_mii_read(dev, phy_id, MII_PHYSID1);
- if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
- continue;
- r = fs_mii_read(dev, phy_id, MII_PHYSID2);
- if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
- continue;
- phy_hwid = (phytype << 16) | physubtype;
- if (phy_hwid != -1)
- break;
- }
-
- if (phy_hwid == -1) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s No PHY detected! range=0x%02x-0x%02x\n",
- dev->name, start, end);
- return -1;
- }
-
- for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
- if (phy->id == (phy_hwid >> 4) || phy->id == 0)
- break;
-
- if (i >= ARRAY_SIZE(phy_info)) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s PHY id 0x%08x is not supported!\n",
- dev->name, phy_hwid);
- return -1;
- }
-
- fep->phy = phy;
-
- /* mark this address as used */
- bus->usage_map |= (1 << phy_id);
-
- printk(KERN_INFO DRV_MODULE_NAME
- ": %s Phy @ 0x%x, type %s (0x%08x)%s\n",
- dev->name, phy_id, fep->phy->name, phy_hwid,
- fpi->phy_addr == -1 ? " (auto-detected)" : "");
-
- return phy_id;
-}
-
-void fs_mii_startup(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (fep->phy->startup)
- (*fep->phy->startup) (dev);
-}
-
-void fs_mii_shutdown(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (fep->phy->shutdown)
- (*fep->phy->shutdown) (dev);
-}
-
-void fs_mii_ack_int(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (fep->phy->ack_int)
- (*fep->phy->ack_int) (dev);
-}
-
-#define MII_LINK 0x0001
-#define MII_HALF 0x0002
-#define MII_FULL 0x0004
-#define MII_BASE4 0x0008
-#define MII_10M 0x0010
-#define MII_100M 0x0020
-#define MII_1G 0x0040
-#define MII_10G 0x0080
-
-/* return full mii info at one gulp, with a usable form */
-static unsigned int mii_full_status(struct mii_if_info *mii)
-{
- unsigned int status;
- int bmsr, adv, lpa, neg;
- struct fs_enet_private* fep = netdev_priv(mii->dev);
-
- /* first, a dummy read, needed to latch some MII phys */
- (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
- bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
-
- /* no link */
- if ((bmsr & BMSR_LSTATUS) == 0)
- return 0;
-
- status = MII_LINK;
-
- /* Lets look what ANEG says if it's supported - otherwize we shall
- take the right values from the platform info*/
- if(!mii->force_media) {
- /* autoneg not completed; don't bother */
- if ((bmsr & BMSR_ANEGCOMPLETE) == 0)
- return 0;
-
- adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE);
- lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA);
-
- neg = lpa & adv;
- } else {
- neg = fep->fpi->bus_info->lpa;
- }
-
- if (neg & LPA_100FULL)
- status |= MII_FULL | MII_100M;
- else if (neg & LPA_100BASE4)
- status |= MII_FULL | MII_BASE4 | MII_100M;
- else if (neg & LPA_100HALF)
- status |= MII_HALF | MII_100M;
- else if (neg & LPA_10FULL)
- status |= MII_FULL | MII_10M;
- else
- status |= MII_HALF | MII_10M;
-
- return status;
-}
-
-void fs_mii_link_status_change_check(struct net_device *dev, int init_media)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct mii_if_info *mii = &fep->mii_if;
- unsigned int mii_status;
- int ok_to_print, link, duplex, speed;
- unsigned long flags;
-
- ok_to_print = netif_msg_link(fep);
-
- mii_status = mii_full_status(mii);
-
- if (!init_media && mii_status == fep->last_mii_status)
- return;
-
- fep->last_mii_status = mii_status;
-
- link = !!(mii_status & MII_LINK);
- duplex = !!(mii_status & MII_FULL);
- speed = (mii_status & MII_100M) ? 100 : 10;
-
- if (link == 0) {
- netif_carrier_off(mii->dev);
- netif_stop_queue(dev);
- if (!init_media) {
- spin_lock_irqsave(&fep->lock, flags);
- (*fep->ops->stop)(dev);
- spin_unlock_irqrestore(&fep->lock, flags);
- }
-
- if (ok_to_print)
- printk(KERN_INFO "%s: link down\n", mii->dev->name);
-
- } else {
-
- mii->full_duplex = duplex;
-
- netif_carrier_on(mii->dev);
-
- spin_lock_irqsave(&fep->lock, flags);
- fep->duplex = duplex;
- fep->speed = speed;
- (*fep->ops->restart)(dev);
- spin_unlock_irqrestore(&fep->lock, flags);
-
- netif_start_queue(dev);
-
- if (ok_to_print)
- printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n",
- dev->name, speed, duplex ? "full" : "half");
- }
-}
-
-/**********************************************************************************/
-
-int fs_mii_read(struct net_device *dev, int phy_id, int location)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fs_enet_mii_bus *bus = fep->mii_bus;
-
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&bus->mii_lock, flags);
- ret = (*bus->mii_read)(bus, phy_id, location);
- spin_unlock_irqrestore(&bus->mii_lock, flags);
-
- return ret;
-}
-
-void fs_mii_write(struct net_device *dev, int phy_id, int location, int value)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fs_enet_mii_bus *bus = fep->mii_bus;
- unsigned long flags;
-
- spin_lock_irqsave(&bus->mii_lock, flags);
- (*bus->mii_write)(bus, phy_id, location, value);
- spin_unlock_irqrestore(&bus->mii_lock, flags);
-}
-
-/*****************************************************************************/
-
-/* list of all registered mii buses */
-static LIST_HEAD(fs_mii_bus_list);
-
-static struct fs_enet_mii_bus *lookup_bus(int method, int id)
-{
- struct list_head *ptr;
- struct fs_enet_mii_bus *bus;
-
- list_for_each(ptr, &fs_mii_bus_list) {
- bus = list_entry(ptr, struct fs_enet_mii_bus, list);
- if (bus->bus_info->method == method &&
- bus->bus_info->id == id)
- return bus;
- }
- return NULL;
-}
-
-static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
-{
- struct fs_enet_mii_bus *bus;
- int ret = 0;
-
- bus = kmalloc(sizeof(*bus), GFP_KERNEL);
- if (bus == NULL) {
- ret = -ENOMEM;
- goto err;
- }
- memset(bus, 0, sizeof(*bus));
- spin_lock_init(&bus->mii_lock);
- bus->bus_info = bi;
- bus->refs = 0;
- bus->usage_map = 0;
-
- /* perform initialization */
- switch (bi->method) {
-
- case fsmii_fixed:
- ret = fs_mii_fixed_init(bus);
- if (ret != 0)
- goto err;
- break;
-
- case fsmii_bitbang:
- ret = fs_mii_bitbang_init(bus);
- if (ret != 0)
- goto err;
- break;
-#ifdef CONFIG_FS_ENET_HAS_FEC
- case fsmii_fec:
- ret = fs_mii_fec_init(bus);
- if (ret != 0)
- goto err;
- break;
-#endif
- default:
- ret = -EINVAL;
- goto err;
- }
-
- list_add(&bus->list, &fs_mii_bus_list);
-
- return bus;
-
-err:
- kfree(bus);
- return ERR_PTR(ret);
-}
-
-static void destroy_bus(struct fs_enet_mii_bus *bus)
-{
- /* remove from bus list */
- list_del(&bus->list);
-
- /* nothing more needed */
- kfree(bus);
-}
-
-int fs_mii_connect(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
- struct fs_enet_mii_bus *bus = NULL;
-
- /* check method validity */
- switch (fpi->bus_info->method) {
- case fsmii_fixed:
- case fsmii_bitbang:
- break;
-#ifdef CONFIG_FS_ENET_HAS_FEC
- case fsmii_fec:
- break;
-#endif
- default:
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s Unknown MII bus method (%d)!\n",
- dev->name, fpi->bus_info->method);
- return -EINVAL;
- }
-
- bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id);
-
- /* if not found create new bus */
- if (bus == NULL) {
- bus = create_bus(fpi->bus_info);
- if (IS_ERR(bus)) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s MII bus creation failure!\n", dev->name);
- return PTR_ERR(bus);
- }
- }
-
- bus->refs++;
-
- fep->mii_bus = bus;
-
- fep->mii_if.dev = dev;
- fep->mii_if.phy_id_mask = 0x1f;
- fep->mii_if.reg_num_mask = 0x1f;
- fep->mii_if.mdio_read = fs_mii_read;
- fep->mii_if.mdio_write = fs_mii_write;
- fep->mii_if.force_media = fpi->bus_info->disable_aneg;
- fep->mii_if.phy_id = phy_id_detect(dev);
-
- return 0;
-}
-
-void fs_mii_disconnect(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fs_enet_mii_bus *bus = NULL;
-
- bus = fep->mii_bus;
- fep->mii_bus = NULL;
-
- if (--bus->refs <= 0)
- destroy_bus(bus);
-}
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index e7ec96c964a9..95022c005f75 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -5,6 +5,7 @@
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/phy.h>
#include <linux/fs_enet_pd.h>
@@ -12,12 +13,30 @@
#ifdef CONFIG_CPM1
#include <asm/commproc.h>
+
+struct fec_info {
+ fec_t* fecp;
+ u32 mii_speed;
+};
#endif
#ifdef CONFIG_CPM2
#include <asm/cpm2.h>
#endif
+/* This is used to operate with pins.
+ Note that the actual port size may
+ be different; cpm(s) handle it OK */
+struct bb_info {
+ u8 mdio_dat_msk;
+ u8 mdio_dir_msk;
+ u8 *mdio_dir;
+ u8 *mdio_dat;
+ u8 mdc_msk;
+ u8 *mdc_dat;
+ int delay;
+};
+
/* hw driver ops */
struct fs_ops {
int (*setup_data)(struct net_device *dev);
@@ -25,6 +44,7 @@ struct fs_ops {
void (*free_bd)(struct net_device *dev);
void (*cleanup_data)(struct net_device *dev);
void (*set_multicast_list)(struct net_device *dev);
+ void (*adjust_link)(struct net_device *dev);
void (*restart)(struct net_device *dev);
void (*stop)(struct net_device *dev);
void (*pre_request_irq)(struct net_device *dev, int irq);
@@ -100,10 +120,6 @@ struct fs_enet_mii_bus {
};
};
-int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus);
-int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
-int fs_mii_fec_init(struct fs_enet_mii_bus *bus);
-
struct fs_enet_private {
struct device *dev; /* pointer back to the device (must be initialized first) */
spinlock_t lock; /* during all ops except TX pckt processing */
@@ -130,7 +146,8 @@ struct fs_enet_private {
struct fs_enet_mii_bus *mii_bus;
int interrupt;
- int duplex, speed; /* current settings */
+ struct phy_device *phydev;
+ int oldduplex, oldspeed, oldlink; /* current settings */
/* event masks */
u32 ev_napi_rx; /* mask of NAPI rx events */
@@ -168,15 +185,9 @@ struct fs_enet_private {
};
/***************************************************************************/
-
-int fs_mii_read(struct net_device *dev, int phy_id, int location);
-void fs_mii_write(struct net_device *dev, int phy_id, int location, int value);
-
-void fs_mii_startup(struct net_device *dev);
-void fs_mii_shutdown(struct net_device *dev);
-void fs_mii_ack_int(struct net_device *dev);
-
-void fs_mii_link_status_change_check(struct net_device *dev, int init_media);
+int fs_enet_mdio_bb_init(void);
+int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
+int fs_enet_mdio_fec_init(void);
void fs_init_bds(struct net_device *dev);
void fs_cleanup_bds(struct net_device *dev);
@@ -194,7 +205,6 @@ int fs_enet_platform_init(void);
void fs_enet_platform_cleanup(void);
/***************************************************************************/
-
/* buffer descriptor access macros */
/* access macros */
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 64e20982c1fe..1ff2597b8495 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -34,6 +34,7 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/phy.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
@@ -122,22 +123,32 @@ static int do_pd_setup(struct fs_enet_private *fep)
/* Attach the memory for the FCC Parameter RAM */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
- fep->fcc.ep = (void *)r->start;
-
+ fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1);
if (fep->fcc.ep == NULL)
return -EINVAL;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
- fep->fcc.fccp = (void *)r->start;
-
+ fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1);
if (fep->fcc.fccp == NULL)
return -EINVAL;
- fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
+ if (fep->fpi->fcc_regs_c) {
+
+ fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
+ } else {
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "fcc_regs_c");
+ fep->fcc.fcccp = (void *)ioremap(r->start,
+ r->end - r->start + 1);
+ }
if (fep->fcc.fcccp == NULL)
return -EINVAL;
+ fep->fcc.mem = (void *)fep->fpi->mem_offset;
+ if (fep->fcc.mem == NULL)
+ return -EINVAL;
+
return 0;
}
@@ -155,8 +166,6 @@ static int setup_data(struct net_device *dev)
if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
return -EINVAL;
- fep->fcc.mem = (void *)fpi->mem_offset;
-
if (do_pd_setup(fep) != 0)
return -EINVAL;
@@ -394,7 +403,7 @@ static void restart(struct net_device *dev)
/* adjust to speed (for RMII mode) */
if (fpi->use_rmii) {
- if (fep->speed == 100)
+ if (fep->phydev->speed == 100)
C8(fcccp, fcc_gfemr, 0x20);
else
S8(fcccp, fcc_gfemr, 0x20);
@@ -420,7 +429,7 @@ static void restart(struct net_device *dev)
S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
/* adjust to duplex mode */
- if (fep->duplex)
+ if (fep->phydev->duplex)
S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
@@ -486,7 +495,10 @@ static void rx_bd_done(struct net_device *dev)
static void tx_kickstart(struct net_device *dev)
{
- /* nothing */
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+
+ S32(fccp, fcc_ftodr, 0x80);
}
static u32 get_int_events(struct net_device *dev)
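
In mac-fcc.c the FCC parameter RAM and register blocks are now ioremap()ed from their platform resources instead of being dereferenced as raw physical addresses, and tx_kickstart() actually kicks the transmitter via the FTODR bit. The mapping pattern, as a hedged helper (the function and resource name are illustrative):

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <asm/io.h>

/* Look up a named MEM resource and map it; caller checks for NULL. */
static void __iomem *map_named_resource(struct platform_device *pdev,
                                        const char *name)
{
        struct resource *r;

        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (!r)
                return NULL;

        return ioremap(r->start, r->end - r->start + 1);
}
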
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index e09547077529..c2c5fd419bd0 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -46,6 +46,7 @@
#endif
#include "fs_enet.h"
+#include "fec.h"
/*************************************************/
@@ -75,50 +76,8 @@
/* clear bits */
#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
-
-/* CRC polynomium used by the FEC for the multicast group filtering */
-#define FEC_CRC_POLY 0x04C11DB7
-
-#define FEC_MAX_MULTICAST_ADDRS 64
-
-/* Interrupt events/masks.
-*/
-#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
-#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
-#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
-#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
-#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
-#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
-#define FEC_ENET_RXF 0x02000000U /* Full frame received */
-#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
-#define FEC_ENET_MII 0x00800000U /* MII interrupt */
-#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
-
-#define FEC_ECNTRL_PINMUX 0x00000004
-#define FEC_ECNTRL_ETHER_EN 0x00000002
-#define FEC_ECNTRL_RESET 0x00000001
-
-#define FEC_RCNTRL_BC_REJ 0x00000010
-#define FEC_RCNTRL_PROM 0x00000008
-#define FEC_RCNTRL_MII_MODE 0x00000004
-#define FEC_RCNTRL_DRT 0x00000002
-#define FEC_RCNTRL_LOOP 0x00000001
-
-#define FEC_TCNTRL_FDEN 0x00000004
-#define FEC_TCNTRL_HBC 0x00000002
-#define FEC_TCNTRL_GTS 0x00000001
-
-
-/* Make MII read/write commands for the FEC.
-*/
-#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
-#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
-#define mk_mii_end 0
-
-#define FEC_MII_LOOPS 10000
-
/*
- * Delay to wait for FEC reset command to complete (in us)
+ * Delay to wait for FEC reset command to complete (in us)
*/
#define FEC_RESET_DELAY 50
@@ -303,13 +262,15 @@ static void restart(struct net_device *dev)
int r;
u32 addrhi, addrlo;
+ struct mii_bus* mii = fep->phydev->bus;
+ struct fec_info* fec_inf = mii->priv;
+
r = whack_reset(fep->fec.fecp);
if (r != 0)
printk(KERN_ERR DRV_MODULE_NAME
": %s FEC Reset FAILED!\n", dev->name);
-
/*
- * Set station address.
+ * Set station address.
*/
addrhi = ((u32) dev->dev_addr[0] << 24) |
((u32) dev->dev_addr[1] << 16) |
@@ -350,12 +311,12 @@ static void restart(struct net_device *dev)
FW(fecp, fun_code, 0x78000000);
/*
- * Set MII speed.
+ * Set MII speed.
*/
- FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed);
+ FW(fecp, mii_speed, fec_inf->mii_speed);
/*
- * Clear any outstanding interrupt.
+ * Clear any outstanding interrupt.
*/
FW(fecp, ievent, 0xffc0);
FW(fecp, ivec, (fep->interrupt / 2) << 29);
@@ -390,11 +351,12 @@ static void restart(struct net_device *dev)
}
#endif
+
FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
/*
- * adjust to duplex mode
+ * adjust to duplex mode
*/
- if (fep->duplex) {
+ if (fep->phydev->duplex) {
FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
} else {
@@ -418,9 +380,11 @@ static void restart(struct net_device *dev)
static void stop(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
fec_t *fecp = fep->fec.fecp;
- struct fs_enet_mii_bus *bus = fep->mii_bus;
- const struct fs_mii_bus_info *bi = bus->bus_info;
+
+ struct fec_info* feci= fep->phydev->bus->priv;
+
int i;
if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
@@ -444,11 +408,11 @@ static void stop(struct net_device *dev)
fs_cleanup_bds(dev);
/* shut down FEC1? that's where the mii bus is */
- if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) {
+ if (fpi->has_phy) {
FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
FW(fecp, ievent, FEC_ENET_MII);
- FW(fecp, mii_speed, bus->fec.mii_speed);
+ FW(fecp, mii_speed, feci->mii_speed);
}
}
@@ -583,73 +547,3 @@ const struct fs_ops fs_fec_ops = {
.free_bd = free_bd,
};
-/***********************************************************************/
-
-static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
-{
- fec_t *fecp = bus->fec.fecp;
- int i, ret = -1;
-
- if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
- BUG();
-
- /* Add PHY address to register command. */
- FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));
-
- for (i = 0; i < FEC_MII_LOOPS; i++)
- if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
- break;
-
- if (i < FEC_MII_LOOPS) {
- FW(fecp, ievent, FEC_ENET_MII);
- ret = FR(fecp, mii_data) & 0xffff;
- }
-
- return ret;
-}
-
-static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value)
-{
- fec_t *fecp = bus->fec.fecp;
- int i;
-
- /* this must never happen */
- if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
- BUG();
-
- /* Add PHY address to register command. */
- FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));
-
- for (i = 0; i < FEC_MII_LOOPS; i++)
- if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
- break;
-
- if (i < FEC_MII_LOOPS)
- FW(fecp, ievent, FEC_ENET_MII);
-}
-
-int fs_mii_fec_init(struct fs_enet_mii_bus *bus)
-{
- bd_t *bd = (bd_t *)__res;
- const struct fs_mii_bus_info *bi = bus->bus_info;
- fec_t *fecp;
-
- if (bi->id != 0)
- return -1;
-
- bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec;
- bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2)
- & 0x3F) << 1;
-
- fecp = bus->fec.fecp;
-
- FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
- FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
- FW(fecp, ievent, FEC_ENET_MII);
- FW(fecp, mii_speed, bus->fec.mii_speed);
-
- bus->mii_read = mii_read;
- bus->mii_write = mii_write;
-
- return 0;
-}
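
With the MII accessors gone from mac-fec.c, the MAC now obtains the MII clock divider from the mii-fec bus driver's private data (struct fec_info, declared in fs_enet.h) through the attached phy_device. A small accessor restating what the restart()/stop() hunks above do inline (the helper is illustrative):

#include <linux/phy.h>
#include "fs_enet.h"

static u32 fec_mii_speed(struct fs_enet_private *fep)
{
        struct fec_info *fec_inf = fep->phydev->bus->priv;

        return fec_inf->mii_speed;
}
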
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index eaa24fab645f..95ec5872c507 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -369,7 +369,7 @@ static void restart(struct net_device *dev)
W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
/* Set full duplex mode if needed */
- if (fep->duplex)
+ if (fep->phydev->duplex)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
@@ -500,6 +500,8 @@ static void tx_restart(struct net_device *dev)
scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}
+
+
/*************************************************************************/
const struct fs_ops fs_scc_ops = {
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 48f9cf83ab6f..0b9b8b5c847c 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -33,6 +33,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
@@ -40,129 +41,25 @@
#include "fs_enet.h"
-#ifdef CONFIG_8xx
-static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
+static int bitbang_prep_bit(u8 **datp, u8 *mskp,
+ struct fs_mii_bit *mii_bit)
{
- immap_t *im = (immap_t *)fs_enet_immap;
- void *dir, *dat, *ppar;
+ void *dat;
int adv;
u8 msk;
- switch (port) {
- case fsiop_porta:
- dir = &im->im_ioport.iop_padir;
- dat = &im->im_ioport.iop_padat;
- ppar = &im->im_ioport.iop_papar;
- break;
-
- case fsiop_portb:
- dir = &im->im_cpm.cp_pbdir;
- dat = &im->im_cpm.cp_pbdat;
- ppar = &im->im_cpm.cp_pbpar;
- break;
-
- case fsiop_portc:
- dir = &im->im_ioport.iop_pcdir;
- dat = &im->im_ioport.iop_pcdat;
- ppar = &im->im_ioport.iop_pcpar;
- break;
-
- case fsiop_portd:
- dir = &im->im_ioport.iop_pddir;
- dat = &im->im_ioport.iop_pddat;
- ppar = &im->im_ioport.iop_pdpar;
- break;
-
- case fsiop_porte:
- dir = &im->im_cpm.cp_pedir;
- dat = &im->im_cpm.cp_pedat;
- ppar = &im->im_cpm.cp_pepar;
- break;
-
- default:
- printk(KERN_ERR DRV_MODULE_NAME
- "Illegal port value %d!\n", port);
- return -EINVAL;
- }
-
- adv = bit >> 3;
- dir = (char *)dir + adv;
- dat = (char *)dat + adv;
- ppar = (char *)ppar + adv;
-
- msk = 1 << (7 - (bit & 7));
- if ((in_8(ppar) & msk) != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- "pin %d on port %d is not general purpose!\n", bit, port);
- return -EINVAL;
- }
-
- *dirp = dir;
- *datp = dat;
- *mskp = msk;
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_8260
-static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
-{
- iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport;
- void *dir, *dat, *ppar;
- int adv;
- u8 msk;
-
- switch (port) {
- case fsiop_porta:
- dir = &io->iop_pdira;
- dat = &io->iop_pdata;
- ppar = &io->iop_ppara;
- break;
-
- case fsiop_portb:
- dir = &io->iop_pdirb;
- dat = &io->iop_pdatb;
- ppar = &io->iop_pparb;
- break;
-
- case fsiop_portc:
- dir = &io->iop_pdirc;
- dat = &io->iop_pdatc;
- ppar = &io->iop_pparc;
- break;
-
- case fsiop_portd:
- dir = &io->iop_pdird;
- dat = &io->iop_pdatd;
- ppar = &io->iop_ppard;
- break;
-
- default:
- printk(KERN_ERR DRV_MODULE_NAME
- "Illegal port value %d!\n", port);
- return -EINVAL;
- }
+ dat = (void*) mii_bit->offset;
- adv = bit >> 3;
- dir = (char *)dir + adv;
+ adv = mii_bit->bit >> 3;
dat = (char *)dat + adv;
- ppar = (char *)ppar + adv;
- msk = 1 << (7 - (bit & 7));
- if ((in_8(ppar) & msk) != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- "pin %d on port %d is not general purpose!\n", bit, port);
- return -EINVAL;
- }
+ msk = 1 << (7 - (mii_bit->bit & 7));
- *dirp = dir;
*datp = dat;
*mskp = msk;
return 0;
}
-#endif
static inline void bb_set(u8 *p, u8 m)
{
@@ -179,44 +76,44 @@ static inline int bb_read(u8 *p, u8 m)
return (in_8(p) & m) != 0;
}
-static inline void mdio_active(struct fs_enet_mii_bus *bus)
+static inline void mdio_active(struct bb_info *bitbang)
{
- bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
+ bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk);
}
-static inline void mdio_tristate(struct fs_enet_mii_bus *bus)
+static inline void mdio_tristate(struct bb_info *bitbang )
{
- bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
+ bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk);
}
-static inline int mdio_read(struct fs_enet_mii_bus *bus)
+static inline int mdio_read(struct bb_info *bitbang )
{
- return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
+ return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk);
}
-static inline void mdio(struct fs_enet_mii_bus *bus, int what)
+static inline void mdio(struct bb_info *bitbang , int what)
{
if (what)
- bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
+ bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk);
else
- bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
+ bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk);
}
-static inline void mdc(struct fs_enet_mii_bus *bus, int what)
+static inline void mdc(struct bb_info *bitbang , int what)
{
if (what)
- bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
+ bb_set(bitbang->mdc_dat, bitbang->mdc_msk);
else
- bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
+ bb_clr(bitbang->mdc_dat, bitbang->mdc_msk);
}
-static inline void mii_delay(struct fs_enet_mii_bus *bus)
+static inline void mii_delay(struct bb_info *bitbang )
{
- udelay(bus->bus_info->i.bitbang.delay);
+ udelay(bitbang->delay);
}
/* Utility to send the preamble, address, and register (common to read and write). */
-static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
+static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg)
{
int j;
@@ -228,177 +125,284 @@ static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
* but it is safer and will be much more robust.
*/
- mdio_active(bus);
- mdio(bus, 1);
+ mdio_active(bitbang);
+ mdio(bitbang, 1);
for (j = 0; j < 32; j++) {
- mdc(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
}
/* send the start bit (01) and the read opcode (10) or write (10) */
- mdc(bus, 0);
- mdio(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
- mdc(bus, 0);
- mdio(bus, 1);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
- mdc(bus, 0);
- mdio(bus, read);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
- mdc(bus, 0);
- mdio(bus, !read);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 0);
+ mdio(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 0);
+ mdio(bitbang, read);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 0);
+ mdio(bitbang, !read);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
/* send the PHY address */
for (j = 0; j < 5; j++) {
- mdc(bus, 0);
- mdio(bus, (addr & 0x10) != 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio(bitbang, (addr & 0x10) != 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
addr <<= 1;
}
/* send the register address */
for (j = 0; j < 5; j++) {
- mdc(bus, 0);
- mdio(bus, (reg & 0x10) != 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio(bitbang, (reg & 0x10) != 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
reg <<= 1;
}
}
-static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
+static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location)
{
u16 rdreg;
int ret, j;
u8 addr = phy_id & 0xff;
u8 reg = location & 0xff;
+ struct bb_info* bitbang = bus->priv;
- bitbang_pre(bus, 1, addr, reg);
+ bitbang_pre(bitbang, 1, addr, reg);
/* tri-state our MDIO I/O pin so we can read */
- mdc(bus, 0);
- mdio_tristate(bus);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio_tristate(bitbang);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
/* check the turnaround bit: the PHY should be driving it to zero */
- if (mdio_read(bus) != 0) {
+ if (mdio_read(bitbang) != 0) {
/* PHY didn't drive TA low */
for (j = 0; j < 32; j++) {
- mdc(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
}
ret = -1;
goto out;
}
- mdc(bus, 0);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
/* read 16 bits of register data, MSB first */
rdreg = 0;
for (j = 0; j < 16; j++) {
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
rdreg <<= 1;
- rdreg |= mdio_read(bus);
- mdc(bus, 0);
- mii_delay(bus);
+ rdreg |= mdio_read(bitbang);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
}
- mdc(bus, 1);
- mii_delay(bus);
- mdc(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
ret = rdreg;
out:
return ret;
}
-static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
+static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val)
{
int j;
+ struct bb_info* bitbang = bus->priv;
+
u8 addr = phy_id & 0xff;
u8 reg = location & 0xff;
u16 value = val & 0xffff;
- bitbang_pre(bus, 0, addr, reg);
+ bitbang_pre(bitbang, 0, addr, reg);
/* send the turnaround (10) */
- mdc(bus, 0);
- mdio(bus, 1);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
- mdc(bus, 0);
- mdio(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 0);
+ mdio(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
/* write 16 bits of register data, MSB first */
for (j = 0; j < 16; j++) {
- mdc(bus, 0);
- mdio(bus, (value & 0x8000) != 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio(bitbang, (value & 0x8000) != 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
value <<= 1;
}
/*
* Tri-state the MDIO line.
*/
- mdio_tristate(bus);
- mdc(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdio_tristate(bitbang);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ return 0;
}
-int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus)
+static int fs_enet_mii_bb_reset(struct mii_bus *bus)
+{
+ /* nothing to do here - no way to reset this bus is known */
+ return 0;
+}
+
+static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi)
{
- const struct fs_mii_bus_info *bi = bus->bus_info;
int r;
- r = bitbang_prep_bit(&bus->bitbang.mdio_dir,
- &bus->bitbang.mdio_dat,
- &bus->bitbang.mdio_msk,
- bi->i.bitbang.mdio_port,
- bi->i.bitbang.mdio_bit);
+ bitbang->delay = fmpi->delay;
+
+ r = bitbang_prep_bit(&bitbang->mdio_dir,
+ &bitbang->mdio_dir_msk,
+ &fmpi->mdio_dir);
if (r != 0)
return r;
- r = bitbang_prep_bit(&bus->bitbang.mdc_dir,
- &bus->bitbang.mdc_dat,
- &bus->bitbang.mdc_msk,
- bi->i.bitbang.mdc_port,
- bi->i.bitbang.mdc_bit);
+ r = bitbang_prep_bit(&bitbang->mdio_dat,
+ &bitbang->mdio_dat_msk,
+ &fmpi->mdio_dat);
if (r != 0)
return r;
- bus->mii_read = mii_read;
- bus->mii_write = mii_write;
+ r = bitbang_prep_bit(&bitbang->mdc_dat,
+ &bitbang->mdc_msk,
+ &fmpi->mdc_dat);
+ if (r != 0)
+ return r;
return 0;
}
+
+
+static int __devinit fs_enet_mdio_probe(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fs_mii_bb_platform_info *pdata;
+ struct mii_bus *new_bus;
+ struct bb_info *bitbang;
+ int err = 0;
+
+ if (NULL == dev)
+ return -EINVAL;
+
+ new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
+
+ if (NULL == new_bus)
+ return -ENOMEM;
+
+ bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
+
+ if (NULL == bitbang) {
+ kfree(new_bus);
+ return -ENOMEM;
+ }
+
+ new_bus->name = "BB MII Bus",
+ new_bus->read = &fs_enet_mii_bb_read,
+ new_bus->write = &fs_enet_mii_bb_write,
+ new_bus->reset = &fs_enet_mii_bb_reset,
+ new_bus->id = pdev->id;
+
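+ /* a set bit in phy_mask skips that address: scan only PHY addresses 0 and 3 */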
+ new_bus->phy_mask = ~0x9;
+ pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;
+
+ if (NULL == pdata) {
+ printk(KERN_ERR "fs_enet BB mdio %d: Missing platform data!\n", pdev->id);
+ err = -ENODEV;
+ goto bus_register_fail;
+ }
+
+ /* set up workspace */
+ err = fs_mii_bitbang_init(bitbang, pdata);
+ if (err)
+ goto bus_register_fail;
+
+ new_bus->priv = bitbang;
+
+ new_bus->irq = pdata->irq;
+
+ new_bus->dev = dev;
+ dev_set_drvdata(dev, new_bus);
+
+ err = mdiobus_register(new_bus);
+
+ if (0 != err) {
+ printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
+ new_bus->name);
+ goto bus_register_fail;
+ }
+
+ return 0;
+
+bus_register_fail:
+ kfree(bitbang);
+ kfree(new_bus);
+
+ return err;
+}
+
+
+static int fs_enet_mdio_remove(struct device *dev)
+{
+ struct mii_bus *bus = dev_get_drvdata(dev);
+
+ mdiobus_unregister(bus);
+
+ dev_set_drvdata(dev, NULL);
+
+ kfree(bus->priv);
+ bus->priv = NULL;
+ kfree(bus);
+
+ return 0;
+}
+
+static struct device_driver fs_enet_bb_mdio_driver = {
+ .name = "fsl-bb-mdio",
+ .bus = &platform_bus_type,
+ .probe = fs_enet_mdio_probe,
+ .remove = fs_enet_mdio_remove,
+};
+
+int fs_enet_mdio_bb_init(void)
+{
+ return driver_register(&fs_enet_bb_mdio_driver);
+}
+
+void fs_enet_mdio_bb_exit(void)
+{
+ driver_unregister(&fs_enet_bb_mdio_driver);
+}
+
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
new file mode 100644
index 000000000000..1328e10caa35
--- /dev/null
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -0,0 +1,243 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+#include "fec.h"
+
+/* Make MII read/write commands for the FEC.
+*/
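+/* The command words follow the FEC MII data (MMFR) register layout:
+ * ST (01) and OP in the top nibble, PHY address at bit 23, register
+ * address at bit 18, turnaround (10) at bit 16, data in the low 16 bits.
+ */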
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
+
+#define FEC_MII_LOOPS 10000
+
+static int match_has_phy (struct device *dev, void* data)
+{
+ struct platform_device* pdev = container_of(dev, struct platform_device, dev);
+ struct fs_platform_info* fpi;
+ if (strcmp(pdev->name, (char *)data))
+ return 0;
+
+ fpi = pdev->dev.platform_data;
+ if (fpi && fpi->has_phy)
+ return 1;
+ return 0;
+}
+
+static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi)
+{
+ struct resource *r;
+ fec_t *fecp;
+ char* name = "fsl-cpm-fec";
+
+ /* we need fec in order to be useful */
+ struct device *fec_dev =
+ bus_find_device(&platform_bus_type, NULL, name, match_has_phy);
+ struct platform_device *fec_pdev;
+
+ if (fec_dev == NULL) {
+ printk(KERN_ERR "Unable to find PHY for %s\n", name);
+ return -ENODEV;
+ }
+ fec_pdev = to_platform_device(fec_dev);
+
+ r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs");
+
+ fec->fecp = fecp = (fec_t *)ioremap(r->start, sizeof(fec_t));
+ fec->mii_speed = fmpi->mii_speed;
+
+ setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
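+ /* FEC interrupt events are write-one-to-clear: drop any stale MII event */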
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+ out_be32(&fecp->fec_mii_speed, fec->mii_speed);
+
+ return 0;
+}
+
+static int fs_enet_fec_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ struct fec_info* fec = bus->priv;
+ fec_t *fecp = fec->fecp;
+ int i, ret = -1;
+
+ if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
+ BUG();
+
+ /* Add PHY address to register command. */
+ out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS) {
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+ ret = in_be32(&fecp->fec_mii_data) & 0xffff;
+ }
+
+ return ret;
+
+}
+
+static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
+{
+ struct fec_info* fec = bus->priv;
+ fec_t *fecp = fec->fecp;
+ int i;
+
+ /* this must never happen */
+ if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
+ BUG();
+
+ /* Add PHY address to register command. */
+ out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS)
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+
+ return 0;
+
+}
+
+static int fs_enet_fec_mii_reset(struct mii_bus *bus)
+{
+ /* nothing here - for now */
+ return 0;
+}
+
+static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fs_mii_fec_platform_info *pdata;
+ struct mii_bus *new_bus;
+ struct fec_info *fec;
+ int err = 0;
+ if (NULL == dev)
+ return -EINVAL;
+ new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
+
+ if (NULL == new_bus)
+ return -ENOMEM;
+
+ fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
+
+ if (NULL == fec) {
+ kfree(new_bus);
+ return -ENOMEM;
+ }
+
+ new_bus->name = "FEC MII Bus",
+ new_bus->read = &fs_enet_fec_mii_read,
+ new_bus->write = &fs_enet_fec_mii_write,
+ new_bus->reset = &fs_enet_fec_mii_reset,
+ new_bus->id = pdev->id;
+
+ pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
+
+ if (NULL == pdata) {
+ printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id);
+ err = -ENODEV;
+ goto bus_register_fail;
+ }
+
+ /* set up workspace */
+ err = fs_mii_fec_init(fec, pdata);
+ if (err)
+ goto bus_register_fail;
+ new_bus->priv = fec;
+
+ new_bus->irq = pdata->irq;
+
+ new_bus->dev = dev;
+ dev_set_drvdata(dev, new_bus);
+
+ err = mdiobus_register(new_bus);
+
+ if (0 != err) {
+ printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
+ new_bus->name);
+ goto bus_register_fail;
+ }
+
+ return 0;
+
+bus_register_fail:
+ kfree(fec);
+ kfree(new_bus);
+
+ return err;
+}
+
+
+static int fs_enet_fec_mdio_remove(struct device *dev)
+{
+ struct mii_bus *bus = dev_get_drvdata(dev);
+
+ mdiobus_unregister(bus);
+
+ dev_set_drvdata(dev, NULL);
+ kfree(bus->priv);
+
+ bus->priv = NULL;
+ kfree(bus);
+
+ return 0;
+}
+
+static struct device_driver fs_enet_fec_mdio_driver = {
+ .name = "fsl-cpm-fec-mdio",
+ .bus = &platform_bus_type,
+ .probe = fs_enet_fec_mdio_probe,
+ .remove = fs_enet_fec_mdio_remove,
+};
+
+int fs_enet_mdio_fec_init(void)
+{
+ return driver_register(&fs_enet_fec_mdio_driver);
+}
+
+void fs_enet_mdio_fec_exit(void)
+{
+ driver_unregister(&fs_enet_fec_mdio_driver);
+}
+
diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c
deleted file mode 100644
index ae4a9c3bb393..000000000000
--- a/drivers/net/fs_enet/mii-fixed.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
- *
- * Copyright (c) 2003 Intracom S.A.
- * by Pantelis Antoniou <panto@intracom.gr>
- *
- * 2005 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/bitops.h>
-
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-
-#include "fs_enet.h"
-
-static const u16 mii_regs[7] = {
- 0x3100,
- 0x786d,
- 0x0fff,
- 0x0fff,
- 0x01e1,
- 0x45e1,
- 0x0003,
-};
-
-static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
-{
- int ret = 0;
-
- if ((unsigned int)location >= ARRAY_SIZE(mii_regs))
- return -1;
-
- if (location != 5)
- ret = mii_regs[location];
- else
- ret = bus->fixed.lpa;
-
- return ret;
-}
-
-static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
-{
- /* do nothing */
-}
-
-int fs_mii_fixed_init(struct fs_enet_mii_bus *bus)
-{
- const struct fs_mii_bus_info *bi = bus->bus_info;
-
- bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */
-
- /* if speed is fixed at 10Mb, remove 100Mb modes */
- if (bi->i.fixed.speed == 10)
- bus->fixed.lpa &= ~LPA_100;
-
- /* if duplex is half, remove full duplex modes */
- if (bi->i.fixed.duplex == 0)
- bus->fixed.lpa &= ~LPA_DUPLEX;
-
- bus->mii_read = mii_read;
- bus->mii_write = mii_write;
-
- return 0;
-}
diff --git a/drivers/net/gt96100eth.c b/drivers/net/gt96100eth.c
index 49dacc6e35aa..2b4db7414475 100644
--- a/drivers/net/gt96100eth.c
+++ b/drivers/net/gt96100eth.c
@@ -699,7 +699,6 @@ static int __init gt96100_probe1(struct pci_dev *pci, int port_num)
memset(gp, 0, sizeof(*gp)); // clear it
gp->port_num = port_num;
- gp->io_size = GT96100_ETH_IO_SIZE;
gp->port_offset = port_num * GT96100_ETH_IO_SIZE;
gp->phy_addr = phy_addr;
gp->chip_rev = chip_rev;
@@ -1531,7 +1530,7 @@ static void gt96100_cleanup_module(void)
+ sizeof(gt96100_td_t) * TX_RING_SIZE,
gp->rx_ring);
free_netdev(gtif->dev);
- release_region(gtif->iobase, gp->io_size);
+ release_region(gtif->iobase, GT96100_ETH_IO_SIZE);
}
}
}
diff --git a/drivers/net/gt96100eth.h b/drivers/net/gt96100eth.h
index 2a8331938b84..3b62a87c7d7f 100644
--- a/drivers/net/gt96100eth.h
+++ b/drivers/net/gt96100eth.h
@@ -331,7 +331,6 @@ struct gt96100_private {
mib_counters_t mib;
struct net_device_stats stats;
- int io_size;
int port_num; // 0 or 1
int chip_rev;
u32 port_offset;
@@ -340,7 +339,6 @@ struct gt96100_private {
u32 last_psr; // last value of the port status register
int options; /* User-settable misc. driver options. */
- int drv_flags;
struct timer_list timer;
spinlock_t lock; /* Serialise access to device */
};
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 7bcd939c6edd..409c6aab0411 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -20,22 +20,15 @@
Support and updates available at
http://www.scyld.com/network/hamachi.html
+ [link no longer provides useful info -jgarzik]
or
http://www.parl.clemson.edu/~keithu/hamachi.html
-
-
- Linux kernel changelog:
-
- LK1.0.1:
- - fix lack of pci_dev<->dev association
- - ethtool support (jgarzik)
-
*/
#define DRV_NAME "hamachi"
-#define DRV_VERSION "1.01+LK1.0.1"
-#define DRV_RELDATE "5/18/2001"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "June 27, 2006"
/* A few user-configurable values. */
@@ -608,7 +601,8 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
pci_set_master(pdev);
i = pci_request_regions(pdev, DRV_NAME);
- if (i) return i;
+ if (i)
+ return i;
irq = pdev->irq;
ioaddr = ioremap(base, 0x400);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 0641f54fc638..889f338132fa 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -122,6 +122,12 @@ struct bpqdev {
static LIST_HEAD(bpq_devices);
+/*
+ * bpqether network devices are paired with ethernet devices below them, so
+ * form a special "super class" of normal ethernet devices; split their locks
+ * off into a separate class since they always nest.
+ */
+static struct lock_class_key bpq_netdev_xmit_lock_key;
/* ------------------------------------------------------------------------ */
@@ -528,6 +534,7 @@ static int bpq_new_device(struct net_device *edev)
err = register_netdevice(ndev);
if (err)
goto error;
+ lockdep_set_class(&ndev->_xmit_lock, &bpq_netdev_xmit_lock_key);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 3a42afab5036..43e3f33ed5e2 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -271,6 +271,7 @@ static int __init ifb_init_module(void)
for (i = 0; i < numifbs && !err; i++)
err = ifb_init_one(i);
if (err) {
+ i--;
while (--i >= 0)
ifb_free_one(i);
}
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index bf1fca5a3fa0..e3c8cd5eca67 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -146,7 +146,7 @@ static int __init ali_ircc_init(void)
{
ali_chip_t *chip;
chipio_t info;
- int ret = -ENODEV;
+ int ret;
int cfg, cfg_base;
int reg, revision;
int i = 0;
@@ -160,6 +160,7 @@ static int __init ali_ircc_init(void)
return ret;
}
+ ret = -ENODEV;
/* Probe for all the ALi chipsets we know about */
for (chip= chips; chip->name; chip++, i++)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index a4674044bd6f..2eff45bedc7c 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2353,7 +2353,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
#ifdef CONFIG_PCI
#define PCIID_VENDOR_INTEL 0x8086
#define PCIID_VENDOR_ALI 0x10b9
-static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitdata = {
+static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
{
.vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index b91e082483f6..7bbd447289b5 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
uint16_t ipcse, tucse, mss;
int err;
- if(likely(skb_shinfo(skb)->gso_size)) {
+ if (likely(skb_is_gso(skb))) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
@@ -1281,7 +1281,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
while(len) {
buffer_info = &tx_ring->buffer_info[i];
- size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
+ size = min(len, IXGB_MAX_DATA_PER_TXD);
buffer_info->length = size;
buffer_info->dma =
pci_map_single(adapter->pdev,
@@ -1306,7 +1306,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
while(len) {
buffer_info = &tx_ring->buffer_info[i];
- size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
+ size = min(len, IXGB_MAX_DATA_PER_TXD);
buffer_info->length = size;
buffer_info->dma =
pci_map_page(adapter->pdev,
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index c1c3452c90ca..5b4dbfe5fb77 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -326,7 +326,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
-int init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 646e89fc3562..c0ec7f6abcb2 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -406,7 +406,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)");
MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver");
MODULE_LICENSE("GPL");
-int init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 43fef7de8cb9..997cbce9af6e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
#ifdef LOOPBACK_TSO
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
BUG_ON(skb->protocol != htons(ETH_P_IP));
BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 760c61b98867..eeab1df5bef3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -385,6 +385,8 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
struct pkt_info pkt_info;
while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
+ dma_unmap_single(NULL, pkt_info.buf_ptr, RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
mp->rx_desc_count--;
received_packets++;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 72aad42db7b4..9bdd43ab3573 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -177,6 +177,7 @@ struct myri10ge_priv {
struct work_struct watchdog_work;
struct timer_list watchdog_timer;
int watchdog_tx_done;
+ int watchdog_tx_req;
int watchdog_resets;
int tx_linearized;
int pause;
@@ -188,7 +189,6 @@ struct myri10ge_priv {
int vendor_specific_offset;
u32 devctl;
u16 msi_flags;
- u32 pm_state[16];
u32 read_dma;
u32 write_dma;
u32 read_write_dma;
@@ -449,6 +449,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
struct mcp_gen_header *hdr;
size_t hdr_offset;
int status;
+ unsigned i;
if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
dev_err(dev, "Unable to load %s firmware image via hotplug\n",
@@ -480,18 +481,12 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
goto abort_with_fw;
crc = crc32(~0, fw->data, fw->size);
- if (mgp->tx.boundary == 2048) {
- /* Avoid PCI burst on chipset with unaligned completions. */
- int i;
- __iomem u32 *ptr = (__iomem u32 *) (mgp->sram +
- MYRI10GE_FW_OFFSET);
- for (i = 0; i < fw->size / 4; i++) {
- __raw_writel(((u32 *) fw->data)[i], ptr + i);
- wmb();
- }
- } else {
- myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data,
- fw->size);
+ for (i = 0; i < fw->size; i += 256) {
+ myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
+ fw->data + i,
+ min(256U, (unsigned)(fw->size - i)));
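+ /* the mb() and readb() below flush this chunk of PIO writes
+ * to the NIC before the next chunk is copied */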
+ mb();
+ readb(mgp->sram);
}
/* corruption checking is good for parity recovery and buggy chipset */
memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
@@ -621,7 +616,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
return -ENXIO;
}
dev_info(&mgp->pdev->dev, "handoff confirmed\n");
- myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+ myri10ge_dummy_rdma(mgp, 1);
return 0;
}
@@ -1289,6 +1284,7 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
"tx_heartbeat_errors", "tx_window_errors",
/* device-specific stats */
+ "tx_boundary", "WC", "irq", "MSI",
"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
"serial_number", "tx_pkt_start", "tx_pkt_done",
"tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
@@ -1327,6 +1323,10 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
data[i] = ((unsigned long *)&mgp->stats)[i];
+ data[i++] = (unsigned int)mgp->tx.boundary;
+ data[i++] = (unsigned int)(mgp->mtrr >= 0);
+ data[i++] = (unsigned int)mgp->pdev->irq;
+ data[i++] = (unsigned int)mgp->msi_enabled;
data[i++] = (unsigned int)mgp->read_dma;
data[i++] = (unsigned int)mgp->write_dma;
data[i++] = (unsigned int)mgp->read_write_dma;
@@ -2112,7 +2112,7 @@ abort_linearize:
}
idx = (idx + 1) & tx->mask;
} while (idx != last_idx);
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
printk(KERN_ERR
"myri10ge: %s: TSO but wanted to linearize?!?!?\n",
mgp->dev->name);
@@ -2197,8 +2197,6 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
* any other device, except if forced with myri10ge_ecrc_enable > 1.
*/
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE 0x005d
-
static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
{
struct pci_dev *bridge = mgp->pdev->bus->self;
@@ -2410,18 +2408,24 @@ static int myri10ge_resume(struct pci_dev *pdev)
return -EIO;
}
myri10ge_restore_state(mgp);
- pci_enable_device(pdev);
+
+ status = pci_enable_device(pdev);
+ if (status < 0) {
+ dev_err(&pdev->dev, "failed to enable device\n");
+ return -EIO;
+ }
+
pci_set_master(pdev);
status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
netdev->name, mgp);
if (status != 0) {
dev_err(&pdev->dev, "failed to allocate IRQ\n");
- goto abort_with_msi;
+ goto abort_with_enabled;
}
myri10ge_reset(mgp);
- myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+ myri10ge_dummy_rdma(mgp, 1);
/* Save configuration space to be restored if the
* nic resets due to a parity error */
@@ -2436,7 +2440,8 @@ static int myri10ge_resume(struct pci_dev *pdev)
return 0;
-abort_with_msi:
+abort_with_enabled:
+ pci_disable_device(pdev);
return -EIO;
}
@@ -2538,7 +2543,8 @@ static void myri10ge_watchdog_timer(unsigned long arg)
mgp = (struct myri10ge_priv *)arg;
if (mgp->tx.req != mgp->tx.done &&
- mgp->tx.done == mgp->watchdog_tx_done)
+ mgp->tx.done == mgp->watchdog_tx_done &&
+ mgp->watchdog_tx_req != mgp->watchdog_tx_done)
/* nic seems like it might be stuck.. */
schedule_work(&mgp->watchdog_work);
else
@@ -2547,6 +2553,7 @@ static void myri10ge_watchdog_timer(unsigned long arg)
jiffies + myri10ge_watchdog_timeout * HZ);
mgp->watchdog_tx_done = mgp->tx.done;
+ mgp->watchdog_tx_req = mgp->tx.req;
}
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -2737,11 +2744,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
goto abort_with_irq;
}
-
- printk(KERN_INFO "myri10ge: %s: %s IRQ %d, tx bndry %d, fw %s, WC %s\n",
- netdev->name, (mgp->msi_enabled ? "MSI" : "xPIC"),
- pdev->irq, mgp->tx.boundary, mgp->fw_name,
- (mgp->mtrr >= 0 ? "Enabled" : "Disabled"));
+ dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
+ (mgp->msi_enabled ? "MSI" : "xPIC"),
+ pdev->irq, mgp->tx.boundary, mgp->fw_name,
+ (mgp->mtrr >= 0 ? "Enabled" : "Disabled"));
return 0;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 9df2628be1e7..db0475a1102f 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -20,120 +20,9 @@
Support information and updates available at
http://www.scyld.com/network/netsemi.html
+ [link no longer provides useful info -jgarzik]
- Linux kernel modifications:
-
- Version 1.0.1:
- - Spinlock fixes
- - Bug fixes and better intr performance (Tjeerd)
- Version 1.0.2:
- - Now reads correct MAC address from eeprom
- Version 1.0.3:
- - Eliminate redundant priv->tx_full flag
- - Call netif_start_queue from dev->tx_timeout
- - wmb() in start_tx() to flush data
- - Update Tx locking
- - Clean up PCI enable (davej)
- Version 1.0.4:
- - Merge Donald Becker's natsemi.c version 1.07
- Version 1.0.5:
- - { fill me in }
- Version 1.0.6:
- * ethtool support (jgarzik)
- * Proper initialization of the card (which sometimes
- fails to occur and leaves the card in a non-functional
- state). (uzi)
-
- * Some documented register settings to optimize some
- of the 100Mbit autodetection circuitry in rev C cards. (uzi)
-
- * Polling of the PHY intr for stuff like link state
- change and auto- negotiation to finally work properly. (uzi)
-
- * One-liner removal of a duplicate declaration of
- netdev_error(). (uzi)
-
- Version 1.0.7: (Manfred Spraul)
- * pci dma
- * SMP locking update
- * full reset added into tx_timeout
- * correct multicast hash generation (both big and little endian)
- [copied from a natsemi driver version
- from Myrio Corporation, Greg Smith]
- * suspend/resume
-
- version 1.0.8 (Tim Hockin <thockin@sun.com>)
- * ETHTOOL_* support
- * Wake on lan support (Erik Gilling)
- * MXDMA fixes for serverworks
- * EEPROM reload
-
- version 1.0.9 (Manfred Spraul)
- * Main change: fix lack of synchronize
- netif_close/netif_suspend against a last interrupt
- or packet.
- * do not enable superflous interrupts (e.g. the
- drivers relies on TxDone - TxIntr not needed)
- * wait that the hardware has really stopped in close
- and suspend.
- * workaround for the (at least) gcc-2.95.1 compiler
- problem. Also simplifies the code a bit.
- * disable_irq() in tx_timeout - needed to protect
- against rx interrupts.
- * stop the nic before switching into silent rx mode
- for wol (required according to docu).
-
- version 1.0.10:
- * use long for ee_addr (various)
- * print pointers properly (DaveM)
- * include asm/irq.h (?)
-
- version 1.0.11:
- * check and reset if PHY errors appear (Adrian Sun)
- * WoL cleanup (Tim Hockin)
- * Magic number cleanup (Tim Hockin)
- * Don't reload EEPROM on every reset (Tim Hockin)
- * Save and restore EEPROM state across reset (Tim Hockin)
- * MDIO Cleanup (Tim Hockin)
- * Reformat register offsets/bits (jgarzik)
-
- version 1.0.12:
- * ETHTOOL_* further support (Tim Hockin)
-
- version 1.0.13:
- * ETHTOOL_[G]EEPROM support (Tim Hockin)
-
- version 1.0.13:
- * crc cleanup (Matt Domsch <Matt_Domsch@dell.com>)
-
- version 1.0.14:
- * Cleanup some messages and autoneg in ethtool (Tim Hockin)
-
- version 1.0.15:
- * Get rid of cable_magic flag
- * use new (National provided) solution for cable magic issue
-
- version 1.0.16:
- * call netdev_rx() for RxErrors (Manfred Spraul)
- * formatting and cleanups
- * change options and full_duplex arrays to be zero
- initialized
- * enable only the WoL and PHY interrupts in wol mode
-
- version 1.0.17:
- * only do cable_magic on 83815 and early 83816 (Tim Hockin)
- * create a function for rx refill (Manfred Spraul)
- * combine drain_ring and init_ring (Manfred Spraul)
- * oom handling (Manfred Spraul)
- * hands_off instead of playing with netif_device_{de,a}ttach
- (Manfred Spraul)
- * be sure to write the MAC back to the chip (Manfred Spraul)
- * lengthen EEPROM timeout, and always warn about timeouts
- (Manfred Spraul)
- * comments update (Manfred)
- * do the right thing on a phy-reset (Manfred and Tim)
-
TODO:
* big endian support with CFG:BEM instead of cpu_to_le32
*/
@@ -165,8 +54,8 @@
#include <asm/uaccess.h>
#define DRV_NAME "natsemi"
-#define DRV_VERSION "1.07+LK1.0.17"
-#define DRV_RELDATE "Sep 27, 2002"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "June 27, 2006"
#define RX_OFFSET 2
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index fa50eb889408..34bdba9eec79 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -231,12 +231,12 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
irq = pdev->irq;
if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
- printk (KERN_ERR PFX "no I/O resource at PCI BAR #0\n");
+ dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
return -ENODEV;
}
if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
- printk (KERN_ERR PFX "I/O resource 0x%x @ 0x%lx busy\n",
+ dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
NE_IO_EXTENT, ioaddr);
return -EBUSY;
}
@@ -263,7 +263,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
/* Allocate net_device, dev->priv; fill in 8390 specific dev fields. */
dev = alloc_ei_netdev();
if (!dev) {
- printk (KERN_ERR PFX "cannot allocate ethernet device\n");
+ dev_err(&pdev->dev, "cannot allocate ethernet device\n");
goto err_out_free_res;
}
SET_MODULE_OWNER(dev);
@@ -281,7 +281,8 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
/* Limit wait: '2' avoids jiffy roll-over. */
if (jiffies - reset_start_time > 2) {
- printk(KERN_ERR PFX "Card failure (no reset ack).\n");
+ dev_err(&pdev->dev,
+ "Card failure (no reset ack).\n");
goto err_out_free_netdev;
}
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index a68bf474f6ed..d4be207d321a 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -1,17 +1,12 @@
/* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard.
*
- * Copyright 1996,1997 Jan-Pascal van Best and Andreas Mohr.
+ * Copyright 1996,1997,2006 Jan-Pascal van Best and Andreas Mohr.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* The authors may be reached as:
- * jvbest@wi.leidenuniv.nl a.mohr@mailto.de
- * or by snail mail as
- * Jan-Pascal van Best Andreas Mohr
- * Klikspaanweg 58-4 Stauferstr. 6
- * 2324 LZ Leiden D-71272 Renningen
- * The Netherlands Germany
+ * janpascal@vanbest.org andi@lisas.de
*
* Sources:
* Donald Becker's "skeleton.c"
@@ -27,8 +22,9 @@
* 970503 v0.93: Fixed auto-irq failure on warm reboot (JB)
* 970623 v1.00: First kernel version (AM)
* 970814 v1.01: Added detection of onboard receive buffer size (AM)
+ * 060611 v1.02: slight cleanup: email addresses, driver modernization.
* Bugs:
- * - None known...
+ * - not SMP-safe (no locking of I/O accesses)
* - Note that you have to patch ifconfig for the new /proc/net/dev
* format. It gives incorrect stats otherwise.
*
@@ -39,7 +35,7 @@
* Complete merge with Andreas' driver
* Implement ring buffers (Is this useful? You can't squeeze
* too many packet in a 2k buffer!)
- * Implement DMA (Again, is this useful? Some docs says DMA is
+ * Implement DMA (Again, is this useful? Some docs say DMA is
* slower than programmed I/O)
*
* Compile with:
@@ -47,7 +43,7 @@
* -DMODULE -c ni5010.c
*
* Insert with e.g.:
- * insmod ni5010.o io=0x300 irq=5
+ * insmod ni5010.ko io=0x300 irq=5
*/
#include <linux/module.h>
@@ -69,15 +65,15 @@
#include "ni5010.h"
-static const char *boardname = "NI5010";
-static char *version =
- "ni5010.c: v1.00 06/23/97 Jan-Pascal van Best and Andreas Mohr\n";
+static const char boardname[] = "NI5010";
+static char version[] __initdata =
+ "ni5010.c: v1.02 20060611 Jan-Pascal van Best and Andreas Mohr\n";
/* bufsize_rcv == 0 means autoprobing */
static unsigned int bufsize_rcv;
-#define jumpered_interrupts /* IRQ line jumpered on board */
-#undef jumpered_dma /* No DMA used */
+#define JUMPERED_INTERRUPTS /* IRQ line jumpered on board */
+#undef JUMPERED_DMA /* No DMA used */
#undef FULL_IODETECT /* Only detect in portlist */
#ifndef FULL_IODETECT
@@ -281,7 +277,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name));
-#ifdef jumpered_interrupts
+#ifdef JUMPERED_INTERRUPTS
if (dev->irq == 0xff)
;
else if (dev->irq < 2) {
@@ -305,7 +301,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
} else if (dev->irq == 2) {
dev->irq = 9;
}
-#endif /* jumpered_irq */
+#endif /* JUMPERED_INTERRUPTS */
PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name));
/* DMA is not supported (yet?), so no use detecting it */
@@ -334,7 +330,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
outw(0, IE_GP); /* Point GP at start of packet */
outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */
}
- printk("// bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE);
+ printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE);
memset(dev->priv, 0, sizeof(struct ni5010_local));
dev->open = ni5010_open;
@@ -354,11 +350,9 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */
printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq);
- if (dev->dma) printk(" & DMA %d", dev->dma);
+ if (dev->dma)
+ printk(" & DMA %d", dev->dma);
printk(".\n");
-
- printk(KERN_INFO "Join the NI5010 driver development team!\n");
- printk(KERN_INFO "Mail to a.mohr@mailto.de or jvbest@wi.leidenuniv.nl\n");
return 0;
out:
release_region(dev->base_addr, NI5010_IO_EXTENT);
@@ -371,7 +365,7 @@ out:
*
* This routine should set everything up anew at each open, even
* registers that "should" only need to be set once at boot, so that
- * there is non-reboot way to recover if something goes wrong.
+ * there is a non-reboot way to recover if something goes wrong.
*/
static int ni5010_open(struct net_device *dev)
@@ -390,13 +384,13 @@ static int ni5010_open(struct net_device *dev)
* Always allocate the DMA channel after the IRQ,
* and clean up on failure.
*/
-#ifdef jumpered_dma
+#ifdef JUMPERED_DMA
if (request_dma(dev->dma, cardname)) {
printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma);
free_irq(dev->irq, NULL);
return -EAGAIN;
}
-#endif /* jumpered_dma */
+#endif /* JUMPERED_DMA */
PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name));
/* Reset the hardware here. Don't forget to set the station address. */
@@ -633,7 +627,7 @@ static int ni5010_close(struct net_device *dev)
int ioaddr = dev->base_addr;
PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name));
-#ifdef jumpered_interrupts
+#ifdef JUMPERED_INTERRUPTS
free_irq(dev->irq, NULL);
#endif
/* Put card in held-RESET state */
@@ -771,7 +765,7 @@ module_param(irq, int, 0);
MODULE_PARM_DESC(io, "ni5010 I/O base address");
MODULE_PARM_DESC(irq, "ni5010 IRQ number");
-int init_module(void)
+static int __init ni5010_init_module(void)
{
PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname));
/*
@@ -792,13 +786,15 @@ int init_module(void)
return 0;
}
-void cleanup_module(void)
+static void __exit ni5010_cleanup_module(void)
{
PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname));
unregister_netdev(dev_ni5010);
release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT);
free_netdev(dev_ni5010);
}
+module_init(ni5010_init_module);
+module_exit(ni5010_cleanup_module);
#endif /* MODULE */
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index fa854c8fde75..4d52ecf8af56 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -1323,7 +1323,7 @@ MODULE_PARM_DESC(irq, "NI5210 IRQ number,required");
MODULE_PARM_DESC(memstart, "NI5210 memory base address,required");
MODULE_PARM_DESC(memend, "NI5210 memory end address,required");
-int init_module(void)
+int __init init_module(void)
{
if(io <= 0x0 || !memend || !memstart || irq < 2) {
printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index bb42ff218484..810cc572f5f7 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1253,7 +1253,7 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
-int init_module(void)
+int __init init_module(void)
{
dev_ni65 = ni65_probe(-1);
return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 70429108c40d..0e76859c90a2 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -803,7 +803,7 @@ static int ns83820_setup_rx(struct net_device *ndev)
writel(dev->IMR_cache, dev->base + IMR);
writel(1, dev->base + IER);
- spin_unlock_irq(&dev->misc_lock);
+ spin_unlock(&dev->misc_lock);
kick_rx(ndev);
@@ -1012,8 +1012,6 @@ static void do_tx_done(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
u32 cmdsts, tx_done_idx, *desc;
- spin_lock_irq(&dev->tx_lock);
-
dprintk("do_tx_done(%p)\n", ndev);
tx_done_idx = dev->tx_done_idx;
desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
@@ -1069,7 +1067,6 @@ static void do_tx_done(struct net_device *ndev)
netif_start_queue(ndev);
netif_wake_queue(ndev);
}
- spin_unlock_irq(&dev->tx_lock);
}
static void ns83820_cleanup_tx(struct ns83820 *dev)
@@ -1281,11 +1278,13 @@ static struct ethtool_ops ops = {
.get_link = ns83820_get_link
};
+/* this function is called in irq context from the ISR */
static void ns83820_mib_isr(struct ns83820 *dev)
{
- spin_lock(&dev->misc_lock);
+ unsigned long flags;
+ spin_lock_irqsave(&dev->misc_lock, flags);
ns83820_update_stats(dev);
- spin_unlock(&dev->misc_lock);
+ spin_unlock_irqrestore(&dev->misc_lock, flags);
}
static void ns83820_do_isr(struct net_device *ndev, u32 isr);
@@ -1307,6 +1306,8 @@ static irqreturn_t ns83820_irq(int foo, void *data, struct pt_regs *regs)
static void ns83820_do_isr(struct net_device *ndev, u32 isr)
{
struct ns83820 *dev = PRIV(ndev);
+ unsigned long flags;
+
#ifdef DEBUG
if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC))
Dprintk("odd isr? 0x%08x\n", isr);
@@ -1321,10 +1322,10 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr)
if ((ISR_RXDESC | ISR_RXOK) & isr) {
prefetch(dev->rx_info.next_rx_desc);
- spin_lock_irq(&dev->misc_lock);
+ spin_lock_irqsave(&dev->misc_lock, flags);
dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK);
writel(dev->IMR_cache, dev->base + IMR);
- spin_unlock_irq(&dev->misc_lock);
+ spin_unlock_irqrestore(&dev->misc_lock, flags);
tasklet_schedule(&dev->rx_tasklet);
//rx_irq(ndev);
@@ -1370,16 +1371,18 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr)
* work has accumulated
*/
if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) {
+ spin_lock_irqsave(&dev->tx_lock, flags);
do_tx_done(ndev);
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
/* Disable TxOk if there are no outstanding tx packets.
*/
if ((dev->tx_done_idx == dev->tx_free_idx) &&
(dev->IMR_cache & ISR_TXOK)) {
- spin_lock_irq(&dev->misc_lock);
+ spin_lock_irqsave(&dev->misc_lock, flags);
dev->IMR_cache &= ~ISR_TXOK;
writel(dev->IMR_cache, dev->base + IMR);
- spin_unlock_irq(&dev->misc_lock);
+ spin_unlock_irqrestore(&dev->misc_lock, flags);
}
}
@@ -1390,10 +1393,10 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr)
* nature are expected, we must enable TxOk.
*/
if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) {
- spin_lock_irq(&dev->misc_lock);
+ spin_lock_irqsave(&dev->misc_lock, flags);
dev->IMR_cache |= ISR_TXOK;
writel(dev->IMR_cache, dev->base + IMR);
- spin_unlock_irq(&dev->misc_lock);
+ spin_unlock_irqrestore(&dev->misc_lock, flags);
}
/* MIB interrupt: one of the statistics counters is about to overflow */
@@ -1455,7 +1458,7 @@ static void ns83820_tx_timeout(struct net_device *ndev)
u32 tx_done_idx, *desc;
unsigned long flags;
- local_irq_save(flags);
+ spin_lock_irqsave(&dev->tx_lock, flags);
tx_done_idx = dev->tx_done_idx;
desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
@@ -1482,7 +1485,7 @@ static void ns83820_tx_timeout(struct net_device *ndev)
ndev->name,
tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
}
static void ns83820_tx_watch(unsigned long data)
@@ -1832,7 +1835,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
} else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
using_dac = 0;
} else {
- printk(KERN_WARNING "ns83820.c: pci_set_dma_mask failed!\n");
+ dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n");
return -ENODEV;
}
@@ -1855,7 +1858,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
err = pci_enable_device(pci_dev);
if (err) {
- printk(KERN_INFO "ns83820: pci_enable_dev failed: %d\n", err);
+ dev_info(&pci_dev->dev, "pci_enable_dev failed: %d\n", err);
goto out_free;
}
@@ -1884,8 +1887,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
err = request_irq(pci_dev->irq, ns83820_irq, IRQF_SHARED,
DRV_NAME, ndev);
if (err) {
- printk(KERN_INFO "ns83820: unable to register irq %d\n",
- pci_dev->irq);
+ dev_info(&pci_dev->dev, "unable to register irq %d, err %d\n",
+ pci_dev->irq, err);
goto out_disable;
}
@@ -1899,7 +1902,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
rtnl_lock();
err = dev_alloc_name(ndev, ndev->name);
if (err < 0) {
- printk(KERN_INFO "ns83820: unable to get netdev name: %d\n", err);
+ dev_info(&pci_dev->dev, "unable to get netdev name: %d\n", err);
goto out_free_irq;
}
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 3388ee1313ea..e0e293964042 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -601,7 +601,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
/* dev zeroed in alloc_etherdev */
dev = alloc_etherdev (sizeof (*tp));
if (dev == NULL) {
- printk (KERN_ERR PFX "unable to alloc new ethernet\n");
+ dev_err(&pdev->dev, "unable to alloc new ethernet\n");
DPRINTK ("EXIT, returning -ENOMEM\n");
return -ENOMEM;
}
@@ -631,14 +631,14 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
/* make sure PCI base addr 0 is PIO */
if (!(pio_flags & IORESOURCE_IO)) {
- printk (KERN_ERR PFX "region #0 not a PIO resource, aborting\n");
+ dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
rc = -ENODEV;
goto err_out;
}
/* make sure PCI base addr 1 is MMIO */
if (!(mmio_flags & IORESOURCE_MEM)) {
- printk (KERN_ERR PFX "region #1 not an MMIO resource, aborting\n");
+ dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
rc = -ENODEV;
goto err_out;
}
@@ -646,12 +646,12 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
/* check for weird/broken PCI region reporting */
if ((pio_len < NETDRV_MIN_IO_SIZE) ||
(mmio_len < NETDRV_MIN_IO_SIZE)) {
- printk (KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
+ dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
rc = -ENODEV;
goto err_out;
}
- rc = pci_request_regions (pdev, "pci-skeleton");
+ rc = pci_request_regions (pdev, MODNAME);
if (rc)
goto err_out;
@@ -663,7 +663,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
/* ioremap MMIO region */
ioaddr = ioremap (mmio_start, mmio_len);
if (ioaddr == NULL) {
- printk (KERN_ERR PFX "cannot remap MMIO, aborting\n");
+ dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
goto err_out_free_res;
}
@@ -699,9 +699,10 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
}
/* if unknown chip, assume array element #0, original RTL-8139 in this case */
- printk (KERN_DEBUG PFX "PCI device %s: unknown chip version, assuming RTL-8139\n",
- pci_name(pdev));
- printk (KERN_DEBUG PFX "PCI device %s: TxConfig = 0x%lx\n", pci_name(pdev), NETDRV_R32 (TxConfig));
+ dev_printk (KERN_DEBUG, &pdev->dev,
+ "unknown chip version, assuming RTL-8139\n");
+ dev_printk (KERN_DEBUG, &pdev->dev, "TxConfig = 0x%lx\n",
+ NETDRV_R32 (TxConfig));
tp->chipset = 0;
match:
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 9bae77ce1314..4122bb46f5ff 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -345,6 +345,7 @@ typedef struct local_info_t {
void __iomem *dingo_ccr; /* only used for CEM56 cards */
unsigned last_ptr_value; /* last packets transmitted value */
const char *manf_str;
+ struct work_struct tx_timeout_task;
} local_info_t;
/****************
@@ -352,6 +353,7 @@ typedef struct local_info_t {
*/
static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void do_tx_timeout(struct net_device *dev);
+static void xirc2ps_tx_timeout_task(void *data);
static struct net_device_stats *do_get_stats(struct net_device *dev);
static void set_addresses(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -589,6 +591,7 @@ xirc2ps_probe(struct pcmcia_device *link)
#ifdef HAVE_TX_TIMEOUT
dev->tx_timeout = do_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
+ INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
#endif
return xirc2ps_config(link);
@@ -1341,17 +1344,24 @@ xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/*====================================================================*/
static void
-do_tx_timeout(struct net_device *dev)
+xirc2ps_tx_timeout_task(void *data)
{
- local_info_t *lp = netdev_priv(dev);
- printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
- lp->stats.tx_errors++;
+ struct net_device *dev = data;
/* reset the card */
do_reset(dev,1);
dev->trans_start = jiffies;
netif_wake_queue(dev);
}
+static void
+do_tx_timeout(struct net_device *dev)
+{
+ local_info_t *lp = netdev_priv(dev);
+ lp->stats.tx_errors++;
+ printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
+ schedule_work(&lp->tx_timeout_task);
+}
+
static int
do_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index d768f3d1ac28..d50bcb89dd28 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -58,18 +58,15 @@ static const char *const version =
* PCI device identifiers for "new style" Linux PCI Device Drivers
*/
static struct pci_device_id pcnet32_pci_tbl[] = {
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
/*
* Adapters that were sold with IBM's RS/6000 or pSeries hardware have
* the incorrect vendor id.
*/
- { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE,
- PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0},
+ { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
{ } /* terminate list */
};
@@ -188,6 +185,25 @@ static int homepna[MAX_UNITS];
#define PCNET32_TOTAL_SIZE 0x20
+#define CSR0 0
+#define CSR0_INIT 0x1
+#define CSR0_START 0x2
+#define CSR0_STOP 0x4
+#define CSR0_TXPOLL 0x8
+#define CSR0_INTEN 0x40
+#define CSR0_IDON 0x0100
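+/* value written to CSR0 to (re)start the chip: STRT with interrupts enabled */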
+#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
+#define PCNET32_INIT_LOW 1
+#define PCNET32_INIT_HIGH 2
+#define CSR3 3
+#define CSR4 4
+#define CSR5 5
+#define CSR5_SUSPEND 0x0001
+#define CSR15 15
+#define PCNET32_MC_FILTER 8
+
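+/* part ID of the PCnet/PCI II 79C970A; older chips cannot report link state */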
+#define PCNET32_79C970A 0x2621
+
/* The PCNET32 Rx and Tx ring descriptors. */
struct pcnet32_rx_head {
u32 base;
@@ -275,9 +291,9 @@ struct pcnet32_private {
/* each bit indicates an available PHY */
u32 phymask;
+ unsigned short chip_version; /* which variant this is */
};
-static void pcnet32_probe_vlbus(void);
static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
static int pcnet32_open(struct net_device *);
@@ -419,6 +435,238 @@ static struct pcnet32_access pcnet32_dwio = {
.reset = pcnet32_dwio_reset
};
+static void pcnet32_netif_stop(struct net_device *dev)
+{
+ dev->trans_start = jiffies;
+ netif_poll_disable(dev);
+ netif_tx_disable(dev);
+}
+
+static void pcnet32_netif_start(struct net_device *dev)
+{
+ netif_wake_queue(dev);
+ netif_poll_enable(dev);
+}
+
+/*
+ * Allocate space for the new sized tx ring.
+ * Free old resources
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_tx_ring(struct net_device *dev,
+ struct pcnet32_private *lp,
+ unsigned int size)
+{
+ dma_addr_t new_ring_dma_addr;
+ dma_addr_t *new_dma_addr_list;
+ struct pcnet32_tx_head *new_tx_ring;
+ struct sk_buff **new_skb_list;
+
+ pcnet32_purge_tx_ring(dev);
+
+ new_tx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ (1 << size),
+ &new_ring_dma_addr);
+ if (new_tx_ring == NULL) {
+ if (netif_msg_drv(lp))
+ printk("\n" KERN_ERR
+ "%s: Consistent memory allocation failed.\n",
+ dev->name);
+ return;
+ }
+ memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
+
+ new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!new_dma_addr_list) {
+ if (netif_msg_drv(lp))
+ printk("\n" KERN_ERR
+ "%s: Memory allocation failed.\n", dev->name);
+ goto free_new_tx_ring;
+ }
+
+ new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list) {
+ if (netif_msg_drv(lp))
+ printk("\n" KERN_ERR
+ "%s: Memory allocation failed.\n", dev->name);
+ goto free_new_lists;
+ }
+
+ kfree(lp->tx_skbuff);
+ kfree(lp->tx_dma_addr);
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size, lp->tx_ring,
+ lp->tx_ring_dma_addr);
+
+ lp->tx_ring_size = (1 << size);
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->tx_len_bits = (size << 12);
+ lp->tx_ring = new_tx_ring;
+ lp->tx_ring_dma_addr = new_ring_dma_addr;
+ lp->tx_dma_addr = new_dma_addr_list;
+ lp->tx_skbuff = new_skb_list;
+ return;
+
+ free_new_lists:
+ kfree(new_dma_addr_list);
+ free_new_tx_ring:
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ (1 << size),
+ new_tx_ring,
+ new_ring_dma_addr);
+ return;
+}
+
+/*
+ * Allocate space for the new sized rx ring.
+ * Re-use old receive buffers.
+ * alloc extra buffers
+ * free unneeded buffers
+ * Free old resources.
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_rx_ring(struct net_device *dev,
+ struct pcnet32_private *lp,
+ unsigned int size)
+{
+ dma_addr_t new_ring_dma_addr;
+ dma_addr_t *new_dma_addr_list;
+ struct pcnet32_rx_head *new_rx_ring;
+ struct sk_buff **new_skb_list;
+ int new, overlap;
+
+ new_rx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ (1 << size),
+ &new_ring_dma_addr);
+ if (new_rx_ring == NULL) {
+ if (netif_msg_drv(lp))
+ printk("\n" KERN_ERR
+ "%s: Consistent memory allocation failed.\n",
+ dev->name);
+ return;
+ }
+ memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+
+ new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!new_dma_addr_list) {
+ if (netif_msg_drv(lp))
+ printk("\n" KERN_ERR
+ "%s: Memory allocation failed.\n", dev->name);
+ goto free_new_rx_ring;
+ }
+
+ new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list) {
+ if (netif_msg_drv(lp))
+ printk("\n" KERN_ERR
+ "%s: Memory allocation failed.\n", dev->name);
+ goto free_new_lists;
+ }
+
+ /* first copy the current receive buffers */
+ overlap = min(1U << size, lp->rx_ring_size);
+ for (new = 0; new < overlap; new++) {
+ new_rx_ring[new] = lp->rx_ring[new];
+ new_dma_addr_list[new] = lp->rx_dma_addr[new];
+ new_skb_list[new] = lp->rx_skbuff[new];
+ }
+ /* now allocate any new buffers needed */
+ for (; new < (1 << size); new++) {
+ struct sk_buff *rx_skbuff;
+ new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
+ if (!(rx_skbuff = new_skb_list[new])) {
+ /* keep the original lists and buffers */
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
+ dev->name);
+ goto free_all_new;
+ }
+ skb_reserve(rx_skbuff, 2);
+
+ new_dma_addr_list[new] =
+ pci_map_single(lp->pci_dev, rx_skbuff->data,
+ PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
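+ /* buf_length is stored as a two's-complement negative count;
+ * status bit 15 (OWN) hands the descriptor to the controller */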
+ new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]);
+ new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+ new_rx_ring[new].status = le16_to_cpu(0x8000);
+ }
+ /* and free any unneeded buffers */
+ for (; new < lp->rx_ring_size; new++) {
+ if (lp->rx_skbuff[new]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
+ PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[new]);
+ }
+ }
+
+ kfree(lp->rx_skbuff);
+ kfree(lp->rx_dma_addr);
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size, lp->rx_ring,
+ lp->rx_ring_dma_addr);
+
+ lp->rx_ring_size = (1 << size);
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->rx_len_bits = (size << 4);
+ lp->rx_ring = new_rx_ring;
+ lp->rx_ring_dma_addr = new_ring_dma_addr;
+ lp->rx_dma_addr = new_dma_addr_list;
+ lp->rx_skbuff = new_skb_list;
+ return;
+
+ free_all_new:
+ for (; --new >= lp->rx_ring_size; ) {
+ if (new_skb_list[new]) {
+ pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
+ PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(new_skb_list[new]);
+ }
+ }
+ kfree(new_skb_list);
+ free_new_lists:
+ kfree(new_dma_addr_list);
+ free_new_rx_ring:
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ (1 << size),
+ new_rx_ring,
+ new_ring_dma_addr);
+ return;
+}
+
+static void pcnet32_purge_rx_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+
+ /* free all allocated skbuffs */
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ lp->rx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->rx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
+ PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void pcnet32_poll_controller(struct net_device *dev)
{
@@ -479,9 +727,11 @@ static u32 pcnet32_get_link(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
if (lp->mii) {
r = mii_link_ok(&lp->mii_if);
- } else {
+ } else if (lp->chip_version >= PCNET32_79C970A) {
ulong ioaddr = dev->base_addr; /* card base I/O address */
r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+ } else { /* can not detect link on really old chips */
+ r = 1;
}
spin_unlock_irqrestore(&lp->lock, flags);
@@ -519,10 +769,10 @@ static void pcnet32_get_ringparam(struct net_device *dev,
{
struct pcnet32_private *lp = dev->priv;
- ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
- ering->tx_pending = lp->tx_ring_size - 1;
- ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
- ering->rx_pending = lp->rx_ring_size - 1;
+ ering->tx_max_pending = TX_MAX_RING_SIZE;
+ ering->tx_pending = lp->tx_ring_size;
+ ering->rx_max_pending = RX_MAX_RING_SIZE;
+ ering->rx_pending = lp->rx_ring_size;
}
static int pcnet32_set_ringparam(struct net_device *dev,
@@ -530,56 +780,53 @@ static int pcnet32_set_ringparam(struct net_device *dev,
{
struct pcnet32_private *lp = dev->priv;
unsigned long flags;
+ unsigned int size;
+ ulong ioaddr = dev->base_addr;
int i;
if (ering->rx_mini_pending || ering->rx_jumbo_pending)
return -EINVAL;
if (netif_running(dev))
- pcnet32_close(dev);
+ pcnet32_netif_stop(dev);
spin_lock_irqsave(&lp->lock, flags);
- pcnet32_free_ring(dev);
- lp->tx_ring_size =
- min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
- lp->rx_ring_size =
- min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+ lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+
+ size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
/* set the minimum ring size to 4, to allow the loopback test to work
* unchanged.
*/
for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
- if (lp->tx_ring_size <= (1 << i))
+ if (size <= (1 << i))
break;
}
- lp->tx_ring_size = (1 << i);
- lp->tx_mod_mask = lp->tx_ring_size - 1;
- lp->tx_len_bits = (i << 12);
-
+ if ((1 << i) != lp->tx_ring_size)
+ pcnet32_realloc_tx_ring(dev, lp, i);
+
+ size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
- if (lp->rx_ring_size <= (1 << i))
+ if (size <= (1 << i))
break;
}
- lp->rx_ring_size = (1 << i);
- lp->rx_mod_mask = lp->rx_ring_size - 1;
- lp->rx_len_bits = (i << 4);
+ if ((1 << i) != lp->rx_ring_size)
+ pcnet32_realloc_rx_ring(dev, lp, i);
+
+ dev->weight = lp->rx_ring_size / 2;
- if (pcnet32_alloc_ring(dev, dev->name)) {
- pcnet32_free_ring(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- return -ENOMEM;
+ if (netif_running(dev)) {
+ pcnet32_netif_start(dev);
+ pcnet32_restart(dev, CSR0_NORMAL);
}
spin_unlock_irqrestore(&lp->lock, flags);
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk(KERN_INFO PFX
+ if (netif_msg_drv(lp))
+ printk(KERN_INFO
"%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
lp->rx_ring_size, lp->tx_ring_size);
- if (netif_running(dev))
- pcnet32_open(dev);
-
return 0;
}
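
(For reference, the rounding above in pcnet32_set_ringparam maps any requested
count to the next power of two, with a floor of 4 entries so the loopback test
keeps working. A standalone sketch of the same computation, using a made-up
helper name:

	static unsigned int pcnet32_ring_log2(unsigned int requested,
					      unsigned int log_max)
	{
		unsigned int i;

		/* requested has already been clamped to (1 << log_max) */
		for (i = 2; i <= log_max; i++)
			if (requested <= (1U << i))
				break;
		return i;	/* e.g. requested = 100 -> i = 7 -> 128 entries */
	}

so an ethtool ring-parameter request is always realized as a power-of-two
ring size.)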
@@ -633,29 +880,27 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
unsigned long flags;
unsigned long ticks;
- *data1 = 1; /* status of test, default to fail */
rc = 1; /* default to fail */
if (netif_running(dev))
pcnet32_close(dev);
spin_lock_irqsave(&lp->lock, flags);
+ lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+
+ numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
/* Reset the PCNET32 */
lp->a.reset(ioaddr);
+ lp->a.write_csr(ioaddr, CSR4, 0x0915);
/* switch pcnet32 to 32bit mode */
lp->a.write_bcr(ioaddr, 20, 2);
- lp->init_block.mode =
- le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
- lp->init_block.filter[0] = 0;
- lp->init_block.filter[1] = 0;
-
/* purge & init rings but don't actually restart */
pcnet32_restart(dev, 0x0000);
- lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
+ lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
/* Initialize Transmit buffers. */
size = data_len + 15;
@@ -697,14 +942,15 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
}
}
- x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */
- x = x | 0x0002;
- a->write_bcr(ioaddr, 32, x);
+ x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
+ a->write_bcr(ioaddr, 32, x | 0x0002);
- lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */
+ /* set int loopback in CSR15 */
+ x = a->read_csr(ioaddr, CSR15) & 0xfffc;
+ lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
teststatus = le16_to_cpu(0x8000);
- lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */
+ lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
/* Check status of descriptors */
for (x = 0; x < numbuffs; x++) {
@@ -712,7 +958,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
rmb();
while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
spin_unlock_irqrestore(&lp->lock, flags);
- mdelay(1);
+ msleep(1);
spin_lock_irqsave(&lp->lock, flags);
rmb();
ticks++;
@@ -725,7 +971,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
}
}
- lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
+ lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
wmb();
if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
@@ -758,25 +1004,24 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
}
x++;
}
- if (!rc) {
- *data1 = 0;
- }
clean_up:
+ *data1 = rc;
pcnet32_purge_tx_ring(dev);
- x = a->read_csr(ioaddr, 15) & 0xFFFF;
- a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
- x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
- x = x & ~0x0002;
- a->write_bcr(ioaddr, 32, x);
+ x = a->read_csr(ioaddr, CSR15);
+ a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */
- spin_unlock_irqrestore(&lp->lock, flags);
+ x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
+ a->write_bcr(ioaddr, 32, (x & ~0x0002));
if (netif_running(dev)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
pcnet32_open(dev);
} else {
+ pcnet32_purge_rx_ring(dev);
lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
+ spin_unlock_irqrestore(&lp->lock, flags);
}
return (rc);
@@ -839,6 +1084,47 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
return 0;
}
+/*
+ * lp->lock must be held.
+ */
+static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
+ int can_sleep)
+{
+ int csr5;
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ int ticks;
+
+ /* really old chips have to be stopped. */
+ if (lp->chip_version < PCNET32_79C970A)
+ return 0;
+
+ /* set SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = a->read_csr(ioaddr, CSR5);
+ a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
+
+ /* poll waiting for bit to be set */
+ ticks = 0;
+ while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
+ spin_unlock_irqrestore(&lp->lock, *flags);
+ if (can_sleep)
+ msleep(1);
+ else
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, *flags);
+ ticks++;
+ if (ticks > 200) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG
+ "%s: Error getting into suspend!\n",
+ dev->name);
+ return 0;
+ }
+ }
+ return 1;
+}
+
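+/*
+ * Note: a successful pcnet32_suspend() is paired with a manual resume by the
+ * caller (read CSR5 and clear CSR5_SUSPEND once the register accesses are
+ * done), as pcnet32_get_regs() and pcnet32_set_multicast_list() below do.
+ */
+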
#define PCNET32_REGS_PER_PHY 32
#define PCNET32_MAX_PHYS 32
static int pcnet32_get_regs_len(struct net_device *dev)
@@ -857,32 +1143,13 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
struct pcnet32_private *lp = dev->priv;
struct pcnet32_access *a = &lp->a;
ulong ioaddr = dev->base_addr;
- int ticks;
unsigned long flags;
spin_lock_irqsave(&lp->lock, flags);
- csr0 = a->read_csr(ioaddr, 0);
- if (!(csr0 & 0x0004)) { /* If not stopped */
- /* set SUSPEND (SPND) - CSR5 bit 0 */
- a->write_csr(ioaddr, 5, 0x0001);
-
- /* poll waiting for bit to be set */
- ticks = 0;
- while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
- spin_unlock_irqrestore(&lp->lock, flags);
- mdelay(1);
- spin_lock_irqsave(&lp->lock, flags);
- ticks++;
- if (ticks > 200) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: Error getting into suspend!\n",
- dev->name);
- break;
- }
- }
- }
+ csr0 = a->read_csr(ioaddr, CSR0);
+ if (!(csr0 & CSR0_STOP)) /* If not stopped */
+ pcnet32_suspend(dev, &flags, 1);
/* read address PROM */
for (i = 0; i < 16; i += 2)
@@ -919,9 +1186,12 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
}
}
- if (!(csr0 & 0x0004)) { /* If not stopped */
+ if (!(csr0 & CSR0_STOP)) { /* If not stopped */
+ int csr5;
+
/* clear SUSPEND (SPND) - CSR5 bit 0 */
- a->write_csr(ioaddr, 5, 0x0000);
+ csr5 = a->read_csr(ioaddr, CSR5);
+ a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
}
spin_unlock_irqrestore(&lp->lock, flags);
@@ -952,7 +1222,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
/* only probes for non-PCI devices, the rest are handled by
* pci_register_driver via pcnet32_probe_pci */
-static void __devinit pcnet32_probe_vlbus(void)
+static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
{
unsigned int *port, ioaddr;
@@ -1268,6 +1538,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
lp->mii_if.reg_num_mask = 0x1f;
lp->dxsuflo = dxsuflo;
lp->mii = mii;
+ lp->chip_version = chip_version;
lp->msg_enable = pcnet32_debug;
if ((cards_found >= MAX_UNITS)
|| (options[cards_found] > sizeof(options_mapping)))
@@ -1436,7 +1707,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name)
lp->tx_ring_size,
&lp->tx_ring_dma_addr);
if (lp->tx_ring == NULL) {
- if (pcnet32_debug & NETIF_MSG_DRV)
+ if (netif_msg_drv(lp))
printk("\n" KERN_ERR PFX
"%s: Consistent memory allocation failed.\n",
name);
@@ -1448,52 +1719,48 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name)
lp->rx_ring_size,
&lp->rx_ring_dma_addr);
if (lp->rx_ring == NULL) {
- if (pcnet32_debug & NETIF_MSG_DRV)
+ if (netif_msg_drv(lp))
printk("\n" KERN_ERR PFX
"%s: Consistent memory allocation failed.\n",
name);
return -ENOMEM;
}
- lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
+ lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
if (!lp->tx_dma_addr) {
- if (pcnet32_debug & NETIF_MSG_DRV)
+ if (netif_msg_drv(lp))
printk("\n" KERN_ERR PFX
"%s: Memory allocation failed.\n", name);
return -ENOMEM;
}
- memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
- lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
+ lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
if (!lp->rx_dma_addr) {
- if (pcnet32_debug & NETIF_MSG_DRV)
+ if (netif_msg_drv(lp))
printk("\n" KERN_ERR PFX
"%s: Memory allocation failed.\n", name);
return -ENOMEM;
}
- memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
- lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
+ lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!lp->tx_skbuff) {
- if (pcnet32_debug & NETIF_MSG_DRV)
+ if (netif_msg_drv(lp))
printk("\n" KERN_ERR PFX
"%s: Memory allocation failed.\n", name);
return -ENOMEM;
}
- memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
- lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
+ lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!lp->rx_skbuff) {
- if (pcnet32_debug & NETIF_MSG_DRV)
+ if (netif_msg_drv(lp))
printk("\n" KERN_ERR PFX
"%s: Memory allocation failed.\n", name);
return -ENOMEM;
}
- memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
return 0;
}
@@ -1582,10 +1849,7 @@ static int pcnet32_open(struct net_device *dev)
val |= 2;
} else if (lp->options & PCNET32_PORT_ASEL) {
/* workaround of xSeries250, turn on for 79C975 only */
- i = ((lp->a.read_csr(ioaddr, 88) |
- (lp->a.
- read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff;
- if (i == 0x2627)
+ if (lp->chip_version == 0x2627)
val |= 3;
}
lp->a.write_bcr(ioaddr, 9, val);
@@ -1729,9 +1993,11 @@ static int pcnet32_open(struct net_device *dev)
netif_start_queue(dev);
- /* Print the link status and start the watchdog */
- pcnet32_check_media(dev, 1);
- mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+ if (lp->chip_version >= PCNET32_79C970A) {
+ /* Print the link status and start the watchdog */
+ pcnet32_check_media(dev, 1);
+ mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+ }
i = 0;
while (i++ < 100)
@@ -1757,16 +2023,7 @@ static int pcnet32_open(struct net_device *dev)
err_free_ring:
/* free any allocated skbuffs */
- for (i = 0; i < lp->rx_ring_size; i++) {
- lp->rx_ring[i].status = 0;
- if (lp->rx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
- PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
- dev_kfree_skb(lp->rx_skbuff[i]);
- }
- lp->rx_skbuff[i] = NULL;
- lp->rx_dma_addr[i] = 0;
- }
+ pcnet32_purge_rx_ring(dev);
/*
* Switch back to 16bit mode to avoid problems with dumb
@@ -2348,7 +2605,6 @@ static int pcnet32_close(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr;
struct pcnet32_private *lp = dev->priv;
- int i;
unsigned long flags;
del_timer_sync(&lp->watchdog_timer);
@@ -2379,31 +2635,8 @@ static int pcnet32_close(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
- /* free all allocated skbuffs */
- for (i = 0; i < lp->rx_ring_size; i++) {
- lp->rx_ring[i].status = 0;
- wmb(); /* Make sure adapter sees owner change */
- if (lp->rx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
- PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
- dev_kfree_skb(lp->rx_skbuff[i]);
- }
- lp->rx_skbuff[i] = NULL;
- lp->rx_dma_addr[i] = 0;
- }
-
- for (i = 0; i < lp->tx_ring_size; i++) {
- lp->tx_ring[i].status = 0; /* CPU owns buffer */
- wmb(); /* Make sure adapter sees owner change */
- if (lp->tx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
- lp->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
- dev_kfree_skb(lp->tx_skbuff[i]);
- }
- lp->tx_skbuff[i] = NULL;
- lp->tx_dma_addr[i] = 0;
- }
+ pcnet32_purge_rx_ring(dev);
+ pcnet32_purge_tx_ring(dev);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2433,6 +2666,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
volatile struct pcnet32_init_block *ib = &lp->init_block;
volatile u16 *mcast_table = (u16 *) & ib->filter;
struct dev_mc_list *dmi = dev->mc_list;
+ unsigned long ioaddr = dev->base_addr;
char *addrs;
int i;
u32 crc;
@@ -2441,6 +2675,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
ib->filter[0] = 0xffffffff;
ib->filter[1] = 0xffffffff;
+ lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
+ lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
+ lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
+ lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
return;
}
/* clear the multicast filter */
@@ -2462,6 +2700,9 @@ static void pcnet32_load_multicast(struct net_device *dev)
le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) |
(1 << (crc & 0xf)));
}
+ for (i = 0; i < 4; i++)
+ lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
+ le16_to_cpu(mcast_table[i]));
return;
}
@@ -2472,8 +2713,11 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr, flags;
struct pcnet32_private *lp = dev->priv;
+ int csr15, suspended;
spin_lock_irqsave(&lp->lock, flags);
+ suspended = pcnet32_suspend(dev, &flags, 0);
+ csr15 = lp->a.read_csr(ioaddr, CSR15);
if (dev->flags & IFF_PROMISC) {
/* Log any net taps. */
if (netif_msg_hw(lp))
@@ -2482,15 +2726,24 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
lp->init_block.mode =
le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
7);
+ lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
} else {
lp->init_block.mode =
le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
pcnet32_load_multicast(dev);
}
- lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
- pcnet32_restart(dev, 0x0042); /* Resume normal operation */
- netif_wake_queue(dev);
+ if (suspended) {
+ int csr5;
+ /* clear SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = lp->a.read_csr(ioaddr, CSR5);
+ lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
+ } else {
+ lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ netif_wake_queue(dev);
+ }
spin_unlock_irqrestore(&lp->lock, flags);
}
@@ -2730,7 +2983,7 @@ static int __init pcnet32_init_module(void)
/* should we find any remaining VLbus devices ? */
if (pcnet32vlb)
- pcnet32_probe_vlbus();
+ pcnet32_probe_vlbus(pcnet32_portlist);
if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 2ba6d3a40e2e..b79ec0d7480f 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -56,5 +56,22 @@ config SMSC_PHY
---help---
Currently supports the LAN83C185 PHY
+config FIXED_PHY
+ tristate "Drivers for PHY emulation on fixed speed/link"
+ depends on PHYLIB
+ ---help---
+ Adds a driver to the PHY layer to cover boards that have no PHY bound but
+ can still control speed/link in software. The relevant MII speed/duplex
+ parameters can then be handled in a user-specified function.
+ Currently tested with mpc866ads.
+
+config FIXED_MII_10_FDX
+ bool "Emulation for 10M Fdx fixed PHY behavior"
+ depends on FIXED_PHY
+
+config FIXED_MII_100_FDX
+ bool "Emulation for 100M Fdx fixed PHY behavior"
+ depends on FIXED_PHY
+
endmenu
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index a00e61942525..320f8323123f 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 3efb715c28dc..ae60e6e4107c 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -103,7 +103,22 @@ static int cis820x_config_intr(struct phy_device *phydev)
return err;
}
-/* Cicada 820x */
+/* Cicada 8201, a.k.a Vitesse VSC8201 */
+static struct phy_driver cis8201_driver = {
+ .phy_id = 0x000fc410,
+ .name = "Cicada Cis8201",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &cis820x_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &cis820x_ack_interrupt,
+ .config_intr = &cis820x_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+/* Cicada 8204 */
static struct phy_driver cis8204_driver = {
.phy_id = 0x000fc440,
.name = "Cicada Cis8204",
@@ -118,15 +133,30 @@ static struct phy_driver cis8204_driver = {
.driver = { .owner = THIS_MODULE,},
};
-static int __init cis8204_init(void)
+static int __init cicada_init(void)
{
- return phy_driver_register(&cis8204_driver);
+ int ret;
+
+ ret = phy_driver_register(&cis8204_driver);
+ if (ret)
+ goto err1;
+
+ ret = phy_driver_register(&cis8201_driver);
+ if (ret)
+ goto err2;
+ return 0;
+
+err2:
+ phy_driver_unregister(&cis8204_driver);
+err1:
+ return ret;
}
-static void __exit cis8204_exit(void)
+static void __exit cicada_exit(void)
{
phy_driver_unregister(&cis8204_driver);
+ phy_driver_unregister(&cis8201_driver);
}
-module_init(cis8204_init);
-module_exit(cis8204_exit);
+module_init(cicada_init);
+module_exit(cicada_exit);
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
new file mode 100644
index 000000000000..341036df4710
--- /dev/null
+++ b/drivers/net/phy/fixed.c
@@ -0,0 +1,358 @@
+/*
+ * drivers/net/phy/fixed.c
+ *
+ * Driver for fixed PHYs, when transceiver is able to operate in one fixed mode.
+ *
+ * Author: Vitaly Bordug
+ *
+ * Copyright (c) 2006 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#define MII_REGS_NUM 7
+
+/*
+ The idea is to emulate normal PHY behaviour by responding to MII register
+ reads with pre-defined values, so that the read_status hook can pick up
+ all the needed info.
+*/
+
+struct fixed_phy_status {
+ u8 link;
+ u16 speed;
+ u8 duplex;
+};
+
+/*-----------------------------------------------------------------------------
+ * Private information holder for mii_bus
+ *-----------------------------------------------------------------------------*/
+struct fixed_info {
+ u16 *regs;
+ u8 regs_num;
+ struct fixed_phy_status phy_status;
+ struct phy_device *phydev; /* pointer to the container */
+ /* link & speed cb */
+ int(*link_update)(struct net_device*, struct fixed_phy_status*);
+
+};
+
+/*-----------------------------------------------------------------------------
+ * If something weird is required to be done with link/speed,
+ * network driver is able to assign a function to implement this.
+ * May be useful for PHY's that need to be software-driven.
+ *-----------------------------------------------------------------------------*/
+int fixed_mdio_set_link_update(struct phy_device* phydev,
+ int(*link_update)(struct net_device*, struct fixed_phy_status*))
+{
+ struct fixed_info *fixed;
+
+ if(link_update == NULL)
+ return -EINVAL;
+
+ if(phydev) {
+ if(phydev->bus) {
+ fixed = phydev->bus->priv;
+ fixed->link_update = link_update;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(fixed_mdio_set_link_update);
+
+/*-----------------------------------------------------------------------------
+ * This is used for updating internal mii regs from the status
+ *-----------------------------------------------------------------------------*/
+static int fixed_mdio_update_regs(struct fixed_info *fixed)
+{
+ u16 *regs = fixed->regs;
+ u16 bmsr = 0;
+ u16 bmcr = 0;
+
+ if(!regs) {
+ printk(KERN_ERR "%s: regs not set up", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if(fixed->phy_status.link)
+ bmsr |= BMSR_LSTATUS;
+
+ if(fixed->phy_status.duplex) {
+ bmcr |= BMCR_FULLDPLX;
+
+ switch ( fixed->phy_status.speed ) {
+ case 100:
+ bmsr |= BMSR_100FULL;
+ bmcr |= BMCR_SPEED100;
+ break;
+
+ case 10:
+ bmsr |= BMSR_10FULL;
+ break;
+ }
+ } else {
+ switch ( fixed->phy_status.speed ) {
+ case 100:
+ bmsr |= BMSR_100HALF;
+ bmcr |= BMCR_SPEED100;
+ break;
+
+ case 10:
+ bmsr |= BMSR_10HALF;
+ break;
+ }
+ }
+
+ regs[MII_BMCR] = bmcr;
+ regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx*/
+
+ return 0;
+}
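+
+/*
+ * Worked example: the CONFIG_FIXED_MII_100_FDX device is created with
+ * link = 1, speed = 100, duplex = 1, so the emulated registers become
+ *
+ *	regs[MII_BMCR] = BMCR_SPEED100 | BMCR_FULLDPLX;
+ *	regs[MII_BMSR] = BMSR_LSTATUS | BMSR_100FULL | 0x800;
+ *
+ * i.e. link up, 100 Mbit full duplex, plus the always-advertised 10 half
+ * capability.
+ */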
+
+static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ struct fixed_info *fixed = bus->priv;
+
+ /* if user has registered link update callback, use it */
+ if(fixed->phydev)
+ if(fixed->phydev->attached_dev) {
+ if(fixed->link_update) {
+ fixed->link_update(fixed->phydev->attached_dev,
+ &fixed->phy_status);
+ fixed_mdio_update_regs(fixed);
+ }
+ }
+
+ if ((unsigned int)location >= fixed->regs_num)
+ return -1;
+ return fixed->regs[location];
+}
+
+static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
+{
+ /* do nothing for now*/
+ return 0;
+}
+
+static int fixed_mii_reset(struct mii_bus *bus)
+{
+ /*nothing here - no way/need to reset it*/
+ return 0;
+}
+
+static int fixed_config_aneg(struct phy_device *phydev)
+{
+ /* :TODO:03/13/2006 09:45:37 PM::
+ The full autoneg functionality can be emulated,
+ but there is no need for anything here for now
+ */
+ return 0;
+}
+
+/*-----------------------------------------------------------------------------
+ * the manual bind will do the magic - with phy_id_mask == 0
+ * match will never return true...
+ *-----------------------------------------------------------------------------*/
+static struct phy_driver fixed_mdio_driver = {
+ .name = "Fixed PHY",
+ .features = PHY_BASIC_FEATURES,
+ .config_aneg = fixed_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+/*-----------------------------------------------------------------------------
+ * This func is used to create all the necessary stuff, bind
+ * the fixed phy driver and register all it on the mdio_bus_type.
+ * speed is either 10 or 100, duplex is boolean.
+ * number is used to create multiple fixed PHYs, so that several devices can
+ * utilize them simultaneously.
+ *-----------------------------------------------------------------------------*/
+static int fixed_mdio_register_device(int number, int speed, int duplex)
+{
+ struct mii_bus *new_bus;
+ struct fixed_info *fixed;
+ struct phy_device *phydev;
+ int err = 0;
+
+ struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+
+ if (NULL == dev)
+ return -ENOMEM;
+
+ new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
+
+ if (NULL == new_bus) {
+ kfree(dev);
+ return -ENOMEM;
+ }
+ fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL);
+
+ if (NULL == fixed) {
+ kfree(dev);
+ kfree(new_bus);
+ return -ENOMEM;
+ }
+
+ fixed->regs = kzalloc(MII_REGS_NUM*sizeof(int), GFP_KERNEL);
+ fixed->regs_num = MII_REGS_NUM;
+ fixed->phy_status.speed = speed;
+ fixed->phy_status.duplex = duplex;
+ fixed->phy_status.link = 1;
+
+ new_bus->name = "Fixed MII Bus",
+ new_bus->read = &fixed_mii_read,
+ new_bus->write = &fixed_mii_write,
+ new_bus->reset = &fixed_mii_reset,
+
+ /*set up workspace*/
+ fixed_mdio_update_regs(fixed);
+ new_bus->priv = fixed;
+
+ new_bus->dev = dev;
+ dev_set_drvdata(dev, new_bus);
+
+ /* create phy_device and register it on the mdio bus */
+ phydev = phy_device_create(new_bus, 0, 0);
+
+ /*
+ Put the phydev pointer into the fixed_info so that the bus read/write
+ code can reach, for instance, the attached netdev. That is only needed
+ when a user-specified link_update callback is in use.
+ */
+ fixed->phydev = phydev;
+
+ if(NULL == phydev) {
+ err = -ENOMEM;
+ goto device_create_fail;
+ }
+
+ phydev->irq = -1;
+ phydev->dev.bus = &mdio_bus_type;
+
+ if(number)
+ snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
+ "fixed_%d@%d:%d", number, speed, duplex);
+ else
+ snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
+ "fixed@%d:%d", speed, duplex);
+ phydev->bus = new_bus;
+
+ err = device_register(&phydev->dev);
+ if(err) {
+ printk(KERN_ERR "Phy %s failed to register\n",
+ phydev->dev.bus_id);
+ goto bus_register_fail;
+ }
+
+ /*
+ the mdio bus matches drivers by phy_id; rather than fake an ID,
+ we bind the driver here by hand. It will be the same for all the
+ fixed PHYs anyway.
+ */
+ down_write(&phydev->dev.bus->subsys.rwsem);
+
+ phydev->dev.driver = &fixed_mdio_driver.driver;
+
+ err = phydev->dev.driver->probe(&phydev->dev);
+ if(err < 0) {
+ printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id);
+ up_write(&phydev->dev.bus->subsys.rwsem);
+ goto probe_fail;
+ }
+
+ device_bind_driver(&phydev->dev);
+ up_write(&phydev->dev.bus->subsys.rwsem);
+
+ return 0;
+
+probe_fail:
+ device_unregister(&phydev->dev);
+bus_register_fail:
+ kfree(phydev);
+device_create_fail:
+ kfree(dev);
+ kfree(new_bus);
+ kfree(fixed);
+
+ return err;
+}
+
+
+MODULE_DESCRIPTION("Fixed PHY device & driver for PAL");
+MODULE_AUTHOR("Vitaly Bordug");
+MODULE_LICENSE("GPL");
+
+static int __init fixed_init(void)
+{
+ int ret;
+ int duplex = 0;
+
+ /* register on the bus... Not expected to be matched with anything there... */
+ phy_driver_register(&fixed_mdio_driver);
+
+ /* So let the fun begin...
+ We will create several mdio devices here, and will bind the upper
+ driver to them.
+
+ External software can then look up the PHY bus by searching for
+ fixed@speed:duplex, e.g. fixed@100:1, to be connected to the
+ virtual 100M Fdx PHY.
+
+ If several virtual PHYs are required, the bus_id takes the form
+ fixed_<num>@<speed>:<duplex>, which also makes it possible to define a
+ driver-specific link control callback, if for instance the PHY is
+ completely SW-driven.
+ */
+
+#ifdef CONFIG_FIXED_MII_DUPLEX
+ duplex = 1;
+#endif
+
+#ifdef CONFIG_FIXED_MII_100_FDX
+ fixed_mdio_register_device(0, 100, 1);
+#endif
+
+#ifdef CONFIG_FIXED_MII_10_FDX
+ fixed_mdio_register_device(0, 10, 1);
+#endif
+ return 0;
+}
+
+static void __exit fixed_exit(void)
+{
+ phy_driver_unregister(&fixed_mdio_driver);
+ /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */
+}
+
+module_init(fixed_init);
+module_exit(fixed_exit);
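
For context, a MAC driver that wants to drive the reported link state itself
would hook the callback exported above; a hedged sketch (my_phydev and
my_link_update are placeholders, not part of this patch):

	static int my_link_update(struct net_device *dev,
				  struct fixed_phy_status *status)
	{
		/* report whatever the board/firmware actually knows */
		status->link = 1;
		status->speed = 100;
		status->duplex = 1;
		return 0;
	}

	/* after the driver has looked up the "fixed@100:1" phy_device */
	if (fixed_mdio_set_link_update(my_phydev, my_link_update))
		printk(KERN_WARNING "no fixed PHY bound\n");

With a callback registered, fixed_mii_read() refreshes the emulated registers
from fixed_phy_status on every read, so the generic PHY code sees the
driver-supplied state.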
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 1dde390c164d..cf6660c93ffa 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -159,6 +159,7 @@ struct bus_type mdio_bus_type = {
.suspend = mdio_bus_suspend,
.resume = mdio_bus_resume,
};
+EXPORT_SYMBOL(mdio_bus_type);
int __init mdio_bus_init(void)
{
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7d5c2233c252..f5aad77288f9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -419,9 +419,8 @@ void phy_start_machine(struct phy_device *phydev,
/* phy_stop_machine
*
- * description: Stops the state machine timer, sets the state to
- * UP (unless it wasn't up yet), and then frees the interrupt,
- * if it is in use. This function must be called BEFORE
+ * description: Stops the state machine timer, sets the state to UP
+ * (unless it wasn't up yet). This function must be called BEFORE
* phy_detach.
*/
void phy_stop_machine(struct phy_device *phydev)
@@ -433,9 +432,6 @@ void phy_stop_machine(struct phy_device *phydev)
phydev->state = PHY_UP;
spin_unlock(&phydev->lock);
- if (phydev->irq != PHY_POLL)
- phy_stop_interrupts(phydev);
-
phydev->adjust_state = NULL;
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1bc1e032c5d6..2d1ecfdc80db 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -45,6 +45,35 @@ static struct phy_driver genphy_driver;
extern int mdio_bus_init(void);
extern void mdio_bus_exit(void);
+struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+{
+ struct phy_device *dev;
+ /* We allocate the device, and initialize the
+ * default values */
+ dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
+
+ if (NULL == dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->speed = 0;
+ dev->duplex = -1;
+ dev->pause = dev->asym_pause = 0;
+ dev->link = 1;
+
+ dev->autoneg = AUTONEG_ENABLE;
+
+ dev->addr = addr;
+ dev->phy_id = phy_id;
+ dev->bus = bus;
+
+ dev->state = PHY_DOWN;
+
+ spin_lock_init(&dev->lock);
+
+ return dev;
+}
+EXPORT_SYMBOL(phy_device_create);
+
/* get_phy_device
*
* description: Reads the ID registers of the PHY at addr on the
@@ -78,27 +107,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
if (0xffffffff == phy_id)
return NULL;
- /* Otherwise, we allocate the device, and initialize the
- * default values */
- dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
-
- if (NULL == dev)
- return ERR_PTR(-ENOMEM);
-
- dev->speed = 0;
- dev->duplex = -1;
- dev->pause = dev->asym_pause = 0;
- dev->link = 1;
-
- dev->autoneg = AUTONEG_ENABLE;
-
- dev->addr = addr;
- dev->phy_id = phy_id;
- dev->bus = bus;
-
- dev->state = PHY_DOWN;
-
- spin_lock_init(&dev->lock);
+ dev = phy_device_create(bus, addr, phy_id);
return dev;
}
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0ec6e9d57b94..c872f7c6cce3 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -192,7 +192,7 @@ struct cardmap {
void *ptr[CARDMAP_WIDTH];
};
static void *cardmap_get(struct cardmap *map, unsigned int nr);
-static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
+static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
static unsigned int cardmap_find_first_free(struct cardmap *map);
static void cardmap_destroy(struct cardmap **map);
@@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan)
{
struct channel *pch;
- pch = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (pch == 0)
return -ENOMEM;
- memset(pch, 0, sizeof(struct channel));
pch->ppp = NULL;
pch->chan = chan;
chan->ppp = pch;
@@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp)
int ret = -ENOMEM;
int i;
- ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+ ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
if (!ppp)
goto out;
dev = alloc_netdev(0, "", ppp_setup);
if (!dev)
goto out1;
- memset(ppp, 0, sizeof(struct ppp));
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
@@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp)
}
atomic_inc(&ppp_unit_count);
- cardmap_set(&all_ppp_units, unit, ppp);
+ ret = cardmap_set(&all_ppp_units, unit, ppp);
+ if (ret != 0)
+ goto out3;
+
mutex_unlock(&all_ppp_mutex);
*retp = 0;
return ppp;
+out3:
+ atomic_dec(&ppp_unit_count);
out2:
mutex_unlock(&all_ppp_mutex);
free_netdev(dev);
@@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr)
return NULL;
}
-static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
+static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
{
struct cardmap *p;
int i;
@@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
do {
/* need a new top level */
- struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
- memset(np, 0, sizeof(*np));
+ struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ goto enomem;
np->ptr[0] = p;
if (p != NULL) {
np->shift = p->shift + CARDMAP_ORDER;
@@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
while (p->shift > 0) {
i = (nr >> p->shift) & CARDMAP_MASK;
if (p->ptr[i] == NULL) {
- struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
- memset(np, 0, sizeof(*np));
+ struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ goto enomem;
np->shift = p->shift - CARDMAP_ORDER;
np->parent = p;
p->ptr[i] = np;
@@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
set_bit(i, &p->inuse);
else
clear_bit(i, &p->inuse);
+ return 0;
+ enomem:
+ return -ENOMEM;
}
static unsigned int cardmap_find_first_free(struct cardmap *map)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 16a0ef1b1369..4c2f575faad7 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1406,7 +1406,7 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
dev = alloc_etherdev(sizeof (*tp));
if (dev == NULL) {
if (netif_msg_drv(&debug))
- printk(KERN_ERR PFX "unable to alloc new ethernet\n");
+ dev_err(&pdev->dev, "unable to alloc new ethernet\n");
goto err_out;
}
@@ -1418,10 +1418,8 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
- if (netif_msg_probe(tp)) {
- printk(KERN_ERR PFX "%s: enable failure\n",
- pci_name(pdev));
- }
+ if (netif_msg_probe(tp))
+ dev_err(&pdev->dev, "enable failure\n");
goto err_out_free_dev;
}
@@ -1437,37 +1435,32 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
} else {
- if (netif_msg_probe(tp)) {
- printk(KERN_ERR PFX
+ if (netif_msg_probe(tp))
+ dev_err(&pdev->dev,
"PowerManagement capability not found.\n");
- }
}
/* make sure PCI base addr 1 is MMIO */
if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
- if (netif_msg_probe(tp)) {
- printk(KERN_ERR PFX
+ if (netif_msg_probe(tp))
+ dev_err(&pdev->dev,
"region #1 not an MMIO resource, aborting\n");
- }
rc = -ENODEV;
goto err_out_mwi;
}
/* check for weird/broken PCI region reporting */
if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) {
- if (netif_msg_probe(tp)) {
- printk(KERN_ERR PFX
+ if (netif_msg_probe(tp))
+ dev_err(&pdev->dev,
"Invalid PCI region size(s), aborting\n");
- }
rc = -ENODEV;
goto err_out_mwi;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
- if (netif_msg_probe(tp)) {
- printk(KERN_ERR PFX "%s: could not request regions.\n",
- pci_name(pdev));
- }
+ if (netif_msg_probe(tp))
+ dev_err(&pdev->dev, "could not request regions.\n");
goto err_out_mwi;
}
@@ -1480,10 +1473,9 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
} else {
rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (rc < 0) {
- if (netif_msg_probe(tp)) {
- printk(KERN_ERR PFX
+ if (netif_msg_probe(tp))
+ dev_err(&pdev->dev,
"DMA configuration failed.\n");
- }
goto err_out_free_res;
}
}
@@ -1494,7 +1486,7 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE);
if (ioaddr == NULL) {
if (netif_msg_probe(tp))
- printk(KERN_ERR PFX "cannot remap MMIO, aborting\n");
+ dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
goto err_out_free_res;
}
@@ -1526,9 +1518,9 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
if (i < 0) {
/* Unknown chip: assume array element #0, original RTL-8169 */
if (netif_msg_probe(tp)) {
- printk(KERN_DEBUG PFX "PCI device %s: "
+ dev_printk(KERN_DEBUG, &pdev->dev,
"unknown chip version, assuming %s\n",
- pci_name(pdev), rtl_chip_info[0].name);
+ rtl_chip_info[0].name);
}
i++;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c6b77acb35ef..e72e0e099060 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -71,12 +71,13 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
+#include <asm/irq.h>
/* local include */
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.14.2"
+#define DRV_VERSION "2.0.15.2"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
@@ -370,38 +371,50 @@ static const u64 fix_mac[] = {
END_SIGN
};
+MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+
/* Module Loadable parameters. */
-static unsigned int tx_fifo_num = 1;
-static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
- {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
-static unsigned int rx_ring_num = 1;
-static unsigned int rx_ring_sz[MAX_RX_RINGS] =
- {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
-static unsigned int rts_frm_len[MAX_RX_RINGS] =
- {[0 ...(MAX_RX_RINGS - 1)] = 0 };
-static unsigned int rx_ring_mode = 1;
-static unsigned int use_continuous_tx_intrs = 1;
-static unsigned int rmac_pause_time = 0x100;
-static unsigned int mc_pause_threshold_q0q3 = 187;
-static unsigned int mc_pause_threshold_q4q7 = 187;
-static unsigned int shared_splits;
-static unsigned int tmac_util_period = 5;
-static unsigned int rmac_util_period = 5;
-static unsigned int bimodal = 0;
-static unsigned int l3l4hdr_size = 128;
-#ifndef CONFIG_S2IO_NAPI
-static unsigned int indicate_max_pkts;
-#endif
+S2IO_PARM_INT(tx_fifo_num, 1);
+S2IO_PARM_INT(rx_ring_num, 1);
+
+
+S2IO_PARM_INT(rx_ring_mode, 1);
+S2IO_PARM_INT(use_continuous_tx_intrs, 1);
+S2IO_PARM_INT(rmac_pause_time, 0x100);
+S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
+S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
+S2IO_PARM_INT(shared_splits, 0);
+S2IO_PARM_INT(tmac_util_period, 5);
+S2IO_PARM_INT(rmac_util_period, 5);
+S2IO_PARM_INT(bimodal, 0);
+S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
-static unsigned int rxsync_frequency = 3;
+S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
-static unsigned int intr_type = 0;
+S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
-static unsigned int lro = 0;
+S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
* aggregation happens until we hit max IP pkt size(64K)
*/
-static unsigned int lro_max_pkts = 0xFFFF;
+S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
+#ifndef CONFIG_S2IO_NAPI
+S2IO_PARM_INT(indicate_max_pkts, 0);
+#endif
+
+static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
+ {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
+static unsigned int rx_ring_sz[MAX_RX_RINGS] =
+ {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
+static unsigned int rts_frm_len[MAX_RX_RINGS] =
+ {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+
+module_param_array(tx_fifo_len, uint, NULL, 0);
+module_param_array(rx_ring_sz, uint, NULL, 0);
+module_param_array(rts_frm_len, uint, NULL, 0);
/*
* S2IO device table.
@@ -464,10 +477,9 @@ static int init_shared_mem(struct s2io_nic *nic)
size += config->tx_cfg[i].fifo_len;
}
if (size > MAX_AVAILABLE_TXDS) {
- DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
- __FUNCTION__);
+ DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
- return FAILURE;
+ return -EINVAL;
}
lst_size = (sizeof(TxD_t) * config->max_txds);
@@ -547,6 +559,7 @@ static int init_shared_mem(struct s2io_nic *nic)
nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
if (!nic->ufo_in_band_v)
return -ENOMEM;
+ memset(nic->ufo_in_band_v, 0, size);
/* Allocation and initialization of RXDs in Rings */
size = 0;
@@ -1213,7 +1226,7 @@ static int init_nic(struct s2io_nic *nic)
break;
}
- /* Enable Tx FIFO partition 0. */
+ /* Enable all configured Tx FIFO partitions */
val64 = readq(&bar0->tx_fifo_partition_0);
val64 |= (TX_FIFO_PARTITION_EN);
writeq(val64, &bar0->tx_fifo_partition_0);
@@ -1650,7 +1663,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
writeq(temp64, &bar0->general_int_mask);
/*
* If Hercules adapter enable GPIO otherwise
- * disabled all PCIX, Flash, MDIO, IIC and GPIO
+ * disable all PCIX, Flash, MDIO, IIC and GPIO
* interrupts for now.
* TODO
*/
@@ -1976,7 +1989,6 @@ static int start_nic(struct s2io_nic *nic)
XENA_dev_config_t __iomem *bar0 = nic->bar0;
struct net_device *dev = nic->dev;
register u64 val64 = 0;
- u16 interruptible;
u16 subid, i;
mac_info_t *mac_control;
struct config_param *config;
@@ -2047,16 +2059,6 @@ static int start_nic(struct s2io_nic *nic)
return FAILURE;
}
- /* Enable select interrupts */
- if (nic->intr_type != INTA)
- en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
- else {
- interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
- interruptible |= TX_PIC_INTR | RX_PIC_INTR;
- interruptible |= TX_MAC_INTR | RX_MAC_INTR;
- en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
- }
-
/*
* With some switches, link might be already up at this point.
* Because of this weird behavior, when we enable laser,
@@ -2130,7 +2132,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
frag->size, PCI_DMA_TODEVICE);
}
}
- txdlp->Host_Control = 0;
+ memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
return(skb);
}
@@ -2382,9 +2384,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
skb->data = (void *) (unsigned long)tmp;
skb->tail = (void *) (unsigned long)tmp;
- ((RxD3_t*)rxdp)->Buffer0_ptr =
- pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
+ if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
+ ((RxD3_t*)rxdp)->Buffer0_ptr =
+ pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
PCI_DMA_FROMDEVICE);
+ else
+ pci_dma_sync_single_for_device(nic->pdev,
+ (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
+ BUF0_LEN, PCI_DMA_FROMDEVICE);
rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
if (nic->rxd_mode == RXD_MODE_3B) {
/* Two buffer mode */
@@ -2397,10 +2404,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
(nic->pdev, skb->data, dev->mtu + 4,
PCI_DMA_FROMDEVICE);
- /* Buffer-1 will be dummy buffer not used */
- ((RxD3_t*)rxdp)->Buffer1_ptr =
- pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
- PCI_DMA_FROMDEVICE);
+ /* Buffer-1 will be dummy buffer. Not used */
+ if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
+ ((RxD3_t*)rxdp)->Buffer1_ptr =
+ pci_map_single(nic->pdev,
+ ba->ba_1, BUF1_LEN,
+ PCI_DMA_FROMDEVICE);
+ }
rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
rxdp->Control_2 |= SET_BUFFER2_SIZE_3
(dev->mtu + 4);
@@ -2625,23 +2635,23 @@ no_rx:
}
#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
/**
- * s2io_netpoll - Rx interrupt service handler for netpoll support
+ * s2io_netpoll - netpoll event handler entry point
* @dev : pointer to the device structure.
* Description:
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
+ * This function will be called by the upper layer to check for events on
+ * the interface in situations where interrupts are disabled. It is used
+ * for specific in-kernel networking tasks, such as remote consoles and
+ * kernel debugging over the network (for example, netdump in Red Hat).
*/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
static void s2io_netpoll(struct net_device *dev)
{
nic_t *nic = dev->priv;
mac_info_t *mac_control;
struct config_param *config;
XENA_dev_config_t __iomem *bar0 = nic->bar0;
- u64 val64;
+ u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
int i;
disable_irq(dev->irq);
@@ -2650,9 +2660,17 @@ static void s2io_netpoll(struct net_device *dev)
mac_control = &nic->mac_control;
config = &nic->config;
- val64 = readq(&bar0->rx_traffic_int);
writeq(val64, &bar0->rx_traffic_int);
+ writeq(val64, &bar0->tx_traffic_int);
+ /* we need to free the transmitted skbs, or netpoll will run out of skbs
+ * and applications built on top of it, such as netdump, will eventually
+ * fail.
+ */
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&mac_control->fifos[i]);
+
+ /* check for received packet and indicate up to network */
for (i = 0; i < config->rx_ring_num; i++)
rx_intr_handler(&mac_control->rings[i]);
@@ -2719,7 +2737,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
/* If your are next to put index then it's FIFO full condition */
if ((get_block == put_block) &&
(get_info.offset + 1) == put_info.offset) {
- DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
+ DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
break;
}
skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
@@ -2739,18 +2757,15 @@ static void rx_intr_handler(ring_info_t *ring_data)
HEADER_SNAP_SIZE,
PCI_DMA_FROMDEVICE);
} else if (nic->rxd_mode == RXD_MODE_3B) {
- pci_unmap_single(nic->pdev, (dma_addr_t)
+ pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
((RxD3_t*)rxdp)->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
pci_unmap_single(nic->pdev, (dma_addr_t)
- ((RxD3_t*)rxdp)->Buffer1_ptr,
- BUF1_LEN, PCI_DMA_FROMDEVICE);
- pci_unmap_single(nic->pdev, (dma_addr_t)
((RxD3_t*)rxdp)->Buffer2_ptr,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
} else {
- pci_unmap_single(nic->pdev, (dma_addr_t)
+ pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
PCI_DMA_FROMDEVICE);
pci_unmap_single(nic->pdev, (dma_addr_t)
@@ -3338,7 +3353,7 @@ static void s2io_reset(nic_t * sp)
/* Clear certain PCI/PCI-X fields after reset */
if (sp->device_type == XFRAME_II_DEVICE) {
- /* Clear parity err detect bit */
+ /* Clear "detected parity error" bit */
pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
/* Clearing PCIX Ecc status register */
@@ -3539,7 +3554,7 @@ static void restore_xmsi_data(nic_t *nic)
u64 val64;
int i;
- for (i=0; i< nic->avail_msix_vectors; i++) {
+ for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
writeq(nic->msix_info[i].data, &bar0->xmsi_data);
val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3558,7 +3573,7 @@ static void store_xmsi_data(nic_t *nic)
int i;
/* Store and display */
- for (i=0; i< nic->avail_msix_vectors; i++) {
+ for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
val64 = (BIT(15) | vBIT(i, 26, 6));
writeq(val64, &bar0->xmsi_access);
if (wait_for_msix_trans(nic, i)) {
@@ -3749,101 +3764,19 @@ static int s2io_open(struct net_device *dev)
if (err) {
DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
dev->name);
- if (err == -ENODEV)
- goto hw_init_failed;
- else
- goto hw_enable_failed;
- }
-
- /* Store the values of the MSIX table in the nic_t structure */
- store_xmsi_data(sp);
-
- /* After proper initialization of H/W, register ISR */
- if (sp->intr_type == MSI) {
- err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
- IRQF_SHARED, sp->name, dev);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: MSI registration \
-failed\n", dev->name);
- goto isr_registration_failed;
- }
- }
- if (sp->intr_type == MSI_X) {
- int i;
-
- for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
- if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
- sprintf(sp->desc1, "%s:MSI-X-%d-TX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_fifo_handle, 0, sp->desc1,
- sp->s2io_entries[i].arg);
- DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
- (unsigned long long)sp->msix_info[i].addr);
- } else {
- sprintf(sp->desc2, "%s:MSI-X-%d-RX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_ring_handle, 0, sp->desc2,
- sp->s2io_entries[i].arg);
- DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
- (unsigned long long)sp->msix_info[i].addr);
- }
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
-failed\n", dev->name, i);
- DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
- goto isr_registration_failed;
- }
- sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
- }
- }
- if (sp->intr_type == INTA) {
- err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
- sp->name, dev);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
- dev->name);
- goto isr_registration_failed;
- }
+ goto hw_init_failed;
}
if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
+ s2io_card_down(sp);
err = -ENODEV;
- goto setting_mac_address_failed;
+ goto hw_init_failed;
}
netif_start_queue(dev);
return 0;
-setting_mac_address_failed:
- if (sp->intr_type != MSI_X)
- free_irq(sp->pdev->irq, dev);
-isr_registration_failed:
- del_timer_sync(&sp->alarm_timer);
- if (sp->intr_type == MSI_X) {
- int i;
- u16 msi_control; /* Temp variable */
-
- for (i=1; (sp->s2io_entries[i].in_use ==
- MSIX_REGISTERED_SUCCESS); i++) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
-
- free_irq(vector, arg);
- }
- pci_disable_msix(sp->pdev);
-
- /* Temp */
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
- }
- else if (sp->intr_type == MSI)
- pci_disable_msi(sp->pdev);
-hw_enable_failed:
- s2io_reset(sp);
hw_init_failed:
if (sp->intr_type == MSI_X) {
if (sp->entries)
@@ -3874,7 +3807,7 @@ static int s2io_close(struct net_device *dev)
flush_scheduled_work();
netif_stop_queue(dev);
/* Reset card, kill tasklet and free Tx and Rx buffers. */
- s2io_card_down(sp, 1);
+ s2io_card_down(sp);
sp->device_close_flag = TRUE; /* Device is shut down. */
return 0;
@@ -3901,13 +3834,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
TxD_t *txdp;
TxFIFO_element_t __iomem *tx_fifo;
unsigned long flags;
-#ifdef NETIF_F_TSO
- int mss;
-#endif
u16 vlan_tag = 0;
int vlan_priority = 0;
mac_info_t *mac_control;
struct config_param *config;
+ int offload_type;
mac_control = &sp->mac_control;
config = &sp->config;
@@ -3955,13 +3886,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
- txdp->Control_1 = 0;
- txdp->Control_2 = 0;
+ offload_type = s2io_offload_type(skb);
#ifdef NETIF_F_TSO
- mss = skb_shinfo(skb)->gso_size;
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
txdp->Control_1 |= TXD_TCP_LSO_EN;
- txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
+ txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
}
#endif
if (skb->ip_summed == CHECKSUM_HW) {
@@ -3979,10 +3908,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
}
frg_len = skb->len - skb->data_len;
- if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) {
+ if (offload_type == SKB_GSO_UDP) {
int ufo_size;
- ufo_size = skb_shinfo(skb)->gso_size;
+ ufo_size = s2io_udp_mss(skb);
ufo_size &= ~7;
txdp->Control_1 |= TXD_UFO_EN;
txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
@@ -3999,16 +3928,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
sp->ufo_in_band_v,
sizeof(u64), PCI_DMA_TODEVICE);
txdp++;
- txdp->Control_1 = 0;
- txdp->Control_2 = 0;
}
txdp->Buffer_Pointer = pci_map_single
(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
txdp->Host_Control = (unsigned long) skb;
txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
-
- if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+ if (offload_type == SKB_GSO_UDP)
txdp->Control_1 |= TXD_UFO_EN;
frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -4023,12 +3949,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
(sp->pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
- if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+ if (offload_type == SKB_GSO_UDP)
txdp->Control_1 |= TXD_UFO_EN;
}
txdp->Control_1 |= TXD_GATHER_CODE_LAST;
- if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+ if (offload_type == SKB_GSO_UDP)
frg_cnt++; /* as Txd0 was used for inband header */
tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -4037,13 +3963,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
TX_FIFO_LAST_LIST);
-
-#ifdef NETIF_F_TSO
- if (mss)
- val64 |= TX_FIFO_SPECIAL_FUNC;
-#endif
- if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+ if (offload_type)
val64 |= TX_FIFO_SPECIAL_FUNC;
+
writeq(val64, &tx_fifo->List_Control);
mmiowb();
@@ -4077,13 +3999,41 @@ s2io_alarm_handle(unsigned long data)
mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
+static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
+{
+ int rxb_size, level;
+
+ if (!sp->lro) {
+ rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
+ level = rx_buffer_level(sp, rxb_size, rng_n);
+
+ if ((level == PANIC) && (!TASKLET_IN_USE)) {
+ int ret;
+ DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
+ DBG_PRINT(INTR_DBG, "PANIC levels\n");
+ if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "Out of memory in %s",
+ __FUNCTION__);
+ clear_bit(0, (&sp->tasklet_status));
+ return -1;
+ }
+ clear_bit(0, (&sp->tasklet_status));
+ } else if (level == LOW)
+ tasklet_schedule(&sp->task);
+
+ } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
+ DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
+ }
+ return 0;
+}
+
static irqreturn_t
s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
nic_t *sp = dev->priv;
int i;
- int ret;
mac_info_t *mac_control;
struct config_param *config;
@@ -4105,35 +4055,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
* reallocate the buffers from the interrupt handler itself,
* else schedule a tasklet to reallocate the buffers.
*/
- for (i = 0; i < config->rx_ring_num; i++) {
- if (!sp->lro) {
- int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
- int level = rx_buffer_level(sp, rxb_size, i);
-
- if ((level == PANIC) && (!TASKLET_IN_USE)) {
- DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
- dev->name);
- DBG_PRINT(INTR_DBG, "PANIC levels\n");
- if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "%s:Out of memory",
- dev->name);
- DBG_PRINT(ERR_DBG, " in ISR!!\n");
- clear_bit(0, (&sp->tasklet_status));
- atomic_dec(&sp->isr_cnt);
- return IRQ_HANDLED;
- }
- clear_bit(0, (&sp->tasklet_status));
- } else if (level == LOW) {
- tasklet_schedule(&sp->task);
- }
- }
- else if (fill_rx_buffers(sp, i) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "%s:Out of memory",
- dev->name);
- DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
- break;
- }
- }
+ for (i = 0; i < config->rx_ring_num; i++)
+ s2io_chk_rx_buffers(sp, i);
atomic_dec(&sp->isr_cnt);
return IRQ_HANDLED;
@@ -4144,39 +4067,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
{
ring_info_t *ring = (ring_info_t *)dev_id;
nic_t *sp = ring->nic;
- struct net_device *dev = (struct net_device *) dev_id;
- int rxb_size, level, rng_n;
atomic_inc(&sp->isr_cnt);
- rx_intr_handler(ring);
- rng_n = ring->ring_no;
- if (!sp->lro) {
- rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
- level = rx_buffer_level(sp, rxb_size, rng_n);
-
- if ((level == PANIC) && (!TASKLET_IN_USE)) {
- int ret;
- DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
- DBG_PRINT(INTR_DBG, "PANIC levels\n");
- if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "Out of memory in %s",
- __FUNCTION__);
- clear_bit(0, (&sp->tasklet_status));
- return IRQ_HANDLED;
- }
- clear_bit(0, (&sp->tasklet_status));
- } else if (level == LOW) {
- tasklet_schedule(&sp->task);
- }
- }
- else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
- DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
- }
+ rx_intr_handler(ring);
+ s2io_chk_rx_buffers(sp, ring->ring_no);
atomic_dec(&sp->isr_cnt);
-
return IRQ_HANDLED;
}
@@ -4341,37 +4238,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
* else schedule a tasklet to reallocate the buffers.
*/
#ifndef CONFIG_S2IO_NAPI
- for (i = 0; i < config->rx_ring_num; i++) {
- if (!sp->lro) {
- int ret;
- int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
- int level = rx_buffer_level(sp, rxb_size, i);
-
- if ((level == PANIC) && (!TASKLET_IN_USE)) {
- DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
- dev->name);
- DBG_PRINT(INTR_DBG, "PANIC levels\n");
- if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "%s:Out of memory",
- dev->name);
- DBG_PRINT(ERR_DBG, " in ISR!!\n");
- clear_bit(0, (&sp->tasklet_status));
- atomic_dec(&sp->isr_cnt);
- writeq(org_mask, &bar0->general_int_mask);
- return IRQ_HANDLED;
- }
- clear_bit(0, (&sp->tasklet_status));
- } else if (level == LOW) {
- tasklet_schedule(&sp->task);
- }
- }
- else if (fill_rx_buffers(sp, i) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "%s:Out of memory",
- dev->name);
- DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
- break;
- }
- }
+ for (i = 0; i < config->rx_ring_num; i++)
+ s2io_chk_rx_buffers(sp, i);
#endif
writeq(org_mask, &bar0->general_int_mask);
atomic_dec(&sp->isr_cnt);
@@ -4401,6 +4269,8 @@ static void s2io_updt_stats(nic_t *sp)
if (cnt == 5)
break; /* Updt failed */
} while(1);
+ } else {
+ memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t));
}
}
@@ -5035,7 +4905,8 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
}
static void s2io_vpd_read(nic_t *nic)
{
- u8 vpd_data[256],data;
+ u8 *vpd_data;
+ u8 data;
int i=0, cnt, fail = 0;
int vpd_addr = 0x80;
@@ -5048,6 +4919,10 @@ static void s2io_vpd_read(nic_t *nic)
vpd_addr = 0x50;
}
+ vpd_data = kmalloc(256, GFP_KERNEL);
+ if (!vpd_data)
+ return;
+
for (i = 0; i < 256; i +=4 ) {
pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
@@ -5070,6 +4945,7 @@ static void s2io_vpd_read(nic_t *nic)
memset(nic->product_name, 0, vpd_data[1]);
memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
}
+ kfree(vpd_data);
}
/**
@@ -5388,7 +5264,7 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
else
*data = 0;
- return 0;
+ return *data;
}
/**
@@ -5846,6 +5722,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
return 0;
}
+static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_TSO) != 0;
+}
+static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return 0;
+}
static struct ethtool_ops netdev_ethtool_ops = {
.get_settings = s2io_ethtool_gset,
@@ -5866,8 +5755,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
- .get_tso = ethtool_op_get_tso,
- .set_tso = ethtool_op_set_tso,
+ .get_tso = s2io_ethtool_op_get_tso,
+ .set_tso = s2io_ethtool_op_set_tso,
#endif
.get_ufo = ethtool_op_get_ufo,
.set_ufo = ethtool_op_set_ufo,
@@ -5919,7 +5808,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
if (netif_running(dev)) {
- s2io_card_down(sp, 0);
+ s2io_card_down(sp);
netif_stop_queue(dev);
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
@@ -6216,43 +6105,106 @@ static int rxd_owner_bit_reset(nic_t *sp)
}
-static void s2io_card_down(nic_t * sp, int flag)
+static int s2io_add_isr(nic_t * sp)
{
- int cnt = 0;
- XENA_dev_config_t __iomem *bar0 = sp->bar0;
- unsigned long flags;
- register u64 val64 = 0;
+ int ret = 0;
struct net_device *dev = sp->dev;
+ int err = 0;
- del_timer_sync(&sp->alarm_timer);
- /* If s2io_set_link task is executing, wait till it completes. */
- while (test_and_set_bit(0, &(sp->link_state))) {
- msleep(50);
+ if (sp->intr_type == MSI)
+ ret = s2io_enable_msi(sp);
+ else if (sp->intr_type == MSI_X)
+ ret = s2io_enable_msi_x(sp);
+ if (ret) {
+ DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
+ sp->intr_type = INTA;
}
- atomic_set(&sp->card_state, CARD_DOWN);
- /* disable Tx and Rx traffic on the NIC */
- stop_nic(sp);
- if (flag) {
- if (sp->intr_type == MSI_X) {
- int i;
- u16 msi_control;
+ /* Store the values of the MSIX table in the nic_t structure */
+ store_xmsi_data(sp);
- for (i=1; (sp->s2io_entries[i].in_use ==
- MSIX_REGISTERED_SUCCESS); i++) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
+ /* After proper initialization of H/W, register ISR */
+ if (sp->intr_type == MSI) {
+ err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
+ IRQF_SHARED, sp->name, dev);
+ if (err) {
+ pci_disable_msi(sp->pdev);
+ DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
+ dev->name);
+ return -1;
+ }
+ }
+ if (sp->intr_type == MSI_X) {
+ int i;
- free_irq(vector, arg);
+ for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
+ if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_fifo_handle, 0, sp->desc[i],
+ sp->s2io_entries[i].arg);
+ DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
+ (unsigned long long)sp->msix_info[i].addr);
+ } else {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_ring_handle, 0, sp->desc[i],
+ sp->s2io_entries[i].arg);
+ DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
+ (unsigned long long)sp->msix_info[i].addr);
}
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
- pci_disable_msix(sp->pdev);
- } else {
- free_irq(sp->pdev->irq, dev);
- if (sp->intr_type == MSI)
- pci_disable_msi(sp->pdev);
+ if (err) {
+ DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
+ "failed\n", dev->name, i);
+ DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
+ return -1;
+ }
+ sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
+ }
+ }
+ if (sp->intr_type == INTA) {
+ err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
+ sp->name, dev);
+ if (err) {
+ DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
+ dev->name);
+ return -1;
+ }
+ }
+ return 0;
+}
+static void s2io_rem_isr(nic_t * sp)
+{
+ int cnt = 0;
+ struct net_device *dev = sp->dev;
+
+ if (sp->intr_type == MSI_X) {
+ int i;
+ u16 msi_control;
+
+ for (i=1; (sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS); i++) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
+
+ free_irq(vector, arg);
+ }
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
+
+ pci_disable_msix(sp->pdev);
+ } else {
+ free_irq(sp->pdev->irq, dev);
+ if (sp->intr_type == MSI) {
+ u16 val;
+
+ pci_disable_msi(sp->pdev);
+ pci_read_config_word(sp->pdev, 0x4c, &val);
+ val ^= 0x1;
+ pci_write_config_word(sp->pdev, 0x4c, val);
}
}
/* Waiting till all Interrupt handlers are complete */
@@ -6263,6 +6215,26 @@ static void s2io_card_down(nic_t * sp, int flag)
break;
cnt++;
} while(cnt < 5);
+}
+
+static void s2io_card_down(nic_t * sp)
+{
+ int cnt = 0;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ unsigned long flags;
+ register u64 val64 = 0;
+
+ del_timer_sync(&sp->alarm_timer);
+ /* If s2io_set_link task is executing, wait till it completes. */
+ while (test_and_set_bit(0, &(sp->link_state))) {
+ msleep(50);
+ }
+ atomic_set(&sp->card_state, CARD_DOWN);
+
+ /* disable Tx and Rx traffic on the NIC */
+ stop_nic(sp);
+
+ s2io_rem_isr(sp);
/* Kill tasklet. */
tasklet_kill(&sp->task);
@@ -6314,23 +6286,16 @@ static int s2io_card_up(nic_t * sp)
mac_info_t *mac_control;
struct config_param *config;
struct net_device *dev = (struct net_device *) sp->dev;
+ u16 interruptible;
/* Initialize the H/W I/O registers */
if (init_nic(sp) != 0) {
DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
dev->name);
+ s2io_reset(sp);
return -ENODEV;
}
- if (sp->intr_type == MSI)
- ret = s2io_enable_msi(sp);
- else if (sp->intr_type == MSI_X)
- ret = s2io_enable_msi_x(sp);
- if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
- sp->intr_type = INTA;
- }
-
/*
* Initializing the Rx buffers. For now we are considering only 1
* Rx ring and initializing buffers into 30 Rx blocks
@@ -6354,28 +6319,46 @@ static int s2io_card_up(nic_t * sp)
s2io_set_multicast(dev);
if (sp->lro) {
- /* Initialize max aggregatable pkts based on MTU */
+ /* Initialize max aggregatable pkts per session based on MTU */
sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
/* Check if we can use(if specified) user provided value */
if (lro_max_pkts < sp->lro_max_aggr_per_sess)
sp->lro_max_aggr_per_sess = lro_max_pkts;
}
- /* Enable tasklet for the device */
- tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
-
/* Enable Rx Traffic and interrupts on the NIC */
if (start_nic(sp)) {
DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
- tasklet_kill(&sp->task);
s2io_reset(sp);
- free_irq(dev->irq, dev);
+ free_rx_buffers(sp);
+ return -ENODEV;
+ }
+
+ /* Add interrupt service routine */
+ if (s2io_add_isr(sp) != 0) {
+ if (sp->intr_type == MSI_X)
+ s2io_rem_isr(sp);
+ s2io_reset(sp);
free_rx_buffers(sp);
return -ENODEV;
}
S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
+ /* Enable tasklet for the device */
+ tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
+
+ /* Enable select interrupts */
+ if (sp->intr_type != INTA)
+ en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
+ else {
+ interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
+ interruptible |= TX_PIC_INTR | RX_PIC_INTR;
+ interruptible |= TX_MAC_INTR | RX_MAC_INTR;
+ en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
+ }
+
+
atomic_set(&sp->card_state, CARD_UP);
return 0;
}
@@ -6395,7 +6378,7 @@ static void s2io_restart_nic(unsigned long data)
struct net_device *dev = (struct net_device *) data;
nic_t *sp = dev->priv;
- s2io_card_down(sp, 0);
+ s2io_card_down(sp);
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
dev->name);
@@ -6437,7 +6420,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
* @cksum : FCS checksum of the frame.
* @ring_no : the ring from which this RxD was extracted.
* Description:
- * This function is called by the Tx interrupt serivce routine to perform
+ * This function is called by the Rx interrupt service routine to perform
* some OS related operations on the SKB before passing it to the upper
* layers. It mainly checks if the checksum is OK, if so adds it to the
* SKBs cksum variable, increments the Rx packet count and passes the SKB
@@ -6697,33 +6680,6 @@ static void s2io_init_pci(nic_t * sp)
pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
}
-MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_param(tx_fifo_num, int, 0);
-module_param(rx_ring_num, int, 0);
-module_param(rx_ring_mode, int, 0);
-module_param_array(tx_fifo_len, uint, NULL, 0);
-module_param_array(rx_ring_sz, uint, NULL, 0);
-module_param_array(rts_frm_len, uint, NULL, 0);
-module_param(use_continuous_tx_intrs, int, 1);
-module_param(rmac_pause_time, int, 0);
-module_param(mc_pause_threshold_q0q3, int, 0);
-module_param(mc_pause_threshold_q4q7, int, 0);
-module_param(shared_splits, int, 0);
-module_param(tmac_util_period, int, 0);
-module_param(rmac_util_period, int, 0);
-module_param(bimodal, bool, 0);
-module_param(l3l4hdr_size, int , 0);
-#ifndef CONFIG_S2IO_NAPI
-module_param(indicate_max_pkts, int, 0);
-#endif
-module_param(rxsync_frequency, int, 0);
-module_param(intr_type, int, 0);
-module_param(lro, int, 0);
-module_param(lro_max_pkts, int, 0);
-
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
{
if ( tx_fifo_num > 8) {
@@ -6831,8 +6787,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
}
if (dev_intr_type != MSI_X) {
if (pci_request_regions(pdev, s2io_driver_name)) {
- DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
- pci_disable_device(pdev);
+ DBG_PRINT(ERR_DBG, "Request Regions failed\n");
+ pci_disable_device(pdev);
return -ENODEV;
}
}
@@ -6956,7 +6912,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* initialize the shared memory used by the NIC and the host */
if (init_shared_mem(sp)) {
DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
- __FUNCTION__);
+ dev->name);
ret = -ENOMEM;
goto mem_alloc_failed;
}
@@ -7093,6 +7049,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
dev->addr_len = ETH_ALEN;
memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
+ /* reset Nic and bring it to known state */
+ s2io_reset(sp);
+
/*
* Initialize the tasklet status and link state flags
* and the card state parameter
@@ -7130,11 +7089,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
goto register_failed;
}
s2io_vpd_read(sp);
- DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name);
- DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
- get_xena_rev_id(sp->pdev),
- s2io_driver_version);
DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
+ DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
+ sp->product_name, get_xena_rev_id(sp->pdev));
+ DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
+ s2io_driver_version);
DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
"%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
sp->def_mac_addr[0].mac_addr[0],
@@ -7435,8 +7394,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
if (ip->ihl != 5) /* IP has options */
return -1;
+ /* If we see CE codepoint in IP header, packet is not mergeable */
+ if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
+ return -1;
+
+ /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
- !tcp->ack) {
+ tcp->ece || tcp->cwr || !tcp->ack) {
/*
* Currently recognize only the ack control word and
* any other control field being set would result in
@@ -7590,18 +7554,16 @@ static void queue_rx_frame(struct sk_buff *skb)
static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
u32 tcp_len)
{
- struct sk_buff *tmp, *first = lro->parent;
+ struct sk_buff *first = lro->parent;
first->len += tcp_len;
first->data_len = lro->frags_len;
skb_pull(skb, (skb->len - tcp_len));
- if ((tmp = skb_shinfo(first)->frag_list)) {
- while (tmp->next)
- tmp = tmp->next;
- tmp->next = skb;
- }
+ if (skb_shinfo(first)->frag_list)
+ lro->last_frag->next = skb;
else
skb_shinfo(first)->frag_list = skb;
+ lro->last_frag = skb;
sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
return;
}
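
The new last_frag pointer is what makes the change above worthwhile: previously every aggregated segment re-walked the parent skb's frag_list to find the tail, so appending the n-th segment cost O(n); caching the tail makes each append O(1). In outline (an annotated restatement of the lines above, not new driver code):

	/* tail-pointer append: constant time per aggregated segment */
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;		/* jump straight to the cached tail */
	else
		skb_shinfo(first)->frag_list = skb;	/* first fragment starts the list */
	lro->last_frag = skb;				/* remember the new tail */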
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index c43f52179708..5ed49c3be1e9 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -719,6 +719,7 @@ struct msix_info_st {
/* Data structure to represent a LRO session */
typedef struct lro {
struct sk_buff *parent;
+ struct sk_buff *last_frag;
u8 *l2h;
struct iphdr *iph;
struct tcphdr *tcph;
@@ -829,8 +830,7 @@ struct s2io_nic {
#define MSIX_FLG 0xA5
struct msix_entry *entries;
struct s2io_msix_entry *s2io_entries;
- char desc1[35];
- char desc2[35];
+ char desc[MAX_REQUESTED_MSI_X][25];
int avail_msix_vectors; /* No. of MSI-X vectors granted by system */
@@ -1002,7 +1002,7 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
static struct ethtool_ops netdev_ethtool_ops;
static void s2io_set_link(unsigned long data);
static int s2io_set_swapper(nic_t * sp);
-static void s2io_card_down(nic_t *nic, int flag);
+static void s2io_card_down(nic_t *nic);
static int s2io_card_up(nic_t *nic);
static int get_xena_rev_id(struct pci_dev *pdev);
static void restore_xmsi_data(nic_t *nic);
@@ -1012,4 +1012,13 @@ static void clear_lro_session(lro_t *lro);
static void queue_rx_frame(struct sk_buff *skb);
static void update_L3L4_header(nic_t *sp, lro_t *lro);
static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len);
+
+#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
+#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
+#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
+
+#define S2IO_PARM_INT(X, def_val) \
+ static unsigned int X = def_val;\
+ module_param(X , uint, 0);
+
#endif /* _S2IO_H */
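
The S2IO_PARM_INT() macro is what replaces the block of module_param() calls deleted from s2io.c earlier in this diff: each parameter becomes a one-line declaration plus registration. A sketch of the intended usage follows; the default values shown are placeholders, not necessarily the driver's real defaults.

/* hypothetical examples -- defaults are illustrative only */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(lro, 0);

/* S2IO_PARM_INT(lro, 0) expands to roughly:
 *	static unsigned int lro = 0;
 *	module_param(lro, uint, 0);
 */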
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index efd0f235020f..01392bca0223 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -742,7 +742,7 @@ module_param(irq, int, 0);
MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
-int init_module(void)
+int __init init_module(void)
{
dev_seeq = seeq8005_probe(-1);
if (IS_ERR(dev_seeq))
diff --git a/drivers/net/sk98lin/h/xmac_ii.h b/drivers/net/sk98lin/h/xmac_ii.h
index 2b19f8ad0318..7f8e6d0084c7 100644
--- a/drivers/net/sk98lin/h/xmac_ii.h
+++ b/drivers/net/sk98lin/h/xmac_ii.h
@@ -1473,7 +1473,7 @@ extern "C" {
#define GM_TXCR_FORCE_JAM (1<<15) /* Bit 15: Force Jam / Flow-Control */
#define GM_TXCR_CRC_DIS (1<<14) /* Bit 14: Disable insertion of CRC */
#define GM_TXCR_PAD_DIS (1<<13) /* Bit 13: Disable padding of packets */
-#define GM_TXCR_COL_THR_MSK (1<<10) /* Bit 12..10: Collision Threshold */
+#define GM_TXCR_COL_THR_MSK (7<<10) /* Bit 12..10: Collision Threshold */
#define TX_COL_THR(x) (SHIFT10(x) & GM_TXCR_COL_THR_MSK)
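
The widened mask is more than cosmetic: the collision threshold occupies the three bits 12..10, and with the old one-bit mask the default value was silently masked away. Assuming SHIFT10(x) is ((x)<<10), as in the related skge/sky2 headers, the arithmetic is:

/* TX_COL_DEF = 0x04, i.e. bit 12 once shifted into the field */
/* old: (0x04 << 10) & (1 << 10)  ==  0x1000 & 0x0400  ==  0x0000   -- value lost  */
/* new: (0x04 << 10) & (7 << 10)  ==  0x1000 & 0x1C00  ==  0x1000   -- bit 12 kept */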
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 82200bfaa8ed..ad878dfddef4 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev,
/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
- if (hw->chip_id == CHIP_ID_GENESIS)
- return 53215; /* or: 53.125 MHz */
- else
- return 78215; /* or: 78.125 MHz */
+ return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
}
/* Chip HZ to microseconds */
@@ -2214,6 +2211,7 @@ static int skge_up(struct net_device *dev)
skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
skge_led(skge, LED_MODE_ON);
+ netif_poll_enable(dev);
return 0;
free_rx_ring:
@@ -2282,6 +2280,7 @@ static int skge_down(struct net_device *dev)
skge_led(skge, LED_MODE_OFF);
+ netif_poll_disable(dev);
skge_tx_clean(skge);
skge_rx_clean(skge);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index ed19ff47ce11..593387b3c0dd 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -1734,11 +1734,11 @@ enum {
GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
- GM_TXCR_COL_THR_MSK = 1<<10, /* Bit 12..10: Collision Threshold */
+ GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
};
#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
-#define TX_COL_DEF 0x04
+#define TX_COL_DEF		0x04	/* late collision after 64 bytes */
/* GM_RX_CTRL 16 bit r/w Receive Control Register */
enum {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 418f169a6a31..933e87f1cc68 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.4"
+#define DRV_VERSION "1.5"
#define PFX DRV_NAME " "
/*
@@ -65,6 +65,7 @@
#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
#define RX_DEF_PENDING RX_MAX_PENDING
#define RX_SKB_ALIGN 8
+#define RX_BUF_WRITE 16
#define TX_RING_SIZE 512
#define TX_DEF_PENDING (TX_RING_SIZE - 1)
@@ -232,9 +233,10 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
if (hw->ports > 1)
reg1 |= PCI_Y2_PHY2_COMA;
}
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ udelay(100);
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
- sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
sky2_pci_write32(hw, PCI_DEV_REG3, 0);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
reg1 &= P_ASPM_CONTROL_MSK;
@@ -242,8 +244,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
sky2_pci_write32(hw, PCI_DEV_REG5, 0);
}
- sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
-
break;
case PCI_D3hot:
@@ -255,6 +255,7 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
else
reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ udelay(100);
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
@@ -1159,7 +1160,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
count = sizeof(dma_addr_t) / sizeof(u32);
count += skb_shinfo(skb)->nr_frags * count;
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
++count;
if (skb->ip_summed == CHECKSUM_HW)
@@ -1389,7 +1390,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
}
sky2->tx_cons = put;
- if (tx_avail(sky2) > MAX_SKB_TX_LE)
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
netif_wake_queue(dev);
}
@@ -1888,9 +1889,6 @@ resubmit:
re->skb->ip_summed = CHECKSUM_NONE;
sky2_rx_add(sky2, re->mapaddr);
- /* Tell receiver about new buffers. */
- sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put);
-
return skb;
oversize:
@@ -1937,7 +1935,9 @@ static inline int sky2_more_work(const struct sky2_hw *hw)
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do)
{
+ struct sky2_port *sky2;
int work_done = 0;
+ unsigned buf_write[2] = { 0, 0 };
u16 hwidx = sky2_read16(hw, STAT_PUT_IDX);
rmb();
@@ -1945,7 +1945,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
while (hw->st_idx != hwidx) {
struct sky2_status_le *le = hw->st_le + hw->st_idx;
struct net_device *dev;
- struct sky2_port *sky2;
struct sk_buff *skb;
u32 status;
u16 length;
@@ -1978,6 +1977,14 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
#endif
netif_receive_skb(skb);
+ /* Update receiver after 16 frames */
+ if (++buf_write[le->link] == RX_BUF_WRITE) {
+ sky2_put_idx(hw, rxqaddr[le->link],
+ sky2->rx_put);
+ buf_write[le->link] = 0;
+ }
+
+ /* Stop after net poll weight */
if (++work_done >= to_do)
goto exit_loop;
break;
@@ -2016,6 +2023,16 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
}
exit_loop:
+ if (buf_write[0]) {
+ sky2 = netdev_priv(hw->dev[0]);
+ sky2_put_idx(hw, Q_R1, sky2->rx_put);
+ }
+
+ if (buf_write[1]) {
+ sky2 = netdev_priv(hw->dev[1]);
+ sky2_put_idx(hw, Q_R2, sky2->rx_put);
+ }
+
return work_done;
}
@@ -2186,9 +2203,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
int work_done = 0;
u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
- if (!~status)
- goto out;
-
if (status & Y2_IS_HW_ERR)
sky2_hw_intr(hw);
@@ -2225,7 +2239,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
if (sky2_more_work(hw))
return 1;
-out:
+
netif_rx_complete(dev0);
sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2286,7 +2300,7 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
}
-static int __devinit sky2_reset(struct sky2_hw *hw)
+static int sky2_reset(struct sky2_hw *hw)
{
u16 status;
u8 t8, pmd_type;
@@ -3437,17 +3451,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
return -EINVAL;
del_timer_sync(&hw->idle_timer);
+ netif_poll_disable(hw->dev[0]);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
- if (dev) {
- if (!netif_running(dev))
- continue;
-
+ if (netif_running(dev)) {
sky2_down(dev);
netif_device_detach(dev);
- netif_poll_disable(dev);
}
}
@@ -3474,9 +3485,8 @@ static int sky2_resume(struct pci_dev *pdev)
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
- if (dev && netif_running(dev)) {
+ if (netif_running(dev)) {
netif_device_attach(dev);
- netif_poll_enable(dev);
err = sky2_up(dev);
if (err) {
@@ -3488,6 +3498,7 @@ static int sky2_resume(struct pci_dev *pdev)
}
}
+ netif_poll_enable(hw->dev[0]);
sky2_idle_start(hw);
out:
return err;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 8a0bc5525f0a..2db8d19b22d1 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1480,7 +1480,7 @@ enum {
GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
- GM_TXCR_COL_THR_MSK = 1<<10, /* Bit 12..10: Collision Threshold */
+ GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
};
#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index d37bd860b336..0b15290df278 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1092,6 +1092,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs
/* Spurious interrupt check */
if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
(INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
return IRQ_NONE;
}
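
The added unlock closes a lock leak: the handler takes lp->lock with spin_lock_irqsave() on entry, so the spurious-interrupt early return also has to drop it, otherwise the CPU leaves the ISR with the spinlock held and local interrupts still disabled. The pattern, as a sketch rather than the driver's full handler (names marked hypothetical):

static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct example_priv *lp = dev_id;	/* hypothetical private struct */
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	if (irq_is_spurious(lp)) {		/* hypothetical check */
		/* the fix: release before every early return */
		spin_unlock_irqrestore(&lp->lock, flags);
		return IRQ_NONE;
	}

	/* ... normal interrupt handling ... */

	spin_unlock_irqrestore(&lp->lock, flags);
	return IRQ_HANDLED;
}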
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 3d8dcb6c8758..cf62373b808b 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -321,12 +321,12 @@ static void smc_reset(struct net_device *dev)
DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
/* Disable all interrupts, block TX tasklet */
- spin_lock(&lp->lock);
+ spin_lock_irq(&lp->lock);
SMC_SELECT_BANK(2);
SMC_SET_INT_MASK(0);
pending_skb = lp->pending_tx_skb;
lp->pending_tx_skb = NULL;
- spin_unlock(&lp->lock);
+ spin_unlock_irq(&lp->lock);
/* free any pending tx skb */
if (pending_skb) {
@@ -448,12 +448,12 @@ static void smc_shutdown(struct net_device *dev)
DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
/* no more interrupts for me */
- spin_lock(&lp->lock);
+ spin_lock_irq(&lp->lock);
SMC_SELECT_BANK(2);
SMC_SET_INT_MASK(0);
pending_skb = lp->pending_tx_skb;
lp->pending_tx_skb = NULL;
- spin_unlock(&lp->lock);
+ spin_unlock_irq(&lp->lock);
if (pending_skb)
dev_kfree_skb(pending_skb);
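
The switch to the _irq lock variants matters because lp->lock is also used from interrupt context in this driver: a process-context path holding it with plain spin_lock() can deadlock against an interrupt arriving on the same CPU. A condensed sketch of the rule (illustrative names):

spinlock_t lock;			/* also acquired in the ISR */

void process_context_path(void)
{
	spin_lock_irq(&lock);		/* was plain spin_lock() */
	/* ... touch state shared with the interrupt handler ... */
	spin_unlock_irq(&lock);
}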
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index b4028049ed76..7aa7fbac8224 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -136,14 +136,9 @@
#define SMC_CAN_USE_32BIT 0
#define SMC_IO_SHIFT 0
#define SMC_NOWAIT 1
-#define SMC_USE_PXA_DMA 1
-#define SMC_inb(a, r) readb((a) + (r))
#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
@@ -189,16 +184,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_IO_SHIFT 0
#define SMC_NOWAIT 1
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
#define SMC_inw(a, r) readw((a) + (r))
#define SMC_outw(v, a, r) writew(v, (a) + (r))
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
#include <asm/mach-types.h>
#include <asm/arch/cpu.h>
@@ -354,6 +343,42 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
#define SMC_IRQ_FLAGS (0)
+#elif defined(CONFIG_ARCH_VERSATILE)
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_NOWAIT 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS (0)
+
#else
#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index fb1d5a8a45cf..88907218457a 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
*
* returns the content of the specified SMMIO register.
*/
-static u32
+static inline u32
spider_net_read_reg(struct spider_net_card *card, u32 reg)
{
u32 value;
@@ -101,7 +101,7 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg)
* @reg: register to write to
* @value: value to write into the specified SMMIO register
*/
-static void
+static inline void
spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
{
value = cpu_to_le32(value);
@@ -259,39 +259,10 @@ spider_net_get_mac_address(struct net_device *netdev)
*
* returns the status as in the dmac_cmd_status field of the descriptor
*/
-static enum spider_net_descr_status
+static inline int
spider_net_get_descr_status(struct spider_net_descr *descr)
{
- u32 cmd_status;
-
- cmd_status = descr->dmac_cmd_status;
- cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
- /* no need to mask out any bits, as cmd_status is 32 bits wide only
- * (and unsigned) */
- return cmd_status;
-}
-
-/**
- * spider_net_set_descr_status -- sets the status of a descriptor
- * @descr: descriptor to change
- * @status: status to set in the descriptor
- *
- * changes the status to the specified value. Doesn't change other bits
- * in the status
- */
-static void
-spider_net_set_descr_status(struct spider_net_descr *descr,
- enum spider_net_descr_status status)
-{
- u32 cmd_status;
- /* read the status */
- cmd_status = descr->dmac_cmd_status;
- /* clean the upper 4 bits */
- cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
- /* add the status to it */
- cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
- /* and write it back */
- descr->dmac_cmd_status = cmd_status;
+ return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
}
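
The accessor collapses to one mask because the status values now carry their bit position with them: each old 4-bit enum value, shifted left by the old SPIDER_NET_DESCR_IND_PROC_SHIFT of 28, equals the corresponding absolute constant introduced in spider_net.h later in this patch. A quick equivalence check:

/* old 4-bit value           shifted into bits 31..28      new define                        */
/* CARDOWNED       0x0a  ->  0x0a << 28 == 0xA0000000  ==  SPIDER_NET_DESCR_CARDOWNED        */
/* RESPONSE_ERROR  0x01  ->  0x01 << 28 == 0x10000000  ==  SPIDER_NET_DESCR_RESPONSE_ERROR   */
/* FORCE_END       0x05  ->  0x05 << 28 == 0x50000000  ==  SPIDER_NET_DESCR_FORCE_END        */

/* hence (dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK), with the mask
 * 0xF0000000, distinguishes the same states without the shift or the
 * removed write-back helper. */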
/**
@@ -328,24 +299,23 @@ spider_net_free_chain(struct spider_net_card *card,
static int
spider_net_init_chain(struct spider_net_card *card,
struct spider_net_descr_chain *chain,
- struct spider_net_descr *start_descr, int no)
+ struct spider_net_descr *start_descr,
+ int direction, int no)
{
int i;
struct spider_net_descr *descr;
dma_addr_t buf;
- atomic_set(&card->rx_chain_refill,0);
-
descr = start_descr;
memset(descr, 0, sizeof(*descr) * no);
/* set up the hardware pointers in each descriptor */
for (i=0; i<no; i++, descr++) {
- spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
+ descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
buf = pci_map_single(card->pdev, descr,
SPIDER_NET_DESCR_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ direction);
if (buf == DMA_ERROR_CODE)
goto iommu_error;
@@ -360,10 +330,11 @@ spider_net_init_chain(struct spider_net_card *card,
start_descr->prev = descr-1;
descr = start_descr;
- for (i=0; i < no; i++, descr++) {
- descr->next_descr_addr = descr->next->bus_addr;
- }
+ if (direction == PCI_DMA_FROMDEVICE)
+ for (i=0; i < no; i++, descr++)
+ descr->next_descr_addr = descr->next->bus_addr;
+ spin_lock_init(&chain->lock);
chain->head = start_descr;
chain->tail = start_descr;
@@ -375,7 +346,7 @@ iommu_error:
if (descr->bus_addr)
pci_unmap_single(card->pdev, descr->bus_addr,
SPIDER_NET_DESCR_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ direction);
return -ENOMEM;
}
@@ -396,7 +367,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
dev_kfree_skb(descr->skb);
pci_unmap_single(card->pdev, descr->buf_addr,
SPIDER_NET_MAX_FRAME,
- PCI_DMA_BIDIRECTIONAL);
+ PCI_DMA_FROMDEVICE);
}
descr = descr->next;
}
@@ -446,15 +417,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
/* io-mmu-map the skb */
buf = pci_map_single(card->pdev, descr->skb->data,
- SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+ SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
descr->buf_addr = buf;
if (buf == DMA_ERROR_CODE) {
dev_kfree_skb_any(descr->skb);
if (netif_msg_rx_err(card) && net_ratelimit())
pr_err("Could not iommu-map rx buffer\n");
- spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
+ descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
} else {
- descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED;
+ descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
+ SPIDER_NET_DMAC_NOINTR_COMPLETE;
}
return error;
@@ -468,7 +440,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
* chip by writing to the appropriate register. DMA is enabled in
* spider_net_enable_rxdmac.
*/
-static void
+static inline void
spider_net_enable_rxchtails(struct spider_net_card *card)
{
/* assume chain is aligned correctly */
@@ -483,7 +455,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
* spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
* in the GDADMACCNTR register
*/
-static void
+static inline void
spider_net_enable_rxdmac(struct spider_net_card *card)
{
wmb();
@@ -500,23 +472,24 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
static void
spider_net_refill_rx_chain(struct spider_net_card *card)
{
- struct spider_net_descr_chain *chain;
-
- chain = &card->rx_chain;
+ struct spider_net_descr_chain *chain = &card->rx_chain;
+ unsigned long flags;
/* one context doing the refill (and a second context seeing that
* and omitting it) is ok. If called by NAPI, we'll be called again
* as spider_net_decode_one_descr is called several times. If some
* interrupt calls us, the NAPI is about to clean up anyway. */
- if (atomic_inc_return(&card->rx_chain_refill) == 1)
- while (spider_net_get_descr_status(chain->head) ==
- SPIDER_NET_DESCR_NOT_IN_USE) {
- if (spider_net_prepare_rx_descr(card, chain->head))
- break;
- chain->head = chain->head->next;
- }
+ if (!spin_trylock_irqsave(&chain->lock, flags))
+ return;
+
+ while (spider_net_get_descr_status(chain->head) ==
+ SPIDER_NET_DESCR_NOT_IN_USE) {
+ if (spider_net_prepare_rx_descr(card, chain->head))
+ break;
+ chain->head = chain->head->next;
+ }
- atomic_dec(&card->rx_chain_refill);
+ spin_unlock_irqrestore(&chain->lock, flags);
}
/**
@@ -554,111 +527,6 @@ error:
}
/**
- * spider_net_release_tx_descr - processes a used tx descriptor
- * @card: card structure
- * @descr: descriptor to release
- *
- * releases a used tx descriptor (unmapping, freeing of skb)
- */
-static void
-spider_net_release_tx_descr(struct spider_net_card *card,
- struct spider_net_descr *descr)
-{
- struct sk_buff *skb;
-
- /* unmap the skb */
- skb = descr->skb;
- pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
- PCI_DMA_BIDIRECTIONAL);
-
- dev_kfree_skb_any(skb);
-
- /* set status to not used */
- spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
-}
-
-/**
- * spider_net_release_tx_chain - processes sent tx descriptors
- * @card: adapter structure
- * @brutal: if set, don't care about whether descriptor seems to be in use
- *
- * returns 0 if the tx ring is empty, otherwise 1.
- *
- * spider_net_release_tx_chain releases the tx descriptors that spider has
- * finished with (if non-brutal) or simply release tx descriptors (if brutal).
- * If some other context is calling this function, we return 1 so that we're
- * scheduled again (if we were scheduled) and will not loose initiative.
- */
-static int
-spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
-{
- struct spider_net_descr_chain *tx_chain = &card->tx_chain;
- enum spider_net_descr_status status;
-
- if (atomic_inc_return(&card->tx_chain_release) != 1) {
- atomic_dec(&card->tx_chain_release);
- return 1;
- }
-
- for (;;) {
- status = spider_net_get_descr_status(tx_chain->tail);
- switch (status) {
- case SPIDER_NET_DESCR_CARDOWNED:
- if (!brutal)
- goto out;
- /* fallthrough, if we release the descriptors
- * brutally (then we don't care about
- * SPIDER_NET_DESCR_CARDOWNED) */
- case SPIDER_NET_DESCR_RESPONSE_ERROR:
- case SPIDER_NET_DESCR_PROTECTION_ERROR:
- case SPIDER_NET_DESCR_FORCE_END:
- if (netif_msg_tx_err(card))
- pr_err("%s: forcing end of tx descriptor "
- "with status x%02x\n",
- card->netdev->name, status);
- card->netdev_stats.tx_dropped++;
- break;
-
- case SPIDER_NET_DESCR_COMPLETE:
- card->netdev_stats.tx_packets++;
- card->netdev_stats.tx_bytes +=
- tx_chain->tail->skb->len;
- break;
-
- default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */
- goto out;
- }
- spider_net_release_tx_descr(card, tx_chain->tail);
- tx_chain->tail = tx_chain->tail->next;
- }
-out:
- atomic_dec(&card->tx_chain_release);
-
- netif_wake_queue(card->netdev);
-
- if (status == SPIDER_NET_DESCR_CARDOWNED)
- return 1;
- return 0;
-}
-
-/**
- * spider_net_cleanup_tx_ring - cleans up the TX ring
- * @card: card structure
- *
- * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
- * interrupts to cleanup our TX ring) and returns sent packets to the stack
- * by freeing them
- */
-static void
-spider_net_cleanup_tx_ring(struct spider_net_card *card)
-{
- if ( (spider_net_release_tx_chain(card, 0)) &&
- (card->netdev->flags & IFF_UP) ) {
- mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
- }
-}
-
-/**
* spider_net_get_multicast_hash - generates hash for multicast filter table
* @addr: multicast address
*
@@ -761,97 +629,6 @@ spider_net_disable_rxdmac(struct spider_net_card *card)
}
/**
- * spider_net_stop - called upon ifconfig down
- * @netdev: interface device structure
- *
- * always returns 0
- */
-int
-spider_net_stop(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- tasklet_kill(&card->rxram_full_tl);
- netif_poll_disable(netdev);
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- del_timer_sync(&card->tx_timer);
-
- /* disable/mask all interrupts */
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
-
- /* free_irq(netdev->irq, netdev);*/
- free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_FEND_VALUE);
-
- /* turn off DMA, force end */
- spider_net_disable_rxdmac(card);
-
- /* release chains */
- spider_net_release_tx_chain(card, 1);
-
- spider_net_free_chain(card, &card->tx_chain);
- spider_net_free_chain(card, &card->rx_chain);
-
- return 0;
-}
-
-/**
- * spider_net_get_next_tx_descr - returns the next available tx descriptor
- * @card: device structure to get descriptor from
- *
- * returns the address of the next descriptor, or NULL if not available.
- */
-static struct spider_net_descr *
-spider_net_get_next_tx_descr(struct spider_net_card *card)
-{
- /* check, if head points to not-in-use descr */
- if ( spider_net_get_descr_status(card->tx_chain.head) ==
- SPIDER_NET_DESCR_NOT_IN_USE ) {
- return card->tx_chain.head;
- } else {
- return NULL;
- }
-}
-
-/**
- * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field
- * @descr: descriptor structure to fill out
- * @skb: packet to consider
- *
- * fills out the command and status field of the descriptor structure,
- * depending on hardware checksum settings.
- */
-static void
-spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
- struct sk_buff *skb)
-{
- /* make sure the other fields in the descriptor are written */
- wmb();
-
- if (skb->ip_summed != CHECKSUM_HW) {
- descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
- return;
- }
-
- /* is packet ip?
- * if yes: tcp? udp? */
- if (skb->protocol == htons(ETH_P_IP)) {
- if (skb->nh.iph->protocol == IPPROTO_TCP)
- descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
- else if (skb->nh.iph->protocol == IPPROTO_UDP)
- descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
- else /* the stack should checksum non-tcp and non-udp
- packets on his own: NETIF_F_IP_CSUM */
- descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
- }
-}
-
-/**
* spider_net_prepare_tx_descr - fill tx descriptor with skb data
* @card: card structure
* @descr: descriptor structure to fill out
@@ -864,13 +641,12 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
*/
static int
spider_net_prepare_tx_descr(struct spider_net_card *card,
- struct spider_net_descr *descr,
struct sk_buff *skb)
{
+ struct spider_net_descr *descr = card->tx_chain.head;
dma_addr_t buf;
- buf = pci_map_single(card->pdev, skb->data,
- skb->len, PCI_DMA_BIDIRECTIONAL);
+ buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
if (buf == DMA_ERROR_CODE) {
if (netif_msg_tx_err(card) && net_ratelimit())
pr_err("could not iommu-map packet (%p, %i). "
@@ -880,10 +656,101 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
descr->buf_addr = buf;
descr->buf_size = skb->len;
+ descr->next_descr_addr = 0;
descr->skb = skb;
descr->data_status = 0;
- spider_net_set_txdescr_cmdstat(descr,skb);
+ descr->dmac_cmd_status =
+ SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
+ if (skb->protocol == htons(ETH_P_IP))
+ switch (skb->nh.iph->protocol) {
+ case IPPROTO_TCP:
+ descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
+ break;
+ case IPPROTO_UDP:
+ descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
+ break;
+ }
+
+ descr->prev->next_descr_addr = descr->bus_addr;
+
+ return 0;
+}
+
+/**
+ * spider_net_release_tx_descr - processes a used tx descriptor
+ * @card: card structure
+ * @descr: descriptor to release
+ *
+ * releases a used tx descriptor (unmapping, freeing of skb)
+ */
+static inline void
+spider_net_release_tx_descr(struct spider_net_card *card)
+{
+ struct spider_net_descr *descr = card->tx_chain.tail;
+ struct sk_buff *skb;
+
+ card->tx_chain.tail = card->tx_chain.tail->next;
+ descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
+
+ /* unmap the skb */
+ skb = descr->skb;
+ pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * spider_net_release_tx_chain - processes sent tx descriptors
+ * @card: adapter structure
+ * @brutal: if set, don't care about whether descriptor seems to be in use
+ *
+ * returns 0 if the tx ring is empty, otherwise 1.
+ *
+ * spider_net_release_tx_chain releases the tx descriptors that spider has
+ * finished with (if non-brutal) or simply releases tx descriptors (if brutal).
+ * If some other context is calling this function, we return 1 so that we're
+ * scheduled again (if we were scheduled) and will not lose initiative.
+ */
+static int
+spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
+{
+ struct spider_net_descr_chain *chain = &card->tx_chain;
+ int status;
+
+ spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);
+
+ while (chain->tail != chain->head) {
+ status = spider_net_get_descr_status(chain->tail);
+ switch (status) {
+ case SPIDER_NET_DESCR_COMPLETE:
+ card->netdev_stats.tx_packets++;
+ card->netdev_stats.tx_bytes += chain->tail->skb->len;
+ break;
+
+ case SPIDER_NET_DESCR_CARDOWNED:
+ if (!brutal)
+ return 1;
+ /* fallthrough, if we release the descriptors
+ * brutally (then we don't care about
+ * SPIDER_NET_DESCR_CARDOWNED) */
+
+ case SPIDER_NET_DESCR_RESPONSE_ERROR:
+ case SPIDER_NET_DESCR_PROTECTION_ERROR:
+ case SPIDER_NET_DESCR_FORCE_END:
+ if (netif_msg_tx_err(card))
+ pr_err("%s: forcing end of tx descriptor "
+ "with status x%02x\n",
+ card->netdev->name, status);
+ card->netdev_stats.tx_errors++;
+ break;
+
+ default:
+ card->netdev_stats.tx_dropped++;
+ return 1;
+ }
+ spider_net_release_tx_descr(card);
+ }
return 0;
}
@@ -896,18 +763,32 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
* spider_net_kick_tx_dma writes the current tx chain head as start address
* of the tx descriptor chain and enables the transmission DMA engine
*/
-static void
-spider_net_kick_tx_dma(struct spider_net_card *card,
- struct spider_net_descr *descr)
+static inline void
+spider_net_kick_tx_dma(struct spider_net_card *card)
{
- /* this is the only descriptor in the output chain.
- * Enable TX DMA */
+ struct spider_net_descr *descr;
- spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
- descr->bus_addr);
+ if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
+ SPIDER_NET_TX_DMA_EN)
+ goto out;
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_VALUE);
+ descr = card->tx_chain.tail;
+ for (;;) {
+ if (spider_net_get_descr_status(descr) ==
+ SPIDER_NET_DESCR_CARDOWNED) {
+ spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
+ descr->bus_addr);
+ spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+ SPIDER_NET_DMA_TX_VALUE);
+ break;
+ }
+ if (descr == card->tx_chain.head)
+ break;
+ descr = descr->next;
+ }
+
+out:
+ mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
}
/**
@@ -915,47 +796,69 @@ spider_net_kick_tx_dma(struct spider_net_card *card,
* @skb: packet to send out
* @netdev: interface device structure
*
- * returns 0 on success, <0 on failure
+ * returns 0 on success, !0 on failure
*/
static int
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct spider_net_card *card = netdev_priv(netdev);
- struct spider_net_descr *descr;
+ struct spider_net_descr_chain *chain = &card->tx_chain;
+ struct spider_net_descr *descr = chain->head;
+ unsigned long flags;
int result;
+ spin_lock_irqsave(&chain->lock, flags);
+
spider_net_release_tx_chain(card, 0);
- descr = spider_net_get_next_tx_descr(card);
+ if (chain->head->next == chain->tail->prev) {
+ card->netdev_stats.tx_dropped++;
+ result = NETDEV_TX_LOCKED;
+ goto out;
+ }
- if (!descr)
- goto error;
+ if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
+ result = NETDEV_TX_LOCKED;
+ goto out;
+ }
- result = spider_net_prepare_tx_descr(card, descr, skb);
- if (result)
- goto error;
+ if (spider_net_prepare_tx_descr(card, skb) != 0) {
+ card->netdev_stats.tx_dropped++;
+ result = NETDEV_TX_BUSY;
+ goto out;
+ }
+ result = NETDEV_TX_OK;
+
+ spider_net_kick_tx_dma(card);
card->tx_chain.head = card->tx_chain.head->next;
- if (spider_net_get_descr_status(descr->prev) !=
- SPIDER_NET_DESCR_CARDOWNED) {
- /* make sure the current descriptor is in memory. Then
- * kicking it on again makes sense, if the previous is not
- * card-owned anymore. Check the previous descriptor twice
- * to omit an mb() in heavy traffic cases */
- mb();
- if (spider_net_get_descr_status(descr->prev) !=
- SPIDER_NET_DESCR_CARDOWNED)
- spider_net_kick_tx_dma(card, descr);
- }
+out:
+ spin_unlock_irqrestore(&chain->lock, flags);
+ netif_wake_queue(netdev);
+ return result;
+}
- mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
+/**
+ * spider_net_cleanup_tx_ring - cleans up the TX ring
+ * @card: card structure
+ *
+ * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
+ * interrupts to cleanup our TX ring) and returns sent packets to the stack
+ * by freeing them
+ */
+static void
+spider_net_cleanup_tx_ring(struct spider_net_card *card)
+{
+ unsigned long flags;
- return NETDEV_TX_OK;
+ spin_lock_irqsave(&card->tx_chain.lock, flags);
-error:
- card->netdev_stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ if ((spider_net_release_tx_chain(card, 0) != 0) &&
+ (card->netdev->flags & IFF_UP))
+ spider_net_kick_tx_dma(card);
+
+ spin_unlock_irqrestore(&card->tx_chain.lock, flags);
}
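
Returning NETDEV_TX_LOCKED only makes sense together with the NETIF_F_LLTX flag added further down: with LLTX the core no longer wraps hard_start_xmit in its own TX lock, the driver serializes transmits with chain->lock itself, and NETDEV_TX_LOCKED asks the queueing layer to requeue the skb and retry. The conventional LLTX shape, as a sketch (not spider_net's exact code, which takes the lock unconditionally):

static int lltx_style_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);	/* hypothetical */
	unsigned long flags;

	if (!spin_trylock_irqsave(&priv->tx_lock, flags))
		return NETDEV_TX_LOCKED;	/* core will retry */

	/* ... place skb on the hardware ring ... */

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}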
/**
@@ -1002,7 +905,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
/* unmap descriptor */
pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
- PCI_DMA_BIDIRECTIONAL);
+ PCI_DMA_FROMDEVICE);
/* the cases we'll throw away the packet immediately */
if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
@@ -1067,14 +970,11 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
static int
spider_net_decode_one_descr(struct spider_net_card *card, int napi)
{
- enum spider_net_descr_status status;
- struct spider_net_descr *descr;
- struct spider_net_descr_chain *chain;
+ struct spider_net_descr_chain *chain = &card->rx_chain;
+ struct spider_net_descr *descr = chain->tail;
+ int status;
int result;
- chain = &card->rx_chain;
- descr = chain->tail;
-
status = spider_net_get_descr_status(descr);
if (status == SPIDER_NET_DESCR_CARDOWNED) {
@@ -1103,7 +1003,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
card->netdev->name, status);
card->netdev_stats.rx_dropped++;
pci_unmap_single(card->pdev, descr->buf_addr,
- SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+ SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
dev_kfree_skb_irq(descr->skb);
goto refill;
}
@@ -1119,7 +1019,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
/* ok, we've got a packet in descr */
result = spider_net_pass_skb_up(descr, card, napi);
refill:
- spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
+ descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
/* change the descriptor state: */
if (!napi)
spider_net_refill_rx_chain(card);
@@ -1291,21 +1191,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
}
/**
- * spider_net_enable_txdmac - enables a TX DMA controller
- * @card: card structure
- *
- * spider_net_enable_txdmac enables the TX DMA controller by setting the
- * descriptor chain tail address
- */
-static void
-spider_net_enable_txdmac(struct spider_net_card *card)
-{
- /* assume chain is aligned correctly */
- spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
- card->tx_chain.tail->bus_addr);
-}
-
-/**
* spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
* @card: card structure
*
@@ -1653,7 +1538,6 @@ spider_net_enable_card(struct spider_net_card *card)
{ SPIDER_NET_GMRWOLCTRL, 0 },
{ SPIDER_NET_GTESTMD, 0x10000000 },
{ SPIDER_NET_GTTQMSK, 0x00400040 },
- { SPIDER_NET_GTESTMD, 0 },
{ SPIDER_NET_GMACINTEN, 0 },
@@ -1692,9 +1576,6 @@ spider_net_enable_card(struct spider_net_card *card)
spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
- /* set chain tail adress for TX chain */
- spider_net_enable_txdmac(card);
-
spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
SPIDER_NET_LENLMT_VALUE);
spider_net_write_reg(card, SPIDER_NET_GMACMODE,
@@ -1709,6 +1590,9 @@ spider_net_enable_card(struct spider_net_card *card)
SPIDER_NET_INT1_MASK_VALUE);
spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
SPIDER_NET_INT2_MASK_VALUE);
+
+ spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+ SPIDER_NET_GDTDCEIDIS);
}
/**
@@ -1727,11 +1611,12 @@ spider_net_open(struct net_device *netdev)
int result;
result = -ENOMEM;
- if (spider_net_init_chain(card, &card->tx_chain,
- card->descr, tx_descriptors))
+ if (spider_net_init_chain(card, &card->tx_chain, card->descr,
+ PCI_DMA_TODEVICE, card->tx_desc))
goto alloc_tx_failed;
if (spider_net_init_chain(card, &card->rx_chain,
- card->descr + tx_descriptors, rx_descriptors))
+ card->descr + card->rx_desc,
+ PCI_DMA_FROMDEVICE, card->rx_desc))
goto alloc_rx_failed;
/* allocate rx skbs */
@@ -1938,7 +1823,7 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
/* empty sequencer data */
for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
sequencer++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
+ spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
sequencer * 8, 0x0);
for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
@@ -1955,6 +1840,49 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
}
/**
+ * spider_net_stop - called upon ifconfig down
+ * @netdev: interface device structure
+ *
+ * always returns 0
+ */
+int
+spider_net_stop(struct net_device *netdev)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+
+ tasklet_kill(&card->rxram_full_tl);
+ netif_poll_disable(netdev);
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ del_timer_sync(&card->tx_timer);
+
+ /* disable/mask all interrupts */
+ spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
+
+ /* free_irq(netdev->irq, netdev);*/
+ free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
+
+ spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+ SPIDER_NET_DMA_TX_FEND_VALUE);
+
+ /* turn off DMA, force end */
+ spider_net_disable_rxdmac(card);
+
+ /* release chains */
+ if (spin_trylock(&card->tx_chain.lock)) {
+ spider_net_release_tx_chain(card, 1);
+ spin_unlock(&card->tx_chain.lock);
+ }
+
+ spider_net_free_chain(card, &card->tx_chain);
+ spider_net_free_chain(card, &card->rx_chain);
+
+ return 0;
+}
+
+/**
* spider_net_tx_timeout_task - task scheduled by the watchdog timeout
* function (to be called not under interrupt status)
* @data: data, is interface device structure
@@ -1982,7 +1910,7 @@ spider_net_tx_timeout_task(void *data)
goto out;
spider_net_open(netdev);
- spider_net_kick_tx_dma(card, card->tx_chain.head);
+ spider_net_kick_tx_dma(card);
netif_device_attach(netdev);
out:
@@ -2065,7 +1993,6 @@ spider_net_setup_netdev(struct spider_net_card *card)
pci_set_drvdata(card->pdev, netdev);
- atomic_set(&card->tx_chain_release,0);
card->rxram_full_tl.data = (unsigned long) card;
card->rxram_full_tl.func =
(void (*)(unsigned long)) spider_net_handle_rxram_full;
@@ -2077,9 +2004,12 @@ spider_net_setup_netdev(struct spider_net_card *card)
card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
+ card->tx_desc = tx_descriptors;
+ card->rx_desc = rx_descriptors;
+
spider_net_setup_netdev_ops(netdev);
- netdev->features = NETIF_F_HW_CSUM;
+ netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
/* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
* NETIF_F_HW_VLAN_FILTER */
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 3b8d951cf73c..30407cdf0892 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -208,7 +208,10 @@ extern char spider_net_driver_name[];
#define SPIDER_NET_DMA_RX_VALUE 0x80000000
#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
/* to set TX_DMA_EN */
-#define SPIDER_NET_DMA_TX_VALUE 0x80000000
+#define SPIDER_NET_TX_DMA_EN 0x80000000
+#define SPIDER_NET_GDTDCEIDIS 0x00000002
+#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
+ SPIDER_NET_GDTDCEIDIS
#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
@@ -329,55 +332,23 @@ enum spider_net_int2_status {
(~SPIDER_NET_TXINT) & \
(~SPIDER_NET_RXINT) )
-#define SPIDER_NET_GPREXEC 0x80000000
-#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
+#define SPIDER_NET_GPREXEC 0x80000000
+#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
-/* descriptor bits
- *
- * 1010 descriptor ready
- * 0 descr in middle of chain
- * 000 fixed to 0
- *
- * 0 no interrupt on completion
- * 000 fixed to 0
- * 1 no ipsec processing
- * 1 last descriptor for this frame
- * 00 no checksum
- * 10 tcp checksum
- * 11 udp checksum
- *
- * 00 fixed to 0
- * 0 fixed to 0
- * 0 no interrupt on response errors
- * 0 no interrupt on invalid descr
- * 0 no interrupt on dma process termination
- * 0 no interrupt on descr chain end
- * 0 no interrupt on descr complete
- *
- * 000 fixed to 0
- * 0 response error interrupt status
- * 0 invalid descr status
- * 0 dma termination status
- * 0 descr chain end status
- * 0 descr complete status */
-#define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000
-#define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000
-#define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000
-#define SPIDER_NET_DESCR_IND_PROC_SHIFT 28
-#define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff
-
-/* descr ready, descr is in middle of chain, get interrupt on completion */
-#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
-
-enum spider_net_descr_status {
- SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
- SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
- SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
- SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */
- SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
- SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
- SPIDER_NET_DESCR_NOT_IN_USE /* any other value */
-};
+#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
+#define SPIDER_NET_DMAC_NOCS 0x00040000
+#define SPIDER_NET_DMAC_TCP 0x00020000
+#define SPIDER_NET_DMAC_UDP 0x00030000
+#define SPIDER_NET_TXDCEST 0x08000000
+
+#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
+#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
+#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
struct spider_net_descr {
/* as defined by the hardware */
@@ -398,7 +369,7 @@ struct spider_net_descr {
} __attribute__((aligned(32)));
struct spider_net_descr_chain {
- /* we walk from tail to head */
+ spinlock_t lock;
struct spider_net_descr *head;
struct spider_net_descr *tail;
};
@@ -453,8 +424,6 @@ struct spider_net_card {
struct spider_net_descr_chain tx_chain;
struct spider_net_descr_chain rx_chain;
- atomic_t rx_chain_refill;
- atomic_t tx_chain_release;
struct net_device_stats netdev_stats;
@@ -471,6 +440,9 @@ struct spider_net_card {
/* for ethtool */
int msg_enable;
+ int rx_desc;
+ int tx_desc;
+
struct spider_net_descr descr[0];
};
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index a5bb0b7633af..02209222b8c9 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -130,6 +130,18 @@ spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
return 0;
}
+static void
+spider_net_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct spider_net_card *card = netdev->priv;
+
+ ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
+ ering->tx_pending = card->tx_desc;
+ ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
+ ering->rx_pending = card->rx_desc;
+}
+
struct ethtool_ops spider_net_ethtool_ops = {
.get_settings = spider_net_ethtool_get_settings,
.get_drvinfo = spider_net_ethtool_get_drvinfo,
@@ -141,5 +153,6 @@ struct ethtool_ops spider_net_ethtool_ops = {
.set_rx_csum = spider_net_ethtool_set_rx_csum,
.get_tx_csum = spider_net_ethtool_get_tx_csum,
.set_tx_csum = spider_net_ethtool_set_tx_csum,
+ .get_ringparam = spider_net_ethtool_get_ringparam,
};
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index ed1f59901ff4..c0a62b00ffc8 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -22,129 +22,13 @@
Support and updates available at
http://www.scyld.com/network/starfire.html
+ [link no longer provides useful info -jgarzik]
- -----------------------------------------------------------
-
- Linux kernel-specific changes:
-
- LK1.1.1 (jgarzik):
- - Use PCI driver interface
- - Fix MOD_xxx races
- - softnet fixups
-
- LK1.1.2 (jgarzik):
- - Merge Becker version 0.15
-
- LK1.1.3 (Andrew Morton)
- - Timer cleanups
-
- LK1.1.4 (jgarzik):
- - Merge Becker version 1.03
-
- LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
- - Support hardware Rx/Tx checksumming
- - Use the GFP firmware taken from Adaptec's Netware driver
-
- LK1.2.2 (Ion Badulescu)
- - Backported to 2.2.x
-
- LK1.2.3 (Ion Badulescu)
- - Fix the flaky mdio interface
- - More compat clean-ups
-
- LK1.2.4 (Ion Badulescu)
- - More 2.2.x initialization fixes
-
- LK1.2.5 (Ion Badulescu)
- - Several fixes from Manfred Spraul
-
- LK1.2.6 (Ion Badulescu)
- - Fixed ifup/ifdown/ifup problem in 2.4.x
-
- LK1.2.7 (Ion Badulescu)
- - Removed unused code
- - Made more functions static and __init
-
- LK1.2.8 (Ion Badulescu)
- - Quell bogus error messages, inform about the Tx threshold
- - Removed #ifdef CONFIG_PCI, this driver is PCI only
-
- LK1.2.9 (Ion Badulescu)
- - Merged Jeff Garzik's changes from 2.4.4-pre5
- - Added 2.2.x compatibility stuff required by the above changes
-
- LK1.2.9a (Ion Badulescu)
- - More updates from Jeff Garzik
-
- LK1.3.0 (Ion Badulescu)
- - Merged zerocopy support
-
- LK1.3.1 (Ion Badulescu)
- - Added ethtool support
- - Added GPIO (media change) interrupt support
-
- LK1.3.2 (Ion Badulescu)
- - Fixed 2.2.x compatibility issues introduced in 1.3.1
- - Fixed ethtool ioctl returning uninitialized memory
-
- LK1.3.3 (Ion Badulescu)
- - Initialize the TxMode register properly
- - Don't dereference dev->priv after freeing it
-
- LK1.3.4 (Ion Badulescu)
- - Fixed initialization timing problems
- - Fixed interrupt mask definitions
-
- LK1.3.5 (jgarzik)
- - ethtool NWAY_RST, GLINK, [GS]MSGLVL support
-
- LK1.3.6:
- - Sparc64 support and fixes (Ion Badulescu)
- - Better stats and error handling (Ion Badulescu)
- - Use new pci_set_mwi() PCI API function (jgarzik)
-
- LK1.3.7 (Ion Badulescu)
- - minimal implementation of tx_timeout()
- - correctly shutdown the Rx/Tx engines in netdev_close()
- - added calls to netif_carrier_on/off
- (patch from Stefan Rompf <srompf@isg.de>)
- - VLAN support
-
- LK1.3.8 (Ion Badulescu)
- - adjust DMA burst size on sparc64
- - 64-bit support
- - reworked zerocopy support for 64-bit buffers
- - working and usable interrupt mitigation/latency
- - reduced Tx interrupt frequency for lower interrupt overhead
-
- LK1.3.9 (Ion Badulescu)
- - bugfix for mcast filter
- - enable the right kind of Tx interrupts (TxDMADone, not TxDone)
-
- LK1.4.0 (Ion Badulescu)
- - NAPI support
-
- LK1.4.1 (Ion Badulescu)
- - flush PCI posting buffers after disabling Rx interrupts
- - put the chip to a D3 slumber on driver unload
- - added config option to enable/disable NAPI
-
- LK1.4.2 (Ion Badulescu)
- - finally added firmware (GPL'ed by Adaptec)
- - removed compatibility code for 2.2.x
-
- LK1.4.2.1 (Ion Badulescu)
- - fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM
- - added 32-bit padding to outgoing skb's, removed previous workaround
-
-TODO: - fix forced speed/duplexing code (broken a long time ago, when
- somebody converted the driver to use the generic MII code)
- - fix VLAN support
*/
#define DRV_NAME "starfire"
-#define DRV_VERSION "1.03+LK1.4.2.1"
-#define DRV_RELDATE "October 3, 2005"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "June 27, 2006"
#include <linux/module.h>
#include <linux/kernel.h>
@@ -846,7 +730,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
goto err_out_free_netdev;
}
- /* ioremap is borken in Linux-2.2.x/sparc64 */
base = ioremap(ioaddr, io_size);
if (!base) {
printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 643fceae3db5..698568e751da 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -16,91 +16,13 @@
Support and updates available at
http://www.scyld.com/network/sundance.html
+ [link no longer provides useful info -jgarzik]
-
- Version LK1.01a (jgarzik):
- - Replace some MII-related magic numbers with constants
-
- Version LK1.02 (D-Link):
- - Add new board to PCI ID list
- - Fix multicast bug
-
- Version LK1.03 (D-Link):
- - New Rx scheme, reduce Rx congestion
- - Option to disable flow control
-
- Version LK1.04 (D-Link):
- - Tx timeout recovery
- - More support for ethtool.
-
- Version LK1.04a:
- - Remove unused/constant members from struct pci_id_info
- (which then allows removal of 'drv_flags' from private struct)
- (jgarzik)
- - If no phy is found, fail to load that board (jgarzik)
- - Always start phy id scan at id 1 to avoid problems (Donald Becker)
- - Autodetect where mii_preable_required is needed,
- default to not needed. (Donald Becker)
-
- Version LK1.04b:
- - Remove mii_preamble_required module parameter (Donald Becker)
- - Add per-interface mii_preamble_required (setting is autodetected)
- (Donald Becker)
- - Remove unnecessary cast from void pointer (jgarzik)
- - Re-align comments in private struct (jgarzik)
-
- Version LK1.04c (jgarzik):
- - Support bitmapped message levels (NETIF_MSG_xxx), and the
- two ethtool ioctls that get/set them
- - Don't hand-code MII ethtool support, use standard API/lib
-
- Version LK1.04d:
- - Merge from Donald Becker's sundance.c: (Jason Lunz)
- * proper support for variably-sized MTUs
- * default to PIO, to fix chip bugs
- - Add missing unregister_netdev (Jason Lunz)
- - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
- - Better rx buf size calculation (Donald Becker)
-
- Version LK1.05 (D-Link):
- - Fix DFE-580TX packet drop issue (for DL10050C)
- - Fix reset_tx logic
-
- Version LK1.06 (D-Link):
- - Fix crash while unloading driver
-
- Versin LK1.06b (D-Link):
- - New tx scheme, adaptive tx_coalesce
-
- Version LK1.07 (D-Link):
- - Fix tx bugs in big-endian machines
- - Remove unused max_interrupt_work module parameter, the new
- NAPI-like rx scheme doesn't need it.
- - Remove redundancy get_stats() in intr_handler(), those
- I/O access could affect performance in ARM-based system
- - Add Linux software VLAN support
-
- Version LK1.08 (Philippe De Muyter phdm@macqel.be):
- - Fix bug of custom mac address
- (StationAddr register only accept word write)
-
- Version LK1.09 (D-Link):
- - Fix the flowctrl bug.
- - Set Pause bit in MII ANAR if flow control enabled.
-
- Version LK1.09a (ICPlus):
- - Add the delay time in reading the contents of EEPROM
-
- Version LK1.10 (Philippe De Muyter phdm@macqel.be):
- - Make 'unblock interface after Tx underrun' work
-
- Version LK1.11 (Pedro Alejandro Lopez-Valencia palopezv at gmail.com):
- - Add support for IC Plus Corporation IP100A chipset
*/
#define DRV_NAME "sundance"
-#define DRV_VERSION "1.01+LK1.11"
-#define DRV_RELDATE "14-Jun-2006"
+#define DRV_VERSION "1.1"
+#define DRV_RELDATE "27-Jun-2006"
/* The user-configurable values.
@@ -185,7 +107,7 @@ static char *media[MAX_UNITS];
#endif
/* These identify the driver base version and may not be removed. */
-static char version[] __devinitdata =
+static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
KERN_INFO " http://www.scyld.com/network/sundance.html\n";
@@ -282,15 +204,15 @@ IVc. Errata
#define USE_IO_OPS 1
#endif
-static struct pci_device_id sundance_pci_tbl[] = {
- {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
- {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
- {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
- {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
- {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
- {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
- {0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6},
- {0,}
+static const struct pci_device_id sundance_pci_tbl[] = {
+ { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
+ { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
+ { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
+ { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
+ { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
@@ -301,7 +223,7 @@ enum {
struct pci_id_info {
const char *name;
};
-static const struct pci_id_info pci_id_tbl[] = {
+static const struct pci_id_info pci_id_tbl[] __devinitdata = {
{"D-Link DFE-550TX FAST Ethernet Adapter"},
{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
{"D-Link DFE-580TX 4 port Server Adapter"},
@@ -309,7 +231,7 @@ static const struct pci_id_info pci_id_tbl[] = {
{"D-Link DL10050-based FAST Ethernet Adapter"},
{"Sundance Technology Alta"},
{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
- {NULL,}, /* 0 terminated list. */
+ { } /* terminate list. */
};
/* This driver was written to use PCI memory space, however x86-oriented
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 8673fd4c08c7..c6f5bc3c042f 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3255,12 +3255,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
}
static struct pci_device_id happymeal_pci_ids[] = {
- {
- .vendor = PCI_VENDOR_ID_SUN,
- .device = PCI_DEVICE_ID_SUN_HAPPYMEAL,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
+ { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
{ } /* Terminating entry */
};
@@ -3275,7 +3270,7 @@ static struct pci_driver hme_pci_driver = {
static int __init happy_meal_pci_init(void)
{
- return pci_module_init(&hme_pci_driver);
+ return pci_register_driver(&hme_pci_driver);
}
static void happy_meal_pci_exit(void)
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 1ef9fd39a79a..ec0413609f36 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void)
{
if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
(idprom->id_machtype == (SM_SUN4|SM_4_470))) {
- memset(&sun4_sdev, 0, sizeof(sdev));
+ memset(&sun4_sdev, 0, sizeof(struct sbus_dev));
sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
sun4_sdev.irqs[0] = 6;
return sparc_lance_probe_one(&sun4_sdev, NULL, NULL);
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void)
static int __exit sunlance_sun4_remove(void)
{
- struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev);
+ struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
struct net_device *net_dev = lp->dev;
unregister_netdevice(net_dev);
- lance_free_hwresources(root_lance_dev);
+ lance_free_hwresources(lp);
free_netdev(net_dev);
- dev_set_drvdata(&sun4_sdev->dev, NULL);
+ dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL);
return 0;
}
@@ -1566,20 +1566,21 @@ static int __exit sunlance_sun4_remove(void)
static int __devinit sunlance_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
struct sbus_dev *sdev = to_sbus_device(&dev->dev);
- struct device_node *dp = dev->node;
int err;
- if (!strcmp(dp->name, "le")) {
- err = sparc_lance_probe_one(sdev, NULL, NULL);
- } else if (!strcmp(dp->name, "ledma")) {
- struct sbus_dma *ledma = find_ledma(sdev);
+ if (sdev->parent) {
+ struct of_device *parent = &sdev->parent->ofdev;
- err = sparc_lance_probe_one(sdev->child, ledma, NULL);
- } else {
- BUG_ON(strcmp(dp->name, "lebuffer"));
+ if (!strcmp(parent->node->name, "ledma")) {
+ struct sbus_dma *ledma = find_ledma(to_sbus_device(&parent->dev));
- err = sparc_lance_probe_one(sdev->child, NULL, sdev);
- }
+ err = sparc_lance_probe_one(sdev, ledma, NULL);
+ } else if (!strcmp(parent->node->name, "lebuffer")) {
+ err = sparc_lance_probe_one(sdev, NULL, to_sbus_device(&parent->dev));
+ } else
+ err = sparc_lance_probe_one(sdev, NULL, NULL);
+ } else
+ err = sparc_lance_probe_one(sdev, NULL, NULL);
return err;
}
@@ -1604,12 +1605,6 @@ static struct of_device_id sunlance_sbus_match[] = {
{
.name = "le",
},
- {
- .name = "ledma",
- },
- {
- .name = "lebuffer",
- },
{},
};
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f645921aff8b..eafabb253f08 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.62"
-#define DRV_MODULE_RELDATE "June 30, 2006"
+#define DRV_MODULE_VERSION "3.65"
+#define DRV_MODULE_RELDATE "August 07, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -123,9 +123,6 @@
TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
TG3_TX_RING_SIZE)
-#define TX_BUFFS_AVAIL(TP) \
- ((TP)->tx_pending - \
- (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp)
spin_unlock(&tp->lock);
}
+static inline u32 tg3_tx_avail(struct tg3 *tp)
+{
+ smp_mb();
+ return (tp->tx_pending -
+ ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+}
+
/* Tigon3 never reports partial packet sends. So we do not
* need special logic to handle SKBs that have not had all
* of their frags sent yet, like SunGEM does.
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp)
tp->tx_cons = sw_idx;
- if (unlikely(netif_queue_stopped(tp->dev))) {
- spin_lock(&tp->tx_lock);
+ /* Need to make the tx_cons update visible to tg3_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that tg3_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_queue_stopped(tp->dev) &&
+ (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+ netif_tx_lock(tp->dev);
if (netif_queue_stopped(tp->dev) &&
- (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+ (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
netif_wake_queue(tp->dev);
- spin_unlock(&tp->tx_lock);
+ netif_tx_unlock(tp->dev);
}
}
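The comment above spells out the lost-wakeup hazard that this smp_mb() pairs against the barrier inside tg3_tx_avail(). Reduced to its essentials, the pairing looks like the sketch below; the ring variables, thresholds and function names are hypothetical stand-ins for tp->tx_prod, tp->tx_cons and the netif queue state, and a kernel context providing smp_mb() is assumed.

#include <linux/types.h>	/* u32; smp_mb() comes from the kernel barrier headers */

/* Illustrative sketch of the producer/consumer barrier pairing. */
#define SKETCH_RING_SIZE	512
#define SKETCH_WAKE_THRESH	32

static u32 sketch_prod, sketch_cons;
static int sketch_stopped;

static u32 sketch_avail(void)
{
	return SKETCH_RING_SIZE - 1 -
	       ((sketch_prod - sketch_cons) & (SKETCH_RING_SIZE - 1));
}

static void sketch_xmit(void)			/* models tg3_start_xmit() */
{
	if (sketch_avail() <= 1) {
		sketch_stopped = 1;
		smp_mb();			/* stop flag visible before the re-check */
		if (sketch_avail() > SKETCH_WAKE_THRESH)
			sketch_stopped = 0;	/* completion freed space meanwhile */
	}
}

static void sketch_complete(u32 new_cons)	/* models tg3_tx() */
{
	sketch_cons = new_cons;
	smp_mb();				/* cons update visible before the flag check */
	if (sketch_stopped && sketch_avail() > SKETCH_WAKE_THRESH)
		sketch_stopped = 0;		/* now safe to wake the queue */
}

Without either barrier the consumer could read a stale stopped flag, or the producer a stale cons index, and the queue would stay stopped forever, which is exactly the scenario the patch guards against.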
@@ -3097,11 +3109,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = dev_alloc_skb(skb_size);
+ skb = netdev_alloc_skb(tp->dev, skb_size);
if (skb == NULL)
return -ENOMEM;
- skb->dev = tp->dev;
skb_reserve(skb, tp->rx_offset);
mapping = pci_map_single(tp->pdev, skb->data,
@@ -3270,11 +3281,10 @@ static int tg3_rx(struct tg3 *tp, int budget)
tg3_recycle_rx(tp, opaque_key,
desc_idx, *post_ptr);
- copy_skb = dev_alloc_skb(len + 2);
+ copy_skb = netdev_alloc_skb(tp->dev, len + 2);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- copy_skb->dev = tp->dev;
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
@@ -3590,6 +3600,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+{
+ int err;
+
+ err = tg3_init_hw(tp, reset_phy);
+ if (err) {
+ printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
+ "aborting.\n", tp->dev->name);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_full_unlock(tp);
+ del_timer_sync(&tp->timer);
+ tp->irq_sync = 0;
+ netif_poll_enable(tp->dev);
+ dev_close(tp->dev);
+ tg3_full_lock(tp, 0);
+ }
+ return err;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
@@ -3630,13 +3662,15 @@ static void tg3_reset_task(void *_data)
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- tg3_init_hw(tp, 1);
+ if (tg3_init_hw(tp, 1))
+ goto out;
tg3_netif_start(tp);
if (restart_timer)
mod_timer(&tp->timer, jiffies + 1);
+out:
tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
tg3_full_unlock(tp);
@@ -3773,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
@@ -3869,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry;
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
- spin_lock(&tp->tx_lock);
+ if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
- if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+ if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
netif_wake_queue(tp->dev);
- spin_unlock(&tp->tx_lock);
}
out_unlock:
@@ -3896,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
struct sk_buff *segs, *nskb;
/* Estimate the number of fragments in the worst case */
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+ if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
netif_stop_queue(tp->dev);
return NETDEV_TX_BUSY;
}
@@ -3936,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
@@ -4086,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry;
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
- spin_lock(&tp->tx_lock);
+ if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
- if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+ if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
netif_wake_queue(tp->dev);
- spin_unlock(&tp->tx_lock);
}
out_unlock:
@@ -4124,6 +4154,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
struct tg3 *tp = netdev_priv(dev);
+ int err;
if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
return -EINVAL;
@@ -4144,13 +4175,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
- tg3_init_hw(tp, 0);
+ err = tg3_restart_hw(tp, 0);
- tg3_netif_start(tp);
+ if (!err)
+ tg3_netif_start(tp);
tg3_full_unlock(tp);
- return 0;
+ return err;
}
/* Free up pending packets in all rx/tx rings.
@@ -4232,7 +4264,7 @@ static void tg3_free_rings(struct tg3 *tp)
* end up in the driver. tp->{tx,}lock are held and thus
* we may not sleep.
*/
-static void tg3_init_rings(struct tg3 *tp)
+static int tg3_init_rings(struct tg3 *tp)
{
u32 i;
@@ -4281,18 +4313,38 @@ static void tg3_init_rings(struct tg3 *tp)
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
- if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
- -1, i) < 0)
+ if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+ printk(KERN_WARNING PFX
+ "%s: Using a smaller RX standard ring, "
+ "only %d out of %d buffers were allocated "
+ "successfully.\n",
+ tp->dev->name, i, tp->rx_pending);
+ if (i == 0)
+ return -ENOMEM;
+ tp->rx_pending = i;
break;
+ }
}
if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
for (i = 0; i < tp->rx_jumbo_pending; i++) {
if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
- -1, i) < 0)
+ -1, i) < 0) {
+ printk(KERN_WARNING PFX
+ "%s: Using a smaller RX jumbo ring, "
+ "only %d out of %d buffers were "
+ "allocated successfully.\n",
+ tp->dev->name, i, tp->rx_jumbo_pending);
+ if (i == 0) {
+ tg3_free_rings(tp);
+ return -ENOMEM;
+ }
+ tp->rx_jumbo_pending = i;
break;
+ }
}
}
+ return 0;
}
/*
@@ -5815,6 +5867,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
struct tg3 *tp = netdev_priv(dev);
struct sockaddr *addr = p;
+ int err = 0;
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
@@ -5832,9 +5885,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 0);
-
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 0);
+ if (!err)
+ tg3_netif_start(tp);
tg3_full_unlock(tp);
} else {
spin_lock_bh(&tp->lock);
@@ -5842,7 +5895,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
spin_unlock_bh(&tp->lock);
}
- return 0;
+ return err;
}
/* tp->lock is held. */
@@ -5942,7 +5995,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
* can only do this after the hardware has been
* successfully reset.
*/
- tg3_init_rings(tp);
+ err = tg3_init_rings(tp);
+ if (err)
+ return err;
/* This value is determined during the probe time DMA
* engine test, tg3_test_dma.
@@ -7956,7 +8011,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
struct tg3 *tp = netdev_priv(dev);
- int irq_sync = 0;
+ int irq_sync = 0, err = 0;
if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
(ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
@@ -7980,13 +8035,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
@@ -8001,7 +8057,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
struct tg3 *tp = netdev_priv(dev);
- int irq_sync = 0;
+ int irq_sync = 0, err = 0;
if (netif_running(dev)) {
tg3_netif_stop(tp);
@@ -8025,13 +8081,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static u32 tg3_get_rx_csum(struct net_device *dev)
@@ -8567,7 +8624,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
err = -EIO;
tx_len = 1514;
- skb = dev_alloc_skb(tx_len);
+ skb = netdev_alloc_skb(tp->dev, tx_len);
if (!skb)
return -ENOMEM;
@@ -8666,7 +8723,9 @@ static int tg3_test_loopback(struct tg3 *tp)
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
- tg3_reset_hw(tp, 1);
+ err = tg3_reset_hw(tp, 1);
+ if (err)
+ return TG3_LOOPBACK_FAILED;
if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8740,8 +8799,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) {
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ if (!tg3_restart_hw(tp, 1))
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
@@ -10078,6 +10137,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_8131_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8385_0) },
{ },
@@ -11419,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
spin_lock_init(&tp->lock);
- spin_lock_init(&tp->tx_lock);
spin_lock_init(&tp->indirect_lock);
INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
@@ -11697,7 +11757,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
+ if (tg3_restart_hw(tp, 1))
+ goto out;
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
@@ -11705,6 +11766,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_attach(dev);
tg3_netif_start(tp);
+out:
tg3_full_unlock(tp);
}
@@ -11731,16 +11793,19 @@ static int tg3_resume(struct pci_dev *pdev)
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
+ err = tg3_restart_hw(tp, 1);
+ if (err)
+ goto out;
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
tg3_netif_start(tp);
+out:
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static struct pci_driver tg3_driver = {
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ba2c98711c88..3ecf356cfb08 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2079,9 +2079,9 @@ struct tg3 {
* lock: Held during reset, PHY access, timer, and when
* updating tg3_flags and tg3_flags2.
*
- * tx_lock: Held during tg3_start_xmit and tg3_tx only
- * when calling netif_[start|stop]_queue.
- * tg3_start_xmit is protected by netif_tx_lock.
+ * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
+ * netif_tx_lock when it needs to call
+ * netif_wake_queue.
*
* Both of these locks are to be held with BH safety.
*
@@ -2118,8 +2118,6 @@ struct tg3 {
u32 tx_cons;
u32 tx_pending;
- spinlock_t tx_lock;
-
struct tg3_tx_buffer_desc *tx_ring;
struct tx_ring_info *tx_buffers;
dma_addr_t tx_desc_mapping;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 9f491563944e..4470025ff7f8 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -140,7 +140,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */
/* version and credits */
#ifndef PCMCIA
-static char version[] __initdata =
+static char version[] __devinitdata =
"\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n"
" v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n"
" v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n"
@@ -216,7 +216,7 @@ static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0};
static int __devinitdata turbo_searched = 0;
#ifndef PCMCIA
-static __u32 ibmtr_mem_base __initdata = 0xd0000;
+static __u32 ibmtr_mem_base __devinitdata = 0xd0000;
#endif
static void __devinit PrtChanID(char *pcid, short stride)
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index cd2e0251e2bc..85a7f797d343 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -5666,7 +5666,7 @@ module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param(ringspeed, int, 0);
-static struct net_device *setup_card(int n)
+static struct net_device * __init setup_card(int n)
{
struct net_device *dev = alloc_trdev(sizeof(struct net_local));
int err;
@@ -5696,9 +5696,8 @@ out:
free_netdev(dev);
return ERR_PTR(err);
}
-
-int init_module(void)
+int __init init_module(void)
{
int i, found = 0;
struct net_device *dev;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index b4c0d101a7d7..eba9083da146 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -138,7 +138,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#include <asm/irq.h>
/* These identify the driver base version and may not be removed. */
-static char version[] __devinitdata =
+static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
KERN_INFO " http://www.scyld.com/network/drivers.html\n";
@@ -224,24 +224,21 @@ static const struct pci_device_id w840_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
+enum {
+ netdev_res_size = 128, /* size of PCI BAR resource */
+};
+
struct pci_id_info {
const char *name;
- struct match_info {
- int pci, pci_mask, subsystem, subsystem_mask;
- int revision, revision_mask; /* Only 8 bits. */
- } id;
- int io_size; /* Needed for I/O region check or ioremap(). */
- int drv_flags; /* Driver use, intended as capability flags. */
+ int drv_flags; /* Driver use, intended as capability flags. */
};
-static struct pci_id_info pci_id_tbl[] = {
- {"Winbond W89c840", /* Sometime a Level-One switch card. */
- { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
- 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
- {"Winbond W89c840", { 0x08401050, 0xffffffff, },
- 128, CanHaveMII | HasBrokenTx},
- {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
- 128, CanHaveMII | HasBrokenTx},
- {NULL,}, /* 0 terminated list. */
+
+static const struct pci_id_info pci_id_tbl[] __devinitdata = {
+ { /* Sometime a Level-One switch card. */
+ "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
+ { "Winbond W89c840", CanHaveMII | HasBrokenTx},
+ { "Compex RL100-ATX", CanHaveMII | HasBrokenTx},
+ { } /* terminate list. */
};
/* This driver was written to use PCI memory space, however some x86 systems
@@ -399,7 +396,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
#ifdef USE_IO_OPS
bar = 0;
#endif
- ioaddr = pci_iomap(pdev, bar, pci_id_tbl[chip_idx].io_size);
+ ioaddr = pci_iomap(pdev, bar, netdev_res_size);
if (!ioaddr)
goto err_out_free_res;
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index f874e4f6ccf6..cf43390d2c80 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1264,8 +1264,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
static int __init xircom_init(void)
{
- pci_register_driver(&xircom_ops);
- return 0;
+ return pci_register_driver(&xircom_ops);
}
static void __exit xircom_exit(void)
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index 091ebb7a62f6..17ca7dc42e6f 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -10,26 +10,11 @@
410 Severn Ave., Suite 210
Annapolis MD 21403
- -----------------------------------------------------------
-
- Linux kernel-specific changes:
-
- LK1.0 (Ion Badulescu)
- - Major cleanup
- - Use 2.4 PCI API
- - Support ethtool
- - Rewrite perfect filter/hash code
- - Use interrupts for media changes
-
- LK1.1 (Ion Badulescu)
- - Disallow negotiation of unsupported full-duplex modes
*/
#define DRV_NAME "xircom_tulip_cb"
-#define DRV_VERSION "0.91+LK1.1"
-#define DRV_RELDATE "October 11, 2001"
-
-#define CARDBUS 1
+#define DRV_VERSION "0.92"
+#define DRV_RELDATE "June 27, 2006"
/* A few user-configurable values. */
@@ -306,10 +291,10 @@ struct xircom_private {
struct xircom_tx_desc tx_ring[TX_RING_SIZE];
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct sk_buff* tx_skbuff[TX_RING_SIZE];
-#ifdef CARDBUS
+
/* The X3201-3 requires 4-byte aligned tx bufs */
struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
-#endif
+
/* The addresses of receive-in-place skbuffs. */
struct sk_buff* rx_skbuff[RX_RING_SIZE];
u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */
@@ -908,10 +893,8 @@ static void xircom_init_ring(struct net_device *dev)
tp->tx_skbuff[i] = NULL;
tp->tx_ring[i].status = 0;
tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
-#ifdef CARDBUS
if (tp->chip_id == X3201_3)
tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
-#endif /* CARDBUS */
}
tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
}
@@ -931,12 +914,10 @@ xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = tp->cur_tx % TX_RING_SIZE;
tp->tx_skbuff[entry] = skb;
-#ifdef CARDBUS
if (tp->chip_id == X3201_3) {
memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len);
tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
} else
-#endif
tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 063816f2b11e..4103c37172f9 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
* If problems develop with TSO, check this first.
*/
numDesc = skb_shinfo(skb)->nr_frags + 1;
- if(skb_tso_size(skb))
+ if (skb_is_gso(skb))
numDesc++;
/* When checking for free space in the ring, we need to also
@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
- if(skb_tso_size(skb)) {
+ if (skb_is_gso(skb)) {
first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
first_txd->numDesc++;
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
new file mode 100644
index 000000000000..47f49ef72bdc
--- /dev/null
+++ b/drivers/net/ucc_geth.c
@@ -0,0 +1,4278 @@
+/*
+ * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * QE UCC Gigabit Ethernet Driver
+ *
+ * Changelog:
+ * Jul 6, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/mii.h>
+
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+#include <asm/ucc.h>
+#include <asm/ucc_fast.h>
+
+#include "ucc_geth.h"
+#include "ucc_geth_phy.h"
+
+#undef DEBUG
+
+#define DRV_DESC "QE UCC Gigabit Ethernet Controller version:June 20, 2006"
+#define DRV_NAME "ucc_geth"
+
+#define ugeth_printk(level, format, arg...) \
+ printk(level format "\n", ## arg)
+
+#define ugeth_dbg(format, arg...) \
+ ugeth_printk(KERN_DEBUG , format , ## arg)
+#define ugeth_err(format, arg...) \
+ ugeth_printk(KERN_ERR , format , ## arg)
+#define ugeth_info(format, arg...) \
+ ugeth_printk(KERN_INFO , format , ## arg)
+#define ugeth_warn(format, arg...) \
+ ugeth_printk(KERN_WARNING , format , ## arg)
+
+#ifdef UGETH_VERBOSE_DEBUG
+#define ugeth_vdbg ugeth_dbg
+#else
+#define ugeth_vdbg(fmt, args...) do { } while (0)
+#endif /* UGETH_VERBOSE_DEBUG */
+
+static DEFINE_SPINLOCK(ugeth_lock);
+
+static ucc_geth_info_t ugeth_primary_info = {
+ .uf_info = {
+ .bd_mem_part = MEM_PART_SYSTEM,
+ .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
+ .max_rx_buf_length = 1536,
+/* FIXME: should be changed at run time for 1G and 100M */
+#ifdef CONFIG_UGETH_HAS_GIGA
+ .urfs = UCC_GETH_URFS_GIGA_INIT,
+ .urfet = UCC_GETH_URFET_GIGA_INIT,
+ .urfset = UCC_GETH_URFSET_GIGA_INIT,
+ .utfs = UCC_GETH_UTFS_GIGA_INIT,
+ .utfet = UCC_GETH_UTFET_GIGA_INIT,
+ .utftt = UCC_GETH_UTFTT_GIGA_INIT,
+#else
+ .urfs = UCC_GETH_URFS_INIT,
+ .urfet = UCC_GETH_URFET_INIT,
+ .urfset = UCC_GETH_URFSET_INIT,
+ .utfs = UCC_GETH_UTFS_INIT,
+ .utfet = UCC_GETH_UTFET_INIT,
+ .utftt = UCC_GETH_UTFTT_INIT,
+#endif
+ .ufpt = 256,
+ .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
+ .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
+ .tenc = UCC_FAST_TX_ENCODING_NRZ,
+ .renc = UCC_FAST_RX_ENCODING_NRZ,
+ .tcrc = UCC_FAST_16_BIT_CRC,
+ .synl = UCC_FAST_SYNC_LEN_NOT_USED,
+ },
+ .numQueuesTx = 1,
+ .numQueuesRx = 1,
+ .extendedFilteringChainPointer = ((uint32_t) NULL),
+ .typeorlen = 3072 /*1536 */ ,
+ .nonBackToBackIfgPart1 = 0x40,
+ .nonBackToBackIfgPart2 = 0x60,
+ .miminumInterFrameGapEnforcement = 0x50,
+ .backToBackInterFrameGap = 0x60,
+ .mblinterval = 128,
+ .nortsrbytetime = 5,
+ .fracsiz = 1,
+ .strictpriorityq = 0xff,
+ .altBebTruncation = 0xa,
+ .excessDefer = 1,
+ .maxRetransmission = 0xf,
+ .collisionWindow = 0x37,
+ .receiveFlowControl = 1,
+ .maxGroupAddrInHash = 4,
+ .maxIndAddrInHash = 4,
+ .prel = 7,
+ .maxFrameLength = 1518,
+ .minFrameLength = 64,
+ .maxD1Length = 1520,
+ .maxD2Length = 1520,
+ .vlantype = 0x8100,
+ .ecamptr = ((uint32_t) NULL),
+ .eventRegMask = UCCE_OTHER,
+ .pausePeriod = 0xf000,
+ .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
+ .bdRingLenTx = {
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN},
+
+ .bdRingLenRx = {
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN},
+
+ .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
+ .largestexternallookupkeysize =
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
+ .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
+ .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
+ .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
+ .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
+ .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
+ .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
+ .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
+ .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
+ .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+ .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+};
+
+static ucc_geth_info_t ugeth_info[8];
+
+#ifdef DEBUG
+static void mem_disp(u8 *addr, int size)
+{
+ u8 *i;
+ int size16Aling = (size >> 4) << 4;
+ int size4Aling = (size >> 2) << 2;
+ int notAlign = 0;
+ if (size % 16)
+ notAlign = 1;
+
+ for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
+ printk("0x%08x: %08x %08x %08x %08x\r\n",
+ (u32) i,
+ *((u32 *) (i)),
+ *((u32 *) (i + 4)),
+ *((u32 *) (i + 8)), *((u32 *) (i + 12)));
+ if (notAlign == 1)
+ printk("0x%08x: ", (u32) i);
+ for (; (u32) i < (u32) addr + size4Aling; i += 4)
+ printk("%08x ", *((u32 *) (i)));
+ for (; (u32) i < (u32) addr + size; i++)
+ printk("%02x", *((u8 *) (i)));
+ if (notAlign == 1)
+ printk("\r\n");
+}
+#endif /* DEBUG */
+
+#ifdef CONFIG_UGETH_FILTERING
+static void enqueue(struct list_head *node, struct list_head *lh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(ugeth_lock, flags);
+ list_add_tail(node, lh);
+ spin_unlock_irqrestore(ugeth_lock, flags);
+}
+#endif /* CONFIG_UGETH_FILTERING */
+
+static struct list_head *dequeue(struct list_head *lh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(ugeth_lock, flags);
+ if (!list_empty(lh)) {
+ struct list_head *node = lh->next;
+ list_del(node);
+ spin_unlock_irqrestore(ugeth_lock, flags);
+ return node;
+ } else {
+ spin_unlock_irqrestore(ugeth_lock, flags);
+ return NULL;
+ }
+}
+
+static int get_interface_details(enet_interface_e enet_interface,
+ enet_speed_e *speed,
+ int *r10m,
+ int *rmm,
+ int *rpm,
+ int *tbi, int *limited_to_full_duplex)
+{
+ /* Analyze enet_interface according to Interface Mode
+ Configuration table */
+ switch (enet_interface) {
+ case ENET_10_MII:
+ *speed = ENET_SPEED_10BT;
+ break;
+ case ENET_10_RMII:
+ *speed = ENET_SPEED_10BT;
+ *r10m = 1;
+ *rmm = 1;
+ break;
+ case ENET_10_RGMII:
+ *speed = ENET_SPEED_10BT;
+ *rpm = 1;
+ *r10m = 1;
+ *limited_to_full_duplex = 1;
+ break;
+ case ENET_100_MII:
+ *speed = ENET_SPEED_100BT;
+ break;
+ case ENET_100_RMII:
+ *speed = ENET_SPEED_100BT;
+ *rmm = 1;
+ break;
+ case ENET_100_RGMII:
+ *speed = ENET_SPEED_100BT;
+ *rpm = 1;
+ *limited_to_full_duplex = 1;
+ break;
+ case ENET_1000_GMII:
+ *speed = ENET_SPEED_1000BT;
+ *limited_to_full_duplex = 1;
+ break;
+ case ENET_1000_RGMII:
+ *speed = ENET_SPEED_1000BT;
+ *rpm = 1;
+ *limited_to_full_duplex = 1;
+ break;
+ case ENET_1000_TBI:
+ *speed = ENET_SPEED_1000BT;
+ *tbi = 1;
+ *limited_to_full_duplex = 1;
+ break;
+ case ENET_1000_RTBI:
+ *speed = ENET_SPEED_1000BT;
+ *rpm = 1;
+ *tbi = 1;
+ *limited_to_full_duplex = 1;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd)
+{
+ struct sk_buff *skb = NULL;
+
+ skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+
+ if (skb == NULL)
+ return NULL;
+
+ /* We need the data buffer to be aligned properly. We will reserve
+ * as many bytes as needed to align the data properly
+ */
+ skb_reserve(skb,
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT -
+ (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
+ 1)));
+
+ skb->dev = ugeth->dev;
+
+ BD_BUFFER_SET(bd,
+ dma_map_single(NULL,
+ skb->data,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE));
+
+ BD_STATUS_AND_LENGTH_SET(bd,
+ (R_E | R_I |
+ (BD_STATUS_AND_LENGTH(bd) & R_W)));
+
+ return skb;
+}
+
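The skb_reserve() call in get_new_skb() above is the usual power-of-two alignment trick: reserve headroom until skb->data sits on an UCC_GETH_RX_DATA_BUF_ALIGNMENT boundary. In isolation, with a hypothetical alignment constant, the arithmetic is:

#include <linux/skbuff.h>

/* Illustrative sketch; SKETCH_ALIGN stands in for UCC_GETH_RX_DATA_BUF_ALIGNMENT
 * and must be a power of two. Like the driver code, this reserves a full
 * SKETCH_ALIGN bytes when the buffer happens to be aligned already. */
#define SKETCH_ALIGN	64

static void sketch_align_rx_data(struct sk_buff *skb)
{
	skb_reserve(skb, SKETCH_ALIGN -
		    (((unsigned long)skb->data) & (SKETCH_ALIGN - 1)));
}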
+static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
+{
+ u8 *bd;
+ u32 bd_status;
+ struct sk_buff *skb;
+ int i;
+
+ bd = ugeth->p_rx_bd_ring[rxQ];
+ i = 0;
+
+ do {
+ bd_status = BD_STATUS_AND_LENGTH(bd);
+ skb = get_new_skb(ugeth, bd);
+
+		if (!skb)	/* If we cannot allocate a data buffer,
+				   abort; cleanup happens elsewhere */
+ return -ENOMEM;
+
+ ugeth->rx_skbuff[rxQ][i] = skb;
+
+ /* advance the BD pointer */
+ bd += UCC_GETH_SIZE_OF_BD;
+ i++;
+ } while (!(bd_status & R_W));
+
+ return 0;
+}
+
+static int fill_init_enet_entries(ucc_geth_private_t *ugeth,
+ volatile u32 *p_start,
+ u8 num_entries,
+ u32 thread_size,
+ u32 thread_alignment,
+ qe_risc_allocation_e risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ if ((snum = qe_get_snum()) < 0) {
+ ugeth_err("fill_init_enet_entries: Can not get SNUM.");
+ return snum;
+ }
+ if ((i == 0) && skip_page_for_first_entry)
+ /* First entry of Rx does not have page */
+ init_enet_offset = 0;
+ else {
+ init_enet_offset =
+ qe_muram_alloc(thread_size, thread_alignment);
+ if (IS_MURAM_ERR(init_enet_offset)) {
+ ugeth_err
+ ("fill_init_enet_entries: Can not allocate DPRAM memory.");
+ qe_put_snum((u8) snum);
+ return -ENOMEM;
+ }
+ }
+ *(p_start++) =
+ ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
+ | risc;
+ }
+
+ return 0;
+}
+
+static int return_init_enet_entries(ucc_geth_private_t *ugeth,
+ volatile u32 *p_start,
+ u8 num_entries,
+ qe_risc_allocation_e risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+		/* Check that this entry was actually valid --
+		   needed in case an earlier allocation failed */
+ if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ snum =
+ (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
+ ENET_INIT_PARAM_SNUM_SHIFT;
+ qe_put_snum((u8) snum);
+ if (!((i == 0) && skip_page_for_first_entry)) {
+ /* First entry of Rx does not have page */
+ init_enet_offset =
+ (in_be32(p_start) &
+ ENET_INIT_PARAM_PTR_MASK);
+ qe_muram_free(init_enet_offset);
+ }
+ *(p_start++) = 0; /* Just for cosmetics */
+ }
+ }
+
+ return 0;
+}
+
+#ifdef DEBUG
+static int dump_init_enet_entries(ucc_geth_private_t *ugeth,
+ volatile u32 *p_start,
+ u8 num_entries,
+ u32 thread_size,
+ qe_risc_allocation_e risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+		/* Check that this entry was actually valid --
+		   needed in case an earlier allocation failed */
+ if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ snum =
+ (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
+ ENET_INIT_PARAM_SNUM_SHIFT;
+ qe_put_snum((u8) snum);
+ if (!((i == 0) && skip_page_for_first_entry)) {
+ /* First entry of Rx does not have page */
+ init_enet_offset =
+ (in_be32(p_start) &
+ ENET_INIT_PARAM_PTR_MASK);
+ ugeth_info("Init enet entry %d:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32)
+ qe_muram_addr(init_enet_offset));
+ mem_disp(qe_muram_addr(init_enet_offset),
+ thread_size);
+ }
+ p_start++;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_UGETH_FILTERING
+static enet_addr_container_t *get_enet_addr_container(void)
+{
+ enet_addr_container_t *enet_addr_cont;
+
+ /* allocate memory */
+ enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL);
+ if (!enet_addr_cont) {
+ ugeth_err("%s: No memory for enet_addr_container_t object.",
+ __FUNCTION__);
+ return NULL;
+ }
+
+ return enet_addr_cont;
+}
+#endif /* CONFIG_UGETH_FILTERING */
+
+static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont)
+{
+ kfree(enet_addr_cont);
+}
+
+#ifdef CONFIG_UGETH_FILTERING
+static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr, u8 paddr_num)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+
+ if (!(paddr_num < NUM_OF_PADDRS)) {
+		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+	/* Ethernet frames are defined in Little Endian mode;
+	   therefore, to insert the address we reverse the bytes. */
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].h,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
+ (u16) (*p_enet_addr)[4]));
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].m,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
+ (u16) (*p_enet_addr)[2]));
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].l,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
+ (u16) (*p_enet_addr)[0]));
+
+ return 0;
+}
+#endif /* CONFIG_UGETH_FILTERING */
+
+static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+
+ if (!(paddr_num < NUM_OF_PADDRS)) {
+		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ /* Writing address ff.ff.ff.ff.ff.ff disables address
+ recognition for this register */
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
+
+ return 0;
+}
+
+static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ u32 cecr_subblock;
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+
+	/* Ethernet frames are defined in Little Endian mode; therefore,
+	   to insert the address into the hash (Big Endian mode), we
+	   reverse the bytes. */
+ out_be16(&p_82xx_addr_filt->taddr.h,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
+ (u16) (*p_enet_addr)[4]));
+ out_be16(&p_82xx_addr_filt->taddr.m,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
+ (u16) (*p_enet_addr)[2]));
+ out_be16(&p_82xx_addr_filt->taddr.l,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
+ (u16) (*p_enet_addr)[0]));
+
+ qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
+ (u8) QE_CR_PROTOCOL_ETHERNET, 0);
+}
+
+#ifdef CONFIG_UGETH_MAGIC_PACKET
+static void magic_packet_detection_enable(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ ucc_geth_t *ug_regs;
+ u32 maccfg2, uccm;
+
+ uccf = ugeth->uccf;
+ ug_regs = ugeth->ug_regs;
+
+ /* Enable interrupts for magic packet detection */
+ uccm = in_be32(uccf->p_uccm);
+ uccm |= UCCE_MPD;
+ out_be32(uccf->p_uccm, uccm);
+
+ /* Enable magic packet detection */
+ maccfg2 = in_be32(&ug_regs->maccfg2);
+ maccfg2 |= MACCFG2_MPE;
+ out_be32(&ug_regs->maccfg2, maccfg2);
+}
+
+static void magic_packet_detection_disable(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ ucc_geth_t *ug_regs;
+ u32 maccfg2, uccm;
+
+ uccf = ugeth->uccf;
+ ug_regs = ugeth->ug_regs;
+
+ /* Disable interrupts for magic packet detection */
+ uccm = in_be32(uccf->p_uccm);
+ uccm &= ~UCCE_MPD;
+ out_be32(uccf->p_uccm, uccm);
+
+ /* Disable magic packet detection */
+ maccfg2 = in_be32(&ug_regs->maccfg2);
+ maccfg2 &= ~MACCFG2_MPE;
+ out_be32(&ug_regs->maccfg2, maccfg2);
+}
+#endif /* MAGIC_PACKET */
+
+static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2)
+{
+ return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
+}
+
+#ifdef DEBUG
+static void get_statistics(ucc_geth_private_t *ugeth,
+ ucc_geth_tx_firmware_statistics_t *
+ tx_firmware_statistics,
+ ucc_geth_rx_firmware_statistics_t *
+ rx_firmware_statistics,
+ ucc_geth_hardware_statistics_t *hardware_statistics)
+{
+ ucc_fast_t *uf_regs;
+ ucc_geth_t *ug_regs;
+ ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
+ ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
+
+ ug_regs = ugeth->ug_regs;
+ uf_regs = (ucc_fast_t *) ug_regs;
+ p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
+ p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
+
+ /* Tx firmware only if user handed pointer and driver actually
+ gathers Tx firmware statistics */
+ if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
+ tx_firmware_statistics->sicoltx =
+ in_be32(&p_tx_fw_statistics_pram->sicoltx);
+ tx_firmware_statistics->mulcoltx =
+ in_be32(&p_tx_fw_statistics_pram->mulcoltx);
+ tx_firmware_statistics->latecoltxfr =
+ in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
+ tx_firmware_statistics->frabortduecol =
+ in_be32(&p_tx_fw_statistics_pram->frabortduecol);
+ tx_firmware_statistics->frlostinmactxer =
+ in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
+ tx_firmware_statistics->carriersenseertx =
+ in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
+ tx_firmware_statistics->frtxok =
+ in_be32(&p_tx_fw_statistics_pram->frtxok);
+ tx_firmware_statistics->txfrexcessivedefer =
+ in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
+ tx_firmware_statistics->txpkts256 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts256);
+ tx_firmware_statistics->txpkts512 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts512);
+ tx_firmware_statistics->txpkts1024 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts1024);
+ tx_firmware_statistics->txpktsjumbo =
+ in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
+ }
+
+ /* Rx firmware only if user handed pointer and driver actually
+ * gathers Rx firmware statistics */
+ if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
+ int i;
+ rx_firmware_statistics->frrxfcser =
+ in_be32(&p_rx_fw_statistics_pram->frrxfcser);
+ rx_firmware_statistics->fraligner =
+ in_be32(&p_rx_fw_statistics_pram->fraligner);
+ rx_firmware_statistics->inrangelenrxer =
+ in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
+ rx_firmware_statistics->outrangelenrxer =
+ in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
+ rx_firmware_statistics->frtoolong =
+ in_be32(&p_rx_fw_statistics_pram->frtoolong);
+ rx_firmware_statistics->runt =
+ in_be32(&p_rx_fw_statistics_pram->runt);
+ rx_firmware_statistics->verylongevent =
+ in_be32(&p_rx_fw_statistics_pram->verylongevent);
+ rx_firmware_statistics->symbolerror =
+ in_be32(&p_rx_fw_statistics_pram->symbolerror);
+ rx_firmware_statistics->dropbsy =
+ in_be32(&p_rx_fw_statistics_pram->dropbsy);
+ for (i = 0; i < 0x8; i++)
+ rx_firmware_statistics->res0[i] =
+ p_rx_fw_statistics_pram->res0[i];
+ rx_firmware_statistics->mismatchdrop =
+ in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
+ rx_firmware_statistics->underpkts =
+ in_be32(&p_rx_fw_statistics_pram->underpkts);
+ rx_firmware_statistics->pkts256 =
+ in_be32(&p_rx_fw_statistics_pram->pkts256);
+ rx_firmware_statistics->pkts512 =
+ in_be32(&p_rx_fw_statistics_pram->pkts512);
+ rx_firmware_statistics->pkts1024 =
+ in_be32(&p_rx_fw_statistics_pram->pkts1024);
+ rx_firmware_statistics->pktsjumbo =
+ in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
+ rx_firmware_statistics->frlossinmacer =
+ in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
+ rx_firmware_statistics->pausefr =
+ in_be32(&p_rx_fw_statistics_pram->pausefr);
+ for (i = 0; i < 0x4; i++)
+ rx_firmware_statistics->res1[i] =
+ p_rx_fw_statistics_pram->res1[i];
+ rx_firmware_statistics->removevlan =
+ in_be32(&p_rx_fw_statistics_pram->removevlan);
+ rx_firmware_statistics->replacevlan =
+ in_be32(&p_rx_fw_statistics_pram->replacevlan);
+ rx_firmware_statistics->insertvlan =
+ in_be32(&p_rx_fw_statistics_pram->insertvlan);
+ }
+
+ /* Hardware only if user handed pointer and driver actually
+ gathers hardware statistics */
+ if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
+ hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
+ hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
+ hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
+ hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
+ hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
+ hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
+ hardware_statistics->txok = in_be32(&ug_regs->txok);
+ hardware_statistics->txcf = in_be16(&ug_regs->txcf);
+ hardware_statistics->tmca = in_be32(&ug_regs->tmca);
+ hardware_statistics->tbca = in_be32(&ug_regs->tbca);
+ hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
+ hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
+ hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
+ hardware_statistics->rmca = in_be32(&ug_regs->rmca);
+ hardware_statistics->rbca = in_be32(&ug_regs->rbca);
+ }
+}
+
+static void dump_bds(ucc_geth_private_t *ugeth)
+{
+ int i;
+ int length;
+
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ if (ugeth->p_tx_bd_ring[i]) {
+ length =
+ (ugeth->ug_info->bdRingLenTx[i] *
+ UCC_GETH_SIZE_OF_BD);
+ ugeth_info("TX BDs[%d]", i);
+ mem_disp(ugeth->p_tx_bd_ring[i], length);
+ }
+ }
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ length =
+ (ugeth->ug_info->bdRingLenRx[i] *
+ UCC_GETH_SIZE_OF_BD);
+ ugeth_info("RX BDs[%d]", i);
+ mem_disp(ugeth->p_rx_bd_ring[i], length);
+ }
+ }
+}
+
+static void dump_regs(ucc_geth_private_t *ugeth)
+{
+ int i;
+
+ ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
+
+ ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->maccfg1,
+ in_be32(&ugeth->ug_regs->maccfg1));
+ ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->maccfg2,
+ in_be32(&ugeth->ug_regs->maccfg2));
+ ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->ipgifg,
+ in_be32(&ugeth->ug_regs->ipgifg));
+ ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->hafdup,
+ in_be32(&ugeth->ug_regs->hafdup));
+ ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimcfg,
+ in_be32(&ugeth->ug_regs->miimng.miimcfg));
+ ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimcom,
+ in_be32(&ugeth->ug_regs->miimng.miimcom));
+ ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimadd,
+ in_be32(&ugeth->ug_regs->miimng.miimadd));
+ ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimcon,
+ in_be32(&ugeth->ug_regs->miimng.miimcon));
+ ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimstat,
+ in_be32(&ugeth->ug_regs->miimng.miimstat));
+ ugeth_info("miimmind : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimind,
+ in_be32(&ugeth->ug_regs->miimng.miimind));
+ ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->ifctl,
+ in_be32(&ugeth->ug_regs->ifctl));
+ ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->ifstat,
+ in_be32(&ugeth->ug_regs->ifstat));
+ ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->macstnaddr1,
+ in_be32(&ugeth->ug_regs->macstnaddr1));
+ ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->macstnaddr2,
+ in_be32(&ugeth->ug_regs->macstnaddr2));
+ ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->uempr,
+ in_be32(&ugeth->ug_regs->uempr));
+ ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->utbipar,
+ in_be32(&ugeth->ug_regs->utbipar));
+ ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->ug_regs->uescr,
+ in_be16(&ugeth->ug_regs->uescr));
+ ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tx64,
+ in_be32(&ugeth->ug_regs->tx64));
+ ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tx127,
+ in_be32(&ugeth->ug_regs->tx127));
+ ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tx255,
+ in_be32(&ugeth->ug_regs->tx255));
+ ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rx64,
+ in_be32(&ugeth->ug_regs->rx64));
+ ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rx127,
+ in_be32(&ugeth->ug_regs->rx127));
+ ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rx255,
+ in_be32(&ugeth->ug_regs->rx255));
+ ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->txok,
+ in_be32(&ugeth->ug_regs->txok));
+ ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->ug_regs->txcf,
+ in_be16(&ugeth->ug_regs->txcf));
+ ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tmca,
+ in_be32(&ugeth->ug_regs->tmca));
+ ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tbca,
+ in_be32(&ugeth->ug_regs->tbca));
+ ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rxfok,
+ in_be32(&ugeth->ug_regs->rxfok));
+ ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rxbok,
+ in_be32(&ugeth->ug_regs->rxbok));
+ ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rbyt,
+ in_be32(&ugeth->ug_regs->rbyt));
+ ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rmca,
+ in_be32(&ugeth->ug_regs->rmca));
+ ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rbca,
+ in_be32(&ugeth->ug_regs->rbca));
+ ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->scar,
+ in_be32(&ugeth->ug_regs->scar));
+ ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->scam,
+ in_be32(&ugeth->ug_regs->scam));
+
+ if (ugeth->p_thread_data_tx) {
+ int numThreadsTxNumerical;
+ switch (ugeth->ug_info->numThreadsTx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsTxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsTxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsTxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsTxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsTxNumerical = 8;
+ break;
+ default:
+ numThreadsTxNumerical = 0;
+ break;
+ }
+
+ ugeth_info("Thread data TXs:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_thread_data_tx);
+ for (i = 0; i < numThreadsTxNumerical; i++) {
+ ugeth_info("Thread data TX[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_thread_data_tx[i]);
+ mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
+ sizeof(ucc_geth_thread_data_tx_t));
+ }
+ }
+ if (ugeth->p_thread_data_rx) {
+ int numThreadsRxNumerical;
+ switch (ugeth->ug_info->numThreadsRx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsRxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsRxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsRxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsRxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsRxNumerical = 8;
+ break;
+ default:
+ numThreadsRxNumerical = 0;
+ break;
+ }
+
+ ugeth_info("Thread data RX:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_thread_data_rx);
+ for (i = 0; i < numThreadsRxNumerical; i++) {
+ ugeth_info("Thread data RX[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_thread_data_rx[i]);
+ mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
+ sizeof(ucc_geth_thread_data_rx_t));
+ }
+ }
+ if (ugeth->p_exf_glbl_param) {
+ ugeth_info("EXF global param:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_exf_glbl_param);
+ mem_disp((u8 *) ugeth->p_exf_glbl_param,
+ sizeof(*ugeth->p_exf_glbl_param));
+ }
+ if (ugeth->p_tx_glbl_pram) {
+ ugeth_info("TX global param:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
+ ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_tx_glbl_pram->temoder,
+ in_be16(&ugeth->p_tx_glbl_pram->temoder));
+ ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->sqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->sqptr));
+ ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ in_be32(&ugeth->p_tx_glbl_pram->
+ schedulerbasepointer));
+ ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
+ ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->tstate,
+ in_be32(&ugeth->p_tx_glbl_pram->tstate));
+ ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
+ ugeth->p_tx_glbl_pram->iphoffset[0]);
+ ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
+ ugeth->p_tx_glbl_pram->iphoffset[1]);
+ ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
+ ugeth->p_tx_glbl_pram->iphoffset[2]);
+ ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
+ ugeth->p_tx_glbl_pram->iphoffset[3]);
+ ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
+ ugeth->p_tx_glbl_pram->iphoffset[4]);
+ ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
+ ugeth->p_tx_glbl_pram->iphoffset[5]);
+ ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
+ ugeth->p_tx_glbl_pram->iphoffset[6]);
+ ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
+ ugeth->p_tx_glbl_pram->iphoffset[7]);
+ ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
+ ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
+ ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
+ ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
+ ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
+ ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
+ ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
+ ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
+ ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->tqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->tqptr));
+ }
+ if (ugeth->p_rx_glbl_pram) {
+ ugeth_info("RX global param:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
+ ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->remoder,
+ in_be32(&ugeth->p_rx_glbl_pram->remoder));
+ ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->rqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rqptr));
+ ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->typeorlen,
+ in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
+ ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
+ ugeth->p_rx_glbl_pram->rxgstpack);
+ ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
+ ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
+ in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
+ ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_rx_glbl_pram->rstate,
+ ugeth->p_rx_glbl_pram->rstate);
+ ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->mrblr,
+ in_be16(&ugeth->p_rx_glbl_pram->mrblr));
+ ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
+ ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->mflr,
+ in_be16(&ugeth->p_rx_glbl_pram->mflr));
+ ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->minflr,
+ in_be16(&ugeth->p_rx_glbl_pram->minflr));
+ ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->maxd1,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd1));
+ ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->maxd2,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd2));
+ ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->ecamptr,
+ in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
+ ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l2qt,
+ in_be32(&ugeth->p_rx_glbl_pram->l2qt));
+ ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
+ ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
+ ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
+ ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
+ ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
+ ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
+ ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
+ ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
+ ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->vlantype,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantype));
+ ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->vlantci,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantci));
+ for (i = 0; i < 64; i++)
+ ugeth_info
+ ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
+ i,
+ (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
+ ugeth->p_rx_glbl_pram->addressfiltering[i]);
+ ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
+ in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
+ }
+ if (ugeth->p_send_q_mem_reg) {
+ ugeth_info("Send Q memory registers:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_send_q_mem_reg);
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ ugeth_info("SQQD[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
+ mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
+ sizeof(ucc_geth_send_queue_qd_t));
+ }
+ }
+ if (ugeth->p_scheduler) {
+ ugeth_info("Scheduler:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
+ mem_disp((u8 *) ugeth->p_scheduler,
+ sizeof(*ugeth->p_scheduler));
+ }
+ if (ugeth->p_tx_fw_statistics_pram) {
+ ugeth_info("TX FW statistics pram:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_tx_fw_statistics_pram);
+ mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
+ sizeof(*ugeth->p_tx_fw_statistics_pram));
+ }
+ if (ugeth->p_rx_fw_statistics_pram) {
+ ugeth_info("RX FW statistics pram:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_rx_fw_statistics_pram);
+ mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
+ sizeof(*ugeth->p_rx_fw_statistics_pram));
+ }
+ if (ugeth->p_rx_irq_coalescing_tbl) {
+ ugeth_info("RX IRQ coalescing tables:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_rx_irq_coalescing_tbl);
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ ugeth_info("RX IRQ coalescing table entry[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i]);
+ ugeth_info
+ ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingmaxvalue,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingmaxvalue));
+ ugeth_info
+ ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingcounter,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingcounter));
+ }
+ }
+ if (ugeth->p_rx_bd_qs_tbl) {
+ ugeth_info("RX BD QS tables:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ ugeth_info("RX BD QS table[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i]);
+ ugeth_info
+ ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
+ ugeth_info
+ ("bdptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
+ ugeth_info
+ ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].
+ externalbdbaseptr));
+ ugeth_info
+ ("externalbdptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
+ ugeth_info("ucode RX Prefetched BDs:");
+ ugeth_info("Base address: 0x%08x",
+ (u32)
+ qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)));
+ mem_disp((u8 *)
+ qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)),
+ sizeof(ucc_geth_rx_prefetched_bds_t));
+ }
+ }
+ if (ugeth->p_init_enet_param_shadow) {
+ int size;
+ ugeth_info("Init enet param shadow:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_init_enet_param_shadow);
+ mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
+ sizeof(*ugeth->p_init_enet_param_shadow));
+
+ size = sizeof(ucc_geth_thread_rx_pram_t);
+ if (ugeth->ug_info->rxExtendedFiltering) {
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
+ if (ugeth->ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
+ if (ugeth->ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
+ }
+
+ dump_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_TX,
+ sizeof(ucc_geth_thread_tx_pram_t),
+ ugeth->ug_info->riscTx, 0);
+ dump_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ rxthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
+ ugeth->ug_info->riscRx, 1);
+ }
+}
+#endif /* DEBUG */
+
+static void init_default_reg_vals(volatile u32 *upsmr_register,
+ volatile u32 *maccfg1_register,
+ volatile u32 *maccfg2_register)
+{
+ out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
+ out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
+ out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
+}
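+
+/* Several of the init_* helpers below read-modify-write individual
+ * fields of the three registers given their default values here. */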
+
+static int init_half_duplex_params(int alt_beb,
+ int back_pressure_no_backoff,
+ int no_backoff,
+ int excess_defer,
+ u8 alt_beb_truncation,
+ u8 max_retransmissions,
+ u8 collision_window,
+ volatile u32 *hafdup_register)
+{
+ u32 value = 0;
+
+ if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
+ (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
+ (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
+ return -EINVAL;
+
+ value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
+
+ if (alt_beb)
+ value |= HALFDUP_ALT_BEB;
+ if (back_pressure_no_backoff)
+ value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
+ if (no_backoff)
+ value |= HALFDUP_NO_BACKOFF;
+ if (excess_defer)
+ value |= HALFDUP_EXCESSIVE_DEFER;
+
+ value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
+
+ value |= collision_window;
+
+ out_be32(hafdup_register, value);
+ return 0;
+}
+
+static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
+ u8 non_btb_ipg,
+ u8 min_ifg,
+ u8 btb_ipg,
+ volatile u32 *ipgifg_register)
+{
+ u32 value = 0;
+
+ /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
+ IPG part 2 */
+ if (non_btb_cs_ipg > non_btb_ipg)
+ return -EINVAL;
+
+ if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
+ (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
+ /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
+ (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
+ return -EINVAL;
+
+ value |=
+ ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
+ IPGIFG_NBTB_CS_IPG_MASK);
+ value |=
+ ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
+ IPGIFG_NBTB_IPG_MASK);
+ value |=
+ ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
+ IPGIFG_MIN_IFG_MASK);
+ value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
+
+ out_be32(ipgifg_register, value);
+ return 0;
+}
+
+static int init_flow_control_params(u32 automatic_flow_control_mode,
+ int rx_flow_control_enable,
+ int tx_flow_control_enable,
+ u16 pause_period,
+ u16 extension_field,
+ volatile u32 *upsmr_register,
+ volatile u32 *uempr_register,
+ volatile u32 *maccfg1_register)
+{
+ u32 value = 0;
+
+ /* Set UEMPR register */
+ value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
+ value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
+ out_be32(uempr_register, value);
+
+ /* Set UPSMR register */
+ value = in_be32(upsmr_register);
+ value |= automatic_flow_control_mode;
+ out_be32(upsmr_register, value);
+
+ value = in_be32(maccfg1_register);
+ if (rx_flow_control_enable)
+ value |= MACCFG1_FLOW_RX;
+ if (tx_flow_control_enable)
+ value |= MACCFG1_FLOW_TX;
+ out_be32(maccfg1_register, value);
+
+ return 0;
+}
+
+static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
+ int auto_zero_hardware_statistics,
+ volatile u32 *upsmr_register,
+ volatile u16 *uescr_register)
+{
+ u32 upsmr_value = 0;
+ u16 uescr_value = 0;
+ /* Enable hardware statistics gathering if requested */
+ if (enable_hardware_statistics) {
+ upsmr_value = in_be32(upsmr_register);
+ upsmr_value |= UPSMR_HSE;
+ out_be32(upsmr_register, upsmr_value);
+ }
+
+ /* Clear hardware statistics counters */
+ uescr_value = in_be16(uescr_register);
+ uescr_value |= UESCR_CLRCNT;
+ /* Automatically zero hardware statistics counters on read,
+ if requested */
+ if (auto_zero_hardware_statistics)
+ uescr_value |= UESCR_AUTOZ;
+ out_be16(uescr_register, uescr_value);
+
+ return 0;
+}
+
+static int init_firmware_statistics_gathering_mode(int
+ enable_tx_firmware_statistics,
+ int enable_rx_firmware_statistics,
+ volatile u32 *tx_rmon_base_ptr,
+ u32 tx_firmware_statistics_structure_address,
+ volatile u32 *rx_rmon_base_ptr,
+ u32 rx_firmware_statistics_structure_address,
+ volatile u16 *temoder_register,
+ volatile u32 *remoder_register)
+{
+ /* Note: this function does not check if
+ the parameters it receives are NULL */
+ u16 temoder_value;
+ u32 remoder_value;
+
+ if (enable_tx_firmware_statistics) {
+ out_be32(tx_rmon_base_ptr,
+ tx_firmware_statistics_structure_address);
+ temoder_value = in_be16(temoder_register);
+ temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
+ out_be16(temoder_register, temoder_value);
+ }
+
+ if (enable_rx_firmware_statistics) {
+ out_be32(rx_rmon_base_ptr,
+ rx_firmware_statistics_structure_address);
+ remoder_value = in_be32(remoder_register);
+ remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
+ out_be32(remoder_register, remoder_value);
+ }
+
+ return 0;
+}
+
+static int init_mac_station_addr_regs(u8 address_byte_0,
+ u8 address_byte_1,
+ u8 address_byte_2,
+ u8 address_byte_3,
+ u8 address_byte_4,
+ u8 address_byte_5,
+ volatile u32 *macstnaddr1_register,
+ volatile u32 *macstnaddr2_register)
+{
+ u32 value = 0;
+
+ /* Example: for a station address of 0x12345678ABCD, */
+ /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
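+ /* Working that example through the shifts below gives */
+ /* MACSTNADDR1 = 0xCDAB7856 and MACSTNADDR2 = 0x34120000 */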
+
+ /* MACSTNADDR1 Register: */
+
+ /* 0 7 8 15 */
+ /* station address byte 5 station address byte 4 */
+ /* 16 23 24 31 */
+ /* station address byte 3 station address byte 2 */
+ value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
+ value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
+ value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
+ value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
+
+ out_be32(macstnaddr1_register, value);
+
+ /* MACSTNADDR2 Register: */
+
+ /* 0 7 8 15 */
+ /* station address byte 1 station address byte 0 */
+ /* 16 23 24 31 */
+ /* reserved reserved */
+ value = 0;
+ value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
+ value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
+
+ out_be32(macstnaddr2_register, value);
+
+ return 0;
+}
+
+static int init_mac_duplex_mode(int full_duplex,
+ int limited_to_full_duplex,
+ volatile u32 *maccfg2_register)
+{
+ u32 value = 0;
+
+ /* some interfaces must work in full duplex mode */
+ if ((full_duplex == 0) && (limited_to_full_duplex == 1))
+ return -EINVAL;
+
+ value = in_be32(maccfg2_register);
+
+ if (full_duplex)
+ value |= MACCFG2_FDX;
+ else
+ value &= ~MACCFG2_FDX;
+
+ out_be32(maccfg2_register, value);
+ return 0;
+}
+
+static int init_check_frame_length_mode(int length_check,
+ volatile u32 *maccfg2_register)
+{
+ u32 value = 0;
+
+ value = in_be32(maccfg2_register);
+
+ if (length_check)
+ value |= MACCFG2_LC;
+ else
+ value &= ~MACCFG2_LC;
+
+ out_be32(maccfg2_register, value);
+ return 0;
+}
+
+static int init_preamble_length(u8 preamble_length,
+ volatile u32 *maccfg2_register)
+{
+ u32 value = 0;
+
+ if ((preamble_length < 3) || (preamble_length > 7))
+ return -EINVAL;
+
+ value = in_be32(maccfg2_register);
+ value &= ~MACCFG2_PREL_MASK;
+ value |= (preamble_length << MACCFG2_PREL_SHIFT);
+ out_be32(maccfg2_register, value);
+ return 0;
+}
+
+static int init_mii_management_configuration(int reset_mgmt,
+ int preamble_supress,
+ volatile u32 *miimcfg_register,
+ volatile u32 *miimind_register)
+{
+ int timeout = PHY_INIT_TIMEOUT; /* signed so the expiry check below works */
+ u32 value = 0;
+
+ value = in_be32(miimcfg_register);
+ if (reset_mgmt) {
+ value |= MIIMCFG_RESET_MANAGEMENT;
+ out_be32(miimcfg_register, value);
+ }
+
+ value = 0;
+
+ if (preamble_supress)
+ value |= MIIMCFG_NO_PREAMBLE;
+
+ value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT;
+ out_be32(miimcfg_register, value);
+
+ /* Wait until the bus is free */
+ while ((in_be32(miimind_register) & MIIMIND_BUSY) && timeout--)
+ cpu_relax();
+
+ if (timeout <= 0) {
+ ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int init_rx_parameters(int reject_broadcast,
+ int receive_short_frames,
+ int promiscuous, volatile u32 *upsmr_register)
+{
+ u32 value = 0;
+
+ value = in_be32(upsmr_register);
+
+ if (reject_broadcast)
+ value |= UPSMR_BRO;
+ else
+ value &= ~UPSMR_BRO;
+
+ if (receive_short_frames)
+ value |= UPSMR_RSH;
+ else
+ value &= ~UPSMR_RSH;
+
+ if (promiscuous)
+ value |= UPSMR_PRO;
+ else
+ value &= ~UPSMR_PRO;
+
+ out_be32(upsmr_register, value);
+
+ return 0;
+}
+
+static int init_max_rx_buff_len(u16 max_rx_buf_len,
+ volatile u16 *mrblr_register)
+{
+ /* max_rx_buf_len value must be a multiple of 128 */
+ if ((max_rx_buf_len == 0)
+ || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
+ return -EINVAL;
+
+ out_be16(mrblr_register, max_rx_buf_len);
+ return 0;
+}
+
+static int init_min_frame_len(u16 min_frame_length,
+ volatile u16 *minflr_register,
+ volatile u16 *mrblr_register)
+{
+ u16 mrblr_value = 0;
+
+ mrblr_value = in_be16(mrblr_register);
+ if (min_frame_length >= (mrblr_value - 4))
+ return -EINVAL;
+
+ out_be16(minflr_register, min_frame_length);
+ return 0;
+}
+
+static int adjust_enet_interface(ucc_geth_private_t *ugeth)
+{
+ ucc_geth_info_t *ug_info;
+ ucc_geth_t *ug_regs;
+ ucc_fast_t *uf_regs;
+ enet_speed_e speed;
+ int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm =
+ 0, limited_to_full_duplex = 0;
+ u32 upsmr, maccfg2, utbipar, tbiBaseAddress;
+ u16 value;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ug_info = ugeth->ug_info;
+ ug_regs = ugeth->ug_regs;
+ uf_regs = ugeth->uccf->uf_regs;
+
+ /* Analyze enet_interface according to Interface Mode Configuration
+ table */
+ ret_val =
+ get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm,
+ &rpm, &tbi, &limited_to_full_duplex);
+ if (ret_val != 0) {
+ ugeth_err
+ ("%s: half duplex not supported in requested configuration.",
+ __FUNCTION__);
+ return ret_val;
+ }
+
+ /* Set MACCFG2 */
+ maccfg2 = in_be32(&ug_regs->maccfg2);
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT))
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (speed == ENET_SPEED_1000BT)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+ maccfg2 |= ug_info->padAndCrc;
+ out_be32(&ug_regs->maccfg2, maccfg2);
+
+ /* Set UPSMR */
+ upsmr = in_be32(&uf_regs->upsmr);
+ upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
+ if (rpm)
+ upsmr |= UPSMR_RPM;
+ if (r10m)
+ upsmr |= UPSMR_R10M;
+ if (tbi)
+ upsmr |= UPSMR_TBIM;
+ if (rmm)
+ upsmr |= UPSMR_RMM;
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ /* Set UTBIPAR */
+ utbipar = in_be32(&ug_regs->utbipar);
+ utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
+ if (tbi)
+ utbipar |=
+ (ug_info->phy_address +
+ ugeth->ug_info->uf_info.
+ ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
+ else
+ utbipar |=
+ (0x10 +
+ ugeth->ug_info->uf_info.
+ ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
+ out_be32(&ug_regs->utbipar, utbipar);
+
+ /* Disable autonegotiation in tbi mode, because by default it
+ comes up in autonegotiation mode. */
+ /* Note that this depends on proper setting in utbipar register. */
+ if (tbi) {
+ tbiBaseAddress = in_be32(&ug_regs->utbipar);
+ tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
+ tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
+ value =
+ ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress,
+ ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation (BMCR_ANENABLE bit) */
+ ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress,
+ ENET_TBI_MII_CR, value);
+ }
+
+ ret_val = init_mac_duplex_mode(1,
+ limited_to_full_duplex,
+ &ug_regs->maccfg2);
+ if (ret_val != 0) {
+ ugeth_err
+ ("%s: half duplex not supported in requested configuration.",
+ __FUNCTION__);
+ return ret_val;
+ }
+
+ init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
+
+ ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
+ if (ret_val != 0) {
+ ugeth_err
+ ("%s: Preamble length must be between 3 and 7 inclusive.",
+ __FUNCTION__);
+ return ret_val;
+ }
+
+ return 0;
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the ugeth structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void adjust_link(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_geth_t *ug_regs;
+ u32 tempval;
+ struct ugeth_mii_info *mii_info = ugeth->mii_info;
+
+ ug_regs = ugeth->ug_regs;
+
+ if (mii_info->link) {
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (mii_info->duplex != ugeth->oldduplex) {
+ if (!(mii_info->duplex)) {
+ tempval = in_be32(&ug_regs->maccfg2);
+ tempval &= ~(MACCFG2_FDX);
+ out_be32(&ug_regs->maccfg2, tempval);
+
+ ugeth_info("%s: Half Duplex", dev->name);
+ } else {
+ tempval = in_be32(&ug_regs->maccfg2);
+ tempval |= MACCFG2_FDX;
+ out_be32(&ug_regs->maccfg2, tempval);
+
+ ugeth_info("%s: Full Duplex", dev->name);
+ }
+
+ ugeth->oldduplex = mii_info->duplex;
+ }
+
+ if (mii_info->speed != ugeth->oldspeed) {
+ switch (mii_info->speed) {
+ case 1000:
+#ifdef CONFIG_MPC836x
+/* FIXME: This code works around a 100 Mbps bug;
+remove it when the bug is fixed!!! */
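+/* Note: register 0x1b written below is outside the IEEE-standard MII
+ * register set (registers 16-31 are vendor specific); this workaround
+ * rewrites it and then resets the PHY through its control register. */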
+ if (ugeth->ug_info->enet_interface ==
+ ENET_1000_GMII)
+ /* Run the commands which initialize the PHY */
+ {
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->
+ dev, mii_info->mii_id, 0x1b);
+ tempval |= 0x000f;
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, 0x1b,
+ (u16) tempval);
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->
+ dev, mii_info->mii_id,
+ MII_BMCR);
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, MII_BMCR,
+ (u16) (tempval | BMCR_RESET));
+ } else if (ugeth->ug_info->enet_interface ==
+ ENET_1000_RGMII)
+ /* Run the commands which initialize the PHY */
+ {
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->
+ dev, mii_info->mii_id, 0x1b);
+ tempval = (tempval & ~0x000f) | 0x000b;
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, 0x1b,
+ (u16) tempval);
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->
+ dev, mii_info->mii_id,
+ MII_BMCR);
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, MII_BMCR,
+ (u16) (tempval | BMCR_RESET));
+ }
+ msleep(4000);
+#endif /* CONFIG_MPC836x */
+ adjust_enet_interface(ugeth);
+ break;
+ case 100:
+ case 10:
+#ifdef CONFIG_MPC836x
+/* FIXME: This code works around a 100 Mbps bug;
+remove these lines when the bug is fixed!!! */
+ ugeth->ug_info->enet_interface = ENET_100_RGMII;
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->dev,
+ mii_info->mii_id,
+ 0x1b);
+ tempval = (tempval & ~0x000f) | 0x000b;
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, 0x1b,
+ (u16) tempval);
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->dev,
+ mii_info->mii_id,
+ MII_BMCR);
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, MII_BMCR,
+ (u16) (tempval |
+ BMCR_RESET));
+ msleep(4000);
+#endif /* CONFIG_MPC836x */
+ adjust_enet_interface(ugeth);
+ break;
+ default:
+ ugeth_warn
+ ("%s: Ack! Speed (%d) is not 10/100/1000!",
+ dev->name, mii_info->speed);
+ break;
+ }
+
+ ugeth_info("%s: Speed %dBT", dev->name,
+ mii_info->speed);
+
+ ugeth->oldspeed = mii_info->speed;
+ }
+
+ if (!ugeth->oldlink) {
+ ugeth_info("%s: Link is up", dev->name);
+ ugeth->oldlink = 1;
+ netif_carrier_on(dev);
+ netif_schedule(dev);
+ }
+ } else {
+ if (ugeth->oldlink) {
+ ugeth_info("%s: Link is down", dev->name);
+ ugeth->oldlink = 0;
+ ugeth->oldspeed = 0;
+ ugeth->oldduplex = -1;
+ netif_carrier_off(dev);
+ }
+ }
+}
+
+/* Configure the PHY for dev.
+ * Returns 0 on success, -1 on failure.
+ */
+static int init_phy(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct phy_info *curphy;
+ ucc_mii_mng_t *mii_regs;
+ struct ugeth_mii_info *mii_info;
+ int err;
+
+ mii_regs = &ugeth->ug_regs->miimng;
+
+ ugeth->oldlink = 0;
+ ugeth->oldspeed = 0;
+ ugeth->oldduplex = -1;
+
+ mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL);
+
+ if (NULL == mii_info) {
+ ugeth_err("%s: Could not allocate mii_info", dev->name);
+ return -ENOMEM;
+ }
+
+ mii_info->mii_regs = mii_regs;
+ mii_info->speed = SPEED_1000;
+ mii_info->duplex = DUPLEX_FULL;
+ mii_info->pause = 0;
+ mii_info->link = 0;
+
+ mii_info->advertising = (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ mii_info->autoneg = 1;
+
+ mii_info->mii_id = ugeth->ug_info->phy_address;
+
+ mii_info->dev = dev;
+
+ mii_info->mdio_read = &read_phy_reg;
+ mii_info->mdio_write = &write_phy_reg;
+
+ ugeth->mii_info = mii_info;
+
+ spin_lock_irq(&ugeth->lock);
+
+ /* Set this UCC to be the master of the MII management */
+ ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
+
+ if (init_mii_management_configuration(1,
+ ugeth->ug_info->
+ miiPreambleSupress,
+ &mii_regs->miimcfg,
+ &mii_regs->miimind)) {
+ ugeth_err("%s: The MII Bus is stuck!", dev->name);
+ err = -1;
+ goto bus_fail;
+ }
+
+ spin_unlock_irq(&ugeth->lock);
+
+ /* get info for this PHY */
+ curphy = get_phy_info(ugeth->mii_info);
+
+ if (curphy == NULL) {
+ ugeth_err("%s: No PHY found", dev->name);
+ err = -1;
+ goto no_phy;
+ }
+
+ mii_info->phyinfo = curphy;
+
+ /* Run the commands which initialize the PHY */
+ if (curphy->init) {
+ err = curphy->init(ugeth->mii_info);
+ if (err)
+ goto phy_init_fail;
+ }
+
+ return 0;
+
+ phy_init_fail:
+ no_phy:
+ bus_fail:
+ kfree(mii_info);
+
+ return err;
+}
+
+#ifdef CONFIG_UGETH_TX_ON_DEMOND
+static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_transmit_on_demand(ugeth->uccf);
+
+ return 0;
+}
+#endif
+
+static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ u32 cecr_subblock;
+ u32 temp;
+
+ uccf = ugeth->uccf;
+
+ /* Mask GRACEFUL STOP TX interrupt bit and clear it */
+ temp = in_be32(uccf->p_uccm);
+ temp &= ~UCCE_GRA;
+ out_be32(uccf->p_uccm, temp);
+ out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
+
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
+ (u8) QE_CR_PROTOCOL_ETHERNET, 0);
+
+ /* Wait for command to complete */
+ do {
+ temp = in_be32(uccf->p_ucce);
+ } while (!(temp & UCCE_GRA));
+
+ uccf->stopped_tx = 1;
+
+ return 0;
+}
+
+static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth)
+{
+ ucc_fast_private_t *uccf;
+ u32 cecr_subblock;
+ u8 temp;
+
+ uccf = ugeth->uccf;
+
+ /* Clear acknowledge bit */
+ temp = ugeth->p_rx_glbl_pram->rxgstpack;
+ temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
+ ugeth->p_rx_glbl_pram->rxgstpack = temp;
+
+ /* Keep issuing command and checking acknowledge bit until
+ it is asserted, according to spec */
+ do {
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
+ ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
+ (u8) QE_CR_PROTOCOL_ETHERNET, 0);
+
+ temp = ugeth->p_rx_glbl_pram->rxgstpack;
+ } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
+
+ uccf->stopped_rx = 1;
+
+ return 0;
+}
+
+static int ugeth_restart_tx(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ u32 cecr_subblock;
+
+ uccf = ugeth->uccf;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
+ 0);
+ uccf->stopped_tx = 0;
+
+ return 0;
+}
+
+static int ugeth_restart_rx(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ u32 cecr_subblock;
+
+ uccf = ugeth->uccf;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
+ 0);
+ uccf->stopped_rx = 0;
+
+ return 0;
+}
+
+static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode)
+{
+ ucc_fast_private_t *uccf;
+ int enabled_tx, enabled_rx;
+
+ uccf = ugeth->uccf;
+
+ /* check if the UCC number is in range. */
+ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ enabled_tx = uccf->enabled_tx;
+ enabled_rx = uccf->enabled_rx;
+
+ /* Get Tx and Rx going again, in case this channel was actively
+ disabled. */
+ if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
+ ugeth_restart_tx(ugeth);
+ if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
+ ugeth_restart_rx(ugeth);
+
+ ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
+
+ return 0;
+
+}
+
+static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode)
+{
+ ucc_fast_private_t *uccf;
+
+ uccf = ugeth->uccf;
+
+ /* check if the UCC number is in range. */
+ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Stop any transmissions */
+ if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
+ ugeth_graceful_stop_tx(ugeth);
+
+ /* Stop any receptions */
+ if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
+ ugeth_graceful_stop_rx(ugeth);
+
+ ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
+
+ return 0;
+}
+
+static void ugeth_dump_regs(ucc_geth_private_t *ugeth)
+{
+#ifdef DEBUG
+ ucc_fast_dump_regs(ugeth->uccf);
+ dump_regs(ugeth);
+ dump_bds(ugeth);
+#endif
+}
+
+#ifdef CONFIG_UGETH_FILTERING
+static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t *
+ p_UccGethTadParams,
+ qe_fltr_tad_t *qe_fltr_tad)
+{
+ u16 temp;
+
+ /* Zero serialized TAD */
+ memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
+
+ qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
+ if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
+ (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
+ || (p_UccGethTadParams->vnontag_op !=
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
+ )
+ qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
+ if (p_UccGethTadParams->reject_frame)
+ qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
+ temp =
+ (u16) (((u16) p_UccGethTadParams->
+ vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
+ qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
+
+ qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
+ if (p_UccGethTadParams->vnontag_op ==
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
+ qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
+ qe_fltr_tad->serialized[1] |=
+ p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
+
+ qe_fltr_tad->serialized[2] |=
+ p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
+ /* upper bits */
+ qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
+ /* lower bits */
+ qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
+
+ return 0;
+}
+
+static enet_addr_container_t
+ *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr)
+{
+ enet_addr_container_t *enet_addr_cont;
+ struct list_head *p_lh;
+ u16 i, num;
+ int32_t j;
+ u8 *p_counter;
+
+ if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
+ p_lh = &ugeth->group_hash_q;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else {
+ p_lh = &ugeth->ind_hash_q;
+ p_counter = &(ugeth->numIndAddrInHash);
+ }
+
+ if (!p_lh)
+ return NULL;
+
+ num = *p_counter;
+
+ for (i = 0; i < num; i++) {
+ enet_addr_cont =
+ (enet_addr_container_t *)
+ ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
+ for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
+ if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
+ break;
+ if (j == 0)
+ return enet_addr_cont; /* Found */
+ }
+ enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
+ }
+ return NULL;
+}
+
+static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr)
+{
+ ucc_geth_enet_address_recognition_location_e location;
+ enet_addr_container_t *enet_addr_cont;
+ struct list_head *p_lh;
+ u8 i;
+ u32 limit;
+ u8 *p_counter;
+
+ if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
+ p_lh = &ugeth->group_hash_q;
+ limit = ugeth->ug_info->maxGroupAddrInHash;
+ location =
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else {
+ p_lh = &ugeth->ind_hash_q;
+ limit = ugeth->ug_info->maxIndAddrInHash;
+ location =
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
+ p_counter = &(ugeth->numIndAddrInHash);
+ }
+
+ if ((enet_addr_cont =
+ ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
+ list_add(&enet_addr_cont->node, p_lh); /* Put it back */
+ return 0;
+ }
+ if ((!p_lh) || (!(*p_counter < limit)))
+ return -EBUSY;
+ if (!(enet_addr_cont = get_enet_addr_container()))
+ return -ENOMEM;
+ for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
+ (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
+ enet_addr_cont->location = location;
+ enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
+ ++(*p_counter);
+
+ hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
+
+ return 0;
+}
+
+static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ enet_addr_container_t *enet_addr_cont;
+ ucc_fast_private_t *uccf;
+ comm_dir_e comm_dir;
+ u16 i, num;
+ struct list_head *p_lh;
+ u32 *addr_h, *addr_l;
+ u8 *p_counter;
+
+ uccf = ugeth->uccf;
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ if (!
+ (enet_addr_cont =
+ ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr)))
+ return -ENOENT;
+
+ /* It's been found and removed from the CQ. */
+ /* Now destroy its container */
+ put_enet_addr_container(enet_addr_cont);
+
+ if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
+ addr_h = &(p_82xx_addr_filt->gaddr_h);
+ addr_l = &(p_82xx_addr_filt->gaddr_l);
+ p_lh = &ugeth->group_hash_q;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else {
+ addr_h = &(p_82xx_addr_filt->iaddr_h);
+ addr_l = &(p_82xx_addr_filt->iaddr_l);
+ p_lh = &ugeth->ind_hash_q;
+ p_counter = &(ugeth->numIndAddrInHash);
+ }
+
+ comm_dir = 0;
+ if (uccf->enabled_tx)
+ comm_dir |= COMM_DIR_TX;
+ if (uccf->enabled_rx)
+ comm_dir |= COMM_DIR_RX;
+ if (comm_dir)
+ ugeth_disable(ugeth, comm_dir);
+
+ /* Clear the hash table. */
+ out_be32(addr_h, 0x00000000);
+ out_be32(addr_l, 0x00000000);
+
+ /* Add all remaining CQ elements back into hash */
+ num = --(*p_counter);
+ for (i = 0; i < num; i++) {
+ enet_addr_cont =
+ (enet_addr_container_t *)
+ ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
+ hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
+ enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
+ }
+
+ if (comm_dir)
+ ugeth_enable(ugeth, comm_dir);
+
+ return 0;
+}
+#endif /* CONFIG_UGETH_FILTERING */
+
+static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
+ ugeth,
+ enet_addr_type_e
+ enet_addr_type)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ ucc_fast_private_t *uccf;
+ comm_dir_e comm_dir;
+ struct list_head *p_lh;
+ u16 i, num;
+ u32 *addr_h, *addr_l;
+ u8 *p_counter;
+
+ uccf = ugeth->uccf;
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
+ addr_h = &(p_82xx_addr_filt->gaddr_h);
+ addr_l = &(p_82xx_addr_filt->gaddr_l);
+ p_lh = &ugeth->group_hash_q;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
+ addr_h = &(p_82xx_addr_filt->iaddr_h);
+ addr_l = &(p_82xx_addr_filt->iaddr_l);
+ p_lh = &ugeth->ind_hash_q;
+ p_counter = &(ugeth->numIndAddrInHash);
+ } else
+ return -EINVAL;
+
+ comm_dir = 0;
+ if (uccf->enabled_tx)
+ comm_dir |= COMM_DIR_TX;
+ if (uccf->enabled_rx)
+ comm_dir |= COMM_DIR_RX;
+ if (comm_dir)
+ ugeth_disable(ugeth, comm_dir);
+
+ /* Clear the hash table. */
+ out_be32(addr_h, 0x00000000);
+ out_be32(addr_l, 0x00000000);
+
+ if (!p_lh)
+ return 0;
+
+ num = *p_counter;
+
+ /* Delete all remaining CQ elements */
+ for (i = 0; i < num; i++)
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
+
+ *p_counter = 0;
+
+ if (comm_dir)
+ ugeth_enable(ugeth, comm_dir);
+
+ return 0;
+}
+
+#ifdef CONFIG_UGETH_FILTERING
+static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr,
+ u8 paddr_num)
+{
+ int i;
+
+ if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
+ ugeth_warn
+ ("%s: multicast address added to paddr will have no "
+ "effect - is this what you wanted?",
+ __FUNCTION__);
+
+ ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
+ /* store address in our database */
+ for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
+ ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
+ /* put in hardware */
+ return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
+}
+#endif /* CONFIG_UGETH_FILTERING */
+
+static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth,
+ u8 paddr_num)
+{
+ ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
+ return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
+}
+
+static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
+{
+ u16 i, j;
+ u8 *bd;
+
+ if (!ugeth)
+ return;
+
+ if (ugeth->uccf)
+ ucc_fast_free(ugeth->uccf);
+
+ if (ugeth->p_thread_data_tx) {
+ qe_muram_free(ugeth->thread_dat_tx_offset);
+ ugeth->p_thread_data_tx = NULL;
+ }
+ if (ugeth->p_thread_data_rx) {
+ qe_muram_free(ugeth->thread_dat_rx_offset);
+ ugeth->p_thread_data_rx = NULL;
+ }
+ if (ugeth->p_exf_glbl_param) {
+ qe_muram_free(ugeth->exf_glbl_param_offset);
+ ugeth->p_exf_glbl_param = NULL;
+ }
+ if (ugeth->p_rx_glbl_pram) {
+ qe_muram_free(ugeth->rx_glbl_pram_offset);
+ ugeth->p_rx_glbl_pram = NULL;
+ }
+ if (ugeth->p_tx_glbl_pram) {
+ qe_muram_free(ugeth->tx_glbl_pram_offset);
+ ugeth->p_tx_glbl_pram = NULL;
+ }
+ if (ugeth->p_send_q_mem_reg) {
+ qe_muram_free(ugeth->send_q_mem_reg_offset);
+ ugeth->p_send_q_mem_reg = NULL;
+ }
+ if (ugeth->p_scheduler) {
+ qe_muram_free(ugeth->scheduler_offset);
+ ugeth->p_scheduler = NULL;
+ }
+ if (ugeth->p_tx_fw_statistics_pram) {
+ qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
+ ugeth->p_tx_fw_statistics_pram = NULL;
+ }
+ if (ugeth->p_rx_fw_statistics_pram) {
+ qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
+ ugeth->p_rx_fw_statistics_pram = NULL;
+ }
+ if (ugeth->p_rx_irq_coalescing_tbl) {
+ qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
+ ugeth->p_rx_irq_coalescing_tbl = NULL;
+ }
+ if (ugeth->p_rx_bd_qs_tbl) {
+ qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
+ ugeth->p_rx_bd_qs_tbl = NULL;
+ }
+ if (ugeth->p_init_enet_param_shadow) {
+ return_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ rxthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_RX,
+ ugeth->ug_info->riscRx, 1);
+ return_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_TX,
+ ugeth->ug_info->riscTx, 0);
+ kfree(ugeth->p_init_enet_param_shadow);
+ ugeth->p_init_enet_param_shadow = NULL;
+ }
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ bd = ugeth->p_tx_bd_ring[i];
+ for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
+ if (ugeth->tx_skbuff[i][j]) {
+ dma_unmap_single(NULL,
+ BD_BUFFER_ARG(bd),
+ (BD_STATUS_AND_LENGTH(bd) &
+ BD_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
+ ugeth->tx_skbuff[i][j] = NULL;
+ }
+ /* Advance to the next BD, as the Rx cleanup loop below does */
+ bd += UCC_GETH_SIZE_OF_BD;
+ }
+
+ kfree(ugeth->tx_skbuff[i]);
+
+ if (ugeth->p_tx_bd_ring[i]) {
+ if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_SYSTEM)
+ kfree((void *)ugeth->tx_bd_ring_offset[i]);
+ else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM)
+ qe_muram_free(ugeth->tx_bd_ring_offset[i]);
+ ugeth->p_tx_bd_ring[i] = NULL;
+ }
+ }
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ /* Return existing data buffers in ring */
+ bd = ugeth->p_rx_bd_ring[i];
+ for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
+ if (ugeth->rx_skbuff[i][j]) {
+ dma_unmap_single(NULL, BD_BUFFER(bd),
+ ugeth->ug_info->
+ uf_info.
+ max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(ugeth->
+ rx_skbuff[i][j]);
+ ugeth->rx_skbuff[i][j] = NULL;
+ }
+ bd += UCC_GETH_SIZE_OF_BD;
+ }
+
+ kfree(ugeth->rx_skbuff[i]);
+
+ if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_SYSTEM)
+ kfree((void *)ugeth->rx_bd_ring_offset[i]);
+ else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM)
+ qe_muram_free(ugeth->rx_bd_ring_offset[i]);
+ ugeth->p_rx_bd_ring[i] = NULL;
+ }
+ }
+ while (!list_empty(&ugeth->group_hash_q))
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY
+ (dequeue(&ugeth->group_hash_q)));
+ while (!list_empty(&ugeth->ind_hash_q))
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY
+ (dequeue(&ugeth->ind_hash_q)));
+
+}
+
+static void ucc_geth_set_multi(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth;
+ struct dev_mc_list *dmi;
+ ucc_fast_t *uf_regs;
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ enet_addr_t tempaddr;
+ u8 *mcptr, *tdptr;
+ int i, j;
+
+ ugeth = netdev_priv(dev);
+
+ uf_regs = ugeth->uccf->uf_regs;
+
+ if (dev->flags & IFF_PROMISC) {
+
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
+
+ } else {
+
+ out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) & ~UPSMR_PRO);
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
+ p_rx_glbl_pram->addressfiltering;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Catch all multicast addresses, so set the
+ * filter to all 1's.
+ */
+ out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
+ out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
+ } else {
+ /* Clear filter and add the addresses in the list.
+ */
+ out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
+ out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
+
+ dmi = dev->mc_list;
+
+ for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
+
+ /* Only support group multicast for now.
+ */
+ if (!(dmi->dmi_addr[0] & 1))
+ continue;
+
+ /* The address in dmi_addr is LSB first,
+ * and taddr is MSB first. We have to
+ * copy bytes MSB first from dmi_addr.
+ */
+ mcptr = (u8 *) dmi->dmi_addr + 5;
+ tdptr = (u8 *) & tempaddr;
+ for (j = 0; j < 6; j++)
+ *tdptr++ = *mcptr--;
+
+ /* Ask CPM to run CRC and set bit in
+ * filter mask.
+ */
+ hw_add_addr_in_hash(ugeth, &tempaddr);
+
+ }
+ }
+ }
+}
+
+static void ucc_geth_stop(ucc_geth_private_t *ugeth)
+{
+ ucc_geth_t *ug_regs = ugeth->ug_regs;
+ u32 tempval;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ /* Disable the controller */
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ /* Tell the kernel the link is down */
+ ugeth->mii_info->link = 0;
+ adjust_link(ugeth->dev);
+
+ /* Mask all interrupts */
+ out_be32(ugeth->uccf->p_uccm, 0x00000000);
+
+ /* Clear all interrupts */
+ out_be32(ugeth->uccf->p_ucce, 0xffffffff);
+
+ /* Disable Rx and Tx */
+ tempval = in_be32(&ug_regs->maccfg1);
+ tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
+ out_be32(&ug_regs->maccfg1, tempval);
+
+ if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
+ /* Clear any pending interrupts */
+ mii_clear_phy_interrupt(ugeth->mii_info);
+
+ /* Disable PHY Interrupts */
+ mii_configure_phy_interrupt(ugeth->mii_info,
+ MII_INTERRUPT_DISABLED);
+ }
+
+ free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
+
+ if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
+ free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev);
+ } else {
+ del_timer_sync(&ugeth->phy_info_timer);
+ }
+
+ ucc_geth_memclean(ugeth);
+}
+
+static int ucc_geth_startup(ucc_geth_private_t *ugeth)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ ucc_geth_init_pram_t *p_init_enet_pram;
+ ucc_fast_private_t *uccf;
+ ucc_geth_info_t *ug_info;
+ ucc_fast_info_t *uf_info;
+ ucc_fast_t *uf_regs;
+ ucc_geth_t *ug_regs;
+ int ret_val = -EINVAL;
+ u32 remoder = UCC_GETH_REMODER_INIT;
+ u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
+ u32 ifstat, i, j, size, l2qt, l3qt, length;
+ u16 temoder = UCC_GETH_TEMODER_INIT;
+ u16 test;
+ u8 function_code = 0;
+ u8 *bd, *endOfRing;
+ u8 numThreadsRxNumerical, numThreadsTxNumerical;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
+ (uf_info->bd_mem_part == MEM_PART_MURAM))) {
+ ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Rx BD lengths */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
+ (ug_info->bdRingLenRx[i] %
+ UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
+ ugeth_err
+ ("%s: Rx BD ring length must be multiple of 4,"
+ " no smaller than 8.", __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ /* Tx BD lengths */
+ for (i = 0; i < ug_info->numQueuesTx; i++) {
+ if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
+ ugeth_err
+ ("%s: Tx BD ring length must be no smaller than 2.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ /* mrblr */
+ if ((uf_info->max_rx_buf_length == 0) ||
+ (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
+ ugeth_err
+ ("%s: max_rx_buf_length must be non-zero multiple of 128.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* num Tx queues */
+ if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
+ ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* num Rx queues */
+ if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
+ ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* l2qt */
+ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
+ if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
+ ugeth_err
+ ("%s: VLAN priority table entry must not be"
+ " larger than number of Rx queues.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ /* l3qt */
+ for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
+ if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
+ ugeth_err
+ ("%s: IP priority table entry must not be"
+ " larger than number of Rx queues.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ if (ug_info->cam && !ug_info->ecamptr) {
+ ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if ((ug_info->numStationAddresses !=
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
+ && ug_info->rxExtendedFiltering) {
+ ugeth_err("%s: Number of station addresses greater than 1 "
+ "not allowed in extended parsing mode.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Generate uccm_mask for receive */
+ uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
+ for (i = 0; i < ug_info->numQueuesRx; i++)
+ uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
+
+ for (i = 0; i < ug_info->numQueuesTx; i++)
+ uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
+ /* Initialize the general fast UCC block. */
+ if (ucc_fast_init(uf_info, &uccf)) {
+ ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ ugeth->uccf = uccf;
+
+ switch (ug_info->numThreadsRx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsRxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsRxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsRxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsRxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsRxNumerical = 8;
+ break;
+ default:
+ ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -EINVAL;
+ break;
+ }
+
+ switch (ug_info->numThreadsTx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsTxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsTxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsTxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsTxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsTxNumerical = 8;
+ break;
+ default:
+ ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -EINVAL;
+ break;
+ }
+
+ /* Calculate rx_extended_features */
+ ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
+ ug_info->ipAddressAlignment ||
+ (ug_info->numStationAddresses !=
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
+
+ ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
+ (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
+ || (ug_info->vlanOperationNonTagged !=
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
+
+ uf_regs = uccf->uf_regs;
+ ug_regs = (ucc_geth_t *) (uccf->uf_regs);
+ ugeth->ug_regs = ug_regs;
+
+ init_default_reg_vals(&uf_regs->upsmr,
+ &ug_regs->maccfg1, &ug_regs->maccfg2);
+
+ /* Set UPSMR */
+ /* For more details see the hardware spec. */
+ init_rx_parameters(ug_info->bro,
+ ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
+
+ /* We're going to ignore other registers for now, */
+ /* except as needed to get up and running */
+
+ /* Set MACCFG1 */
+ /* For more details see the hardware spec. */
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ 1,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &uf_regs->upsmr,
+ &ug_regs->uempr, &ug_regs->maccfg1);
+
+ maccfg1 = in_be32(&ug_regs->maccfg1);
+ maccfg1 |= MACCFG1_ENABLE_RX;
+ maccfg1 |= MACCFG1_ENABLE_TX;
+ out_be32(&ug_regs->maccfg1, maccfg1);
+
+ /* Set IPGIFG */
+ /* For more details see the hardware spec. */
+ ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
+ ug_info->nonBackToBackIfgPart2,
+ ug_info->
+ miminumInterFrameGapEnforcement,
+ ug_info->backToBackInterFrameGap,
+ &ug_regs->ipgifg);
+ if (ret_val != 0) {
+ ugeth_err("%s: IPGIFG initialization parameter too large.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return ret_val;
+ }
+
+ /* Set HAFDUP */
+ /* For more details see the hardware spec. */
+ ret_val = init_half_duplex_params(ug_info->altBeb,
+ ug_info->backPressureNoBackoff,
+ ug_info->noBackoff,
+ ug_info->excessDefer,
+ ug_info->altBebTruncation,
+ ug_info->maxRetransmission,
+ ug_info->collisionWindow,
+ &ug_regs->hafdup);
+ if (ret_val != 0) {
+ ugeth_err("%s: Half Duplex initialization parameter too large.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return ret_val;
+ }
+
+ /* Set IFSTAT */
+ /* For more details see the hardware spec. */
+ /* Read only - resets upon read */
+ ifstat = in_be32(&ug_regs->ifstat);
+
+ /* Clear UEMPR */
+ /* For more details see the hardware spec. */
+ out_be32(&ug_regs->uempr, 0);
+
+ /* Set UESCR */
+ /* For more details see the hardware spec. */
+ init_hw_statistics_gathering_mode((ug_info->statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
+ 0, &uf_regs->upsmr, &ug_regs->uescr);
+
+ /* Allocate Tx bds */
+ for (j = 0; j < ug_info->numQueuesTx; j++) {
+ /* Allocate in multiple of
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
+ according to spec */
+ length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD)
+ / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+ * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) %
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+ length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+ u32 align = 4;
+ if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
+ align = UCC_GETH_TX_BD_RING_ALIGNMENT;
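+ /* Over-allocate by 'align' bytes and round the ring start
+ up by hand; the raw kmalloc() pointer is kept in
+ tx_bd_ring_offset[j] so the buffer can be freed later. */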
+ ugeth->tx_bd_ring_offset[j] =
+ (u32) (kmalloc((u32) (length + align),
+ GFP_KERNEL));
+ if (ugeth->tx_bd_ring_offset[j] != 0)
+ ugeth->p_tx_bd_ring[j] =
+ (void*)((ugeth->tx_bd_ring_offset[j] +
+ align) & ~(align - 1));
+ } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+ ugeth->tx_bd_ring_offset[j] =
+ qe_muram_alloc(length,
+ UCC_GETH_TX_BD_RING_ALIGNMENT);
+ if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j]))
+ ugeth->p_tx_bd_ring[j] =
+ (u8 *) qe_muram_addr(ugeth->
+ tx_bd_ring_offset[j]);
+ }
+ if (!ugeth->p_tx_bd_ring[j]) {
+ ugeth_err
+ ("%s: Can not allocate memory for Tx bd rings.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ /* Zero unused end of bd ring, according to spec */
+ memset(ugeth->p_tx_bd_ring[j] +
+ ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0,
+ length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD);
+ }
+
+ /* Allocate Rx bds */
+ for (j = 0; j < ug_info->numQueuesRx; j++) {
+ length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD;
+ if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+ u32 align = 4;
+ if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
+ align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+ ugeth->rx_bd_ring_offset[j] =
+ (u32) (kmalloc((u32) (length + align), GFP_KERNEL));
+ if (ugeth->rx_bd_ring_offset[j] != 0)
+ ugeth->p_rx_bd_ring[j] =
+ (void*)((ugeth->rx_bd_ring_offset[j] +
+ align) & ~(align - 1));
+ } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+ ugeth->rx_bd_ring_offset[j] =
+ qe_muram_alloc(length,
+ UCC_GETH_RX_BD_RING_ALIGNMENT);
+ if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j]))
+ ugeth->p_rx_bd_ring[j] =
+ (u8 *) qe_muram_addr(ugeth->
+ rx_bd_ring_offset[j]);
+ }
+ if (!ugeth->p_rx_bd_ring[j]) {
+ ugeth_err
+ ("%s: Can not allocate memory for Rx bd rings.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ }
+
+ /* Init Tx bds */
+ for (j = 0; j < ug_info->numQueuesTx; j++) {
+ /* Setup the skbuff rings */
+ ugeth->tx_skbuff[j] =
+ (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
+ ugeth->ug_info->bdRingLenTx[j],
+ GFP_KERNEL);
+
+ if (ugeth->tx_skbuff[j] == NULL) {
+ ugeth_err("%s: Could not allocate tx_skbuff",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
+ ugeth->tx_skbuff[j][i] = NULL;
+
+ ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
+ bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
+ BD_BUFFER_CLEAR(bd);
+ BD_STATUS_AND_LENGTH_SET(bd, 0);
+ bd += UCC_GETH_SIZE_OF_BD;
+ }
+ bd -= UCC_GETH_SIZE_OF_BD;
+ BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */
+ }
+
+ /* Init Rx bds */
+ for (j = 0; j < ug_info->numQueuesRx; j++) {
+ /* Setup the skbuff rings */
+ ugeth->rx_skbuff[j] =
+ (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
+ ugeth->ug_info->bdRingLenRx[j],
+ GFP_KERNEL);
+
+ if (ugeth->rx_skbuff[j] == NULL) {
+ ugeth_err("%s: Could not allocate rx_skbuff",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
+ ugeth->rx_skbuff[j][i] = NULL;
+
+ ugeth->skb_currx[j] = 0;
+ bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
+ BD_STATUS_AND_LENGTH_SET(bd, R_I);
+ BD_BUFFER_CLEAR(bd);
+ bd += UCC_GETH_SIZE_OF_BD;
+ }
+ bd -= UCC_GETH_SIZE_OF_BD;
+ BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */
+ }
+
+ /*
+ * Global PRAM
+ */
+ /* Tx global PRAM */
+ /* Allocate global tx parameter RAM page */
+ ugeth->tx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t),
+ UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ ugeth->p_tx_glbl_pram =
+ (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth->
+ tx_glbl_pram_offset);
+ /* Zero out p_tx_glbl_pram */
+ memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t));
+
+ /* Fill global PRAM */
+
+ /* TQPTR */
+ /* Size varies with number of Tx threads */
+ ugeth->thread_dat_tx_offset =
+ qe_muram_alloc(numThreadsTxNumerical *
+ sizeof(ucc_geth_thread_data_tx_t) +
+ 32 * (numThreadsTxNumerical == 1),
+ UCC_GETH_THREAD_DATA_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_thread_data_tx =
+ (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth->
+ thread_dat_tx_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
+
+ /* vtagtable */
+ for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
+ out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
+ ug_info->vtagtable[i]);
+
+ /* iphoffset */
+ for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
+ ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
+
+ /* SQPTR */
+ /* Size varies with number of Tx queues */
+ ugeth->send_q_mem_reg_offset =
+ qe_muram_alloc(ug_info->numQueuesTx *
+ sizeof(ucc_geth_send_queue_qd_t),
+ UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_send_q_mem_reg =
+ (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth->
+ send_q_mem_reg_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
+
+ /* Setup the table */
+ /* Assume BD rings are already established */
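+ /* Each send queue descriptor gets the physical address of the
+ first and the last BD of its ring; rings living in MURAM are
+ translated with immrbar_virt_to_phys() instead of virt_to_phys() */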
+ for (i = 0; i < ug_info->numQueuesTx; i++) {
+ endOfRing =
+ ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
+ 1) * UCC_GETH_SIZE_OF_BD;
+ if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) virt_to_phys(endOfRing));
+ } else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM) {
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) immrbar_virt_to_phys(ugeth->
+ p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) immrbar_virt_to_phys(endOfRing));
+ }
+ }
+
+ /* schedulerbasepointer */
+
+ if (ug_info->numQueuesTx > 1) {
+ /* scheduler exists only if more than 1 tx queue */
+ ugeth->scheduler_offset =
+ qe_muram_alloc(sizeof(ucc_geth_scheduler_t),
+ UCC_GETH_SCHEDULER_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_scheduler.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_scheduler =
+ (ucc_geth_scheduler_t *) qe_muram_addr(ugeth->
+ scheduler_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ ugeth->scheduler_offset);
+ /* Zero out p_scheduler */
+ memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t));
+
+ /* Set values in scheduler */
+ out_be32(&ugeth->p_scheduler->mblinterval,
+ ug_info->mblinterval);
+ out_be16(&ugeth->p_scheduler->nortsrbytetime,
+ ug_info->nortsrbytetime);
+ ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
+ ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
+ ugeth->p_scheduler->txasap = ug_info->txasap;
+ ugeth->p_scheduler->extrabw = ug_info->extrabw;
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ ugeth->p_scheduler->weightfactor[i] =
+ ug_info->weightfactor[i];
+
+ /* Set pointers to cpucount registers in scheduler */
+ ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
+ ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
+ ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
+ ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
+ ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
+ ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
+ ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
+ ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
+ }
+
+ /* schedulerbasepointer */
+ /* TxRMON_PTR (statistics) */
+ if (ug_info->
+ statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ ugeth->tx_fw_statistics_pram_offset =
+ qe_muram_alloc(sizeof
+ (ucc_geth_tx_firmware_statistics_pram_t),
+ UCC_GETH_TX_STATISTICS_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_tx_fw_statistics_pram.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ ugeth->p_tx_fw_statistics_pram =
+ (ucc_geth_tx_firmware_statistics_pram_t *)
+ qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
+ /* Zero out p_tx_fw_statistics_pram */
+ memset(ugeth->p_tx_fw_statistics_pram,
+ 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t));
+ }
+
+ /* temoder */
+ /* Already has speed set */
+
+ if (ug_info->numQueuesTx > 1)
+ temoder |= TEMODER_SCHEDULER_ENABLE;
+ if (ug_info->ipCheckSumGenerate)
+ temoder |= TEMODER_IP_CHECKSUM_GENERATE;
+ temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
+ out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
+
+ test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
+
+ /* Function code register value to be used later */
+ function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
+ /* Required for QE */
+
+ /* function code register */
+ out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
+
+ /* Rx global PRAM */
+ /* Allocate global rx parameter RAM page */
+ ugeth->rx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t),
+ UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ ugeth->p_rx_glbl_pram =
+ (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth->
+ rx_glbl_pram_offset);
+ /* Zero out p_rx_glbl_pram */
+ memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t));
+
+ /* Fill global PRAM */
+
+ /* RQPTR */
+ /* Size varies with number of Rx threads */
+ ugeth->thread_dat_rx_offset =
+ qe_muram_alloc(numThreadsRxNumerical *
+ sizeof(ucc_geth_thread_data_rx_t),
+ UCC_GETH_THREAD_DATA_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_thread_data_rx =
+ (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth->
+ thread_dat_rx_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
+
+ /* typeorlen */
+ out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
+
+ /* rxrmonbaseptr (statistics) */
+ if (ug_info->
+ statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
+ ugeth->rx_fw_statistics_pram_offset =
+ qe_muram_alloc(sizeof
+ (ucc_geth_rx_firmware_statistics_pram_t),
+ UCC_GETH_RX_STATISTICS_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_rx_fw_statistics_pram.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ ugeth->p_rx_fw_statistics_pram =
+ (ucc_geth_rx_firmware_statistics_pram_t *)
+ qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
+ /* Zero out p_rx_fw_statistics_pram */
+ memset(ugeth->p_rx_fw_statistics_pram, 0,
+ sizeof(ucc_geth_rx_firmware_statistics_pram_t));
+ }
+
+ /* intCoalescingPtr */
+
+ /* Size varies with number of Rx queues */
+ ugeth->rx_irq_coalescing_tbl_offset =
+ qe_muram_alloc(ug_info->numQueuesRx *
+ sizeof(ucc_geth_rx_interrupt_coalescing_entry_t),
+ UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_rx_irq_coalescing_tbl.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_rx_irq_coalescing_tbl =
+ (ucc_geth_rx_interrupt_coalescing_table_t *)
+ qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
+ ugeth->rx_irq_coalescing_tbl_offset);
+
+ /* Fill interrupt coalescing table */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
+ interruptcoalescingmaxvalue,
+ ug_info->interruptcoalescingmaxvalue[i]);
+ out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
+ interruptcoalescingcounter,
+ ug_info->interruptcoalescingmaxvalue[i]);
+ }
+
+ /* MRBLR */
+ init_max_rx_buff_len(uf_info->max_rx_buf_length,
+ &ugeth->p_rx_glbl_pram->mrblr);
+ /* MFLR */
+ out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
+ /* MINFLR */
+ init_min_frame_len(ug_info->minFrameLength,
+ &ugeth->p_rx_glbl_pram->minflr,
+ &ugeth->p_rx_glbl_pram->mrblr);
+ /* MAXD1 */
+ out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
+ /* MAXD2 */
+ out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
+
+ /* l2qt */
+ l2qt = 0;
+ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
+ l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
+ out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
+
+ /* l3qt */
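+ /* Eight 4-bit IP-priority-to-queue entries are packed into
+ each 32-bit l3qt register */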
+ for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
+ l3qt = 0;
+ for (i = 0; i < 8; i++)
+ l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
+ out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt);
+ }
+
+ /* vlantype */
+ out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
+
+ /* vlantci */
+ out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
+
+ /* ecamptr */
+ out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
+
+ /* RBDQPTR */
+ /* Size varies with number of Rx queues */
+ ugeth->rx_bd_qs_tbl_offset =
+ qe_muram_alloc(ug_info->numQueuesRx *
+ (sizeof(ucc_geth_rx_bd_queues_entry_t) +
+ sizeof(ucc_geth_rx_prefetched_bds_t)),
+ UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_rx_bd_qs_tbl =
+ (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth->
+ rx_bd_qs_tbl_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
+ /* Zero out p_rx_bd_qs_tbl */
+ memset(ugeth->p_rx_bd_qs_tbl,
+ 0,
+ ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) +
+ sizeof(ucc_geth_rx_prefetched_bds_t)));
+
+ /* Setup the table */
+ /* Assume BD rings are already established */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
+ } else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) immrbar_virt_to_phys(ugeth->
+ p_rx_bd_ring[i]));
+ }
+ /* rest of fields handled by QE */
+ }
+
+ /* remoder */
+ /* Already has speed set */
+
+ if (ugeth->rx_extended_features)
+ remoder |= REMODER_RX_EXTENDED_FEATURES;
+ if (ug_info->rxExtendedFiltering)
+ remoder |= REMODER_RX_EXTENDED_FILTERING;
+ if (ug_info->dynamicMaxFrameLength)
+ remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
+ if (ug_info->dynamicMinFrameLength)
+ remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
+ remoder |=
+ ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
+ remoder |=
+ ug_info->
+ vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
+ remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
+ remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
+ if (ug_info->ipCheckSumCheck)
+ remoder |= REMODER_IP_CHECKSUM_CHECK;
+ if (ug_info->ipAddressAlignment)
+ remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
+ out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
+
+ /* Note that this function must be called */
+ /* ONLY AFTER p_tx_fw_statistics_pram */
+ /* and p_rx_fw_statistics_pram are allocated! */
+ init_firmware_statistics_gathering_mode((ug_info->
+ statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
+ (ug_info->statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
+ &ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ ugeth->tx_fw_statistics_pram_offset,
+ &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ ugeth->rx_fw_statistics_pram_offset,
+ &ugeth->p_tx_glbl_pram->temoder,
+ &ugeth->p_rx_glbl_pram->remoder);
+
+ /* function code register */
+ ugeth->p_rx_glbl_pram->rstate = function_code;
+
+ /* initialize extended filtering */
+ if (ug_info->rxExtendedFiltering) {
+ if (!ug_info->extendedFilteringChainPointer) {
+ ugeth_err("%s: Null Extended Filtering Chain Pointer.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -EINVAL;
+ }
+
+ /* Allocate memory for extended filtering Mode Global
+ Parameters */
+ ugeth->exf_glbl_param_offset =
+ qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t),
+ UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_exf_glbl_param.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_exf_glbl_param =
+ (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth->
+ exf_glbl_param_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
+ ugeth->exf_glbl_param_offset);
+ out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
+ (u32) ug_info->extendedFilteringChainPointer);
+
+ } else { /* initialize 82xx style address filtering */
+
+ /* Init individual address recognition registers to disabled */
+
+ for (j = 0; j < NUM_OF_PADDRS; j++)
+ ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
+
+ /* Create CQs for hash tables */
+ if (ug_info->maxGroupAddrInHash > 0) {
+ INIT_LIST_HEAD(&ugeth->group_hash_q);
+ }
+ if (ug_info->maxIndAddrInHash > 0) {
+ INIT_LIST_HEAD(&ugeth->ind_hash_q);
+ }
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
+ p_rx_glbl_pram->addressfiltering;
+
+ ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
+ ENET_ADDR_TYPE_GROUP);
+ ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
+ ENET_ADDR_TYPE_INDIVIDUAL);
+ }
+
+ /*
+ * Initialize UCC at QE level
+ */
+
+ command = QE_INIT_TX_RX;
+
+ /* Allocate shadow InitEnet command parameter structure.
+ * This is needed because after the InitEnet command is executed,
+ * the structure in DPRAM is released, because DPRAM is a premium
+ * resource.
+ * This shadow structure keeps a copy of what was done so that the
+ * allocated resources can be released when the channel is freed.
+ */
+ if (!(ugeth->p_init_enet_param_shadow =
+ (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t),
+ GFP_KERNEL))) {
+ ugeth_err
+ ("%s: Can not allocate memory for"
+ " p_UccInitEnetParamShadows.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ /* Zero out *p_init_enet_param_shadow */
+ memset((char *)ugeth->p_init_enet_param_shadow,
+ 0, sizeof(ucc_geth_init_pram_t));
+
+ /* Fill shadow InitEnet command parameter structure */
+
+ ugeth->p_init_enet_param_shadow->resinit1 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT1;
+ ugeth->p_init_enet_param_shadow->resinit2 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT2;
+ ugeth->p_init_enet_param_shadow->resinit3 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT3;
+ ugeth->p_init_enet_param_shadow->resinit4 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT4;
+ ugeth->p_init_enet_param_shadow->resinit5 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT5;
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
+
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ugeth->rx_glbl_pram_offset | ug_info->riscRx;
+ if ((ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
+ && (ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ && (ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
+ ugeth_err("%s: Invalid largest External Lookup Key Size.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -EINVAL;
+ }
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
+ ug_info->largestexternallookupkeysize;
+ size = sizeof(ucc_geth_thread_rx_pram_t);
+ if (ug_info->rxExtendedFiltering) {
+ size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
+ if (ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
+ if (ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
+ }
+
+ if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
+ p_init_enet_param_shadow->rxthread[0]),
+ (u8) (numThreadsRxNumerical + 1)
+ /* Rx needs one extra for terminator */
+ , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
+ ug_info->riscRx, 1)) != 0) {
+ ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return ret_val;
+ }
+
+ ugeth->p_init_enet_param_shadow->txglobal =
+ ugeth->tx_glbl_pram_offset | ug_info->riscTx;
+ if ((ret_val =
+ fill_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]), numThreadsTxNumerical,
+ sizeof(ucc_geth_thread_tx_pram_t),
+ UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
+ ug_info->riscTx, 0)) != 0) {
+ ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return ret_val;
+ }
+
+ /* Load Rx bds with buffers */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
+ ugeth_err("%s: Can not fill Rx bds with buffers.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return ret_val;
+ }
+ }
+
+ /* Allocate InitEnet command parameter structure */
+ init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4);
+ if (IS_MURAM_ERR(init_enet_pram_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ p_init_enet_pram =
+ (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset);
+
+ /* Copy shadow InitEnet command parameter structure into PRAM */
+ p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
+ p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
+ p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
+ p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
+ out_be16(&p_init_enet_pram->resinit5,
+ ugeth->p_init_enet_param_shadow->resinit5);
+ p_init_enet_pram->largestexternallookupkeysize =
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
+ out_be32(&p_init_enet_pram->rgftgfrxglobal,
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
+ for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
+ out_be32(&p_init_enet_pram->rxthread[i],
+ ugeth->p_init_enet_param_shadow->rxthread[i]);
+ out_be32(&p_init_enet_pram->txglobal,
+ ugeth->p_init_enet_param_shadow->txglobal);
+ for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
+ out_be32(&p_init_enet_pram->txthread[i],
+ ugeth->p_init_enet_param_shadow->txthread[i]);
+
+ /* Issue QE command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
+ init_enet_pram_offset);
+
+ /* Free InitEnet command parameter */
+ qe_muram_free(init_enet_pram_offset);
+
+ return 0;
+}
+
+/* returns a net_device_stats structure pointer */
+static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+
+ return &(ugeth->stats);
+}
+
+/* ucc_geth_timeout gets called when a packet has not been
+ * transmitted after a set amount of time.
+ * For now, assume that clearing out all the structures and
+ * starting over will fix the problem. */
+static void ucc_geth_timeout(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ugeth->stats.tx_errors++;
+
+ ugeth_dump_regs(ugeth);
+
+ if (dev->flags & IFF_UP) {
+ ucc_geth_stop(ugeth);
+ ucc_geth_startup(ugeth);
+ }
+
+ netif_schedule(dev);
+}
+
+/* This is called by the kernel when a frame is ready for transmission. */
+/* It is pointed to by the dev->hard_start_xmit function pointer */
+static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ u8 *bd; /* BD pointer */
+ u32 bd_status;
+ u8 txQ = 0;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irq(&ugeth->lock);
+
+ ugeth->stats.tx_bytes += skb->len;
+
+ /* Start from the next BD that should be filled */
+ bd = ugeth->txBd[txQ];
+ bd_status = BD_STATUS_AND_LENGTH(bd);
+ /* Save the skb pointer so we can free it later */
+ ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
+
+ /* Update the current skb pointer (wrapping if this was the last) */
+ ugeth->skb_curtx[txQ] =
+ (ugeth->skb_curtx[txQ] +
+ 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
+
+ /* set up the buffer descriptor */
+ BD_BUFFER_SET(bd,
+ dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
+
+ //printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data);
+
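+ /* Keep only the Wrap bit of the previous status, then mark the BD
+ ready for transmission (T_R), request an interrupt (T_I), flag it
+ as the last BD of the frame (T_L) and merge in the frame length */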
+ bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
+
+ BD_STATUS_AND_LENGTH_SET(bd, bd_status);
+
+ dev->trans_start = jiffies;
+
+ /* Move to next BD in the ring */
+ if (!(bd_status & T_W))
+ ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD;
+ else
+ ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+
+ /* If the next BD still needs to be cleaned up, then the bds
+ are full. We need to tell the kernel to stop sending us stuff. */
+ if (bd == ugeth->confBd[txQ]) {
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+ }
+
+ if (ugeth->p_scheduler) {
+ ugeth->cpucount[txQ]++;
+ /* Indicate to QE that there are more Tx bds ready for
+ transmission */
+ /* This is done by writing a running counter of the bd
+ count to the scheduler PRAM. */
+ out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
+ }
+
+ spin_unlock_irq(&ugeth->lock);
+
+ return 0;
+}
+
+static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
+{
+ struct sk_buff *skb;
+ u8 *bd;
+ u16 length, howmany = 0;
+ u32 bd_status;
+ u8 *bdBuffer;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock(&ugeth->lock);
+ /* collect received buffers */
+ bd = ugeth->rxBd[rxQ];
+
+ bd_status = BD_STATUS_AND_LENGTH(bd);
+
+ /* collect frames while the BD is not empty (~R_E) and the work limit is not exhausted */
+ while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
+ bdBuffer = (u8 *) BD_BUFFER(bd);
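+ /* the length reported in the BD includes the trailing
+ 4 bytes (FCS), which are not passed up the stack */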
+ length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
+ skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
+
+ /* determine whether buffer is first, last, first and last
+ (single buffer frame) or middle (not first and not last) */
+ if (!skb ||
+ (!(bd_status & (R_F | R_L))) ||
+ (bd_status & R_ERRORS_FATAL)) {
+ ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
+ __FUNCTION__, __LINE__, (u32) skb);
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
+ ugeth->stats.rx_dropped++;
+ } else {
+ ugeth->stats.rx_packets++;
+ howmany++;
+
+ /* Prep the skb for the packet */
+ skb_put(skb, length);
+
+ /* Tell the skb what kind of packet this is */
+ skb->protocol = eth_type_trans(skb, ugeth->dev);
+
+ ugeth->stats.rx_bytes += length;
+ /* Send the packet up the stack */
+#ifdef CONFIG_UGETH_NAPI
+ netif_receive_skb(skb);
+#else
+ netif_rx(skb);
+#endif /* CONFIG_UGETH_NAPI */
+ }
+
+ ugeth->dev->last_rx = jiffies;
+
+ skb = get_new_skb(ugeth, bd);
+ if (!skb) {
+ ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
+ ugeth->stats.rx_dropped++;
+ break;
+ }
+
+ ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
+
+ /* update to point at the next skb */
+ ugeth->skb_currx[rxQ] =
+ (ugeth->skb_currx[rxQ] +
+ 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
+
+ if (bd_status & R_W)
+ bd = ugeth->p_rx_bd_ring[rxQ];
+ else
+ bd += UCC_GETH_SIZE_OF_BD;
+
+ bd_status = BD_STATUS_AND_LENGTH(bd);
+ }
+
+ ugeth->rxBd[rxQ] = bd;
+ spin_unlock(&ugeth->lock);
+ return howmany;
+}
+
+static int ucc_geth_tx(struct net_device *dev, u8 txQ)
+{
+ /* Start from the next BD that should be filled */
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ u8 *bd; /* BD pointer */
+ u32 bd_status;
+
+ bd = ugeth->confBd[txQ];
+ bd_status = BD_STATUS_AND_LENGTH(bd);
+
+ /* Normal processing. */
+ while ((bd_status & T_R) == 0) {
+ /* BD contains already transmitted buffer. */
+ /* Handle the transmitted buffer and release */
+ /* the BD to be used with the current frame */
+
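+ /* Stop once we have caught up with the next BD to be
+ transmitted, unless the queue is stopped (the ring may
+ then be completely full) */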
+ if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+ break;
+
+ ugeth->stats.tx_packets++;
+
+ /* Free the sk buffer associated with this TxBD */
+ dev_kfree_skb_irq(ugeth->
+ tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
+ ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
+ ugeth->skb_dirtytx[txQ] =
+ (ugeth->skb_dirtytx[txQ] +
+ 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
+
+ /* We freed a buffer, so now we can restart transmission */
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ /* Advance the confirmation BD pointer */
+ if (!(bd_status & T_W))
+ ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD;
+ else
+ ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+ }
+ return 0;
+}
+
+#ifdef CONFIG_UGETH_NAPI
+static int ucc_geth_poll(struct net_device *dev, int *budget)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ int howmany;
+ int rx_work_limit = *budget;
+ u8 rxQ = 0;
+
+ if (rx_work_limit > dev->quota)
+ rx_work_limit = dev->quota;
+
+ howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit);
+
+ dev->quota -= howmany;
+ rx_work_limit -= howmany;
+ *budget -= howmany;
+
+ if (rx_work_limit >= 0)
+ netif_rx_complete(dev);
+
+ return (rx_work_limit < 0) ? 1 : 0;
+}
+#endif /* CONFIG_UGETH_NAPI */
+
+static irqreturn_t ucc_geth_irq_handler(int irq, void *info,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)info;
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_fast_private_t *uccf;
+ ucc_geth_info_t *ug_info;
+ register u32 ucce = 0;
+ register u32 bit_mask = UCCE_RXBF_SINGLE_MASK;
+ register u32 tx_mask = UCCE_TXBF_SINGLE_MASK;
+ register u8 i;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ if (!ugeth)
+ return IRQ_NONE;
+
+ uccf = ugeth->uccf;
+ ug_info = ugeth->ug_info;
+
+ do {
+ ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm));
+
+ /* clear event bits for next time */
+ /* Side effect here is to mask ucce variable
+ for future processing below. */
+ out_be32(uccf->p_ucce, ucce); /* Clear with ones,
+ but only bits in UCCM */
+
+ /* Dispatch per-queue events: received frames on the Rx queues
+ and transmitted-buffer confirmations on the Tx queues */
+
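+ /* Each Rx queue has its own frame-received event bit,
+ starting from RXF0 for queue 0; dispatch to the owning queue */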
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if (ucce & bit_mask)
+ ucc_geth_rx(ugeth, i,
+ (int)ugeth->ug_info->
+ bdRingLenRx[i]);
+ ucce &= ~bit_mask;
+ bit_mask <<= 1;
+ }
+
+ for (i = 0; i < ug_info->numQueuesTx; i++) {
+ if (ucce & tx_mask)
+ ucc_geth_tx(dev, i);
+ ucce &= ~tx_mask;
+ tx_mask <<= 1;
+ }
+
+ /* Exceptions */
+ if (ucce & UCCE_BSY) {
+ ugeth_vdbg("Got BUSY irq!!!!");
+ ugeth->stats.rx_errors++;
+ ucce &= ~UCCE_BSY;
+ }
+ if (ucce & UCCE_OTHER) {
+ ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!",
+ ucce);
+ ugeth->stats.rx_errors++;
+ ucce &= ~ucce;
+ }
+ }
+ while (ucce);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ /* Clear the interrupt */
+ mii_clear_phy_interrupt(ugeth->mii_info);
+
+ /* Disable PHY interrupts */
+ mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED);
+
+ /* Schedule the phy change */
+ schedule_work(&ugeth->tq);
+
+ return IRQ_HANDLED;
+}
+
+/* Scheduled by the phy_interrupt/timer to handle PHY changes */
+static void ugeth_phy_change(void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_geth_t *ug_regs;
+ int result = 0;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ug_regs = ugeth->ug_regs;
+
+ /* Delay to give the PHY a chance to change the
+ * register state */
+ msleep(1);
+
+ /* Update the link, speed, duplex */
+ result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info);
+
+ /* Adjust the known status as long as the link
+ * isn't still coming up */
+ if ((0 == result) || (ugeth->mii_info->link == 0))
+ adjust_link(dev);
+
+ /* Reenable interrupts, if needed */
+ if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR)
+ mii_configure_phy_interrupt(ugeth->mii_info,
+ MII_INTERRUPT_ENABLED);
+}
+
+/* Called every so often on systems that don't interrupt
+ * the core for PHY changes */
+static void ugeth_phy_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+
+ schedule_work(&ugeth->tq);
+
+ mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
+}
+
+/* Keep trying aneg for some time
+ * If, after UGETH_AN_TIMEOUT seconds, it has not
+ * finished, we switch to forced.
+ * Either way, once the process has completed, we either
+ * request the interrupt, or switch the timer over to
+ * using ugeth_phy_timer to check status */
+static void ugeth_phy_startup_timer(unsigned long data)
+{
+ struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
+ ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev);
+ static int secondary = UGETH_AN_TIMEOUT;
+ int result;
+
+ /* Configure the Auto-negotiation */
+ result = mii_info->phyinfo->config_aneg(mii_info);
+
+ /* If autonegotiation failed to start, and
+ * we haven't timed out, reset the timer, and return */
+ if (result && secondary--) {
+ mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
+ return;
+ } else if (result) {
+ /* Couldn't start autonegotiation.
+ * Try switching to forced */
+ mii_info->autoneg = 0;
+ result = mii_info->phyinfo->config_aneg(mii_info);
+
+ /* Forcing failed! Give up */
+ if (result) {
+ ugeth_err("%s: Forcing failed!", mii_info->dev->name);
+ return;
+ }
+ }
+
+ /* Kill the timer so it can be restarted */
+ del_timer_sync(&ugeth->phy_info_timer);
+
+ /* Grab the PHY interrupt, if necessary/possible */
+ if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
+ if (request_irq(ugeth->ug_info->phy_interrupt,
+ phy_interrupt,
+ SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) {
+ ugeth_err("%s: Can't get IRQ %d (PHY)",
+ mii_info->dev->name,
+ ugeth->ug_info->phy_interrupt);
+ } else {
+ mii_configure_phy_interrupt(ugeth->mii_info,
+ MII_INTERRUPT_ENABLED);
+ return;
+ }
+ }
+
+ /* Start the timer again, this time in order to
+ * handle a change in status */
+ init_timer(&ugeth->phy_info_timer);
+ ugeth->phy_info_timer.function = &ugeth_phy_timer;
+ ugeth->phy_info_timer.data = (unsigned long)mii_info->dev;
+ mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
+}
+
+/* Called when something needs to use the ethernet device */
+/* Returns 0 for success. */
+static int ucc_geth_open(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ int err;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ /* Test station address */
+ if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
+ ugeth_err("%s: Multicast address used for station address"
+ " - is this what you wanted?", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ err = ucc_geth_startup(ugeth);
+ if (err) {
+ ugeth_err("%s: Cannot configure net device, aborting.",
+ dev->name);
+ return err;
+ }
+
+ err = adjust_enet_interface(ugeth);
+ if (err) {
+ ugeth_err("%s: Cannot configure net device, aborting.",
+ dev->name);
+ return err;
+ }
+
+ /* Set MACSTNADDR1, MACSTNADDR2 */
+ /* For more details see the hardware spec. */
+ init_mac_station_addr_regs(dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5],
+ &ugeth->ug_regs->macstnaddr1,
+ &ugeth->ug_regs->macstnaddr2);
+
+ err = init_phy(dev);
+ if (err) {
+ ugeth_err("%s: Cannot initialzie PHY, aborting.", dev->name);
+ return err;
+ }
+#ifndef CONFIG_UGETH_NAPI
+ err =
+ request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
+ "UCC Geth", dev);
+ if (err) {
+ ugeth_err("%s: Cannot get IRQ for net device, aborting.",
+ dev->name);
+ ucc_geth_stop(ugeth);
+ return err;
+ }
+#endif /* CONFIG_UGETH_NAPI */
+
+ /* Set up the PHY change work queue */
+ INIT_WORK(&ugeth->tq, ugeth_phy_change, dev);
+
+ init_timer(&ugeth->phy_info_timer);
+ ugeth->phy_info_timer.function = &ugeth_phy_startup_timer;
+ ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info;
+ mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
+
+ err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ if (err) {
+ ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
+ ucc_geth_stop(ugeth);
+ return err;
+ }
+
+ netif_start_queue(dev);
+
+ return err;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int ucc_geth_close(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ucc_geth_stop(ugeth);
+
+ /* Shutdown the PHY */
+ if (ugeth->mii_info->phyinfo->close)
+ ugeth->mii_info->phyinfo->close(ugeth->mii_info);
+
+ kfree(ugeth->mii_info);
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+struct ethtool_ops ucc_geth_ethtool_ops = {
+ .get_settings = NULL,
+ .get_drvinfo = NULL,
+ .get_regs_len = NULL,
+ .get_regs = NULL,
+ .get_link = NULL,
+ .get_coalesce = NULL,
+ .set_coalesce = NULL,
+ .get_ringparam = NULL,
+ .set_ringparam = NULL,
+ .get_strings = NULL,
+ .get_stats_count = NULL,
+ .get_ethtool_stats = NULL,
+};
+
+static int ucc_geth_probe(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct ucc_geth_platform_data *ugeth_pdata;
+ struct net_device *dev = NULL;
+ struct ucc_geth_private *ugeth = NULL;
+ struct ucc_geth_info *ug_info;
+ int err;
+ static int mii_mng_configured = 0;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data;
+
+ ug_info = &ugeth_info[pdev->id];
+ if (ug_info == NULL) {
+ ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
+ pdev->id);
+ return -ENODEV;
+ }
+
+ ug_info->uf_info.ucc_num = pdev->id;
+ ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock;
+ ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock;
+ ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr;
+ ug_info->uf_info.irq = platform_get_irq(pdev, 0);
+ ug_info->phy_address = ugeth_pdata->phy_id;
+ ug_info->enet_interface = ugeth_pdata->phy_interface;
+ ug_info->board_flags = ugeth_pdata->board_flags;
+ ug_info->phy_interrupt = ugeth_pdata->phy_interrupt;
+
+ printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
+ ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
+ ug_info->uf_info.irq);
+
+ if (!mii_mng_configured) {
+ ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num);
+ mii_mng_configured = 1;
+ }
+
+ /* Create an ethernet device instance */
+ dev = alloc_etherdev(sizeof(*ugeth));
+
+ if (dev == NULL)
+ return -ENOMEM;
+
+ ugeth = netdev_priv(dev);
+ spin_lock_init(&ugeth->lock);
+
+ dev_set_drvdata(device, dev);
+
+ /* Set the dev->base_addr to the UCC Geth register region */
+ dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, device);
+
+ /* Fill in the dev structure */
+ dev->open = ucc_geth_open;
+ dev->hard_start_xmit = ucc_geth_start_xmit;
+ dev->tx_timeout = ucc_geth_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_UGETH_NAPI
+ dev->poll = ucc_geth_poll;
+ dev->weight = UCC_GETH_DEV_WEIGHT;
+#endif /* CONFIG_UGETH_NAPI */
+ dev->stop = ucc_geth_close;
+ dev->get_stats = ucc_geth_get_stats;
+// dev->change_mtu = ucc_geth_change_mtu;
+ dev->mtu = 1500;
+ dev->set_multicast_list = ucc_geth_set_multi;
+ dev->ethtool_ops = &ucc_geth_ethtool_ops;
+
+ err = register_netdev(dev);
+ if (err) {
+ ugeth_err("%s: Cannot register net device, aborting.",
+ dev->name);
+ free_netdev(dev);
+ return err;
+ }
+
+ ugeth->ug_info = ug_info;
+ ugeth->dev = dev;
+ memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6);
+
+ return 0;
+}
+
+static int ucc_geth_remove(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ dev_set_drvdata(device, NULL);
+ ucc_geth_memclean(ugeth);
+ free_netdev(dev);
+
+ return 0;
+}
+
+/* Structure for a device driver */
+static struct device_driver ucc_geth_driver = {
+ .name = DRV_NAME,
+ .bus = &platform_bus_type,
+ .probe = ucc_geth_probe,
+ .remove = ucc_geth_remove,
+};
+
+static int __init ucc_geth_init(void)
+{
+ int i;
+ printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
+ for (i = 0; i < 8; i++)
+ memcpy(&(ugeth_info[i]), &ugeth_primary_info,
+ sizeof(ugeth_primary_info));
+
+ return driver_register(&ucc_geth_driver);
+}
+
+static void __exit ucc_geth_exit(void)
+{
+ driver_unregister(&ucc_geth_driver);
+}
+
+module_init(ucc_geth_init);
+module_exit(ucc_geth_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
new file mode 100644
index 000000000000..005965f5dd9b
--- /dev/null
+++ b/drivers/net/ucc_geth.h
@@ -0,0 +1,1339 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * Internal header file for UCC Gigabit Ethernet unit routines.
+ *
+ * Changelog:
+ * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_GETH_H__
+#define __UCC_GETH_H__
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+
+#include <asm/ucc.h>
+#include <asm/ucc_fast.h>
+
+#define NUM_TX_QUEUES 8
+#define NUM_RX_QUEUES 8
+#define NUM_BDS_IN_PREFETCHED_BDS 4
+#define TX_IP_OFFSET_ENTRY_MAX 8
+#define NUM_OF_PADDRS 4
+#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9
+#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8
+
+typedef struct ucc_mii_mng {
+ u32 miimcfg; /* MII management configuration reg */
+ u32 miimcom; /* MII management command reg */
+ u32 miimadd; /* MII management address reg */
+ u32 miimcon; /* MII management control reg */
+ u32 miimstat; /* MII management status reg */
+ u32 miimind; /* MII management indication reg */
+} __attribute__ ((packed)) ucc_mii_mng_t;
+
+typedef struct ucc_geth {
+ ucc_fast_t uccf;
+
+ u32 maccfg1; /* mac configuration reg. 1 */
+ u32 maccfg2; /* mac configuration reg. 2 */
+ u32 ipgifg; /* interframe gap reg. */
+ u32 hafdup; /* half-duplex reg. */
+ u8 res1[0x10];
+ ucc_mii_mng_t miimng; /* MII management structure */
+ u32 ifctl; /* interface control reg */
+ u32 ifstat; /* interface status reg */
+ u32 macstnaddr1; /* mac station address part 1 reg */
+ u32 macstnaddr2; /* mac station address part 2 reg */
+ u8 res2[0x8];
+ u32 uempr; /* UCC Ethernet Mac parameter reg */
+ u32 utbipar; /* UCC tbi address reg */
+ u16 uescr; /* UCC Ethernet statistics control reg */
+ u8 res3[0x180 - 0x15A];
+ u32 tx64; /* Total number of frames (including bad
+ frames) transmitted that were exactly of the
+ minimal length (64 for untagged, 68 for
+ tagged, or with length exactly equal to the
+ parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 rx64; /* Total number of frames received including
+ bad frames that were exactly of the minimal
+ length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+ that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u8 res4[0x2];
+ u32 tmca; /* Total number of frames that were transmitted
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+ successfully that had destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. Must be implemented in
+ HW because it includes octets in frames that
+ never even reach the UCC */
+ u32 rmca; /* Total number of frames that were received
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 rbca; /* Total number of frames received successfully
+ that had destination address equal to the
+ broadcast address */
+ u32 scar; /* Statistics carry register */
+ u32 scam; /* Statistics carry mask register */
+ u8 res5[0x200 - 0x1c4];
+} __attribute__ ((packed)) ucc_geth_t;
+
+/* UCC GETH TEMODER Register */
+#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
+ */
+#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */
+#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4
+ checksums */
+#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance
+ optimization
+ enhancement (mode1) */
+#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics
+ */
+#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues <<
+ shift */
+
+/* UCC GETH REMODER Register */
+#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx
+ statistics */
+#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable
+ extended
+ features */
+#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9) /* vlan operation
+ tagged << shift */
+#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non
+ tagged << shift */
+#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift
+ */
+#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx
+ statistics */
+#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended
+ filtering
+ vs.
+ mpc82xx-like
+ filtering */
+#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues <<
+ shift */
+#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable
+ dynamic max
+ frame length
+ */
+#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable
+ dynamic min
+ frame length
+ */
+#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4
+ checksums */
+#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip
+ address to
+ 4-byte
+ boundary */
+
+/* UCC GETH Event Register */
+#define UCCE_MPD 0x80000000 /* Magic packet
+ detection */
+#define UCCE_SCAR 0x40000000
+#define UCCE_GRA 0x20000000 /* Tx graceful
+ stop
+ complete */
+#define UCCE_CBPR 0x10000000
+#define UCCE_BSY 0x08000000
+#define UCCE_RXC 0x04000000
+#define UCCE_TXC 0x02000000
+#define UCCE_TXE 0x01000000
+#define UCCE_TXB7 0x00800000
+#define UCCE_TXB6 0x00400000
+#define UCCE_TXB5 0x00200000
+#define UCCE_TXB4 0x00100000
+#define UCCE_TXB3 0x00080000
+#define UCCE_TXB2 0x00040000
+#define UCCE_TXB1 0x00020000
+#define UCCE_TXB0 0x00010000
+#define UCCE_RXB7 0x00008000
+#define UCCE_RXB6 0x00004000
+#define UCCE_RXB5 0x00002000
+#define UCCE_RXB4 0x00001000
+#define UCCE_RXB3 0x00000800
+#define UCCE_RXB2 0x00000400
+#define UCCE_RXB1 0x00000200
+#define UCCE_RXB0 0x00000100
+#define UCCE_RXF7 0x00000080
+#define UCCE_RXF6 0x00000040
+#define UCCE_RXF5 0x00000020
+#define UCCE_RXF4 0x00000010
+#define UCCE_RXF3 0x00000008
+#define UCCE_RXF2 0x00000004
+#define UCCE_RXF1 0x00000002
+#define UCCE_RXF0 0x00000001
+
+#define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0)
+#define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0)
+
+#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\
+ UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0)
+#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\
+ UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0)
+#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 |\
+ UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0)
+#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\
+ UCCE_RXC | UCCE_TXC | UCCE_TXE)
+
+/* UCC GETH UPSMR (Protocol Specific Mode Register) */
+#define UPSMR_ECM 0x04000000 /* Enable CAM
+ Miss or
+ Enable
+ Filtering
+ Miss */
+#define UPSMR_HSE 0x02000000 /* Hardware
+ Statistics
+ Enable */
+#define UPSMR_PRO 0x00400000 /* Promiscuous*/
+#define UPSMR_CAP 0x00200000 /* CAM polarity
+ */
+#define UPSMR_RSH 0x00100000 /* Receive
+ Short Frames
+ */
+#define UPSMR_RPM 0x00080000 /* Reduced Pin
+ Mode
+ interfaces */
+#define UPSMR_R10M 0x00040000 /* RGMII/RMII
+ 10 Mode */
+#define UPSMR_RLPB 0x00020000 /* RMII
+ Loopback
+ Mode */
+#define UPSMR_TBIM 0x00010000 /* Ten-bit
+ Interface
+ Mode */
+#define UPSMR_RMM 0x00001000 /* RMII/RGMII
+ Mode */
+#define UPSMR_CAM 0x00000400 /* CAM Address
+ Matching */
+#define UPSMR_BRO 0x00000200 /* Broadcast
+ Address */
+#define UPSMR_RES1 0x00002000 /* Reserved
+ field - must
+ be 1 */
+
+/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
+#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
+ Rx */
+#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control
+ Tx */
+#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable
+ synchronized
+ to Rx stream
+ */
+#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */
+#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable
+ synchronized
+ to Tx stream
+ */
+#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */
+
+/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */
+#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble
+ Length <<
+ shift */
+#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble
+ Length mask */
+#define MACCFG2_SRP 0x00000080 /* Soft Receive
+ Preamble */
+#define MACCFG2_STP 0x00000040 /* Soft
+ Transmit
+ Preamble */
+#define MACCFG2_RESERVED_1 0x00000020 /* Reserved -
+ must be set
+ to 1 */
+#define MACCFG2_LC 0x00000010 /* Length Check
+ */
+#define MACCFG2_MPE 0x00000008 /* Magic packet
+ detect */
+#define MACCFG2_FDX 0x00000001 /* Full Duplex */
+#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex
+ mask */
+#define MACCFG2_PAD_CRC 0x00000004
+#define MACCFG2_CRC_EN 0x00000002
+#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither
+ Padding
+ short frames
+ nor CRC */
+#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC
+ only */
+#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004
+#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode
+ (MII/RMII/RGMII
+ 10/100bps) */
+#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode
+ (GMII/TBI/RTB/RGMII
+ 1000bps ) */
+#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask
+ covering all
+ relevant
+ bits */
+
+/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non
+ back-to-back
+ inter frame
+ gap part 1.
+ << shift */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non
+ back-to-back
+ inter frame
+ gap part 2.
+ << shift */
+#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Minimum IFG
+ Enforcement
+ << shift */
+#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back
+ inter frame
+ gap << shift
+ */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back
+ inter frame gap part
+ 1. max val */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back
+ inter frame gap part
+ 2. max val */
+#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Minimum IFG
+ Enforcement max val */
+#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter
+ frame gap max val */
+#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000
+#define IPGIFG_NBTB_IPG_MASK 0x007F0000
+#define IPGIFG_MIN_IFG_MASK 0x0000FF00
+#define IPGIFG_BTB_IPG_MASK 0x0000007F
+
+/* UCC GETH HAFDUP (Half Duplex Register) */
+#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate
+ Binary
+ Exponential
+ Backoff
+ Truncation
+ << shift */
+#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary
+ Exponential Backoff
+ Truncation max val */
+#define HALFDUP_ALT_BEB 0x00080000 /* Alternate
+ Binary
+ Exponential
+ Backoff */
+#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back
+ pressure no
+ backoff */
+#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */
+#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive
+ Defer */
+#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum
+ Retransmission
+ << shift */
+#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum
+ Retransmission max
+ val */
+#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision
+ Window <<
+ shift */
+#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max
+ val */
+#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000
+#define HALFDUP_RETRANS_MASK 0x0000F000
+#define HALFDUP_COL_WINDOW_MASK 0x0000003F
+
+/* UCC GETH UCCS (Ethernet Status Register) */
+#define UCCS_BPR 0x02 /* Back pressure (in
+ half duplex mode) */
+#define UCCS_PAU 0x02 /* Pause state (in full
+ duplex mode) */
+#define UCCS_MPD 0x01 /* Magic Packet
+ Detected */
+
+/* UCC GETH MIIMCFG (MII Management Configuration Register) */
+#define MIIMCFG_RESET_MANAGEMENT 0x80000000 /* Reset
+ management */
+#define MIIMCFG_NO_PREAMBLE 0x00000010 /* Preamble
+ suppress */
+#define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31) /* clock divide
+ << shift */
+#define MIIMCFG_CLOCK_DIVIDE_MAX 0xf /* clock divide max val
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_2 0x00000000 /* divide by 2 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001 /* divide by 4 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002 /* divide by 6 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003 /* divide by 8 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004 /* divide by 10
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005 /* divide by 14
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_16 0x00000008 /* divide by 16
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006 /* divide by 20
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007 /* divide by 28
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_32 0x00000009 /* divide by 32
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_48 0x0000000a /* divide by 48
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_64 0x0000000b /* divide by 64
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_80 0x0000000c /* divide by 80
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112 0x0000000d /* divide by
+ 112 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_160 0x0000000e /* divide by
+ 160 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_224 0x0000000f /* divide by
+ 224 */
+
+/* UCC GETH MIIMCOM (MII Management Command Register) */
+#define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */
+#define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */
+
+/* UCC GETH MIIMADD (MII Management Address Register) */
+#define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23) /* PHY Address
+ << shift */
+#define MIIMADD_PHY_REGISTER_SHIFT (31 - 31) /* PHY Register
+ << shift */
+
+/* UCC GETH MIIMCON (MII Management Control Register) */
+#define MIIMCON_PHY_CONTROL_SHIFT (31 - 31) /* PHY Control
+ << shift */
+#define MIIMCON_PHY_STATUS_SHIFT (31 - 31) /* PHY Status
+ << shift */
+
+/* UCC GETH MIIMIND (MII Management Indicator Register) */
+#define MIIMIND_NOT_VALID 0x00000004 /* Not valid */
+#define MIIMIND_SCAN 0x00000002 /* Scan in
+ progress */
+#define MIIMIND_BUSY 0x00000001
+
+/* UCC GETH IFSTAT (Interface Status Register) */
+#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive
+ transmission
+ defer */
+
+/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */
+#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station
+ address 6th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station
+ address 5th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station
+ address 4th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station
+ address 3rd
+ octet <<
+ shift */
+
+/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */
+#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station
+ address 2nd
+ octet <<
+ shift */
+#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station
+ address 1st
+ octet <<
+ shift */
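+
+/* Illustrative sketch (assumed usage, not code from the patch): packing a
+   six-octet station address into the two station address register values
+   with the shifts above. The mapping of addr[] indices to "octet 1..6" is
+   an assumption made only for illustration. */
+static inline void macstnaddr_pack(const u8 addr[6], u32 *stnaddr1, u32 *stnaddr2)
+{
+	*stnaddr1 = ((u32)addr[5] << MACSTNADDR1_OCTET_6_SHIFT) |
+		    ((u32)addr[4] << MACSTNADDR1_OCTET_5_SHIFT) |
+		    ((u32)addr[3] << MACSTNADDR1_OCTET_4_SHIFT) |
+		    ((u32)addr[2] << MACSTNADDR1_OCTET_3_SHIFT);
+	*stnaddr2 = ((u32)addr[1] << MACSTNADDR2_OCTET_2_SHIFT) |
+		    ((u32)addr[0] << MACSTNADDR2_OCTET_1_SHIFT);
+}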
+
+/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */
+#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time
+ value <<
+ shift */
+#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended
+ pause time
+ value <<
+ shift */
+
+/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */
+#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address
+ << shift */
+#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address
+ mask */
+
+/* UCC GETH UESCR (Ethernet Statistics Control Register) */
+#define UESCR_AUTOZ 0x8000 /* Automatically zero
+ addressed
+ statistical counter
+ values */
+#define UESCR_CLRCNT 0x4000 /* Clear all statistics
+ counters */
+#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max
+ Coalescing
+ Value <<
+ shift */
+#define UESCR_SCOV_SHIFT (15 - 15) /* Status
+ Coalescing
+ Value <<
+ shift */
+
+/* UCC GETH UDSR (Data Synchronization Register) */
+#define UDSR_MAGIC 0x067E
+
+typedef struct ucc_geth_thread_data_tx {
+ u8 res0[104];
+} __attribute__ ((packed)) ucc_geth_thread_data_tx_t;
+
+typedef struct ucc_geth_thread_data_rx {
+ u8 res0[40];
+} __attribute__ ((packed)) ucc_geth_thread_data_rx_t;
+
+/* Send Queue Queue-Descriptor */
+typedef struct ucc_geth_send_queue_qd {
+ u32 bd_ring_base; /* pointer to BD ring base address */
+ u8 res0[0x8];
+ u32 last_bd_completed_address;/* initialize to last entry in BD ring */
+ u8 res1[0x30];
+} __attribute__ ((packed)) ucc_geth_send_queue_qd_t;
+
+typedef struct ucc_geth_send_queue_mem_region {
+ ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES];
+} __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t;
+
+typedef struct ucc_geth_thread_tx_pram {
+ u8 res0[64];
+} __attribute__ ((packed)) ucc_geth_thread_tx_pram_t;
+
+typedef struct ucc_geth_thread_rx_pram {
+ u8 res0[128];
+} __attribute__ ((packed)) ucc_geth_thread_rx_pram_t;
+
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96
+
+typedef struct ucc_geth_scheduler {
+ u16 cpucount0; /* CPU packet counter */
+ u16 cpucount1; /* CPU packet counter */
+ u16 cecount0; /* QE packet counter */
+ u16 cecount1; /* QE packet counter */
+ u16 cpucount2; /* CPU packet counter */
+ u16 cpucount3; /* CPU packet counter */
+ u16 cecount2; /* QE packet counter */
+ u16 cecount3; /* QE packet counter */
+ u16 cpucount4; /* CPU packet counter */
+ u16 cpucount5; /* CPU packet counter */
+ u16 cecount4; /* QE packet counter */
+ u16 cecount5; /* QE packet counter */
+ u16 cpucount6; /* CPU packet counter */
+ u16 cpucount7; /* CPU packet counter */
+ u16 cecount6; /* QE packet counter */
+ u16 cecount7; /* QE packet counter */
+ u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */
+ u32 rtsrshadow; /* temporary variable handled by QE */
+ u32 time; /* temporary variable handled by QE */
+ u32 ttl; /* temporary variable handled by QE */
+ u32 mblinterval; /* max burst length interval */
+ u16 nortsrbytetime; /* normalized value of byte time in tsr units */
+ u8 fracsiz; /* radix 2 log value of denom. of
+ NorTSRByteTime */
+ u8 res0[1];
+ u8 strictpriorityq; /* Strict Priority Mask register */
+ u8 txasap; /* Transmit ASAP register */
+ u8 extrabw; /* Extra BandWidth register */
+ u8 oldwfqmask; /* temporary variable handled by QE */
+ u8 weightfactor[NUM_TX_QUEUES];
+ /**< weight factor for queues */
+ u32 minw; /* temporary variable handled by QE */
+ u8 res1[0x70 - 0x64];
+} __attribute__ ((packed)) ucc_geth_scheduler_t;
+
+typedef struct ucc_geth_tx_firmware_statistics_pram {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to transmit collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC
+ transmission error that are not counted
+ in any other counter */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer; /* frames with deferral time greater than
+ specified threshold */
+ u32 txpkts256; /* total packets (including bad) between 256
+ and 511 octets */
+ u32 txpkts512; /* total packets (including bad) between 512
+ and 1023 octets */
+ u32 txpkts1024; /* total packets (including bad) between 1024
+ and 1518 octets */
+ u32 txpktsjumbo; /* total packets (including bad) between 1024
+ and MAXLength octets */
+} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t;
+
+typedef struct ucc_geth_rx_firmware_statistics_pram {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
+ or type mismatch) */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) between 256 and
+ 511 octets */
+ u32 pkts512; /* total frames (including bad) between 512 and
+ 1023 octets */
+ u32 pkts1024; /* total frames (including bad) between 1024
+ and 1518 octets */
+ u32 pktsjumbo; /* total frames (including bad) between 1024
+ and MAXLength octets */
+ u32 frlossinmacer; /* frames lost because of internal MAC error
+ that is not counted in any other counter */
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan; /* total frames that had their VLAN tag removed
+ */
+ u32 replacevlan; /* total frames that had their VLAN tag
+ replaced */
+ u32 insertvlan; /* total frames that had their VLAN tag
+ inserted */
+} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t;
+
+typedef struct ucc_geth_rx_interrupt_coalescing_entry {
+ u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
+ value */
+ u32 interruptcoalescingcounter; /* interrupt coalescing counter,
+ initialize to
+ interruptcoalescingmaxvalue */
+} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t;
+
+typedef struct ucc_geth_rx_interrupt_coalescing_table {
+ ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES];
+ /**< interrupt coalescing entry */
+} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t;
+
+typedef struct ucc_geth_rx_prefetched_bds {
+ qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
+} __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t;
+
+typedef struct ucc_geth_rx_bd_queues_entry {
+ u32 bdbaseptr; /* BD base pointer */
+ u32 bdptr; /* BD pointer */
+ u32 externalbdbaseptr; /* external BD base pointer */
+ u32 externalbdptr; /* external BD pointer */
+} __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t;
+
+typedef struct ucc_geth_tx_global_pram {
+ u16 temoder;
+ u8 res0[0x38 - 0x02];
+ u32 sqptr; /* a base pointer to send queue memory region */
+ u32 schedulerbasepointer; /* a base pointer to scheduler memory
+ region */
+ u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */
+ u32 tstate; /* tx internal state. High byte contains
+ function code */
+ u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
+ u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
+ u32 tqptr; /* a base pointer to the Tx Queues Memory
+ Region */
+ u8 res2[0x80 - 0x74];
+} __attribute__ ((packed)) ucc_geth_tx_global_pram_t;
+
+/* structure representing Extended Filtering Global Parameters in PRAM */
+typedef struct ucc_geth_exf_global_pram {
+ u32 l2pcdptr; /* individual address filter, high */
+ u8 res0[0x10 - 0x04];
+} __attribute__ ((packed)) ucc_geth_exf_global_pram_t;
+
+typedef struct ucc_geth_rx_global_pram {
+ u32 remoder; /* ethernet mode reg. */
+ u32 rqptr; /* base pointer to the Rx Queues Memory Region*/
+ u32 res0[0x1];
+ u8 res1[0x20 - 0xC];
+ u16 typeorlen; /* cutoff point below which the type/len
+ field is considered a length */
+ u8 res2[0x1];
+ u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/
+ u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */
+ u8 res3[0x30 - 0x28];
+ u32 intcoalescingptr; /* Interrupt coalescing table pointer */
+ u8 res4[0x36 - 0x34];
+ u8 rstate; /* rx internal state. High byte contains
+ function code */
+ u8 res5[0x46 - 0x37];
+ u16 mrblr; /* max receive buffer length reg. */
+ u32 rbdqptr; /* base pointer to RxBD parameter table
+ description */
+ u16 mflr; /* max frame length reg. */
+ u16 minflr; /* min frame length reg. */
+ u16 maxd1; /* max dma1 length reg. */
+ u16 maxd2; /* max dma2 length reg. */
+ u32 ecamptr; /* external CAM address */
+ u32 l2qt; /* VLAN priority mapping table. */
+ u32 l3qt[0x8]; /* IP priority mapping table. */
+ u16 vlantype; /* vlan type */
+ u16 vlantci; /* default vlan tci */
+ u8 addressfiltering[64]; /* address filtering data structure */
+ u32 exfGlobalParam; /* base address for extended filtering global
+ parameters */
+ u8 res6[0x100 - 0xC4]; /* Initialize to zero */
+} __attribute__ ((packed)) ucc_geth_rx_global_pram_t;
+
+#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
+
+/* structure representing InitEnet command */
+typedef struct ucc_geth_init_pram {
+ u8 resinit1;
+ u8 resinit2;
+ u8 resinit3;
+ u8 resinit4;
+ u16 resinit5;
+ u8 res1[0x1];
+ u8 largestexternallookupkeysize;
+ u32 rgftgfrxglobal;
+ u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */
+ u8 res2[0x38 - 0x30];
+ u32 txglobal; /* tx global */
+ u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
+ u8 res3[0x1];
+} __attribute__ ((packed)) ucc_geth_init_pram_t;
+
+#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
+#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
+
+#define ENET_INIT_PARAM_RISC_MASK 0x0000003f
+#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0
+#define ENET_INIT_PARAM_SNUM_MASK 0xff000000
+#define ENET_INIT_PARAM_SNUM_SHIFT 24
+
+#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06
+#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30
+#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff
+#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00
+#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400
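+
+/* Illustrative sketch (assumed usage, not code from the patch): building one
+   rx/tx thread entry of the InitEnet command from a serial number (snum), a
+   RISC allocation value and a parameter RAM pointer, using the masks above. */
+static inline u32 enet_init_param_entry(u8 snum, u32 risc, u32 pram_ptr)
+{
+	return ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
+	       (risc & ENET_INIT_PARAM_RISC_MASK) |
+	       (pram_ptr & ENET_INIT_PARAM_PTR_MASK);
+}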
+
+/* structure representing 82xx Address Filtering Enet Address in PRAM */
+typedef struct ucc_geth_82xx_enet_address {
+ u8 res1[0x2];
+ u16 h; /* address (MSB) */
+ u16 m; /* address */
+ u16 l; /* address (LSB) */
+} __attribute__ ((packed)) ucc_geth_82xx_enet_address_t;
+
+/* structure representing 82xx Address Filtering PRAM */
+typedef struct ucc_geth_82xx_address_filtering_pram {
+ u32 iaddr_h; /* individual address filter, high */
+ u32 iaddr_l; /* individual address filter, low */
+ u32 gaddr_h; /* group address filter, high */
+ u32 gaddr_l; /* group address filter, low */
+ ucc_geth_82xx_enet_address_t taddr;
+ ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS];
+ u8 res0[0x40 - 0x38];
+} __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t;
+
+/* GETH Tx firmware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+typedef struct ucc_geth_tx_firmware_statistics {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to transmit collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC
+ transmission error that are not counted
+ in any other counter */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer; /* frames with deferral time greater than
+ specified threshold */
+ u32 txpkts256; /* total packets (including bad) between 256
+ and 511 octets */
+ u32 txpkts512; /* total packets (including bad) between 512
+ and 1023 octets */
+ u32 txpkts1024; /* total packets (including bad) between 1024
+ and 1518 octets */
+ u32 txpktsjumbo; /* total packets (including bad) between 1024
+ and MAXLength octets */
+} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t;
+
+/* GETH Rx firmware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+typedef struct ucc_geth_rx_firmware_statistics {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
+ or type mismatch) */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) between 256 and
+ 511 octets */
+ u32 pkts512; /* total frames (including bad) between 512 and
+ 1023 octets */
+ u32 pkts1024; /* total frames (including bad) between 1024
+ and 1518 octets */
+ u32 pktsjumbo; /* total frames (including bad) between 1024
+ and MAXLength octets */
+ u32 frlossinmacer; /* frames lost because of internal MAC error
+ that is not counted in any other counter */
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan; /* total frames that had their VLAN tag removed
+ */
+ u32 replacevlan; /* total frames that had their VLAN tag
+ replaced */
+ u32 insertvlan; /* total frames that had their VLAN tag
+ inserted */
+} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t;
+
+/* GETH hardware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+typedef struct ucc_geth_hardware_statistics {
+ u32 tx64; /* Total number of frames (including bad
+ frames) transmitted that were exactly of the
+ minimal length (64 for untagged, 68 for
+ tagged, or with length exactly equal to the
+ parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 rx64; /* Total number of frames received including
+ bad frames that were exactly of the minimal
+ length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+ that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u32 tmca; /* Total number of frames that were transmitted
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+ successfully that had the destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. Must be implemented in
+ HW because it includes octets in frames that
+ never even reach the UCC */
+ u32 rmca; /* Total number of frames that were received
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 rbca; /* Total number of frames received successfully
+ that had the destination address equal to the
+ broadcast address */
+} __attribute__ ((packed)) ucc_geth_hardware_statistics_t;
+
+/* UCC GETH Tx errors returned via TxConf callback */
+#define TX_ERRORS_DEF 0x0200
+#define TX_ERRORS_EXDEF 0x0100
+#define TX_ERRORS_LC 0x0080
+#define TX_ERRORS_RL 0x0040
+#define TX_ERRORS_RC_MASK 0x003C
+#define TX_ERRORS_RC_SHIFT 2
+#define TX_ERRORS_UN 0x0002
+#define TX_ERRORS_CSL 0x0001
+
+/* UCC GETH Rx errors returned via RxStore callback */
+#define RX_ERRORS_CMR 0x0200
+#define RX_ERRORS_M 0x0100
+#define RX_ERRORS_BC 0x0080
+#define RX_ERRORS_MC 0x0040
+
+/* Transmit BD. These are in addition to values defined in uccf. */
+#define T_VID 0x003c0000 /* insert VLAN id index mask. */
+#define T_DEF (((u32) TX_ERRORS_DEF ) << 16)
+#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16)
+#define T_LC (((u32) TX_ERRORS_LC ) << 16)
+#define T_RL (((u32) TX_ERRORS_RL ) << 16)
+#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16)
+#define T_UN (((u32) TX_ERRORS_UN ) << 16)
+#define T_CSL (((u32) TX_ERRORS_CSL ) << 16)
+#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \
+ | T_UN | T_CSL) /* transmit errors to report */
+
+/* Receive BD. These are in addition to values defined in uccf. */
+#define R_LG 0x00200000 /* Frame length violation. */
+#define R_NO 0x00100000 /* Non-octet aligned frame. */
+#define R_SH 0x00080000 /* Short frame. */
+#define R_CR 0x00040000 /* CRC error. */
+#define R_OV 0x00020000 /* Overrun. */
+#define R_IPCH 0x00010000 /* IP checksum check failed. */
+#define R_CMR (((u32) RX_ERRORS_CMR ) << 16)
+#define R_M (((u32) RX_ERRORS_M ) << 16)
+#define R_BC (((u32) RX_ERRORS_BC ) << 16)
+#define R_MC (((u32) RX_ERRORS_MC ) << 16)
+#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to
+ report */
+#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \
+ R_OV | R_IPCH) /* receive errors to discard */
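+
+/* Illustrative sketch (assumed usage, not code from the patch): separating a
+   receive BD status word into fatal errors, which would cause the frame to
+   be discarded, and reportable errors, which would only be passed up. */
+static inline int rx_bd_errors_fatal(u32 bd_status)
+{
+	return (bd_status & R_ERRORS_FATAL) != 0;
+}
+
+static inline u32 rx_bd_errors_to_report(u32 bd_status)
+{
+	return bd_status & R_ERRORS_REPORT;
+}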
+
+/* Alignments */
+#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256
+#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128
+#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128
+#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64
+#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values
+ based on num of
+ threads, but always
+ using the maximum is
+ easier */
+#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
+#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 4 /* This is a
+ guess */
+#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
+#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
+#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This
+ is a
+ guess
+ */
+#define UCC_GETH_RX_BD_RING_ALIGNMENT 32
+#define UCC_GETH_TX_BD_RING_ALIGNMENT 32
+#define UCC_GETH_MRBLR_ALIGNMENT 128
+#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4
+#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32
+#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64
+
+#define UCC_GETH_TAD_EF 0x80
+#define UCC_GETH_TAD_V 0x40
+#define UCC_GETH_TAD_REJ 0x20
+#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2
+#define UCC_GETH_TAD_VTAG_OP_SHIFT 6
+#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20
+#define UCC_GETH_TAD_RQOS_SHIFT 0
+#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5
+#define UCC_GETH_TAD_CFI 0x10
+
+#define UCC_GETH_VLAN_PRIORITY_MAX 8
+#define UCC_GETH_IP_PRIORITY_MAX 64
+#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8
+#define UCC_GETH_RX_BD_RING_SIZE_MIN 8
+#define UCC_GETH_TX_BD_RING_SIZE_MIN 2
+
+#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD
+
+/* Driver definitions */
+#define TX_BD_RING_LEN 0x10
+#define RX_BD_RING_LEN 0x10
+#define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN
+
+#define TX_RING_MOD_MASK(size) (size-1)
+#define RX_RING_MOD_MASK(size) (size-1)
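+
+/* Illustrative sketch (assumption: ring sizes are powers of two, otherwise
+   the (size-1) masks above are not a modulo): advancing a BD ring index
+   with wrap-around. */
+static inline u16 tx_ring_next_index(u16 index, u16 size)
+{
+	return (index + 1) & TX_RING_MOD_MASK(size);
+}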
+
+#define ENET_NUM_OCTETS_PER_ADDRESS 6
+#define ENET_GROUP_ADDR 0x01 /* Group address mask
+ for ethernet
+ addresses */
+
+#define TX_TIMEOUT (1*HZ)
+#define SKB_ALLOC_TIMEOUT 100000
+#define PHY_INIT_TIMEOUT 100000
+#define PHY_CHANGE_TIME 2
+
+/* Fast Ethernet (10/100 Mbps) */
+#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
+ */
+#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */
+#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */
+#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
+ */
+#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
+#define UCC_GETH_UTFTT_INIT 128
+/* Gigabit Ethernet (1000 Mbps) */
+#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
+ FIFO size */
+#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
+#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
+#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual
+ FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
+
+#define UCC_GETH_REMODER_INIT 0 /* bits that must be
+ set */
+#define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must be set */
+#define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value
+ for this
+ register */
+#define UCC_GETH_MACCFG1_INIT 0
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+#define UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT \
+ (MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112)
+
+/* Ethernet speed */
+typedef enum enet_speed {
+ ENET_SPEED_10BT, /* 10 Base T */
+ ENET_SPEED_100BT, /* 100 Base T */
+ ENET_SPEED_1000BT /* 1000 Base T */
+} enet_speed_e;
+
+/* Ethernet Address Type. */
+typedef enum enet_addr_type {
+ ENET_ADDR_TYPE_INDIVIDUAL,
+ ENET_ADDR_TYPE_GROUP,
+ ENET_ADDR_TYPE_BROADCAST
+} enet_addr_type_e;
+
+/* TBI / MII Set Register */
+typedef enum enet_tbi_mii_reg {
+ ENET_TBI_MII_CR = 0x00, /* Control (CR ) */
+ ENET_TBI_MII_SR = 0x01, /* Status (SR ) */
+ ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */
+ ENET_TBI_MII_ANLPBPA = 0x05, /* AN link partner base page ability
+ (ANLPBPA) */
+ ENET_TBI_MII_ANEX = 0x06, /* AN expansion (ANEX ) */
+ ENET_TBI_MII_ANNPT = 0x07, /* AN next page transmit (ANNPT ) */
+ ENET_TBI_MII_ANLPANP = 0x08, /* AN link partner ability next page
+ (ANLPANP) */
+ ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */
+ ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */
+ ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */
+} enet_tbi_mii_reg_e;
+
+/* UCC GETH 82xx Ethernet Address Recognition Location */
+typedef enum ucc_geth_enet_address_recognition_location {
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station
+ address */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional
+ station
+ address
+ paddr1 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional
+ station
+ address
+ paddr2 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional
+ station
+ address
+ paddr3 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional
+ station
+ address
+ paddr4 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual
+ hash */
+} ucc_geth_enet_address_recognition_location_e;
+
+/* UCC GETH vlan operation tagged */
+typedef enum ucc_geth_vlan_operation_tagged {
+ UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */
+ UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG
+ = 0x1, /* Tagged - replace vid portion of q tag */
+ UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE
+ = 0x2, /* Tagged - if vid0 replace vid with default value */
+ UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME
+ = 0x3 /* Tagged - extract q tag from frame */
+} ucc_geth_vlan_operation_tagged_e;
+
+/* UCC GETH vlan operation non-tagged */
+typedef enum ucc_geth_vlan_operation_non_tagged {
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged -
+ q tag insert
+ */
+} ucc_geth_vlan_operation_non_tagged_e;
+
+/* UCC GETH Rx Quality of Service Mode */
+typedef enum ucc_geth_qos_mode {
+ UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */
+ UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue
+ determined
+ by L2
+ criteria */
+ UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue
+ determined
+ by L3
+ criteria */
+} ucc_geth_qos_mode_e;
+
+/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together
+ for combined functionality */
+typedef enum ucc_geth_statistics_gathering_mode {
+ UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No
+ statistics
+ gathering */
+ UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable
+ hardware
+ statistics
+ gathering
+ */
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable
+ firmware
+ tx
+ statistics
+ gathering
+ */
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable
+ firmware
+ rx
+ statistics
+ gathering
+ */
+} ucc_geth_statistics_gathering_mode_e;
+
+/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */
+typedef enum ucc_geth_maccfg2_pad_and_crc_mode {
+ UCC_GETH_PAD_AND_CRC_MODE_NONE
+ = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding
+ short frames
+ nor CRC */
+ UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY
+ = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append
+ CRC only */
+ UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC =
+ MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC
+} ucc_geth_maccfg2_pad_and_crc_mode_e;
+
+/* UCC GETH upsmr Flow Control Mode */
+typedef enum ucc_geth_flow_control_mode {
+ UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic
+ flow control
+ */
+ UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY
+ = 0x00004000 /* Send pause frame when RxFIFO reaches its
+ emergency threshold */
+} ucc_geth_flow_control_mode_e;
+
+/* UCC GETH number of threads */
+typedef enum ucc_geth_num_of_threads {
+ UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */
+ UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */
+ UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */
+ UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */
+ UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */
+} ucc_geth_num_of_threads_e;
+
+/* UCC GETH number of station addresses */
+typedef enum ucc_geth_num_of_station_addresses {
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */
+} ucc_geth_num_of_station_addresses_e;
+
+typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS];
+
+/* UCC GETH 82xx Ethernet Address Container */
+typedef struct enet_addr_container {
+ enet_addr_t address; /* ethernet address */
+ ucc_geth_enet_address_recognition_location_e location; /* location in
+ 82xx address
+ recognition
+ hardware */
+ struct list_head node;
+} enet_addr_container_t;
+
+#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node)
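+
+/* Illustrative sketch (assumed usage, not code from the patch): walking a
+   list of address containers, e.g. the driver's group or individual hash
+   queues, with the helper macro above. */
+static inline void enet_addr_list_walk_example(struct list_head *q)
+{
+	struct list_head *pos;
+	enet_addr_container_t *cont;
+
+	list_for_each(pos, q) {
+		cont = ENET_ADDR_CONT_ENTRY(pos);
+		/* cont->address and cont->location describe one entry */
+		(void)cont;
+	}
+}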
+
+/* UCC GETH Termination Action Descriptor (TAD) structure. */
+typedef struct ucc_geth_tad_params {
+ int rx_non_dynamic_extended_features_mode;
+ int reject_frame;
+ ucc_geth_vlan_operation_tagged_e vtag_op;
+ ucc_geth_vlan_operation_non_tagged_e vnontag_op;
+ ucc_geth_qos_mode_e rqos;
+ u8 vpri;
+ u16 vid;
+} ucc_geth_tad_params_t;
+
+/* GETH protocol initialization structure */
+typedef struct ucc_geth_info {
+ ucc_fast_info_t uf_info;
+ u8 numQueuesTx;
+ u8 numQueuesRx;
+ int ipCheckSumCheck;
+ int ipCheckSumGenerate;
+ int rxExtendedFiltering;
+ u32 extendedFilteringChainPointer;
+ u16 typeorlen;
+ int dynamicMaxFrameLength;
+ int dynamicMinFrameLength;
+ u8 nonBackToBackIfgPart1;
+ u8 nonBackToBackIfgPart2;
+ u8 miminumInterFrameGapEnforcement;
+ u8 backToBackInterFrameGap;
+ int ipAddressAlignment;
+ int lengthCheckRx;
+ u32 mblinterval;
+ u16 nortsrbytetime;
+ u8 fracsiz;
+ u8 strictpriorityq;
+ u8 txasap;
+ u8 extrabw;
+ int miiPreambleSupress;
+ u8 altBebTruncation;
+ int altBeb;
+ int backPressureNoBackoff;
+ int noBackoff;
+ int excessDefer;
+ u8 maxRetransmission;
+ u8 collisionWindow;
+ int pro;
+ int cap;
+ int rsh;
+ int rlpb;
+ int cam;
+ int bro;
+ int ecm;
+ int receiveFlowControl;
+ u8 maxGroupAddrInHash;
+ u8 maxIndAddrInHash;
+ u8 prel;
+ u16 maxFrameLength;
+ u16 minFrameLength;
+ u16 maxD1Length;
+ u16 maxD2Length;
+ u16 vlantype;
+ u16 vlantci;
+ u32 ecamptr;
+ u32 eventRegMask;
+ u16 pausePeriod;
+ u16 extensionField;
+ u8 phy_address;
+ u32 board_flags;
+ u32 phy_interrupt;
+ u8 weightfactor[NUM_TX_QUEUES];
+ u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
+ u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
+ u8 l3qt[UCC_GETH_IP_PRIORITY_MAX];
+ u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX];
+ u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
+ u16 bdRingLenTx[NUM_TX_QUEUES];
+ u16 bdRingLenRx[NUM_RX_QUEUES];
+ enet_interface_e enet_interface;
+ ucc_geth_num_of_station_addresses_e numStationAddresses;
+ qe_fltr_largest_external_tbl_lookup_key_size_e
+ largestexternallookupkeysize;
+ ucc_geth_statistics_gathering_mode_e statisticsMode;
+ ucc_geth_vlan_operation_tagged_e vlanOperationTagged;
+ ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged;
+ ucc_geth_qos_mode_e rxQoSMode;
+ ucc_geth_flow_control_mode_e aufc;
+ ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc;
+ ucc_geth_num_of_threads_e numThreadsTx;
+ ucc_geth_num_of_threads_e numThreadsRx;
+ qe_risc_allocation_e riscTx;
+ qe_risc_allocation_e riscRx;
+} ucc_geth_info_t;
+
+/* structure representing UCC GETH */
+typedef struct ucc_geth_private {
+ ucc_geth_info_t *ug_info;
+ ucc_fast_private_t *uccf;
+ struct net_device *dev;
+ struct net_device_stats stats; /* linux network statistics */
+ ucc_geth_t *ug_regs;
+ ucc_geth_init_pram_t *p_init_enet_param_shadow;
+ ucc_geth_exf_global_pram_t *p_exf_glbl_param;
+ u32 exf_glbl_param_offset;
+ ucc_geth_rx_global_pram_t *p_rx_glbl_pram;
+ u32 rx_glbl_pram_offset;
+ ucc_geth_tx_global_pram_t *p_tx_glbl_pram;
+ u32 tx_glbl_pram_offset;
+ ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg;
+ u32 send_q_mem_reg_offset;
+ ucc_geth_thread_data_tx_t *p_thread_data_tx;
+ u32 thread_dat_tx_offset;
+ ucc_geth_thread_data_rx_t *p_thread_data_rx;
+ u32 thread_dat_rx_offset;
+ ucc_geth_scheduler_t *p_scheduler;
+ u32 scheduler_offset;
+ ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
+ u32 tx_fw_statistics_pram_offset;
+ ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
+ u32 rx_fw_statistics_pram_offset;
+ ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl;
+ u32 rx_irq_coalescing_tbl_offset;
+ ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl;
+ u32 rx_bd_qs_tbl_offset;
+ u8 *p_tx_bd_ring[NUM_TX_QUEUES];
+ u32 tx_bd_ring_offset[NUM_TX_QUEUES];
+ u8 *p_rx_bd_ring[NUM_RX_QUEUES];
+ u32 rx_bd_ring_offset[NUM_RX_QUEUES];
+ u8 *confBd[NUM_TX_QUEUES];
+ u8 *txBd[NUM_TX_QUEUES];
+ u8 *rxBd[NUM_RX_QUEUES];
+ int badFrame[NUM_RX_QUEUES];
+ u16 cpucount[NUM_TX_QUEUES];
+ volatile u16 *p_cpucount[NUM_TX_QUEUES];
+ int indAddrRegUsed[NUM_OF_PADDRS];
+ enet_addr_t paddr[NUM_OF_PADDRS];
+ u8 numGroupAddrInHash;
+ u8 numIndAddrInHash;
+ u8 numIndAddrInReg;
+ int rx_extended_features;
+ int rx_non_dynamic_extended_features;
+ struct list_head conf_skbs;
+ struct list_head group_hash_q;
+ struct list_head ind_hash_q;
+ u32 saved_uccm;
+ spinlock_t lock;
+ /* pointers to arrays of skbuffs for tx and rx */
+ struct sk_buff **tx_skbuff[NUM_TX_QUEUES];
+ struct sk_buff **rx_skbuff[NUM_RX_QUEUES];
+ /* indices pointing to the next free skb in the skb arrays */
+ u16 skb_curtx[NUM_TX_QUEUES];
+ u16 skb_currx[NUM_RX_QUEUES];
+ /* index of the first skb which hasn't been transmitted yet. */
+ u16 skb_dirtytx[NUM_TX_QUEUES];
+
+ struct work_struct tq;
+ struct timer_list phy_info_timer;
+ struct ugeth_mii_info *mii_info;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+} ucc_geth_private_t;
+
+#endif /* __UCC_GETH_H__ */
diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c
new file mode 100644
index 000000000000..f91028c5386d
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.c
@@ -0,0 +1,801 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * UCC GETH Driver -- PHY handling
+ *
+ * Changelog:
+ * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "ucc_geth.h"
+#include "ucc_geth_phy.h"
+#include <platforms/83xx/mpc8360e_pb.h>
+
+#define ugphy_printk(level, format, arg...) \
+ printk(level format "\n", ## arg)
+
+#define ugphy_dbg(format, arg...) \
+ ugphy_printk(KERN_DEBUG, format , ## arg)
+#define ugphy_err(format, arg...) \
+ ugphy_printk(KERN_ERR, format , ## arg)
+#define ugphy_info(format, arg...) \
+ ugphy_printk(KERN_INFO, format , ## arg)
+#define ugphy_warn(format, arg...) \
+ ugphy_printk(KERN_WARNING, format , ## arg)
+
+#ifdef UGETH_VERBOSE_DEBUG
+#define ugphy_vdbg ugphy_dbg
+#else
+#define ugphy_vdbg(fmt, args...) do { } while (0)
+#endif /* UGETH_VERBOSE_DEBUG */
+
+static void config_genmii_advert(struct ugeth_mii_info *mii_info);
+static void genmii_setup_forced(struct ugeth_mii_info *mii_info);
+static void genmii_restart_aneg(struct ugeth_mii_info *mii_info);
+static int gbit_config_aneg(struct ugeth_mii_info *mii_info);
+static int genmii_config_aneg(struct ugeth_mii_info *mii_info);
+static int genmii_update_link(struct ugeth_mii_info *mii_info);
+static int genmii_read_status(struct ugeth_mii_info *mii_info);
+u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum);
+void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val);
+
+static u8 *bcsr_regs = NULL;
+
+/* Write value to the PHY for this device to the register at regnum, */
+/* waiting until the write is done before it returns. All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_mii_mng_t *mii_regs;
+ enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ u32 tmp_reg;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irq(&ugeth->lock);
+
+ mii_regs = ugeth->mii_info->mii_regs;
+
+ /* Set this UCC to be the master of the MII management */
+ ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
+
+ /* Stop the MII management read cycle */
+ out_be32(&mii_regs->miimcom, 0);
+ /* Setting up the MII Management Address Register */
+ tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+ out_be32(&mii_regs->miimadd, tmp_reg);
+
+ /* Setting up the MII Management Control Register with the value */
+ out_be32(&mii_regs->miimcon, (u32) value);
+
+ /* Wait till MII management write is complete */
+ while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
+ cpu_relax();
+
+ spin_unlock_irq(&ugeth->lock);
+
+ udelay(10000);
+}
+
+/* Reads from register regnum in the PHY for device dev, */
+/* returning the value. Clears miimcom first. All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+int read_phy_reg(struct net_device *dev, int mii_id, int regnum)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_mii_mng_t *mii_regs;
+ enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ u32 tmp_reg;
+ u16 value;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irq(&ugeth->lock);
+
+ mii_regs = ugeth->mii_info->mii_regs;
+
+ /* Setting up the MII Management Address Register */
+ tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+ out_be32(&mii_regs->miimadd, tmp_reg);
+
+ /* Perform an MII management read cycle */
+ out_be32(&mii_regs->miimcom, MIIMCOM_READ_CYCLE);
+
+ /* Wait till the MII management read is complete */
+ while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
+ cpu_relax();
+
+ udelay(10000);
+
+ /* Read MII management status */
+ value = (u16) in_be32(&mii_regs->miimstat);
+ out_be32(&mii_regs->miimcom, 0);
+ if (value == 0xffff)
+ ugphy_warn("read wrong value : mii_id %d,mii_reg %d, base %08x",
+ mii_id, mii_reg, (u32) & (mii_regs->miimcfg));
+
+ spin_unlock_irq(&ugeth->lock);
+
+ return (value);
+}
+
+void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->phyinfo->ack_interrupt)
+ mii_info->phyinfo->ack_interrupt(mii_info);
+}
+
+void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info,
+ u32 interrupts)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ mii_info->interrupts = interrupts;
+ if (mii_info->phyinfo->config_intr)
+ mii_info->phyinfo->config_intr(mii_info);
+}
+
+/* Writes MII_ADVERTISE with the appropriate values, after
+ * sanitizing advertise to make sure only supported features
+ * are advertised
+ */
+static void config_genmii_advert(struct ugeth_mii_info *mii_info)
+{
+ u32 advertise;
+ u16 adv;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Only allow advertising what this PHY supports */
+ mii_info->advertising &= mii_info->phyinfo->features;
+ advertise = mii_info->advertising;
+
+ /* Setup standard advertisement */
+ adv = phy_read(mii_info, MII_ADVERTISE);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ phy_write(mii_info, MII_ADVERTISE, adv);
+}
+
+static void genmii_setup_forced(struct ugeth_mii_info *mii_info)
+{
+ u16 ctrl;
+ u32 features = mii_info->phyinfo->features;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ ctrl = phy_read(mii_info, MII_BMCR);
+
+ ctrl &=
+ ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+ ctrl |= BMCR_RESET;
+
+ switch (mii_info->speed) {
+ case SPEED_1000:
+ if (features & (SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full)) {
+ ctrl |= BMCR_SPEED1000;
+ break;
+ }
+ mii_info->speed = SPEED_100;
+ case SPEED_100:
+ if (features & (SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full)) {
+ ctrl |= BMCR_SPEED100;
+ break;
+ }
+ mii_info->speed = SPEED_10;
+ case SPEED_10:
+ if (features & (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full))
+ break;
+ default: /* Unsupported speed! */
+ ugphy_err("%s: Bad speed!", mii_info->dev->name);
+ break;
+ }
+
+ phy_write(mii_info, MII_BMCR, ctrl);
+}
+
+/* Enable and Restart Autonegotiation */
+static void genmii_restart_aneg(struct ugeth_mii_info *mii_info)
+{
+ u16 ctl;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ ctl = phy_read(mii_info, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(mii_info, MII_BMCR, ctl);
+}
+
+static int gbit_config_aneg(struct ugeth_mii_info *mii_info)
+{
+ u16 adv;
+ u32 advertise;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->autoneg) {
+ /* Configure the ADVERTISE register */
+ config_genmii_advert(mii_info);
+ advertise = mii_info->advertising;
+
+ adv = phy_read(mii_info, MII_1000BASETCONTROL);
+ adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
+ MII_1000BASETCONTROL_HALFDUPLEXCAP);
+ if (advertise & SUPPORTED_1000baseT_Half)
+ adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
+ if (advertise & SUPPORTED_1000baseT_Full)
+ adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
+ phy_write(mii_info, MII_1000BASETCONTROL, adv);
+
+ /* Start/Restart aneg */
+ genmii_restart_aneg(mii_info);
+ } else
+ genmii_setup_forced(mii_info);
+
+ return 0;
+}
+
+static int genmii_config_aneg(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->autoneg) {
+ config_genmii_advert(mii_info);
+ genmii_restart_aneg(mii_info);
+ } else
+ genmii_setup_forced(mii_info);
+
+ return 0;
+}
+
+static int genmii_update_link(struct ugeth_mii_info *mii_info)
+{
+ u16 status;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Do a fake read */
+ phy_read(mii_info, MII_BMSR);
+
+ /* Read link and autonegotiation status */
+ status = phy_read(mii_info, MII_BMSR);
+ if ((status & BMSR_LSTATUS) == 0)
+ mii_info->link = 0;
+ else
+ mii_info->link = 1;
+
+ /* If we are autonegotiating, and not done,
+ * return an error */
+ if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int genmii_read_status(struct ugeth_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ if (mii_info->autoneg) {
+ status = phy_read(mii_info, MII_LPA);
+
+ if (status & (LPA_10FULL | LPA_100FULL))
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+ if (status & (LPA_100FULL | LPA_100HALF))
+ mii_info->speed = SPEED_100;
+ else
+ mii_info->speed = SPEED_10;
+ mii_info->pause = 0;
+ }
+ /* On non-aneg, we assume what we put in BMCR is the speed,
+ * though magic-aneg shouldn't prevent this case from occurring
+ */
+
+ return 0;
+}
+
+static int marvell_init(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ phy_write(mii_info, 0x14, 0x0cd2);
+ phy_write(mii_info, MII_BMCR,
+ phy_read(mii_info, MII_BMCR) | BMCR_RESET);
+ msleep(4000);
+
+ return 0;
+}
+
+static int marvell_config_aneg(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* The Marvell PHY has an erratum which requires
+ * that certain registers get written in order
+ * to restart autonegotiation */
+ phy_write(mii_info, MII_BMCR, BMCR_RESET);
+
+ phy_write(mii_info, 0x1d, 0x1f);
+ phy_write(mii_info, 0x1e, 0x200c);
+ phy_write(mii_info, 0x1d, 0x5);
+ phy_write(mii_info, 0x1e, 0);
+ phy_write(mii_info, 0x1e, 0x100);
+
+ gbit_config_aneg(mii_info);
+
+ return 0;
+}
+
+static int marvell_read_status(struct ugeth_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ /* If the link is up, read the speed and duplex */
+ /* If we aren't autonegotiating, assume speeds
+ * are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ int speed;
+ status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS);
+
+ /* Get the duplexity */
+ if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+
+ /* Get the speed */
+ speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
+ switch (speed) {
+ case MII_M1011_PHY_SPEC_STATUS_1000:
+ mii_info->speed = SPEED_1000;
+ break;
+ case MII_M1011_PHY_SPEC_STATUS_100:
+ mii_info->speed = SPEED_100;
+ break;
+ default:
+ mii_info->speed = SPEED_10;
+ break;
+ }
+ mii_info->pause = 0;
+ }
+
+ return 0;
+}
+
+static int marvell_ack_interrupt(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Clear the interrupts by reading the reg */
+ phy_read(mii_info, MII_M1011_IEVENT);
+
+ return 0;
+}
+
+static int marvell_config_intr(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+ phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
+ else
+ phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
+
+ return 0;
+}
+
+static int cis820x_init(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ phy_write(mii_info, MII_CIS8201_AUX_CONSTAT,
+ MII_CIS8201_AUXCONSTAT_INIT);
+ phy_write(mii_info, MII_CIS8201_EXT_CON1, MII_CIS8201_EXTCON1_INIT);
+
+ return 0;
+}
+
+static int cis820x_read_status(struct ugeth_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ /* If the link is up, read the speed and duplex */
+ /* If we aren't autonegotiating, assume speeds
+ * are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ int speed;
+
+ status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT);
+ if (status & MII_CIS8201_AUXCONSTAT_DUPLEX)
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+
+ speed = status & MII_CIS8201_AUXCONSTAT_SPEED;
+
+ switch (speed) {
+ case MII_CIS8201_AUXCONSTAT_GBIT:
+ mii_info->speed = SPEED_1000;
+ break;
+ case MII_CIS8201_AUXCONSTAT_100:
+ mii_info->speed = SPEED_100;
+ break;
+ default:
+ mii_info->speed = SPEED_10;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int cis820x_ack_interrupt(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ phy_read(mii_info, MII_CIS8201_ISTAT);
+
+ return 0;
+}
+
+static int cis820x_config_intr(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+ phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK);
+ else
+ phy_write(mii_info, MII_CIS8201_IMASK, 0);
+
+ return 0;
+}
+
+#define DM9161_DELAY 10
+
+static int dm9161_read_status(struct ugeth_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ /* If the link is up, read the speed and duplex */
+ /* If we aren't autonegotiating, assume speeds
+ * are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ status = phy_read(mii_info, MII_DM9161_SCSR);
+ if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
+ mii_info->speed = SPEED_100;
+ else
+ mii_info->speed = SPEED_10;
+
+ if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static int dm9161_config_aneg(struct ugeth_mii_info *mii_info)
+{
+ struct dm9161_private *priv = mii_info->priv;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (0 == priv->resetdone)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static void dm9161_timer(unsigned long data)
+{
+ struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
+ struct dm9161_private *priv = mii_info->priv;
+ u16 status = phy_read(mii_info, MII_BMSR);
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (status & BMSR_ANEGCOMPLETE) {
+ priv->resetdone = 1;
+ } else
+ mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
+}
+
+static int dm9161_init(struct ugeth_mii_info *mii_info)
+{
+ struct dm9161_private *priv;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Allocate the private data structure */
+ priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL);
+
+ if (NULL == priv)
+ return -ENOMEM;
+
+ mii_info->priv = priv;
+
+ /* Reset is not done yet */
+ priv->resetdone = 0;
+
+ phy_write(mii_info, MII_BMCR,
+ phy_read(mii_info, MII_BMCR) | BMCR_RESET);
+
+ phy_write(mii_info, MII_BMCR,
+ phy_read(mii_info, MII_BMCR) & ~BMCR_ISOLATE);
+
+ config_genmii_advert(mii_info);
+ /* Start/Restart aneg */
+ genmii_config_aneg(mii_info);
+
+ /* Start a timer for DM9161_DELAY seconds to wait
+ * for the PHY to be ready */
+ init_timer(&priv->timer);
+ priv->timer.function = &dm9161_timer;
+ priv->timer.data = (unsigned long)mii_info;
+ mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
+
+ return 0;
+}
+
+static void dm9161_close(struct ugeth_mii_info *mii_info)
+{
+ struct dm9161_private *priv = mii_info->priv;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ del_timer_sync(&priv->timer);
+ kfree(priv);
+}
+
+static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info)
+{
+/* FIXME: These lines are a workaround for a bug in the MPC8325.
+Remove this from here when it is fixed */
+ if (bcsr_regs == NULL)
+ bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
+ bcsr_regs[14] |= 0x40;
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Clear the interrupts by reading the reg */
+ phy_read(mii_info, MII_DM9161_INTR);
+
+
+ return 0;
+}
+
+static int dm9161_config_intr(struct ugeth_mii_info *mii_info)
+{
+/* FIXME: These lines are a workaround for a bug in the MPC8325.
+Remove this from here when it is fixed */
+ if (bcsr_regs == NULL) {
+ bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
+ bcsr_regs[14] &= ~0x40;
+ }
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+ phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT);
+ else
+ phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP);
+
+ return 0;
+}
+
+/* Cicada 820x */
+static struct phy_info phy_info_cis820x = {
+ .phy_id = 0x000fc440,
+ .name = "Cicada Cis8204",
+ .phy_id_mask = 0x000fffc0,
+ .features = MII_GBIT_FEATURES,
+ .init = &cis820x_init,
+ .config_aneg = &gbit_config_aneg,
+ .read_status = &cis820x_read_status,
+ .ack_interrupt = &cis820x_ack_interrupt,
+ .config_intr = &cis820x_config_intr,
+};
+
+static struct phy_info phy_info_dm9161 = {
+ .phy_id = 0x0181b880,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "Davicom DM9161E",
+ .init = dm9161_init,
+ .config_aneg = dm9161_config_aneg,
+ .read_status = dm9161_read_status,
+ .close = dm9161_close,
+};
+
+static struct phy_info phy_info_dm9161a = {
+ .phy_id = 0x0181b8a0,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "Davicom DM9161A",
+ .features = MII_BASIC_FEATURES,
+ .init = dm9161_init,
+ .config_aneg = dm9161_config_aneg,
+ .read_status = dm9161_read_status,
+ .ack_interrupt = dm9161_ack_interrupt,
+ .config_intr = dm9161_config_intr,
+ .close = dm9161_close,
+};
+
+static struct phy_info phy_info_marvell = {
+ .phy_id = 0x01410c00,
+ .phy_id_mask = 0xffffff00,
+ .name = "Marvell 88E11x1",
+ .features = MII_GBIT_FEATURES,
+ .init = &marvell_init,
+ .config_aneg = &marvell_config_aneg,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+};
+
+static struct phy_info phy_info_genmii = {
+ .phy_id = 0x00000000,
+ .phy_id_mask = 0x00000000,
+ .name = "Generic MII",
+ .features = MII_BASIC_FEATURES,
+ .config_aneg = genmii_config_aneg,
+ .read_status = genmii_read_status,
+};
+
+static struct phy_info *phy_info[] = {
+ &phy_info_cis820x,
+ &phy_info_marvell,
+ &phy_info_dm9161,
+ &phy_info_dm9161a,
+ &phy_info_genmii,
+ NULL
+};
+
+u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum)
+{
+ u16 retval;
+ unsigned long flags;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irqsave(&mii_info->mdio_lock, flags);
+ retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum);
+ spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
+
+ return retval;
+}
+
+void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val)
+{
+ unsigned long flags;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irqsave(&mii_info->mdio_lock, flags);
+ mii_info->mdio_write(mii_info->dev, mii_info->mii_id, regnum, val);
+ spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
+}
+
+/* Use the PHY ID registers to determine what type of PHY is attached
+ * to device dev. Return a struct phy_info structure describing that PHY.
+ */
+struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info)
+{
+ u16 phy_reg;
+ u32 phy_ID;
+ int i;
+ struct phy_info *theInfo = NULL;
+ struct net_device *dev = mii_info->dev;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Grab the bits from PHYIR1, and put them in the upper half */
+ phy_reg = phy_read(mii_info, MII_PHYSID1);
+ phy_ID = (phy_reg & 0xffff) << 16;
+
+ /* Grab the bits from PHYIR2, and put them in the lower half */
+ phy_reg = phy_read(mii_info, MII_PHYSID2);
+ phy_ID |= (phy_reg & 0xffff);
+
+ /* loop through all the known PHY types, and find one that */
+ /* matches the ID we read from the PHY. */
+ for (i = 0; phy_info[i]; i++)
+ if (phy_info[i]->phy_id == (phy_ID & phy_info[i]->phy_id_mask)){
+ theInfo = phy_info[i];
+ break;
+ }
+
+ /* This shouldn't happen, as we have generic PHY support */
+ if (theInfo == NULL) {
+ ugphy_info("%s: PHY id %x is not supported!", dev->name,
+ phy_ID);
+ return NULL;
+ } else {
+ ugphy_info("%s: PHY is %s (%x)", dev->name, theInfo->name,
+ phy_ID);
+ }
+
+ return theInfo;
+}
diff --git a/drivers/net/ucc_geth_phy.h b/drivers/net/ucc_geth_phy.h
new file mode 100644
index 000000000000..2f98b8f1bb0a
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * UCC GETH Driver -- PHY handling
+ *
+ * Changelog:
+ * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __UCC_GETH_PHY_H__
+#define __UCC_GETH_PHY_H__
+
+#define MII_end ((u32)-2)
+#define MII_read ((u32)-1)
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+
+#define UGETH_AN_TIMEOUT 2000
+
+/* 1000BT control (Marvell & BCM54xx at least) */
+#define MII_1000BASETCONTROL 0x09
+#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
+#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
+
+/* Cicada Extended Control Register 1 */
+#define MII_CIS8201_EXT_CON1 0x17
+#define MII_CIS8201_EXTCON1_INIT 0x0000
+
+/* Cicada Interrupt Mask Register */
+#define MII_CIS8201_IMASK 0x19
+#define MII_CIS8201_IMASK_IEN 0x8000
+#define MII_CIS8201_IMASK_SPEED 0x4000
+#define MII_CIS8201_IMASK_LINK 0x2000
+#define MII_CIS8201_IMASK_DUPLEX 0x1000
+#define MII_CIS8201_IMASK_MASK 0xf000
+
+/* Cicada Interrupt Status Register */
+#define MII_CIS8201_ISTAT 0x1a
+#define MII_CIS8201_ISTAT_STATUS 0x8000
+#define MII_CIS8201_ISTAT_SPEED 0x4000
+#define MII_CIS8201_ISTAT_LINK 0x2000
+#define MII_CIS8201_ISTAT_DUPLEX 0x1000
+
+/* Cicada Auxiliary Control/Status Register */
+#define MII_CIS8201_AUX_CONSTAT 0x1c
+#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
+#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
+#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
+#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
+#define MII_CIS8201_AUXCONSTAT_100 0x0008
+
+/* 88E1011 PHY Status Register */
+#define MII_M1011_PHY_SPEC_STATUS 0x11
+#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
+#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
+#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
+#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
+#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
+#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400
+
+#define MII_M1011_IEVENT 0x13
+#define MII_M1011_IEVENT_CLEAR 0x0000
+
+#define MII_M1011_IMASK 0x12
+#define MII_M1011_IMASK_INIT 0x6400
+#define MII_M1011_IMASK_CLEAR 0x0000
+
+#define MII_DM9161_SCR 0x10
+#define MII_DM9161_SCR_INIT 0x0610
+
+/* DM9161 Specified Configuration and Status Register */
+#define MII_DM9161_SCSR 0x11
+#define MII_DM9161_SCSR_100F 0x8000
+#define MII_DM9161_SCSR_100H 0x4000
+#define MII_DM9161_SCSR_10F 0x2000
+#define MII_DM9161_SCSR_10H 0x1000
+
+/* DM9161 Interrupt Register */
+#define MII_DM9161_INTR 0x15
+#define MII_DM9161_INTR_PEND 0x8000
+#define MII_DM9161_INTR_DPLX_MASK 0x0800
+#define MII_DM9161_INTR_SPD_MASK 0x0400
+#define MII_DM9161_INTR_LINK_MASK 0x0200
+#define MII_DM9161_INTR_MASK 0x0100
+#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
+#define MII_DM9161_INTR_SPD_CHANGE 0x0008
+#define MII_DM9161_INTR_LINK_CHANGE 0x0004
+#define MII_DM9161_INTR_INIT 0x0000
+#define MII_DM9161_INTR_STOP \
+(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
+ | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+
+/* DM9161 10BT Configuration/Status */
+#define MII_DM9161_10BTCSR 0x12
+#define MII_DM9161_10BTCSR_INIT 0x7800
+
+#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
+ SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | \
+ SUPPORTED_100baseT_Full | \
+ SUPPORTED_Autoneg | \
+ SUPPORTED_TP | \
+ SUPPORTED_MII)
+
+#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
+ SUPPORTED_1000baseT_Half | \
+ SUPPORTED_1000baseT_Full)
+
+#define MII_READ_COMMAND 0x00000001
+
+#define MII_INTERRUPT_DISABLED 0x0
+#define MII_INTERRUPT_ENABLED 0x1
+/* Taken from mii_if_info and sungem_phy.h */
+struct ugeth_mii_info {
+ /* Information about the PHY type */
+ /* And management functions */
+ struct phy_info *phyinfo;
+
+ ucc_mii_mng_t *mii_regs;
+
+ /* forced speed & duplex (no autoneg)
+ * partner speed & duplex & pause (autoneg)
+ */
+ int speed;
+ int duplex;
+ int pause;
+
+ /* The most recently read link state */
+ int link;
+
+ /* Enabled Interrupts */
+ u32 interrupts;
+
+ u32 advertising;
+ int autoneg;
+ int mii_id;
+
+ /* private data pointer */
+ /* For use by PHYs to maintain extra state */
+ void *priv;
+
+ /* Provided by host chip */
+ struct net_device *dev;
+
+ /* A lock to ensure that only one thing can read/write
+ * the MDIO bus at a time */
+ spinlock_t mdio_lock;
+
+ /* Provided by ethernet driver */
+ int (*mdio_read) (struct net_device * dev, int mii_id, int reg);
+ void (*mdio_write) (struct net_device * dev, int mii_id, int reg,
+ int val);
+};
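
/* Illustrative sketch only, not part of this patch: one way a MAC driver
 * might wire up the MDIO accessors and lock declared above before handing
 * the structure to the PHY code. The names example_mdio_read,
 * example_mdio_write and example_setup_mii are hypothetical; only the
 * ugeth_mii_info fields themselves come from this header.
 */
static int example_mdio_read(struct net_device *dev, int mii_id, int reg)
{
	/* a real driver would perform an MDIO read on its management
	 * interface here and return the 16-bit register value */
	return 0;
}

static void example_mdio_write(struct net_device *dev, int mii_id, int reg,
			       int val)
{
	/* a real driver would perform an MDIO write here */
}

static void example_setup_mii(struct ugeth_mii_info *mii_info,
			      struct net_device *dev, int mii_id)
{
	spin_lock_init(&mii_info->mdio_lock);
	mii_info->dev = dev;
	mii_info->mii_id = mii_id;
	mii_info->mdio_read = example_mdio_read;
	mii_info->mdio_write = example_mdio_write;
	mii_info->autoneg = 1;
	mii_info->advertising = MII_BASIC_FEATURES;
}
/* phy_read()/phy_write() above then serialize all accesses via mdio_lock. */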
+
+/* struct phy_info: a structure which defines attributes for a PHY
+ *
+ * id will contain a number which represents the PHY. During
+ * startup, the driver will poll the PHY to find out what its
+ * UID (as defined by registers 2 and 3) is. The 32-bit result
+ * read from the PHY is ANDed with phy_id_mask to discard any
+ * bits that may change between revisions and are unimportant to
+ * functionality.
+ *
+ * The callbacks below all take a ugeth_mii_info structure.
+ * Every PHY must declare config_aneg and read_status.
+ */
+struct phy_info {
+ u32 phy_id;
+ char *name;
+ unsigned int phy_id_mask;
+ u32 features;
+
+ /* Called to initialize the PHY */
+ int (*init) (struct ugeth_mii_info * mii_info);
+
+ /* Called to suspend the PHY for power */
+ int (*suspend) (struct ugeth_mii_info * mii_info);
+
+ /* Reconfigures autonegotiation (or disables it) */
+ int (*config_aneg) (struct ugeth_mii_info * mii_info);
+
+ /* Determines the negotiated speed and duplex */
+ int (*read_status) (struct ugeth_mii_info * mii_info);
+
+ /* Clears any pending interrupts */
+ int (*ack_interrupt) (struct ugeth_mii_info * mii_info);
+
+ /* Enables or disables interrupts */
+ int (*config_intr) (struct ugeth_mii_info * mii_info);
+
+ /* Clears up any memory if needed */
+ void (*close) (struct ugeth_mii_info * mii_info);
+};
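
/* Illustrative sketch only, not part of this patch: a hypothetical
 * phy_info table entry showing how phy_id and phy_id_mask cooperate as
 * described in the comment above. The ID value, mask and callbacks are
 * made up for illustration and do not correspond to real hardware.
 */
static int example_config_aneg(struct ugeth_mii_info *mii_info)
{
	return 0;	/* would (re)start autonegotiation here */
}

static int example_read_status(struct ugeth_mii_info *mii_info)
{
	return 0;	/* would fill in speed, duplex and link here */
}

static struct phy_info phy_info_example = {
	.phy_id		= 0x01234560,	/* made-up UID from registers 2 and 3 */
	.phy_id_mask	= 0xfffffff0,	/* low nibble is revision, ignored */
	.name		= "Example PHY",
	.features	= MII_GBIT_FEATURES,
	.config_aneg	= example_config_aneg,
	.read_status	= example_read_status,
};

/* get_phy_info() would then match this entry whenever
 *	phy_info_example.phy_id == (phy_ID & phy_info_example.phy_id_mask)
 */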
+
+struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info);
+void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value);
+int read_phy_reg(struct net_device *dev, int mii_id, int regnum);
+void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info);
+void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info,
+ u32 interrupts);
+
+struct dm9161_private {
+ struct timer_list timer;
+ int resetdone;
+};
+
+#endif /* __UCC_GETH_PHY_H__ */
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 98b6f3207d3d..ae971080e2e4 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -25,117 +25,13 @@
version. He may or may not be interested in bug reports on this
code. You can find his versions at:
http://www.scyld.com/network/via-rhine.html
-
-
- Linux kernel version history:
-
- LK1.1.0:
- - Jeff Garzik: softnet 'n stuff
-
- LK1.1.1:
- - Justin Guyett: softnet and locking fixes
- - Jeff Garzik: use PCI interface
-
- LK1.1.2:
- - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
-
- LK1.1.3:
- - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
- code) update "Theory of Operation" with
- softnet/locking changes
- - Dave Miller: PCI DMA and endian fixups
- - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
-
- LK1.1.4:
- - Urban Widmark: fix gcc 2.95.2 problem and
- remove writel's to fixed address 0x7c
-
- LK1.1.5:
- - Urban Widmark: mdio locking, bounce buffer changes
- merges from Beckers 1.05 version
- added netif_running_on/off support
-
- LK1.1.6:
- - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
- set netif_running_on/off on startup, del_timer_sync
-
- LK1.1.7:
- - Manfred Spraul: added reset into tx_timeout
-
- LK1.1.9:
- - Urban Widmark: merges from Beckers 1.10 version
- (media selection + eeprom reload)
- - David Vrabel: merges from D-Link "1.11" version
- (disable WOL and PME on startup)
-
- LK1.1.10:
- - Manfred Spraul: use "singlecopy" for unaligned buffers
- don't allocate bounce buffers for !ReqTxAlign cards
-
- LK1.1.11:
- - David Woodhouse: Set dev->base_addr before the first time we call
- wait_for_reset(). It's a lot happier that way.
- Free np->tx_bufs only if we actually allocated it.
-
- LK1.1.12:
- - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
-
- LK1.1.13 (jgarzik):
- - Add ethtool support
- - Replace some MII-related magic numbers with constants
-
- LK1.1.14 (Ivan G.):
- - fixes comments for Rhine-III
- - removes W_MAX_TIMEOUT (unused)
- - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
- is R-I and has Davicom chip, flag is referenced in kernel driver)
- - sends chip_id as a parameter to wait_for_reset since np is not
- initialized on first call
- - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
- for Rhine-III's (documentation says same bit is correct)
- - transmit frame queue message is off by one - fixed
- - adds IntrNormalSummary to "Something Wicked" exclusion list
- so normal interrupts will not trigger the message (src: Donald Becker)
- (Roger Luethi)
- - show confused chip where to continue after Tx error
- - location of collision counter is chip specific
- - allow selecting backoff algorithm (module parameter)
-
- LK1.1.15 (jgarzik):
- - Use new MII lib helper generic_mii_ioctl
-
- LK1.1.16 (Roger Luethi)
- - Etherleak fix
- - Handle Tx buffer underrun
- - Fix bugs in full duplex handling
- - New reset code uses "force reset" cmd on Rhine-II
- - Various clean ups
-
- LK1.1.17 (Roger Luethi)
- - Fix race in via_rhine_start_tx()
- - On errors, wait for Tx engine to turn off before scavenging
- - Handle Tx descriptor write-back race on Rhine-II
- - Force flushing for PCI posted writes
- - More reset code changes
-
- LK1.1.18 (Roger Luethi)
- - No filtering multicast in promisc mode (Edward Peng)
- - Fix for Rhine-I Tx timeouts
-
- LK1.1.19 (Roger Luethi)
- - Increase Tx threshold for unspecified errors
-
- LK1.2.0-2.6 (Roger Luethi)
- - Massive clean-up
- - Rewrite PHY, media handling (remove options, full_duplex, backoff)
- - Fix Tx engine race for good
- - Craig Brind: Zero padded aligned buffers for short packets.
+ [link no longer provides useful info -jgarzik]
*/
#define DRV_NAME "via-rhine"
-#define DRV_VERSION "1.2.0-2.6"
-#define DRV_RELDATE "June-10-2004"
+#define DRV_VERSION "1.4.1"
+#define DRV_RELDATE "July-24-2006"
/* A few user-configurable values.
@@ -148,6 +44,10 @@ static int max_interrupt_work = 20;
Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
+/* Work-around for broken BIOSes: they are unable to get the chip back out of
+ power state D3, so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
+static int avoid_D3;
+
/*
* In case you are looking for 'options[]' or 'full_duplex[]', they
* are gone. Use ethtool(8) instead.
@@ -167,7 +67,11 @@ static const int multicast_filter_limit = 32;
There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#ifdef CONFIG_VIA_RHINE_NAPI
+#define RX_RING_SIZE 64
+#else
#define RX_RING_SIZE 16
+#endif
/* Operational parameters that usually are not changed. */
@@ -220,9 +124,11 @@ MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
+module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
/*
Theory of Operation
@@ -356,12 +262,11 @@ enum rhine_quirks {
/* Beware of PCI posted writes */
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
-static struct pci_device_id rhine_pci_tbl[] =
-{
- {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */
- {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */
- {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */
- {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
+static const struct pci_device_id rhine_pci_tbl[] = {
+ { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
+ { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
+ { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
+ { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
@@ -501,7 +406,7 @@ static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
-static void rhine_rx(struct net_device *dev);
+static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
@@ -669,6 +574,32 @@ static void rhine_poll(struct net_device *dev)
}
#endif
+#ifdef CONFIG_VIA_RHINE_NAPI
+static int rhine_napipoll(struct net_device *dev, int *budget)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ int done, limit = min(dev->quota, *budget);
+
+ done = rhine_rx(dev, limit);
+ *budget -= done;
+ dev->quota -= done;
+
+ if (done < limit) {
+ netif_rx_complete(dev);
+
+ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
+ IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
+ IntrTxDone | IntrTxError | IntrTxUnderrun |
+ IntrPCIErr | IntrStatsMax | IntrLinkChange,
+ ioaddr + IntrEnable);
+ return 0;
+ }
+ else
+ return 1;
+}
+#endif
+
static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -849,6 +780,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = rhine_poll;
#endif
+#ifdef CONFIG_VIA_RHINE_NAPI
+ dev->poll = rhine_napipoll;
+ dev->weight = 64;
+#endif
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
@@ -894,6 +829,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
}
}
rp->mii_if.phy_id = phy_id;
+ if (debug > 1 && avoid_D3)
+ printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
+ dev->name);
return 0;
@@ -1119,6 +1057,8 @@ static void init_registers(struct net_device *dev)
rhine_set_rx_mode(dev);
+ netif_poll_enable(dev);
+
/* Enable interrupts by setting the interrupt mask. */
iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
@@ -1373,8 +1313,18 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
dev->name, intr_status);
if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
- IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
- rhine_rx(dev);
+ IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
+#ifdef CONFIG_VIA_RHINE_NAPI
+ iowrite16(IntrTxAborted |
+ IntrTxDone | IntrTxError | IntrTxUnderrun |
+ IntrPCIErr | IntrStatsMax | IntrLinkChange,
+ ioaddr + IntrEnable);
+
+ netif_rx_schedule(dev);
+#else
+ rhine_rx(dev, RX_RING_SIZE);
+#endif
+ }
if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
if (intr_status & IntrTxErrSummary) {
@@ -1472,13 +1422,12 @@ static void rhine_tx(struct net_device *dev)
spin_unlock(&rp->lock);
}
-/* This routine is logically part of the interrupt handler, but isolated
- for clarity and better register allocation. */
-static void rhine_rx(struct net_device *dev)
+/* Process up to limit frames from receive ring */
+static int rhine_rx(struct net_device *dev, int limit)
{
struct rhine_private *rp = netdev_priv(dev);
+ int count;
int entry = rp->cur_rx % RX_RING_SIZE;
- int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
if (debug > 4) {
printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
@@ -1487,16 +1436,18 @@ static void rhine_rx(struct net_device *dev)
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
- while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
+ for (count = 0; count < limit; ++count) {
struct rx_desc *desc = rp->rx_head_desc;
u32 desc_status = le32_to_cpu(desc->rx_status);
int data_size = desc_status >> 16;
+ if (desc_status & DescOwn)
+ break;
+
if (debug > 4)
printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
desc_status);
- if (--boguscnt < 0)
- break;
+
if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
if ((desc_status & RxWholePkt) != RxWholePkt) {
printk(KERN_WARNING "%s: Oversized Ethernet "
@@ -1565,7 +1516,11 @@ static void rhine_rx(struct net_device *dev)
PCI_DMA_FROMDEVICE);
}
skb->protocol = eth_type_trans(skb, dev);
+#ifdef CONFIG_VIA_RHINE_NAPI
+ netif_receive_skb(skb);
+#else
netif_rx(skb);
+#endif
dev->last_rx = jiffies;
rp->stats.rx_bytes += pkt_len;
rp->stats.rx_packets++;
@@ -1592,6 +1547,8 @@ static void rhine_rx(struct net_device *dev)
}
rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
}
+
+ return count;
}
/*
@@ -1881,6 +1838,7 @@ static int rhine_close(struct net_device *dev)
spin_lock_irq(&rp->lock);
netif_stop_queue(dev);
+ netif_poll_disable(dev);
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
@@ -1962,7 +1920,8 @@ static void rhine_shutdown (struct pci_dev *pdev)
}
/* Hit power state D3 (sleep) */
- iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+ if (!avoid_D3)
+ iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
/* TODO: Check use of pci_enable_wake() */
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index ba2972ba3757..aa9cd92f46b2 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -229,7 +229,8 @@ static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
-static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info);
+static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
+ const struct velocity_info_tbl *info);
static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
static void velocity_print_info(struct velocity_info *vptr);
static int velocity_open(struct net_device *dev);
@@ -294,9 +295,9 @@ static void velocity_unregister_notifier(void)
* Internal board variants. At the moment we have only one
*/
-static struct velocity_info_tbl chip_info_table[] = {
- {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1, 0x00FFFFFFUL},
- {0, NULL}
+static const struct velocity_info_tbl chip_info_table[] __devinitdata = {
+ {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
+ { }
};
/*
@@ -304,10 +305,9 @@ static struct velocity_info_tbl chip_info_table[] = {
* device driver. Used for hotplug autoloading.
*/
-static struct pci_device_id velocity_id_table[] __devinitdata = {
- {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) chip_info_table},
- {0, }
+static const struct pci_device_id velocity_id_table[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
+ { }
};
MODULE_DEVICE_TABLE(pci, velocity_id_table);
@@ -341,7 +341,7 @@ static char __devinit *get_chip_name(enum chip_type chip_id)
static void __devexit velocity_remove1(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
#ifdef CONFIG_PM
unsigned long flags;
@@ -686,21 +686,23 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
static int first = 1;
struct net_device *dev;
int i;
- struct velocity_info_tbl *info = (struct velocity_info_tbl *) ent->driver_data;
+ const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
struct velocity_info *vptr;
struct mac_regs __iomem * regs;
int ret = -ENOMEM;
+ /* FIXME: this driver, like almost all other ethernet drivers,
+ * can support more than MAX_UNITS.
+ */
if (velocity_nics >= MAX_UNITS) {
- printk(KERN_NOTICE VELOCITY_NAME ": already found %d NICs.\n",
- velocity_nics);
+ dev_notice(&pdev->dev, "already found %d NICs.\n",
+ velocity_nics);
return -ENODEV;
}
dev = alloc_etherdev(sizeof(struct velocity_info));
-
- if (dev == NULL) {
- printk(KERN_ERR VELOCITY_NAME ": allocate net device failed.\n");
+ if (!dev) {
+ dev_err(&pdev->dev, "allocate net device failed.\n");
goto out;
}
@@ -708,7 +710,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- vptr = dev->priv;
+ vptr = netdev_priv(dev);
if (first) {
@@ -731,17 +733,17 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
ret = velocity_get_pci_info(vptr, pdev);
if (ret < 0) {
- printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n");
+ /* error message already printed */
goto err_disable;
}
ret = pci_request_regions(pdev, VELOCITY_NAME);
if (ret < 0) {
- printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n");
+ dev_err(&pdev->dev, "No PCI resources.\n");
goto err_disable;
}
- regs = ioremap(vptr->memaddr, vptr->io_size);
+ regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
if (regs == NULL) {
ret = -EIO;
goto err_release_res;
@@ -859,13 +861,14 @@ static void __devinit velocity_print_info(struct velocity_info *vptr)
* discovered.
*/
-static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info)
+static void __devinit velocity_init_info(struct pci_dev *pdev,
+ struct velocity_info *vptr,
+ const struct velocity_info_tbl *info)
{
memset(vptr, 0, sizeof(struct velocity_info));
vptr->pdev = pdev;
vptr->chip_id = info->chip_id;
- vptr->io_size = info->io_size;
vptr->num_txq = info->txqueue;
vptr->multicast_limit = MCAM_SIZE;
spin_lock_init(&vptr->lock);
@@ -883,8 +886,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_i
static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
{
-
- if(pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0)
+ if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0)
return -EIO;
pci_set_master(pdev);
@@ -892,24 +894,20 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
vptr->ioaddr = pci_resource_start(pdev, 0);
vptr->memaddr = pci_resource_start(pdev, 1);
- if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO))
- {
- printk(KERN_ERR "%s: region #0 is not an I/O resource, aborting.\n",
- pci_name(pdev));
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
+ dev_err(&pdev->dev,
+ "region #0 is not an I/O resource, aborting.\n");
return -EINVAL;
}
- if((pci_resource_flags(pdev, 1) & IORESOURCE_IO))
- {
- printk(KERN_ERR "%s: region #1 is an I/O resource, aborting.\n",
- pci_name(pdev));
+ if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
+ dev_err(&pdev->dev,
+ "region #1 is an I/O resource, aborting.\n");
return -EINVAL;
}
- if(pci_resource_len(pdev, 1) < 256)
- {
- printk(KERN_ERR "%s: region #1 is too small.\n",
- pci_name(pdev));
+ if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
+ dev_err(&pdev->dev, "region #1 is too small.\n");
return -EINVAL;
}
vptr->pdev = pdev;
@@ -1728,7 +1726,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
static int velocity_open(struct net_device *dev)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
int ret;
vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32);
@@ -1785,7 +1783,7 @@ err_free_desc_rings:
static int velocity_change_mtu(struct net_device *dev, int new_mtu)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
unsigned long flags;
int oldmtu = dev->mtu;
int ret = 0;
@@ -1861,7 +1859,7 @@ static void velocity_shutdown(struct velocity_info *vptr)
static int velocity_close(struct net_device *dev)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
netif_stop_queue(dev);
velocity_shutdown(vptr);
@@ -1894,7 +1892,7 @@ static int velocity_close(struct net_device *dev)
static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
int qnum = 0;
struct tx_desc *td_ptr;
struct velocity_td_info *tdinfo;
@@ -2049,7 +2047,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
struct net_device *dev = dev_instance;
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
u32 isr_status;
int max_count = 0;
@@ -2104,7 +2102,7 @@ static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs)
static void velocity_set_multi(struct net_device *dev)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
struct mac_regs __iomem * regs = vptr->mac_regs;
u8 rx_mode;
int i;
@@ -2153,7 +2151,7 @@ static void velocity_set_multi(struct net_device *dev)
static struct net_device_stats *velocity_get_stats(struct net_device *dev)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
/* If the hardware is down, don't touch MII */
if(!netif_running(dev))
@@ -2196,7 +2194,7 @@ static struct net_device_stats *velocity_get_stats(struct net_device *dev)
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
int ret;
/* If we are asked for information and the device is power
@@ -2744,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs)
if (PHYSR0 & PHYSR0_SPDG)
status |= VELOCITY_SPEED_1000;
- if (PHYSR0 & PHYSR0_SPD10)
+ else if (PHYSR0 & PHYSR0_SPD10)
status |= VELOCITY_SPEED_10;
else
status |= VELOCITY_SPEED_100;
@@ -2825,7 +2823,7 @@ static void enable_flow_control_ability(struct velocity_info *vptr)
static int velocity_ethtool_up(struct net_device *dev)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
if (!netif_running(dev))
pci_set_power_state(vptr->pdev, PCI_D0);
return 0;
@@ -2841,20 +2839,29 @@ static int velocity_ethtool_up(struct net_device *dev)
static void velocity_ethtool_down(struct net_device *dev)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
if (!netif_running(dev))
pci_set_power_state(vptr->pdev, PCI_D3hot);
}
static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
struct mac_regs __iomem * regs = vptr->mac_regs;
u32 status;
status = check_connection_type(vptr->mac_regs);
- cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
- if (status & VELOCITY_SPEED_100)
+ cmd->supported = SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+ if (status & VELOCITY_SPEED_1000)
+ cmd->speed = SPEED_1000;
+ else if (status & VELOCITY_SPEED_100)
cmd->speed = SPEED_100;
else
cmd->speed = SPEED_10;
@@ -2873,7 +2880,7 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
u32 curr_status;
u32 new_status = 0;
int ret = 0;
@@ -2896,14 +2903,14 @@ static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd
static u32 velocity_get_link(struct net_device *dev)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
struct mac_regs __iomem * regs = vptr->mac_regs;
- return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1;
+ return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
}
static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
strcpy(info->driver, VELOCITY_NAME);
strcpy(info->version, VELOCITY_VERSION);
strcpy(info->bus_info, pci_name(vptr->pdev));
@@ -2911,7 +2918,7 @@ static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
wol->wolopts |= WAKE_MAGIC;
/*
@@ -2927,7 +2934,7 @@ static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_woli
static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
return -EFAULT;
@@ -2992,7 +2999,7 @@ static struct ethtool_ops velocity_ethtool_ops = {
static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct velocity_info *vptr = dev->priv;
+ struct velocity_info *vptr = netdev_priv(dev);
struct mac_regs __iomem * regs = vptr->mac_regs;
unsigned long flags;
struct mii_ioctl_data *miidata = if_mii(ifr);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index f1b2640ebdc6..496c3d597444 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -31,6 +31,8 @@
#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"
#define VELOCITY_VERSION "1.13"
+#define VELOCITY_IO_SIZE 256
+
#define PKT_BUF_SZ 1540
#define MAX_UNITS 8
@@ -1191,7 +1193,6 @@ enum chip_type {
struct velocity_info_tbl {
enum chip_type chip_id;
char *name;
- int io_size;
int txqueue;
u32 flags;
};
@@ -1751,7 +1752,6 @@ struct velocity_info {
struct mac_regs __iomem * mac_regs;
unsigned long memaddr;
unsigned long ioaddr;
- u32 io_size;
u8 rev_id;
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index b5328b0ff927..54b8e492ef97 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -134,18 +134,6 @@ config SEALEVEL_4021
The driver will be compiled as a module: the
module will be called sealevel.
-config SYNCLINK_SYNCPPP
- tristate "SyncLink HDLC/SYNCPPP support"
- depends on WAN
- help
- Enables HDLC/SYNCPPP support for the SyncLink WAN driver.
-
- Normally the SyncLink WAN driver works with the main PPP driver
- <file:drivers/net/ppp_generic.c> and pppd program.
- HDLC/SYNCPPP support allows use of the Cisco HDLC/PPP driver
- <file:drivers/net/wan/syncppp.c>. The SyncLink WAN driver (in
- character devices) must also be enabled.
-
# Generic HDLC
config HDLC
tristate "Generic HDLC layer"
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 823c6d5ab90d..316ca6869d5e 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -28,7 +28,6 @@ obj-$(CONFIG_COSA) += syncppp.o cosa.o
obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o
obj-$(CONFIG_DSCC4) += dscc4.o
obj-$(CONFIG_LANMEDIA) += syncppp.o
-obj-$(CONFIG_SYNCLINK_SYNCPPP) += syncppp.o
obj-$(CONFIG_X25_ASY) += x25_asy.o
obj-$(CONFIG_LANMEDIA) += lmc/
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index c92ac9fde083..6b63b350cd52 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -116,27 +116,34 @@ static inline void openwin(card_t *card, u8 page)
#include "hd6457x.c"
+static inline void set_carrier(port_t *port)
+{
+ if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
+ netif_carrier_on(port_to_dev(port));
+ else
+ netif_carrier_off(port_to_dev(port));
+}
+
+
static void sca_msci_intr(port_t *port)
{
- struct net_device *dev = port_to_dev(port);
- card_t* card = port_to_card(port);
- u8 stat = sca_in(MSCI1_OFFSET + ST1, card); /* read MSCI ST1 status */
+ u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */
- /* Reset MSCI TX underrun status bit */
- sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, card);
+ /* Reset MSCI TX underrun and CDCD (ignored) status bit */
+ sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);
if (stat & ST1_UDRN) {
- struct net_device_stats *stats = hdlc_stats(dev);
+ struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
stats->tx_errors++; /* TX Underrun error detected */
stats->tx_fifo_errors++;
}
+ stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
/* Reset MSCI CDCD status bit - uses ch#2 DCD input */
- sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, card);
+ sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);
if (stat & ST1_CDCD)
- hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD),
- dev);
+ set_carrier(port);
}
@@ -190,8 +197,7 @@ static int c101_open(struct net_device *dev)
sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port);
sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);
- hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD), dev);
- printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port));
+ set_carrier(port);
/* enable MSCI1 CDCD interrupt */
sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
@@ -378,7 +384,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
}
sca_init_sync_port(card); /* Set up C101 memory */
- hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD), dev);
+ set_carrier(card);
printk(KERN_INFO "%s: Moxa C101 on IRQ%u,"
" using %u TX + %u RX packets rings\n",
@@ -443,4 +449,5 @@ module_exit(c101_cleanup);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Moxa C101 serial port driver");
MODULE_LICENSE("GPL v2");
-module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */
+module_param(hw, charp, 0444);
+MODULE_PARM_DESC(hw, "irq,ram:irq,...");
diff --git a/drivers/net/wan/hd6457x.c b/drivers/net/wan/hd6457x.c
index d3743321a977..dce2bb317b82 100644
--- a/drivers/net/wan/hd6457x.c
+++ b/drivers/net/wan/hd6457x.c
@@ -168,6 +168,23 @@ static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
}
+static inline void sca_set_carrier(port_t *port)
+{
+ if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "%s: sca_set_carrier on\n",
+ port_to_dev(port)->name);
+#endif
+ netif_carrier_on(port_to_dev(port));
+ } else {
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "%s: sca_set_carrier off\n",
+ port_to_dev(port)->name);
+#endif
+ netif_carrier_off(port_to_dev(port));
+ }
+}
+
static void sca_init_sync_port(port_t *port)
{
@@ -237,9 +254,7 @@ static void sca_init_sync_port(port_t *port)
sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
}
}
-
- hdlc_set_carrier(!(sca_in(get_msci(port) + ST3, card) & ST3_DCD),
- port_to_dev(port));
+ sca_set_carrier(port);
}
@@ -262,8 +277,7 @@ static inline void sca_msci_intr(port_t *port)
}
if (stat & ST1_CDCD)
- hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD),
- port_to_dev(port));
+ sca_set_carrier(port);
}
#endif
@@ -566,7 +580,7 @@ static void sca_open(struct net_device *dev)
- all DMA interrupts
*/
- hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD), dev);
+ sca_set_carrier(port);
#ifdef __HD64570_H
/* MSCI TX INT and RX INT A IRQ enable */
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 1fd04662c4fc..f289daba0c7b 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -192,9 +192,7 @@ static int cisco_rx(struct sk_buff *skb)
"uptime %ud%uh%um%us)\n",
dev->name, days, hrs,
min, sec);
-#if 0
- netif_carrier_on(dev);
-#endif
+ netif_dormant_off(dev);
hdlc->state.cisco.up = 1;
}
}
@@ -227,9 +225,7 @@ static void cisco_timer(unsigned long arg)
hdlc->state.cisco.settings.timeout * HZ)) {
hdlc->state.cisco.up = 0;
printk(KERN_INFO "%s: Link down\n", dev->name);
-#if 0
- netif_carrier_off(dev);
-#endif
+ netif_dormant_on(dev);
}
cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ,
@@ -265,10 +261,7 @@ static void cisco_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
del_timer_sync(&hdlc->state.cisco.timer);
-#if 0
- if (netif_carrier_ok(dev))
- netif_carrier_off(dev);
-#endif
+ netif_dormant_on(dev);
hdlc->state.cisco.up = 0;
hdlc->state.cisco.request_sent = 0;
}
@@ -328,6 +321,7 @@ int hdlc_cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->type = ARPHRD_CISCO;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->addr_len = 0;
+ netif_dormant_on(dev);
return 0;
}
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 523afe17564e..7bb737bbdeb9 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -301,7 +301,7 @@ static int pvc_open(struct net_device *dev)
if (pvc->open_count++ == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->master);
if (hdlc->state.fr.settings.lmi == LMI_NONE)
- pvc->state.active = hdlc->carrier;
+ pvc->state.active = netif_carrier_ok(pvc->master);
pvc_carrier(pvc->state.active, pvc);
hdlc->state.fr.dce_changed = 1;
@@ -545,11 +545,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
hdlc->state.fr.reliable = reliable;
if (reliable) {
-#if 0
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
-#endif
-
+ netif_dormant_off(dev);
hdlc->state.fr.n391cnt = 0; /* Request full status */
hdlc->state.fr.dce_changed = 1;
@@ -562,11 +558,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
}
}
} else {
-#if 0
- if (netif_carrier_ok(dev))
- netif_carrier_off(dev);
-#endif
-
+ netif_dormant_on(dev);
while (pvc) { /* Deactivate all PVCs */
pvc_carrier(0, pvc);
pvc->state.exist = pvc->state.active = 0;
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c
index b7da55140fbd..04ca1f7b6424 100644
--- a/drivers/net/wan/hdlc_generic.c
+++ b/drivers/net/wan/hdlc_generic.c
@@ -34,10 +34,11 @@
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
#include <linux/hdlc.h>
-static const char* version = "HDLC support module revision 1.18";
+static const char* version = "HDLC support module revision 1.19";
#undef DEBUG_LINK
@@ -73,57 +74,51 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
-static void __hdlc_set_carrier_on(struct net_device *dev)
+static inline void hdlc_proto_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto.start)
return hdlc->proto.start(dev);
-#if 0
-#ifdef DEBUG_LINK
- if (netif_carrier_ok(dev))
- printk(KERN_ERR "hdlc_set_carrier_on(): already on\n");
-#endif
- netif_carrier_on(dev);
-#endif
}
-static void __hdlc_set_carrier_off(struct net_device *dev)
+static inline void hdlc_proto_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto.stop)
return hdlc->proto.stop(dev);
-
-#if 0
-#ifdef DEBUG_LINK
- if (!netif_carrier_ok(dev))
- printk(KERN_ERR "hdlc_set_carrier_off(): already off\n");
-#endif
- netif_carrier_off(dev);
-#endif
}
-void hdlc_set_carrier(int on, struct net_device *dev)
+static int hdlc_device_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
{
- hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct net_device *dev = ptr;
+ hdlc_device *hdlc;
unsigned long flags;
- on = on ? 1 : 0;
+ int on;
+
+ if (dev->get_stats != hdlc_get_stats)
+ return NOTIFY_DONE; /* not an HDLC device */
+
+ if (event != NETDEV_CHANGE)
+ return NOTIFY_DONE; /* Only interested in carrier changes */
+
+ on = netif_carrier_ok(dev);
#ifdef DEBUG_LINK
- printk(KERN_DEBUG "hdlc_set_carrier %i\n", on);
+ printk(KERN_DEBUG "%s: hdlc_device_event NETDEV_CHANGE, carrier %i\n",
+ dev->name, on);
#endif
+ hdlc = dev_to_hdlc(dev);
spin_lock_irqsave(&hdlc->state_lock, flags);
if (hdlc->carrier == on)
goto carrier_exit; /* no change in DCD line level */
-#ifdef DEBUG_LINK
- printk(KERN_INFO "%s: carrier %s\n", dev->name, on ? "ON" : "off");
-#endif
hdlc->carrier = on;
if (!hdlc->open)
@@ -131,14 +126,15 @@ void hdlc_set_carrier(int on, struct net_device *dev)
if (hdlc->carrier) {
printk(KERN_INFO "%s: Carrier detected\n", dev->name);
- __hdlc_set_carrier_on(dev);
+ hdlc_proto_start(dev);
} else {
printk(KERN_INFO "%s: Carrier lost\n", dev->name);
- __hdlc_set_carrier_off(dev);
+ hdlc_proto_stop(dev);
}
carrier_exit:
spin_unlock_irqrestore(&hdlc->state_lock, flags);
+ return NOTIFY_DONE;
}
@@ -165,7 +161,7 @@ int hdlc_open(struct net_device *dev)
if (hdlc->carrier) {
printk(KERN_INFO "%s: Carrier detected\n", dev->name);
- __hdlc_set_carrier_on(dev);
+ hdlc_proto_start(dev);
} else
printk(KERN_INFO "%s: No carrier\n", dev->name);
@@ -190,7 +186,7 @@ void hdlc_close(struct net_device *dev)
hdlc->open = 0;
if (hdlc->carrier)
- __hdlc_set_carrier_off(dev);
+ hdlc_proto_stop(dev);
spin_unlock_irq(&hdlc->state_lock);
@@ -303,7 +299,6 @@ MODULE_LICENSE("GPL v2");
EXPORT_SYMBOL(hdlc_open);
EXPORT_SYMBOL(hdlc_close);
-EXPORT_SYMBOL(hdlc_set_carrier);
EXPORT_SYMBOL(hdlc_ioctl);
EXPORT_SYMBOL(hdlc_setup);
EXPORT_SYMBOL(alloc_hdlcdev);
@@ -315,9 +310,18 @@ static struct packet_type hdlc_packet_type = {
};
+static struct notifier_block hdlc_notifier = {
+ .notifier_call = hdlc_device_event,
+};
+
+
static int __init hdlc_module_init(void)
{
+ int result;
+
printk(KERN_INFO "%s\n", version);
+ if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
+ return result;
dev_add_pack(&hdlc_packet_type);
return 0;
}
@@ -327,6 +331,7 @@ static int __init hdlc_module_init(void)
static void __exit hdlc_module_exit(void)
{
dev_remove_pack(&hdlc_packet_type);
+ unregister_netdevice_notifier(&hdlc_notifier);
}
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b81263eaede0..fbaab5bf71eb 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -107,6 +107,7 @@ int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->hard_header = NULL;
dev->type = ARPHRD_PPP;
dev->addr_len = 0;
+ netif_dormant_off(dev);
return 0;
}
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 9456d31cb1c1..f15aa6ba77f1 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -82,6 +82,7 @@ int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->type = ARPHRD_RAWHDLC;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->addr_len = 0;
+ netif_dormant_off(dev);
return 0;
}
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index b1285cc8fee6..d1884987f94e 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -100,6 +100,7 @@ int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->tx_queue_len = old_qlen;
memcpy(dev->dev_addr, "\x00\x01", 2);
get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
+ netif_dormant_off(dev);
return 0;
}
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 07e5eef1fe0f..a867fb411f89 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -212,6 +212,7 @@ int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->hard_header = NULL;
dev->type = ARPHRD_X25;
dev->addr_len = 0;
+ netif_dormant_off(dev);
return 0;
}
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index e013b817cab8..dcf46add3adf 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -564,4 +564,5 @@ module_exit(n2_cleanup);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("RISCom/N2 serial port driver");
MODULE_LICENSE("GPL v2");
-module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */
+module_param(hw, charp, 0444);
+MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index d564224cdca9..b2031dfc4bb1 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -149,7 +149,10 @@ static inline void wanxl_cable_intr(port_t *port)
printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n",
port->dev->name, pm, dte, cable, dsr, dcd);
- hdlc_set_carrier(value & STATUS_CABLE_DCD, port->dev);
+ if (value & STATUS_CABLE_DCD)
+ netif_carrier_on(port->dev);
+ else
+ netif_carrier_off(port->dev);
}
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index 7caa8dc88a58..b1ba1872f315 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -500,8 +500,8 @@ MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-int
-init_module(void)
+
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 30ec235e6935..2e8ac995d56f 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -447,6 +447,7 @@ config AIRO_CS
tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
select CRYPTO
+ select CRYPTO_AES
---help---
This is the standard Linux driver to support Cisco/Aironet PCMCIA
802.11 wireless cards. This driver is the same as the Aironet
@@ -550,6 +551,7 @@ config USB_ZD1201
source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/bcm43xx/Kconfig"
+source "drivers/net/wireless/zd1211rw/Kconfig"
# yes, this works even when no drivers are selected
config NET_WIRELESS
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 512603de309a..c613af17a159 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_PRISM54) += prism54/
obj-$(CONFIG_HOSTAP) += hostap/
obj-$(CONFIG_BCM43XX) += bcm43xx/
+obj-$(CONFIG_ZD1211RW) += zd1211rw/
# 16-bit wireless PCMCIA client drivers
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index d8f5600578b4..df317c1e12a8 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -1547,7 +1547,7 @@ static void handle_irq_noise(struct bcm43xx_private *bcm)
goto generate_new;
/* Get the noise samples. */
- assert(bcm->noisecalc.nr_samples <= 8);
+ assert(bcm->noisecalc.nr_samples < 8);
i = bcm->noisecalc.nr_samples;
noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(radio->nrssi_lt) - 1);
noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(radio->nrssi_lt) - 1);
@@ -1885,6 +1885,15 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
spin_lock(&bcm->irq_lock);
+ /* Only accept IRQs, if we are initialized properly.
+ * This avoids an RX race while initializing.
+ * We should probably not enable IRQs before we are initialized
+ * completely, but some careful work is needed to fix this. I think it
+ * is best to stay with this cheap workaround for now.
+ */
+ if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED))
+ goto out;
+
reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
if (reason == 0xffffffff) {
/* irq not for us (shared irq) */
@@ -1906,19 +1915,11 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
bcm43xx_interrupt_ack(bcm, reason);
- /* Only accept IRQs, if we are initialized properly.
- * This avoids an RX race while initializing.
- * We should probably not enable IRQs before we are initialized
- * completely, but some careful work is needed to fix this. I think it
- * is best to stay with this cheap workaround for now... .
- */
- if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) {
- /* disable all IRQs. They are enabled again in the bottom half. */
- bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
- /* save the reason code and call our bottom half. */
- bcm->irq_reason = reason;
- tasklet_schedule(&bcm->isr_tasklet);
- }
+ /* disable all IRQs. They are enabled again in the bottom half. */
+ bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
+ /* save the reason code and call our bottom half. */
+ bcm->irq_reason = reason;
+ tasklet_schedule(&bcm->isr_tasklet);
out:
mmiowb();
@@ -3698,6 +3699,10 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
secinfo->encrypt = sec->encrypt;
dprintk(", .encrypt = %d", sec->encrypt);
}
+ if (sec->flags & SEC_AUTH_MODE) {
+ secinfo->auth_mode = sec->auth_mode;
+ dprintk(", .auth_mode = %d", sec->auth_mode);
+ }
dprintk("\n");
if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED &&
!bcm->ieee->host_encrypt) {
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index 30a202b258b5..116493671f88 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -112,30 +112,6 @@ int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm,
return bcm43xx_channel_to_freq_bg(channel);
}
-/* Lightweight function to check if a channel number is valid.
- * Note that this does _NOT_ check for geographical restrictions!
- */
-static inline
-int bcm43xx_is_valid_channel_a(u8 channel)
-{
- return (channel >= IEEE80211_52GHZ_MIN_CHANNEL
- && channel <= IEEE80211_52GHZ_MAX_CHANNEL);
-}
-static inline
-int bcm43xx_is_valid_channel_bg(u8 channel)
-{
- return (channel >= IEEE80211_24GHZ_MIN_CHANNEL
- && channel <= IEEE80211_24GHZ_MAX_CHANNEL);
-}
-static inline
-int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm,
- u8 channel)
-{
- if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A)
- return bcm43xx_is_valid_channel_a(channel);
- return bcm43xx_is_valid_channel_bg(channel);
-}
-
void bcm43xx_tsf_read(struct bcm43xx_private *bcm, u64 *tsf);
void bcm43xx_tsf_write(struct bcm43xx_private *bcm, u64 tsf);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
index af5c0bff1696..bb9c484d7e19 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
@@ -1594,11 +1594,11 @@ int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm,
u16 r8, tmp;
u16 freq;
+ if (!ieee80211_is_valid_channel(bcm->ieee, channel))
+ return -EINVAL;
if ((radio->manufact == 0x17F) &&
(radio->version == 0x2060) &&
(radio->revision == 1)) {
- if (channel > 200)
- return -EINVAL;
freq = channel2freq_a(channel);
r8 = bcm43xx_radio_read16(bcm, 0x0008);
@@ -1651,9 +1651,6 @@ int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm,
TODO(); //TODO: TSSI2dbm workaround
bcm43xx_phy_xmitpower(bcm);//FIXME correct?
} else {
- if ((channel < 1) || (channel > 14))
- return -EINVAL;
-
if (synthetic_pu_workaround)
bcm43xx_synth_pu_workaround(bcm, channel);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index c35cb3a0777e..5c36e29efff7 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -119,7 +119,7 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
channel = bcm43xx_freq_to_channel(bcm, data->freq.m);
freq = data->freq.m;
}
- if (!bcm43xx_is_valid_channel(bcm, channel))
+ if (!ieee80211_is_valid_channel(bcm->ieee, channel))
goto out_unlock;
if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
//ieee80211softmac_disassoc(softmac, $REASON);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
index d8ece28c079f..6dbd855b3647 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
@@ -296,11 +296,14 @@ void bcm43xx_generate_txhdr(struct bcm43xx_private *bcm,
u16 control = 0;
u16 wsec_rate = 0;
u16 encrypt_frame;
+ const u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(wireless_header->frame_ctl));
+ const int is_mgt = (ftype == IEEE80211_FTYPE_MGMT);
/* Now construct the TX header. */
memset(txhdr, 0, sizeof(*txhdr));
- bitrate = bcm->softmac->txrates.default_rate;
+ bitrate = ieee80211softmac_suggest_txrate(bcm->softmac,
+ is_multicast_ether_addr(wireless_header->addr1), is_mgt);
ofdm_modulation = !(ieee80211_is_cck_rate(bitrate));
fallback_bitrate = bcm43xx_calc_fallback_rate(bitrate);
fallback_ofdm_modulation = !(ieee80211_is_cck_rate(fallback_bitrate));
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index dafaa5ff5aa6..d500012fdc7a 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1042,6 +1042,9 @@ static int prism2_reset_port(struct net_device *dev)
dev->name, local->fragm_threshold);
}
+ /* Some firmwares lose antenna selection settings on reset */
+ (void) hostap_set_antsel(local);
+
return res;
}
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 49860fa61c30..6dfa041be66d 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -66,10 +66,12 @@ static struct pci_device_id prism2_plx_id_table[] __devinitdata = {
PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
PLXDEV(0x126c, 0x8030, "Nortel emobility"),
+ PLXDEV(0x1562, 0x0001, "Symbol LA-4123"),
PLXDEV(0x1385, 0x4100, "Netgear MA301"),
PLXDEV(0x15e8, 0x0130, "National Datacomm NCP130 (PLX9052)"),
PLXDEV(0x15e8, 0x0131, "National Datacomm NCP130 (TMD7160)"),
PLXDEV(0x1638, 0x1100, "Eumitcom WL11000"),
+ PLXDEV(0x16ab, 0x1100, "Global Sun Tech GL24110P"),
PLXDEV(0x16ab, 0x1101, "Global Sun Tech GL24110P (?)"),
PLXDEV(0x16ab, 0x1102, "Linksys WPC11 with WDT11"),
PLXDEV(0x16ab, 0x1103, "Longshine 8031"),
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index d6ed5781b93a..317ace7f9aae 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
- if (erq->pointer) {
+ if (erq->length > 0) {
if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
index = priv->tx_key;
@@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
if (erq->flags & IW_ENCODE_RESTRICTED)
restricted = 1;
- if (erq->pointer) {
+ if (erq->pointer && erq->length > 0) {
priv->keys[index].len = cpu_to_le16(xlen);
memset(priv->keys[index].data, 0,
sizeof(priv->keys[index].data));
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 15465278c789..bcc7038130f6 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -34,8 +34,6 @@
#include "orinoco.h"
-static unsigned char *primsym;
-static unsigned char *secsym;
static const char primary_fw_name[] = "symbol_sp24t_prim_fw";
static const char secondary_fw_name[] = "symbol_sp24t_sec_fw";
@@ -244,7 +242,7 @@ spectrum_reset(struct pcmcia_device *link, int idle)
u_int save_cor;
/* Doing it if hardware is gone is guaranteed crash */
- if (pcmcia_dev_present(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
/* Save original COR value */
@@ -440,7 +438,7 @@ spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block)
*/
static int
spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
- const unsigned char *image)
+ const unsigned char *image, int secondary)
{
int ret;
const unsigned char *ptr;
@@ -455,7 +453,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
first_block = (const struct dblock *) ptr;
/* Read the PDA */
- if (image != primsym) {
+ if (secondary) {
ret = spectrum_read_pda(hw, pda, sizeof(pda));
if (ret)
return ret;
@@ -472,7 +470,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
return ret;
/* Write the PDA to the adapter */
- if (image != primsym) {
+ if (secondary) {
ret = spectrum_apply_pda(hw, first_block, pda);
if (ret)
return ret;
@@ -487,7 +485,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
ret = hermes_init(hw);
/* hermes_reset() should return 0 with the secondary firmware */
- if (image != primsym && ret != 0)
+ if (secondary && ret != 0)
return -ENODEV;
/* And this should work with any firmware */
@@ -509,33 +507,30 @@ spectrum_dl_firmware(hermes_t *hw, struct pcmcia_device *link)
const struct firmware *fw_entry;
if (request_firmware(&fw_entry, primary_fw_name,
- &handle_to_dev(link)) == 0) {
- primsym = fw_entry->data;
- } else {
+ &handle_to_dev(link)) != 0) {
printk(KERN_ERR PFX "Cannot find firmware: %s\n",
primary_fw_name);
return -ENOENT;
}
- if (request_firmware(&fw_entry, secondary_fw_name,
- &handle_to_dev(link)) == 0) {
- secsym = fw_entry->data;
- } else {
- printk(KERN_ERR PFX "Cannot find firmware: %s\n",
- secondary_fw_name);
- return -ENOENT;
- }
-
/* Load primary firmware */
- ret = spectrum_dl_image(hw, link, primsym);
+ ret = spectrum_dl_image(hw, link, fw_entry->data, 0);
+ release_firmware(fw_entry);
if (ret) {
printk(KERN_ERR PFX "Primary firmware download failed\n");
return ret;
}
- /* Load secondary firmware */
- ret = spectrum_dl_image(hw, link, secsym);
+ if (request_firmware(&fw_entry, secondary_fw_name,
+ &handle_to_dev(link)) != 0) {
+ printk(KERN_ERR PFX "Cannot find firmware: %s\n",
+ secondary_fw_name);
+ return -ENOENT;
+ }
+ /* Load secondary firmware */
+ ret = spectrum_dl_image(hw, link, fw_entry->data, 1);
+ release_firmware(fw_entry);
if (ret) {
printk(KERN_ERR PFX "Secondary firmware download failed\n");
}
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index fd31885c6844..ccaf28e8db0a 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -467,6 +467,7 @@ static int arp_query(unsigned char *haddr, u32 paddr,
struct net_device *dev)
{
struct neighbour *neighbor_entry;
+ int ret = 0;
neighbor_entry = neigh_lookup(&arp_tbl, &paddr, dev);
@@ -474,10 +475,11 @@ static int arp_query(unsigned char *haddr, u32 paddr,
neighbor_entry->used = jiffies;
if (neighbor_entry->nud_state & NUD_VALID) {
memcpy(haddr, neighbor_entry->ha, dev->addr_len);
- return 1;
+ ret = 1;
}
+ neigh_release(neighbor_entry);
}
- return 0;
+ return ret;
}
static void DumpData(char *msg, struct strip *strip_info, __u8 * ptr,
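The arp_query() fix matters because neigh_lookup() returns the entry with its reference count raised; the old early `return 1` skipped neigh_release() and leaked that reference. The rule in isolation, as an illustrative fragment only:

/* Illustrative fragment: every successful neigh_lookup() takes a reference
 * that must be dropped with neigh_release() on every path.
 */
struct neighbour *n = neigh_lookup(&arp_tbl, &paddr, dev);
if (n) {
	if (n->nud_state & NUD_VALID)
		memcpy(haddr, n->ha, dev->addr_len);	/* use the entry */
	neigh_release(n);	/* the single exit drops the reference */
}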
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 662ecc8a33ff..c52e9bcf8d02 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface,
zd->dev->name);
usb_set_intfdata(interface, zd);
+	zd1201_enable(zd);	/* the zd1201 likes to start up enabled, */
+	zd1201_disable(zd);	/* interfering with all the wifis in range */
return 0;
err_net:
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig
new file mode 100644
index 000000000000..66ed55bc5460
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/Kconfig
@@ -0,0 +1,19 @@
+config ZD1211RW
+ tristate "ZyDAS ZD1211/ZD1211B USB-wireless support"
+ depends on USB && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL
+ select FW_LOADER
+ ---help---
+ This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless
+ chip, present in many USB-wireless adapters.
+
+ Device firmware is required alongside this driver. You can download the
+ firmware distribution from http://zd1211.ath.cx/get-firmware
+
+config ZD1211RW_DEBUG
+ bool "ZyDAS ZD1211 debugging"
+ depends on ZD1211RW
+ ---help---
+ ZD1211 debugging messages. Choosing Y will result in additional debug
+ messages being saved to your kernel logs, which may help debug any
+ problems.
+
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
new file mode 100644
index 000000000000..500314fc74d2
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -0,0 +1,11 @@
+obj-$(CONFIG_ZD1211RW) += zd1211rw.o
+
+zd1211rw-objs := zd_chip.o zd_ieee80211.o \
+ zd_mac.o zd_netdev.o \
+ zd_rf_al2230.o zd_rf_rf2959.o \
+ zd_rf.o zd_usb.o zd_util.o
+
+ifeq ($(CONFIG_ZD1211RW_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
new file mode 100644
index 000000000000..aa792821854e
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -0,0 +1,1646 @@
+/* zd_chip.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* This file implements all the hardware specific functions for the ZD1211
+ * and ZD1211B chips. Support for the ZD1211B was possible after Timothy
+ * Legge sent me a ZD1211B device. Thank you Tim. -- Uli
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include "zd_def.h"
+#include "zd_chip.h"
+#include "zd_ieee80211.h"
+#include "zd_mac.h"
+#include "zd_rf.h"
+#include "zd_util.h"
+
+void zd_chip_init(struct zd_chip *chip,
+ struct net_device *netdev,
+ struct usb_interface *intf)
+{
+ memset(chip, 0, sizeof(*chip));
+ mutex_init(&chip->mutex);
+ zd_usb_init(&chip->usb, netdev, intf);
+ zd_rf_init(&chip->rf);
+}
+
+void zd_chip_clear(struct zd_chip *chip)
+{
+ mutex_lock(&chip->mutex);
+ zd_usb_clear(&chip->usb);
+ zd_rf_clear(&chip->rf);
+ mutex_unlock(&chip->mutex);
+ mutex_destroy(&chip->mutex);
+ memset(chip, 0, sizeof(*chip));
+}
+
+static int scnprint_mac_oui(const u8 *addr, char *buffer, size_t size)
+{
+ return scnprintf(buffer, size, "%02x-%02x-%02x",
+ addr[0], addr[1], addr[2]);
+}
+
+/* Prints an identifier line to support debugging. */
+static int scnprint_id(struct zd_chip *chip, char *buffer, size_t size)
+{
+ int i = 0;
+
+ i = scnprintf(buffer, size, "zd1211%s chip ",
+ chip->is_zd1211b ? "b" : "");
+ i += zd_usb_scnprint_id(&chip->usb, buffer+i, size-i);
+ i += scnprintf(buffer+i, size-i, " ");
+ i += scnprint_mac_oui(chip->e2p_mac, buffer+i, size-i);
+ i += scnprintf(buffer+i, size-i, " ");
+ i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i);
+ i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c", chip->pa_type,
+ chip->patch_cck_gain ? 'g' : '-',
+ chip->patch_cr157 ? '7' : '-',
+ chip->patch_6m_band_edge ? '6' : '-');
+ return i;
+}
+
+static void print_id(struct zd_chip *chip)
+{
+ char buffer[80];
+
+ scnprint_id(chip, buffer, sizeof(buffer));
+ buffer[sizeof(buffer)-1] = 0;
+ dev_info(zd_chip_dev(chip), "%s\n", buffer);
+}
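scnprint_id() builds its line by chaining scnprintf() calls: each call returns the number of characters actually written, so appending at buffer+i with limit size-i can never overrun the buffer. A minimal sketch of the same idiom; the helper name and its arguments are invented for illustration:

/* Sketch of the scnprintf() accumulation idiom: each call appends at
 * buffer+i and is limited to the remaining size-i bytes, so the total
 * never exceeds the buffer even if individual pieces get truncated.
 */
static int print_chip_summary(char *buffer, size_t size, int is_b, const u8 *oui)
{
	int i;

	i  = scnprintf(buffer, size, "zd1211%s", is_b ? "b" : "");
	i += scnprintf(buffer + i, size - i, " %02x-%02x-%02x",
		       oui[0], oui[1], oui[2]);
	return i;	/* characters written, not counting the trailing NUL */
}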
+
+/* Read a variable number of 32-bit values. Parameter count is not allowed to
+ * exceed USB_MAX_IOREAD32_COUNT.
+ */
+int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr,
+ unsigned int count)
+{
+ int r;
+ int i;
+	zd_addr_t *a16 = NULL;
+ u16 *v16;
+ unsigned int count16;
+
+ if (count > USB_MAX_IOREAD32_COUNT)
+ return -EINVAL;
+
+ /* Allocate a single memory block for values and addresses. */
+ count16 = 2*count;
+ a16 = (zd_addr_t *)kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
+ GFP_NOFS);
+ if (!a16) {
+ dev_dbg_f(zd_chip_dev(chip),
+ "error ENOMEM in allocation of a16\n");
+ r = -ENOMEM;
+ goto out;
+ }
+ v16 = (u16 *)(a16 + count16);
+
+ for (i = 0; i < count; i++) {
+ int j = 2*i;
+		/* We always read the high word first. */
+ a16[j] = zd_inc_word(addr[i]);
+ a16[j+1] = addr[i];
+ }
+
+ r = zd_ioread16v_locked(chip, v16, a16, count16);
+ if (r) {
+ dev_dbg_f(zd_chip_dev(chip),
+ "error: zd_ioread16v_locked. Error number %d\n", r);
+ goto out;
+ }
+
+ for (i = 0; i < count; i++) {
+ int j = 2*i;
+ values[i] = (v16[j] << 16) | v16[j+1];
+ }
+
+out:
+ kfree((void *)a16);
+ return r;
+}
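zd_ioread32v_locked() reads each 32-bit register as two 16-bit halves, high word first (zd_inc_word() yields the high-word address), and recombines them as (hi << 16) | lo. A self-contained illustration of just that split and recombination, with an arbitrary example value:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: split a 32-bit register value into the two 16-bit
 * halves the USB transport carries, then recombine them as the driver does.
 */
int main(void)
{
	uint32_t reg = 0x12345678;
	uint16_t hi = reg >> 16;		/* read first */
	uint16_t lo = reg & 0xffff;		/* read second */
	uint32_t rebuilt = ((uint32_t)hi << 16) | lo;

	printf("%#010x -> hi %#06x lo %#06x -> %#010x\n",
	       (unsigned)reg, (unsigned)hi, (unsigned)lo, (unsigned)rebuilt);
	return 0;
}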
+
+int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
+ unsigned int count)
+{
+ int i, j, r;
+ struct zd_ioreq16 *ioreqs16;
+ unsigned int count16;
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+
+ if (count == 0)
+ return 0;
+ if (count > USB_MAX_IOWRITE32_COUNT)
+ return -EINVAL;
+
+ /* Allocate a single memory block for values and addresses. */
+ count16 = 2*count;
+ ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_NOFS);
+ if (!ioreqs16) {
+ r = -ENOMEM;
+ dev_dbg_f(zd_chip_dev(chip),
+ "error %d in ioreqs16 allocation\n", r);
+ goto out;
+ }
+
+ for (i = 0; i < count; i++) {
+ j = 2*i;
+		/* We always write the high word first. */
+ ioreqs16[j].value = ioreqs[i].value >> 16;
+ ioreqs16[j].addr = zd_inc_word(ioreqs[i].addr);
+ ioreqs16[j+1].value = ioreqs[i].value;
+ ioreqs16[j+1].addr = ioreqs[i].addr;
+ }
+
+ r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16);
+#ifdef DEBUG
+ if (r) {
+ dev_dbg_f(zd_chip_dev(chip),
+ "error %d in zd_usb_write16v\n", r);
+ }
+#endif /* DEBUG */
+out:
+ kfree(ioreqs16);
+ return r;
+}
+
+int zd_iowrite16a_locked(struct zd_chip *chip,
+ const struct zd_ioreq16 *ioreqs, unsigned int count)
+{
+ int r;
+ unsigned int i, j, t, max;
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ for (i = 0; i < count; i += j + t) {
+ t = 0;
+ max = count-i;
+ if (max > USB_MAX_IOWRITE16_COUNT)
+ max = USB_MAX_IOWRITE16_COUNT;
+ for (j = 0; j < max; j++) {
+ if (!ioreqs[i+j].addr) {
+ t = 1;
+ break;
+ }
+ }
+
+ r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j);
+ if (r) {
+ dev_dbg_f(zd_chip_dev(chip),
+ "error zd_usb_iowrite16v. Error number %d\n",
+ r);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/* Writes a variable number of 32-bit registers. The function will split
+ * these into several USB requests. A split can be forced by inserting an
+ * I/O request with a zero address field.
+ */
+int zd_iowrite32a_locked(struct zd_chip *chip,
+ const struct zd_ioreq32 *ioreqs, unsigned int count)
+{
+ int r;
+ unsigned int i, j, t, max;
+
+ for (i = 0; i < count; i += j + t) {
+ t = 0;
+ max = count-i;
+ if (max > USB_MAX_IOWRITE32_COUNT)
+ max = USB_MAX_IOWRITE32_COUNT;
+ for (j = 0; j < max; j++) {
+ if (!ioreqs[i+j].addr) {
+ t = 1;
+ break;
+ }
+ }
+
+ r = _zd_iowrite32v_locked(chip, &ioreqs[i], j);
+ if (r) {
+ dev_dbg_f(zd_chip_dev(chip),
+ "error _zd_iowrite32v_locked."
+ " Error number %d\n", r);
+ return r;
+ }
+ }
+
+ return 0;
+}
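Both array-write helpers submit the requests in chunks of at most USB_MAX_IOWRITE*_COUNT entries and cut a chunk short at any request whose address is zero; the `i += j + t` step then skips the zero marker itself. A self-contained sketch of that chunking rule, where MAX_CHUNK is an arbitrary stand-in for the USB limit:

#include <stdio.h>

#define MAX_CHUNK 3	/* stands in for USB_MAX_IOWRITE16_COUNT (assumed value) */

struct req { unsigned addr, value; };

/* Print the chunks exactly as the loop in zd_iowrite16a_locked() would
 * submit them: bounded by MAX_CHUNK and split at zero-address entries.
 */
static void show_chunks(const struct req *r, unsigned count)
{
	unsigned i, j, t, max;

	for (i = 0; i < count; i += j + t) {
		t = 0;
		max = count - i;
		if (max > MAX_CHUNK)
			max = MAX_CHUNK;
		for (j = 0; j < max; j++) {
			if (!r[i + j].addr) {	/* forced split marker */
				t = 1;		/* skip the marker itself */
				break;
			}
		}
		printf("submit %u requests starting at index %u\n", j, i);
	}
}

int main(void)
{
	struct req r[] = { {1,0}, {2,0}, {0,0}, {3,0}, {4,0}, {5,0}, {6,0} };

	show_chunks(r, sizeof(r) / sizeof(r[0]));
	return 0;
}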
+
+int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value)
+{
+ int r;
+
+ ZD_ASSERT(!mutex_is_locked(&chip->mutex));
+ mutex_lock(&chip->mutex);
+ r = zd_ioread16_locked(chip, value, addr);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+int zd_ioread32(struct zd_chip *chip, zd_addr_t addr, u32 *value)
+{
+ int r;
+
+ ZD_ASSERT(!mutex_is_locked(&chip->mutex));
+ mutex_lock(&chip->mutex);
+ r = zd_ioread32_locked(chip, value, addr);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+int zd_iowrite16(struct zd_chip *chip, zd_addr_t addr, u16 value)
+{
+ int r;
+
+ ZD_ASSERT(!mutex_is_locked(&chip->mutex));
+ mutex_lock(&chip->mutex);
+ r = zd_iowrite16_locked(chip, value, addr);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+int zd_iowrite32(struct zd_chip *chip, zd_addr_t addr, u32 value)
+{
+ int r;
+
+ ZD_ASSERT(!mutex_is_locked(&chip->mutex));
+ mutex_lock(&chip->mutex);
+ r = zd_iowrite32_locked(chip, value, addr);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+int zd_ioread32v(struct zd_chip *chip, const zd_addr_t *addresses,
+ u32 *values, unsigned int count)
+{
+ int r;
+
+ ZD_ASSERT(!mutex_is_locked(&chip->mutex));
+ mutex_lock(&chip->mutex);
+ r = zd_ioread32v_locked(chip, values, addresses, count);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+int zd_iowrite32a(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
+ unsigned int count)
+{
+ int r;
+
+ ZD_ASSERT(!mutex_is_locked(&chip->mutex));
+ mutex_lock(&chip->mutex);
+ r = zd_iowrite32a_locked(chip, ioreqs, count);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+static int read_pod(struct zd_chip *chip, u8 *rf_type)
+{
+ int r;
+ u32 value;
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = zd_ioread32_locked(chip, &value, E2P_POD);
+ if (r)
+ goto error;
+ dev_dbg_f(zd_chip_dev(chip), "E2P_POD %#010x\n", value);
+
+ /* FIXME: AL2230 handling (Bit 7 in POD) */
+ *rf_type = value & 0x0f;
+ chip->pa_type = (value >> 16) & 0x0f;
+ chip->patch_cck_gain = (value >> 8) & 0x1;
+ chip->patch_cr157 = (value >> 13) & 0x1;
+ chip->patch_6m_band_edge = (value >> 21) & 0x1;
+
+ dev_dbg_f(zd_chip_dev(chip),
+ "RF %s %#01x PA type %#01x patch CCK %d patch CR157 %d "
+ "patch 6M %d\n",
+ zd_rf_name(*rf_type), *rf_type,
+ chip->pa_type, chip->patch_cck_gain,
+ chip->patch_cr157, chip->patch_6m_band_edge);
+ return 0;
+error:
+ *rf_type = 0;
+ chip->pa_type = 0;
+ chip->patch_cck_gain = 0;
+ chip->patch_cr157 = 0;
+ chip->patch_6m_band_edge = 0;
+ return r;
+}
+
+static int _read_mac_addr(struct zd_chip *chip, u8 *mac_addr,
+ const zd_addr_t *addr)
+{
+ int r;
+ u32 parts[2];
+
+ r = zd_ioread32v_locked(chip, parts, (const zd_addr_t *)addr, 2);
+ if (r) {
+ dev_dbg_f(zd_chip_dev(chip),
+ "error: couldn't read e2p macs. Error number %d\n", r);
+ return r;
+ }
+
+ mac_addr[0] = parts[0];
+ mac_addr[1] = parts[0] >> 8;
+ mac_addr[2] = parts[0] >> 16;
+ mac_addr[3] = parts[0] >> 24;
+ mac_addr[4] = parts[1];
+ mac_addr[5] = parts[1] >> 8;
+
+ return 0;
+}
+
+static int read_e2p_mac_addr(struct zd_chip *chip)
+{
+ static const zd_addr_t addr[2] = { E2P_MAC_ADDR_P1, E2P_MAC_ADDR_P2 };
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ return _read_mac_addr(chip, chip->e2p_mac, (const zd_addr_t *)addr);
+}
+
+/* MAC address: if a custom MAC address is to be used, CR_MAC_ADDR_P1 and
+ * CR_MAC_ADDR_P2 must be overwritten.
+ */
+void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr)
+{
+ mutex_lock(&chip->mutex);
+ memcpy(mac_addr, chip->e2p_mac, ETH_ALEN);
+ mutex_unlock(&chip->mutex);
+}
+
+static int read_mac_addr(struct zd_chip *chip, u8 *mac_addr)
+{
+ static const zd_addr_t addr[2] = { CR_MAC_ADDR_P1, CR_MAC_ADDR_P2 };
+ return _read_mac_addr(chip, mac_addr, (const zd_addr_t *)addr);
+}
+
+int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr)
+{
+ int r;
+
+ dev_dbg_f(zd_chip_dev(chip), "\n");
+ mutex_lock(&chip->mutex);
+ r = read_mac_addr(chip, mac_addr);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+{
+ int r;
+ struct zd_ioreq32 reqs[2] = {
+ [0] = { .addr = CR_MAC_ADDR_P1 },
+ [1] = { .addr = CR_MAC_ADDR_P2 },
+ };
+
+ reqs[0].value = (mac_addr[3] << 24)
+ | (mac_addr[2] << 16)
+ | (mac_addr[1] << 8)
+ | mac_addr[0];
+ reqs[1].value = (mac_addr[5] << 8)
+ | mac_addr[4];
+
+ dev_dbg_f(zd_chip_dev(chip),
+ "mac addr " MAC_FMT "\n", MAC_ARG(mac_addr));
+
+ mutex_lock(&chip->mutex);
+ r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
+#ifdef DEBUG
+ {
+ u8 tmp[ETH_ALEN];
+ read_mac_addr(chip, tmp);
+ }
+#endif /* DEBUG */
+ mutex_unlock(&chip->mutex);
+ return r;
+}
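zd_write_mac_addr() packs the six address bytes little-endian into the two registers (bytes 0-3 into CR_MAC_ADDR_P1, bytes 4-5 into CR_MAC_ADDR_P2), the mirror image of _read_mac_addr(). A self-contained round trip of that byte layout, using an example address chosen for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustration of the CR_MAC_ADDR_P1/P2 byte layout used above. */
int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x19, 0x5b, 0x12, 0x34, 0x56 };
	uint32_t p1, p2;
	uint8_t out[6];

	p1 = ((uint32_t)mac[3] << 24) | ((uint32_t)mac[2] << 16) |
	     ((uint32_t)mac[1] << 8)  | mac[0];
	p2 = ((uint32_t)mac[5] << 8)  | mac[4];

	out[0] = p1;       out[1] = p1 >> 8;
	out[2] = p1 >> 16; out[3] = p1 >> 24;
	out[4] = p2;       out[5] = p2 >> 8;

	printf("P1 %#010x P2 %#06x -> %02x:%02x:%02x:%02x:%02x:%02x\n",
	       (unsigned)p1, (unsigned)p2,
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}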
+
+int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain)
+{
+ int r;
+ u32 value;
+
+ mutex_lock(&chip->mutex);
+ r = zd_ioread32_locked(chip, &value, E2P_SUBID);
+ mutex_unlock(&chip->mutex);
+ if (r)
+ return r;
+
+ *regdomain = value >> 16;
+ dev_dbg_f(zd_chip_dev(chip), "regdomain: %#04x\n", *regdomain);
+
+ return 0;
+}
+
+static int read_values(struct zd_chip *chip, u8 *values, size_t count,
+ zd_addr_t e2p_addr, u32 guard)
+{
+ int r;
+ int i;
+ u32 v;
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ for (i = 0;;) {
+ r = zd_ioread32_locked(chip, &v, e2p_addr+i/2);
+ if (r)
+ return r;
+ v -= guard;
+ if (i+4 < count) {
+ values[i++] = v;
+ values[i++] = v >> 8;
+ values[i++] = v >> 16;
+ values[i++] = v >> 24;
+ continue;
+ }
+ for (;i < count; i++)
+ values[i] = v >> (8*(i%3));
+ return 0;
+ }
+}
+
+static int read_pwr_cal_values(struct zd_chip *chip)
+{
+ return read_values(chip, chip->pwr_cal_values,
+ E2P_CHANNEL_COUNT, E2P_PWR_CAL_VALUE1,
+ 0);
+}
+
+static int read_pwr_int_values(struct zd_chip *chip)
+{
+ return read_values(chip, chip->pwr_int_values,
+ E2P_CHANNEL_COUNT, E2P_PWR_INT_VALUE1,
+ E2P_PWR_INT_GUARD);
+}
+
+static int read_ofdm_cal_values(struct zd_chip *chip)
+{
+ int r;
+ int i;
+ static const zd_addr_t addresses[] = {
+ E2P_36M_CAL_VALUE1,
+ E2P_48M_CAL_VALUE1,
+ E2P_54M_CAL_VALUE1,
+ };
+
+ for (i = 0; i < 3; i++) {
+ r = read_values(chip, chip->ofdm_cal_values[i],
+ E2P_CHANNEL_COUNT, addresses[i], 0);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+static int read_cal_int_tables(struct zd_chip *chip)
+{
+ int r;
+
+ r = read_pwr_cal_values(chip);
+ if (r)
+ return r;
+ r = read_pwr_int_values(chip);
+ if (r)
+ return r;
+ r = read_ofdm_cal_values(chip);
+ if (r)
+ return r;
+ return 0;
+}
+
+/* phy means physical registers */
+int zd_chip_lock_phy_regs(struct zd_chip *chip)
+{
+ int r;
+ u32 tmp;
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = zd_ioread32_locked(chip, &tmp, CR_REG1);
+ if (r) {
+ dev_err(zd_chip_dev(chip), "error ioread32(CR_REG1): %d\n", r);
+ return r;
+ }
+
+ dev_dbg_f(zd_chip_dev(chip),
+ "CR_REG1: 0x%02x -> 0x%02x\n", tmp, tmp & ~UNLOCK_PHY_REGS);
+ tmp &= ~UNLOCK_PHY_REGS;
+
+ r = zd_iowrite32_locked(chip, tmp, CR_REG1);
+ if (r)
+ dev_err(zd_chip_dev(chip), "error iowrite32(CR_REG1): %d\n", r);
+ return r;
+}
+
+int zd_chip_unlock_phy_regs(struct zd_chip *chip)
+{
+ int r;
+ u32 tmp;
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = zd_ioread32_locked(chip, &tmp, CR_REG1);
+ if (r) {
+ dev_err(zd_chip_dev(chip),
+ "error ioread32(CR_REG1): %d\n", r);
+ return r;
+ }
+
+ dev_dbg_f(zd_chip_dev(chip),
+ "CR_REG1: 0x%02x -> 0x%02x\n", tmp, tmp | UNLOCK_PHY_REGS);
+ tmp |= UNLOCK_PHY_REGS;
+
+ r = zd_iowrite32_locked(chip, tmp, CR_REG1);
+ if (r)
+ dev_err(zd_chip_dev(chip), "error iowrite32(CR_REG1): %d\n", r);
+ return r;
+}
+
+/* CR157 can be optionally patched by the EEPROM */
+static int patch_cr157(struct zd_chip *chip)
+{
+ int r;
+ u32 value;
+
+ if (!chip->patch_cr157)
+ return 0;
+
+ r = zd_ioread32_locked(chip, &value, E2P_PHY_REG);
+ if (r)
+ return r;
+
+ dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value >> 8);
+ return zd_iowrite32_locked(chip, value >> 8, CR157);
+}
+
+/*
+ * The 6M band edge can optionally be overwritten for certain RFs.
+ * The vendor driver says: for FCC regulation, enabled per the HWFeature
+ * 6M band edge bit (for AL2230, AL2230S).
+ */
+static int patch_6m_band_edge(struct zd_chip *chip, int channel)
+{
+ struct zd_ioreq16 ioreqs[] = {
+ { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
+ { CR47, 0x1e },
+ };
+
+ if (!chip->patch_6m_band_edge || !chip->rf.patch_6m_band_edge)
+ return 0;
+
+ /* FIXME: Channel 11 is not the edge for all regulatory domains. */
+ if (channel == 1 || channel == 11)
+ ioreqs[0].value = 0x12;
+
+ dev_dbg_f(zd_chip_dev(chip), "patching for channel %d\n", channel);
+ return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+static int zd1211_hw_reset_phy(struct zd_chip *chip)
+{
+ static const struct zd_ioreq16 ioreqs[] = {
+ { CR0, 0x0a }, { CR1, 0x06 }, { CR2, 0x26 },
+ { CR3, 0x38 }, { CR4, 0x80 }, { CR9, 0xa0 },
+ { CR10, 0x81 }, { CR11, 0x00 }, { CR12, 0x7f },
+ { CR13, 0x8c }, { CR14, 0x80 }, { CR15, 0x3d },
+ { CR16, 0x20 }, { CR17, 0x1e }, { CR18, 0x0a },
+ { CR19, 0x48 }, { CR20, 0x0c }, { CR21, 0x0c },
+ { CR22, 0x23 }, { CR23, 0x90 }, { CR24, 0x14 },
+ { CR25, 0x40 }, { CR26, 0x10 }, { CR27, 0x19 },
+ { CR28, 0x7f }, { CR29, 0x80 }, { CR30, 0x4b },
+ { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 },
+ { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 },
+ { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c },
+ { CR40, 0x84 }, { CR41, 0x2a }, { CR42, 0x80 },
+ { CR43, 0x10 }, { CR44, 0x12 }, { CR46, 0xff },
+ { CR47, 0x1E }, { CR48, 0x26 }, { CR49, 0x5b },
+ { CR64, 0xd0 }, { CR65, 0x04 }, { CR66, 0x58 },
+ { CR67, 0xc9 }, { CR68, 0x88 }, { CR69, 0x41 },
+ { CR70, 0x23 }, { CR71, 0x10 }, { CR72, 0xff },
+ { CR73, 0x32 }, { CR74, 0x30 }, { CR75, 0x65 },
+ { CR76, 0x41 }, { CR77, 0x1b }, { CR78, 0x30 },
+ { CR79, 0x68 }, { CR80, 0x64 }, { CR81, 0x64 },
+ { CR82, 0x00 }, { CR83, 0x00 }, { CR84, 0x00 },
+ { CR85, 0x02 }, { CR86, 0x00 }, { CR87, 0x00 },
+ { CR88, 0xff }, { CR89, 0xfc }, { CR90, 0x00 },
+ { CR91, 0x00 }, { CR92, 0x00 }, { CR93, 0x08 },
+ { CR94, 0x00 }, { CR95, 0x00 }, { CR96, 0xff },
+ { CR97, 0xe7 }, { CR98, 0x00 }, { CR99, 0x00 },
+ { CR100, 0x00 }, { CR101, 0xae }, { CR102, 0x02 },
+ { CR103, 0x00 }, { CR104, 0x03 }, { CR105, 0x65 },
+ { CR106, 0x04 }, { CR107, 0x00 }, { CR108, 0x0a },
+ { CR109, 0xaa }, { CR110, 0xaa }, { CR111, 0x25 },
+ { CR112, 0x25 }, { CR113, 0x00 }, { CR119, 0x1e },
+ { CR125, 0x90 }, { CR126, 0x00 }, { CR127, 0x00 },
+ { },
+ { CR5, 0x00 }, { CR6, 0x00 }, { CR7, 0x00 },
+ { CR8, 0x00 }, { CR9, 0x20 }, { CR12, 0xf0 },
+ { CR20, 0x0e }, { CR21, 0x0e }, { CR27, 0x10 },
+ { CR44, 0x33 }, { CR47, 0x1E }, { CR83, 0x24 },
+ { CR84, 0x04 }, { CR85, 0x00 }, { CR86, 0x0C },
+ { CR87, 0x12 }, { CR88, 0x0C }, { CR89, 0x00 },
+ { CR90, 0x10 }, { CR91, 0x08 }, { CR93, 0x00 },
+ { CR94, 0x01 }, { CR95, 0x00 }, { CR96, 0x50 },
+ { CR97, 0x37 }, { CR98, 0x35 }, { CR101, 0x13 },
+ { CR102, 0x27 }, { CR103, 0x27 }, { CR104, 0x18 },
+ { CR105, 0x12 }, { CR109, 0x27 }, { CR110, 0x27 },
+ { CR111, 0x27 }, { CR112, 0x27 }, { CR113, 0x27 },
+ { CR114, 0x27 }, { CR115, 0x26 }, { CR116, 0x24 },
+ { CR117, 0xfc }, { CR118, 0xfa }, { CR120, 0x4f },
+ { CR123, 0x27 }, { CR125, 0xaa }, { CR127, 0x03 },
+ { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
+ { CR131, 0x0C }, { CR136, 0xdf }, { CR137, 0x40 },
+ { CR138, 0xa0 }, { CR139, 0xb0 }, { CR140, 0x99 },
+ { CR141, 0x82 }, { CR142, 0x54 }, { CR143, 0x1c },
+ { CR144, 0x6c }, { CR147, 0x07 }, { CR148, 0x4c },
+ { CR149, 0x50 }, { CR150, 0x0e }, { CR151, 0x18 },
+ { CR160, 0xfe }, { CR161, 0xee }, { CR162, 0xaa },
+ { CR163, 0xfa }, { CR164, 0xfa }, { CR165, 0xea },
+ { CR166, 0xbe }, { CR167, 0xbe }, { CR168, 0x6a },
+ { CR169, 0xba }, { CR170, 0xba }, { CR171, 0xba },
+ /* Note: CR204 must lead the CR203 */
+ { CR204, 0x7d },
+ { },
+ { CR203, 0x30 },
+ };
+
+ int r, t;
+
+ dev_dbg_f(zd_chip_dev(chip), "\n");
+
+ r = zd_chip_lock_phy_regs(chip);
+ if (r)
+ goto out;
+
+ r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+ if (r)
+ goto unlock;
+
+ r = patch_cr157(chip);
+unlock:
+ t = zd_chip_unlock_phy_regs(chip);
+ if (t && !r)
+ r = t;
+out:
+ return r;
+}
+
+static int zd1211b_hw_reset_phy(struct zd_chip *chip)
+{
+ static const struct zd_ioreq16 ioreqs[] = {
+ { CR0, 0x14 }, { CR1, 0x06 }, { CR2, 0x26 },
+ { CR3, 0x38 }, { CR4, 0x80 }, { CR9, 0xe0 },
+ { CR10, 0x81 },
+ /* power control { { CR11, 1 << 6 }, */
+ { CR11, 0x00 },
+ { CR12, 0xf0 }, { CR13, 0x8c }, { CR14, 0x80 },
+ { CR15, 0x3d }, { CR16, 0x20 }, { CR17, 0x1e },
+ { CR18, 0x0a }, { CR19, 0x48 },
+ { CR20, 0x10 }, /* Org:0x0E, ComTrend:RalLink AP */
+ { CR21, 0x0e }, { CR22, 0x23 }, { CR23, 0x90 },
+ { CR24, 0x14 }, { CR25, 0x40 }, { CR26, 0x10 },
+ { CR27, 0x10 }, { CR28, 0x7f }, { CR29, 0x80 },
+ { CR30, 0x49 }, /* jointly decoder, no ASIC */
+ { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 },
+ { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 },
+ { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c },
+ { CR40, 0x84 }, { CR41, 0x2a }, { CR42, 0x80 },
+ { CR43, 0x10 }, { CR44, 0x33 }, { CR46, 0xff },
+ { CR47, 0x1E }, { CR48, 0x26 }, { CR49, 0x5b },
+ { CR64, 0xd0 }, { CR65, 0x04 }, { CR66, 0x58 },
+ { CR67, 0xc9 }, { CR68, 0x88 }, { CR69, 0x41 },
+ { CR70, 0x23 }, { CR71, 0x10 }, { CR72, 0xff },
+ { CR73, 0x32 }, { CR74, 0x30 }, { CR75, 0x65 },
+ { CR76, 0x41 }, { CR77, 0x1b }, { CR78, 0x30 },
+ { CR79, 0xf0 }, { CR80, 0x64 }, { CR81, 0x64 },
+ { CR82, 0x00 }, { CR83, 0x24 }, { CR84, 0x04 },
+ { CR85, 0x00 }, { CR86, 0x0c }, { CR87, 0x12 },
+ { CR88, 0x0c }, { CR89, 0x00 }, { CR90, 0x58 },
+ { CR91, 0x04 }, { CR92, 0x00 }, { CR93, 0x00 },
+ { CR94, 0x01 },
+ { CR95, 0x20 }, /* ZD1211B */
+ { CR96, 0x50 }, { CR97, 0x37 }, { CR98, 0x35 },
+ { CR99, 0x00 }, { CR100, 0x01 }, { CR101, 0x13 },
+ { CR102, 0x27 }, { CR103, 0x27 }, { CR104, 0x18 },
+ { CR105, 0x12 }, { CR106, 0x04 }, { CR107, 0x00 },
+ { CR108, 0x0a }, { CR109, 0x27 }, { CR110, 0x27 },
+ { CR111, 0x27 }, { CR112, 0x27 }, { CR113, 0x27 },
+ { CR114, 0x27 }, { CR115, 0x26 }, { CR116, 0x24 },
+ { CR117, 0xfc }, { CR118, 0xfa }, { CR119, 0x1e },
+ { CR125, 0x90 }, { CR126, 0x00 }, { CR127, 0x00 },
+ { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
+ { CR131, 0x0c }, { CR136, 0xdf }, { CR137, 0xa0 },
+ { CR138, 0xa8 }, { CR139, 0xb4 }, { CR140, 0x98 },
+ { CR141, 0x82 }, { CR142, 0x53 }, { CR143, 0x1c },
+ { CR144, 0x6c }, { CR147, 0x07 }, { CR148, 0x40 },
+ { CR149, 0x40 }, /* Org:0x50 ComTrend:RalLink AP */
+ { CR150, 0x14 }, /* Org:0x0E ComTrend:RalLink AP */
+ { CR151, 0x18 }, { CR159, 0x70 }, { CR160, 0xfe },
+ { CR161, 0xee }, { CR162, 0xaa }, { CR163, 0xfa },
+ { CR164, 0xfa }, { CR165, 0xea }, { CR166, 0xbe },
+ { CR167, 0xbe }, { CR168, 0x6a }, { CR169, 0xba },
+ { CR170, 0xba }, { CR171, 0xba },
+ /* Note: CR204 must lead the CR203 */
+ { CR204, 0x7d },
+ {},
+ { CR203, 0x30 },
+ };
+
+ int r, t;
+
+ dev_dbg_f(zd_chip_dev(chip), "\n");
+
+ r = zd_chip_lock_phy_regs(chip);
+ if (r)
+ goto out;
+
+ r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+ if (r)
+ goto unlock;
+
+ r = patch_cr157(chip);
+unlock:
+ t = zd_chip_unlock_phy_regs(chip);
+ if (t && !r)
+ r = t;
+out:
+ return r;
+}
+
+static int hw_reset_phy(struct zd_chip *chip)
+{
+ return chip->is_zd1211b ? zd1211b_hw_reset_phy(chip) :
+ zd1211_hw_reset_phy(chip);
+}
+
+static int zd1211_hw_init_hmac(struct zd_chip *chip)
+{
+ static const struct zd_ioreq32 ioreqs[] = {
+ { CR_ACK_TIMEOUT_EXT, 0x20 },
+ { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
+ { CR_ZD1211_RETRY_MAX, 0x2 },
+ { CR_SNIFFER_ON, 0 },
+ { CR_RX_FILTER, STA_RX_FILTER },
+ { CR_GROUP_HASH_P1, 0x00 },
+ { CR_GROUP_HASH_P2, 0x80000000 },
+ { CR_REG1, 0xa4 },
+ { CR_ADDA_PWR_DWN, 0x7f },
+ { CR_BCN_PLCP_CFG, 0x00f00401 },
+ { CR_PHY_DELAY, 0x00 },
+ { CR_ACK_TIMEOUT_EXT, 0x80 },
+ { CR_ADDA_PWR_DWN, 0x00 },
+ { CR_ACK_TIME_80211, 0x100 },
+ { CR_IFS_VALUE, 0x547c032 },
+ { CR_RX_PE_DELAY, 0x70 },
+ { CR_PS_CTRL, 0x10000000 },
+ { CR_RTS_CTS_RATE, 0x02030203 },
+ { CR_RX_THRESHOLD, 0x000c0640 },
+ { CR_AFTER_PNP, 0x1 },
+ { CR_WEP_PROTECT, 0x114 },
+ };
+
+ int r;
+
+ dev_dbg_f(zd_chip_dev(chip), "\n");
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+#ifdef DEBUG
+ if (r) {
+ dev_err(zd_chip_dev(chip),
+ "error in zd_iowrite32a_locked. Error number %d\n", r);
+ }
+#endif /* DEBUG */
+ return r;
+}
+
+static int zd1211b_hw_init_hmac(struct zd_chip *chip)
+{
+ static const struct zd_ioreq32 ioreqs[] = {
+ { CR_ACK_TIMEOUT_EXT, 0x20 },
+ { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
+ { CR_ZD1211B_RETRY_MAX, 0x02020202 },
+ { CR_ZD1211B_TX_PWR_CTL4, 0x007f003f },
+ { CR_ZD1211B_TX_PWR_CTL3, 0x007f003f },
+ { CR_ZD1211B_TX_PWR_CTL2, 0x003f001f },
+ { CR_ZD1211B_TX_PWR_CTL1, 0x001f000f },
+ { CR_ZD1211B_AIFS_CTL1, 0x00280028 },
+ { CR_ZD1211B_AIFS_CTL2, 0x008C003C },
+ { CR_ZD1211B_TXOP, 0x01800824 },
+ { CR_SNIFFER_ON, 0 },
+ { CR_RX_FILTER, STA_RX_FILTER },
+ { CR_GROUP_HASH_P1, 0x00 },
+ { CR_GROUP_HASH_P2, 0x80000000 },
+ { CR_REG1, 0xa4 },
+ { CR_ADDA_PWR_DWN, 0x7f },
+ { CR_BCN_PLCP_CFG, 0x00f00401 },
+ { CR_PHY_DELAY, 0x00 },
+ { CR_ACK_TIMEOUT_EXT, 0x80 },
+ { CR_ADDA_PWR_DWN, 0x00 },
+ { CR_ACK_TIME_80211, 0x100 },
+ { CR_IFS_VALUE, 0x547c032 },
+ { CR_RX_PE_DELAY, 0x70 },
+ { CR_PS_CTRL, 0x10000000 },
+ { CR_RTS_CTS_RATE, 0x02030203 },
+ { CR_RX_THRESHOLD, 0x000c0640 },
+ { CR_AFTER_PNP, 0x1 },
+ { CR_WEP_PROTECT, 0x114 },
+ };
+
+ int r;
+
+ dev_dbg_f(zd_chip_dev(chip), "\n");
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+ if (r) {
+ dev_dbg_f(zd_chip_dev(chip),
+ "error in zd_iowrite32a_locked. Error number %d\n", r);
+ }
+ return r;
+}
+
+static int hw_init_hmac(struct zd_chip *chip)
+{
+ return chip->is_zd1211b ?
+ zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip);
+}
+
+struct aw_pt_bi {
+ u32 atim_wnd_period;
+ u32 pre_tbtt;
+ u32 beacon_interval;
+};
+
+static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
+{
+ int r;
+ static const zd_addr_t aw_pt_bi_addr[] =
+ { CR_ATIM_WND_PERIOD, CR_PRE_TBTT, CR_BCN_INTERVAL };
+ u32 values[3];
+
+ r = zd_ioread32v_locked(chip, values, (const zd_addr_t *)aw_pt_bi_addr,
+ ARRAY_SIZE(aw_pt_bi_addr));
+ if (r) {
+ memset(s, 0, sizeof(*s));
+ return r;
+ }
+
+ s->atim_wnd_period = values[0];
+ s->pre_tbtt = values[1];
+ s->beacon_interval = values[2];
+ dev_dbg_f(zd_chip_dev(chip), "aw %u pt %u bi %u\n",
+ s->atim_wnd_period, s->pre_tbtt, s->beacon_interval);
+ return 0;
+}
+
+static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
+{
+ struct zd_ioreq32 reqs[3];
+
+ if (s->beacon_interval <= 5)
+ s->beacon_interval = 5;
+ if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval)
+ s->pre_tbtt = s->beacon_interval - 1;
+ if (s->atim_wnd_period >= s->pre_tbtt)
+ s->atim_wnd_period = s->pre_tbtt - 1;
+
+ reqs[0].addr = CR_ATIM_WND_PERIOD;
+ reqs[0].value = s->atim_wnd_period;
+ reqs[1].addr = CR_PRE_TBTT;
+ reqs[1].value = s->pre_tbtt;
+ reqs[2].addr = CR_BCN_INTERVAL;
+ reqs[2].value = s->beacon_interval;
+
+ dev_dbg_f(zd_chip_dev(chip),
+ "aw %u pt %u bi %u\n", s->atim_wnd_period, s->pre_tbtt,
+ s->beacon_interval);
+ return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
+}
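set_aw_pt_bi() enforces the ordering atim_wnd_period < pre_tbtt < beacon_interval (all in 1024 us units) by clamping the values before writing them. A self-contained sketch of just those clamping rules, with example input values chosen for illustration:

#include <stdio.h>

struct aw_pt_bi { unsigned aw, pt, bi; };

/* Same clamping rules as set_aw_pt_bi(): bi >= 5, 4 <= pt < bi, aw < pt. */
static void clamp(struct aw_pt_bi *s)
{
	if (s->bi <= 5)
		s->bi = 5;
	if (s->pt < 4 || s->pt >= s->bi)
		s->pt = s->bi - 1;
	if (s->aw >= s->pt)
		s->aw = s->pt - 1;
}

int main(void)
{
	struct aw_pt_bi s = { .aw = 10, .pt = 2, .bi = 3 };

	clamp(&s);	/* -> bi 5, pt 4, aw 3 */
	printf("aw %u pt %u bi %u\n", s.aw, s.pt, s.bi);
	return 0;
}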
+
+
+static int set_beacon_interval(struct zd_chip *chip, u32 interval)
+{
+ int r;
+ struct aw_pt_bi s;
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = get_aw_pt_bi(chip, &s);
+ if (r)
+ return r;
+ s.beacon_interval = interval;
+ return set_aw_pt_bi(chip, &s);
+}
+
+int zd_set_beacon_interval(struct zd_chip *chip, u32 interval)
+{
+ int r;
+
+ mutex_lock(&chip->mutex);
+ r = set_beacon_interval(chip, interval);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+static int hw_init(struct zd_chip *chip)
+{
+ int r;
+
+ dev_dbg_f(zd_chip_dev(chip), "\n");
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = hw_reset_phy(chip);
+ if (r)
+ return r;
+
+ r = hw_init_hmac(chip);
+ if (r)
+ return r;
+ r = set_beacon_interval(chip, 100);
+ if (r)
+ return r;
+ return 0;
+}
+
+#ifdef DEBUG
+static int dump_cr(struct zd_chip *chip, const zd_addr_t addr,
+ const char *addr_string)
+{
+ int r;
+ u32 value;
+
+ r = zd_ioread32_locked(chip, &value, addr);
+ if (r) {
+ dev_dbg_f(zd_chip_dev(chip),
+ "error reading %s. Error number %d\n", addr_string, r);
+ return r;
+ }
+
+ dev_dbg_f(zd_chip_dev(chip), "%s %#010x\n",
+ addr_string, (unsigned int)value);
+ return 0;
+}
+
+static int test_init(struct zd_chip *chip)
+{
+ int r;
+
+ r = dump_cr(chip, CR_AFTER_PNP, "CR_AFTER_PNP");
+ if (r)
+ return r;
+ r = dump_cr(chip, CR_GPI_EN, "CR_GPI_EN");
+ if (r)
+ return r;
+ return dump_cr(chip, CR_INTERRUPT, "CR_INTERRUPT");
+}
+
+static void dump_fw_registers(struct zd_chip *chip)
+{
+ static const zd_addr_t addr[4] = {
+ FW_FIRMWARE_VER, FW_USB_SPEED, FW_FIX_TX_RATE,
+ FW_LINK_STATUS
+ };
+
+ int r;
+ u16 values[4];
+
+ r = zd_ioread16v_locked(chip, values, (const zd_addr_t*)addr,
+ ARRAY_SIZE(addr));
+ if (r) {
+ dev_dbg_f(zd_chip_dev(chip), "error %d zd_ioread16v_locked\n",
+ r);
+ return;
+ }
+
+ dev_dbg_f(zd_chip_dev(chip), "FW_FIRMWARE_VER %#06hx\n", values[0]);
+ dev_dbg_f(zd_chip_dev(chip), "FW_USB_SPEED %#06hx\n", values[1]);
+ dev_dbg_f(zd_chip_dev(chip), "FW_FIX_TX_RATE %#06hx\n", values[2]);
+ dev_dbg_f(zd_chip_dev(chip), "FW_LINK_STATUS %#06hx\n", values[3]);
+}
+#endif /* DEBUG */
+
+static int print_fw_version(struct zd_chip *chip)
+{
+ int r;
+ u16 version;
+
+ r = zd_ioread16_locked(chip, &version, FW_FIRMWARE_VER);
+ if (r)
+ return r;
+
+ dev_info(zd_chip_dev(chip),"firmware version %04hx\n", version);
+ return 0;
+}
+
+static int set_mandatory_rates(struct zd_chip *chip, enum ieee80211_std std)
+{
+ u32 rates;
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+	/* This sets the mandatory rates, which depend only on the standard
+	 * that the device supports. Until further notice we should try
+ * to support 802.11g also for full speed USB.
+ */
+ switch (std) {
+ case IEEE80211B:
+ rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M;
+ break;
+ case IEEE80211G:
+ rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M|
+ CR_RATE_6M|CR_RATE_12M|CR_RATE_24M;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL);
+}
+
+int zd_chip_enable_hwint(struct zd_chip *chip)
+{
+ int r;
+
+ mutex_lock(&chip->mutex);
+ r = zd_iowrite32_locked(chip, HWINT_ENABLED, CR_INTERRUPT);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+static int disable_hwint(struct zd_chip *chip)
+{
+ return zd_iowrite32_locked(chip, HWINT_DISABLED, CR_INTERRUPT);
+}
+
+int zd_chip_disable_hwint(struct zd_chip *chip)
+{
+ int r;
+
+ mutex_lock(&chip->mutex);
+ r = disable_hwint(chip);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
+{
+ int r;
+ u8 rf_type;
+
+ dev_dbg_f(zd_chip_dev(chip), "\n");
+
+ mutex_lock(&chip->mutex);
+ chip->is_zd1211b = (device_type == DEVICE_ZD1211B) != 0;
+
+#ifdef DEBUG
+ r = test_init(chip);
+ if (r)
+ goto out;
+#endif
+ r = zd_iowrite32_locked(chip, 1, CR_AFTER_PNP);
+ if (r)
+ goto out;
+
+ r = zd_usb_init_hw(&chip->usb);
+ if (r)
+ goto out;
+
+ /* GPI is always disabled, also in the other driver.
+ */
+ r = zd_iowrite32_locked(chip, 0, CR_GPI_EN);
+ if (r)
+ goto out;
+ r = zd_iowrite32_locked(chip, CWIN_SIZE, CR_CWMIN_CWMAX);
+ if (r)
+ goto out;
+ /* Currently we support IEEE 802.11g for full and high speed USB.
+	 * It is open to discussion whether we should also support pure
+	 * 802.11b mode for full speed USB.
+ */
+ r = set_mandatory_rates(chip, IEEE80211G);
+ if (r)
+ goto out;
+ /* Disabling interrupts is certainly a smart thing here.
+ */
+ r = disable_hwint(chip);
+ if (r)
+ goto out;
+ r = read_pod(chip, &rf_type);
+ if (r)
+ goto out;
+ r = hw_init(chip);
+ if (r)
+ goto out;
+ r = zd_rf_init_hw(&chip->rf, rf_type);
+ if (r)
+ goto out;
+
+ r = print_fw_version(chip);
+ if (r)
+ goto out;
+
+#ifdef DEBUG
+ dump_fw_registers(chip);
+ r = test_init(chip);
+ if (r)
+ goto out;
+#endif /* DEBUG */
+
+ r = read_e2p_mac_addr(chip);
+ if (r)
+ goto out;
+
+ r = read_cal_int_tables(chip);
+ if (r)
+ goto out;
+
+ print_id(chip);
+out:
+ mutex_unlock(&chip->mutex);
+ return r;
+}
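zd_chip_init_hw() assumes zd_chip_init() has already set up the mutex, USB and RF state. The surrounding zd_usb/zd_mac code is not part of this file, so the exact ordering below is an assumption; a rough sketch of how the chip API introduced here fits together, with error handling trimmed:

/* Hedged sketch, not taken from zd_mac/zd_usb: the intended ordering of
 * the chip API defined in this file.
 */
static int bring_up(struct zd_chip *chip, struct net_device *netdev,
		    struct usb_interface *intf, u8 device_type)
{
	int r;

	zd_chip_init(chip, netdev, intf);	/* software state only */
	r = zd_chip_init_hw(chip, device_type);	/* firmware, PHY, HMAC, EEPROM */
	if (r)
		return r;
	r = zd_chip_enable_hwint(chip);		/* accept hardware interrupts */
	if (r)
		return r;
	return zd_chip_switch_radio_on(chip);
}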
+
+static int update_pwr_int(struct zd_chip *chip, u8 channel)
+{
+ u8 value = chip->pwr_int_values[channel - 1];
+ dev_dbg_f(zd_chip_dev(chip), "channel %d pwr_int %#04x\n",
+ channel, value);
+ return zd_iowrite32_locked(chip, value, CR31);
+}
+
+static int update_pwr_cal(struct zd_chip *chip, u8 channel)
+{
+ u8 value = chip->pwr_cal_values[channel-1];
+ dev_dbg_f(zd_chip_dev(chip), "channel %d pwr_cal %#04x\n",
+ channel, value);
+ return zd_iowrite32_locked(chip, value, CR68);
+}
+
+static int update_ofdm_cal(struct zd_chip *chip, u8 channel)
+{
+ struct zd_ioreq32 ioreqs[3];
+
+ ioreqs[0].addr = CR67;
+ ioreqs[0].value = chip->ofdm_cal_values[OFDM_36M_INDEX][channel-1];
+ ioreqs[1].addr = CR66;
+ ioreqs[1].value = chip->ofdm_cal_values[OFDM_48M_INDEX][channel-1];
+ ioreqs[2].addr = CR65;
+ ioreqs[2].value = chip->ofdm_cal_values[OFDM_54M_INDEX][channel-1];
+
+ dev_dbg_f(zd_chip_dev(chip),
+ "channel %d ofdm_cal 36M %#04x 48M %#04x 54M %#04x\n",
+ channel, ioreqs[0].value, ioreqs[1].value, ioreqs[2].value);
+ return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+static int update_channel_integration_and_calibration(struct zd_chip *chip,
+ u8 channel)
+{
+ int r;
+
+ r = update_pwr_int(chip, channel);
+ if (r)
+ return r;
+ if (chip->is_zd1211b) {
+ static const struct zd_ioreq32 ioreqs[] = {
+ { CR69, 0x28 },
+ {},
+ { CR69, 0x2a },
+ };
+
+ r = update_ofdm_cal(chip, channel);
+ if (r)
+ return r;
+ r = update_pwr_cal(chip, channel);
+ if (r)
+ return r;
+ r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/* The CCK baseband gain can be optionally patched by the EEPROM */
+static int patch_cck_gain(struct zd_chip *chip)
+{
+ int r;
+ u32 value;
+
+ if (!chip->patch_cck_gain)
+ return 0;
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = zd_ioread32_locked(chip, &value, E2P_PHY_REG);
+ if (r)
+ return r;
+ dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value & 0xff);
+ return zd_iowrite32_locked(chip, value & 0xff, CR47);
+}
+
+int zd_chip_set_channel(struct zd_chip *chip, u8 channel)
+{
+ int r, t;
+
+ mutex_lock(&chip->mutex);
+ r = zd_chip_lock_phy_regs(chip);
+ if (r)
+ goto out;
+ r = zd_rf_set_channel(&chip->rf, channel);
+ if (r)
+ goto unlock;
+ r = update_channel_integration_and_calibration(chip, channel);
+ if (r)
+ goto unlock;
+ r = patch_cck_gain(chip);
+ if (r)
+ goto unlock;
+ r = patch_6m_band_edge(chip, channel);
+ if (r)
+ goto unlock;
+ r = zd_iowrite32_locked(chip, 0, CR_CONFIG_PHILIPS);
+unlock:
+ t = zd_chip_unlock_phy_regs(chip);
+ if (t && !r)
+ r = t;
+out:
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+u8 zd_chip_get_channel(struct zd_chip *chip)
+{
+ u8 channel;
+
+ mutex_lock(&chip->mutex);
+ channel = chip->rf.channel;
+ mutex_unlock(&chip->mutex);
+ return channel;
+}
+
+static u16 led_mask(int led)
+{
+ switch (led) {
+ case 1:
+ return LED1;
+ case 2:
+ return LED2;
+ default:
+ return 0;
+ }
+}
+
+static int read_led_reg(struct zd_chip *chip, u16 *status)
+{
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ return zd_ioread16_locked(chip, status, CR_LED);
+}
+
+static int write_led_reg(struct zd_chip *chip, u16 status)
+{
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ return zd_iowrite16_locked(chip, status, CR_LED);
+}
+
+int zd_chip_led_status(struct zd_chip *chip, int led, enum led_status status)
+{
+ int r, ret;
+ u16 mask = led_mask(led);
+ u16 reg;
+
+ if (!mask)
+ return -EINVAL;
+ mutex_lock(&chip->mutex);
+ r = read_led_reg(chip, &reg);
+	if (r) {
+		ret = r;
+		goto out;
+	}
+	switch (status) {
+	case LED_STATUS:
+		ret = (reg & mask) ? LED_ON : LED_OFF;
+		goto out;
+	case LED_OFF:
+		reg &= ~mask;
+		ret = LED_OFF;
+		break;
+	case LED_FLIP:
+		reg ^= mask;
+		ret = (reg & mask) ? LED_ON : LED_OFF;
+		break;
+	case LED_ON:
+		reg |= mask;
+		ret = LED_ON;
+		break;
+	default:
+		ret = -EINVAL;
+		goto out;
+	}
+	r = write_led_reg(chip, reg);
+	if (r)
+		ret = r;
+out:
+	mutex_unlock(&chip->mutex);
+	return ret;
+}
+
+int zd_chip_led_flip(struct zd_chip *chip, int led,
+ const unsigned int *phases_msecs, unsigned int count)
+{
+ int i, r;
+ enum led_status status;
+
+ r = zd_chip_led_status(chip, led, LED_STATUS);
+	if (r < 0)
+		return r;
+ status = r;
+ for (i = 0; i < count; i++) {
+ r = zd_chip_led_status(chip, led, LED_FLIP);
+ if (r < 0)
+ goto out;
+ msleep(phases_msecs[i]);
+ }
+
+out:
+ zd_chip_led_status(chip, led, status);
+ return r;
+}
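zd_chip_led_flip() samples the current LED state, toggles the LED once per entry of phases_msecs with the given delay after each flip, and finally restores the saved state. A small usage sketch; the blink pattern is invented for illustration:

/* Sketch: blink LED 1 three times, 250 ms per phase. */
static int blink_led1(struct zd_chip *chip)
{
	static const unsigned int phases[] = { 250, 250, 250 };

	return zd_chip_led_flip(chip, 1, phases,
				sizeof(phases) / sizeof(phases[0]));
}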
+
+int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
+{
+ int r;
+
+ if (cr_rates & ~(CR_RATES_80211B|CR_RATES_80211G))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ r = zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
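zd_chip_set_basic_rates() rejects any bits outside CR_RATES_80211B|CR_RATES_80211G; callers build the mask from the CR_RATE_* defines in zd_chip.h below. A hedged usage sketch; the chosen rate set is only an example:

/* Sketch: advertise the 802.11b basic rates plus 6M/12M/24M OFDM. */
static int set_bg_basic_rates(struct zd_chip *chip)
{
	u16 rates = CR_RATE_1M | CR_RATE_2M | CR_RATE_5_5M | CR_RATE_11M |
		    CR_RATE_6M | CR_RATE_12M | CR_RATE_24M;

	return zd_chip_set_basic_rates(chip, rates);
}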
+
+static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size)
+{
+ static const u16 constants[] = {
+ 715, 655, 585, 540, 470, 410, 360, 315,
+ 270, 235, 205, 175, 150, 125, 105, 85,
+ 65, 50, 40, 25, 15
+ };
+
+ int i;
+ u32 x;
+
+	/* It seems that the reported quality parameter is per signal
+	 * and is converted here to a per-bit value.
+ */
+ switch (rate) {
+ case ZD_OFDM_RATE_6M:
+ case ZD_OFDM_RATE_12M:
+ case ZD_OFDM_RATE_24M:
+ size *= 2;
+ break;
+ case ZD_OFDM_RATE_9M:
+ case ZD_OFDM_RATE_18M:
+ case ZD_OFDM_RATE_36M:
+ case ZD_OFDM_RATE_54M:
+ size *= 4;
+ size /= 3;
+ break;
+ case ZD_OFDM_RATE_48M:
+ size *= 3;
+ size /= 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ x = (10000 * status_quality)/size;
+ for (i = 0; i < ARRAY_SIZE(constants); i++) {
+ if (x > constants[i])
+ break;
+ }
+
+ switch (rate) {
+ case ZD_OFDM_RATE_6M:
+ case ZD_OFDM_RATE_9M:
+ i += 3;
+ break;
+ case ZD_OFDM_RATE_12M:
+ case ZD_OFDM_RATE_18M:
+ i += 5;
+ break;
+ case ZD_OFDM_RATE_24M:
+ case ZD_OFDM_RATE_36M:
+ i += 9;
+ break;
+ case ZD_OFDM_RATE_48M:
+ case ZD_OFDM_RATE_54M:
+ i += 15;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return i;
+}
+
+static int ofdm_qual_percent(u8 status_quality, u8 rate, unsigned int size)
+{
+ int r;
+
+ r = ofdm_qual_db(status_quality, rate, size);
+ ZD_ASSERT(r >= 0);
+ if (r < 0)
+ r = 0;
+
+ r = (r * 100)/29;
+ return r <= 100 ? r : 100;
+}
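ofdm_qual_db() scales the frame size by the bits-per-symbol ratio of the rate, maps 10000*quality/size through the descending table to an index, and biases the index by rate; ofdm_qual_percent() then scales the result by 100/29 and clamps at 100. A self-contained worked example with invented input values (1000-byte frame at 24M, status_quality 30):

#include <stdio.h>

/* Worked example of the OFDM mapping above; the input numbers are
 * chosen for illustration only.
 */
static const unsigned short constants[] = {
	715, 655, 585, 540, 470, 410, 360, 315,
	270, 235, 205, 175, 150, 125, 105, 85,
	65, 50, 40, 25, 15
};

int main(void)
{
	unsigned size = 1000 * 2;		/* 24M: size *= 2 */
	unsigned x = (10000 * 30) / size;	/* = 150 */
	int i;

	for (i = 0; i < (int)(sizeof(constants) / sizeof(constants[0])); i++)
		if (x > constants[i])
			break;
	i += 9;					/* 24M/36M bias */
	/* the driver additionally clamps the percentage at 100 */
	printf("db index %d -> link quality %d%%\n", i, (i * 100) / 29);
	return 0;
}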
+
+static unsigned int log10times100(unsigned int x)
+{
+ static const u8 log10[] = {
+ 0,
+ 0, 30, 47, 60, 69, 77, 84, 90, 95, 100,
+ 104, 107, 111, 114, 117, 120, 123, 125, 127, 130,
+ 132, 134, 136, 138, 139, 141, 143, 144, 146, 147,
+ 149, 150, 151, 153, 154, 155, 156, 157, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 168, 169, 169,
+ 170, 171, 172, 173, 174, 174, 175, 176, 177, 177,
+ 178, 179, 179, 180, 181, 181, 182, 183, 183, 184,
+ 185, 185, 186, 186, 187, 188, 188, 189, 189, 190,
+ 190, 191, 191, 192, 192, 193, 193, 194, 194, 195,
+ 195, 196, 196, 197, 197, 198, 198, 199, 199, 200,
+ 200, 200, 201, 201, 202, 202, 202, 203, 203, 204,
+ 204, 204, 205, 205, 206, 206, 206, 207, 207, 207,
+ 208, 208, 208, 209, 209, 210, 210, 210, 211, 211,
+ 211, 212, 212, 212, 213, 213, 213, 213, 214, 214,
+ 214, 215, 215, 215, 216, 216, 216, 217, 217, 217,
+ 217, 218, 218, 218, 219, 219, 219, 219, 220, 220,
+ 220, 220, 221, 221, 221, 222, 222, 222, 222, 223,
+ 223, 223, 223, 224, 224, 224, 224,
+ };
+
+ return x < ARRAY_SIZE(log10) ? log10[x] : 225;
+}
+
+enum {
+ MAX_CCK_EVM_DB = 45,
+};
+
+static int cck_evm_db(u8 status_quality)
+{
+ return (20 * log10times100(status_quality)) / 100;
+}
+
+static int cck_snr_db(u8 status_quality)
+{
+ int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality);
+ ZD_ASSERT(r >= 0);
+ return r;
+}
+
+static int cck_qual_percent(u8 status_quality)
+{
+ int r;
+
+ r = cck_snr_db(status_quality);
+ r = (100*r)/17;
+ return r <= 100 ? r : 100;
+}
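For CCK frames the quality byte is treated as an EVM figure: cck_evm_db() computes 20*log10(quality) via the lookup table, cck_snr_db() subtracts it from MAX_CCK_EVM_DB (45 dB), and cck_qual_percent() scales by 100/17 with a clamp at 100. A short worked example, using status_quality = 50 (log10times100(50) = 169 from the table):

#include <stdio.h>

/* Worked example of the CCK mapping above for status_quality = 50. */
int main(void)
{
	int evm_db = (20 * 169) / 100;	/* = 33 dB */
	int snr_db = 45 - evm_db;	/* MAX_CCK_EVM_DB - EVM = 12 dB */
	int percent = (100 * snr_db) / 17;

	printf("EVM %d dB, SNR %d dB, quality %d%%\n", evm_db, snr_db,
	       percent <= 100 ? percent : 100);
	return 0;
}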
+
+u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
+ const struct rx_status *status)
+{
+ return (status->frame_status&ZD_RX_OFDM) ?
+ ofdm_qual_percent(status->signal_quality_ofdm,
+ zd_ofdm_plcp_header_rate(rx_frame),
+ size) :
+ cck_qual_percent(status->signal_quality_cck);
+}
+
+u8 zd_rx_strength_percent(u8 rssi)
+{
+ int r = (rssi*100) / 41;
+ if (r > 100)
+ r = 100;
+ return (u8) r;
+}
+
+u16 zd_rx_rate(const void *rx_frame, const struct rx_status *status)
+{
+ static const u16 ofdm_rates[] = {
+ [ZD_OFDM_RATE_6M] = 60,
+ [ZD_OFDM_RATE_9M] = 90,
+ [ZD_OFDM_RATE_12M] = 120,
+ [ZD_OFDM_RATE_18M] = 180,
+ [ZD_OFDM_RATE_24M] = 240,
+ [ZD_OFDM_RATE_36M] = 360,
+ [ZD_OFDM_RATE_48M] = 480,
+ [ZD_OFDM_RATE_54M] = 540,
+ };
+ u16 rate;
+ if (status->frame_status & ZD_RX_OFDM) {
+ u8 ofdm_rate = zd_ofdm_plcp_header_rate(rx_frame);
+ rate = ofdm_rates[ofdm_rate & 0xf];
+ } else {
+ u8 cck_rate = zd_cck_plcp_header_rate(rx_frame);
+ switch (cck_rate) {
+ case ZD_CCK_SIGNAL_1M:
+ rate = 10;
+ break;
+ case ZD_CCK_SIGNAL_2M:
+ rate = 20;
+ break;
+ case ZD_CCK_SIGNAL_5M5:
+ rate = 55;
+ break;
+ case ZD_CCK_SIGNAL_11M:
+ rate = 110;
+ break;
+ default:
+ rate = 0;
+ }
+ }
+
+ return rate;
+}
+
+int zd_chip_switch_radio_on(struct zd_chip *chip)
+{
+ int r;
+
+ mutex_lock(&chip->mutex);
+ r = zd_switch_radio_on(&chip->rf);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+int zd_chip_switch_radio_off(struct zd_chip *chip)
+{
+ int r;
+
+ mutex_lock(&chip->mutex);
+ r = zd_switch_radio_off(&chip->rf);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+int zd_chip_enable_int(struct zd_chip *chip)
+{
+ int r;
+
+ mutex_lock(&chip->mutex);
+ r = zd_usb_enable_int(&chip->usb);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+void zd_chip_disable_int(struct zd_chip *chip)
+{
+ mutex_lock(&chip->mutex);
+ zd_usb_disable_int(&chip->usb);
+ mutex_unlock(&chip->mutex);
+}
+
+int zd_chip_enable_rx(struct zd_chip *chip)
+{
+ int r;
+
+ mutex_lock(&chip->mutex);
+ r = zd_usb_enable_rx(&chip->usb);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
+
+void zd_chip_disable_rx(struct zd_chip *chip)
+{
+ mutex_lock(&chip->mutex);
+ zd_usb_disable_rx(&chip->usb);
+ mutex_unlock(&chip->mutex);
+}
+
+int zd_rfwritev_locked(struct zd_chip *chip,
+ const u32* values, unsigned int count, u8 bits)
+{
+ int r;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ r = zd_rfwrite_locked(chip, values[i], bits);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
new file mode 100644
index 000000000000..069d2b467339
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -0,0 +1,827 @@
+/* zd_chip.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ZD_CHIP_H
+#define _ZD_CHIP_H
+
+#include "zd_types.h"
+#include "zd_rf.h"
+#include "zd_usb.h"
+
+/* Header for the Media Access Controller (MAC) and the Baseband Processor
+ * (BBP). It appears that the ZD1211 wraps the old ZD1205 with USB glue and
+ * adds a processor for handling the USB protocol.
+ */
+
+/* 8-bit hardware registers */
+#define CR0 CTL_REG(0x0000)
+#define CR1 CTL_REG(0x0004)
+#define CR2 CTL_REG(0x0008)
+#define CR3 CTL_REG(0x000C)
+
+#define CR5 CTL_REG(0x0010)
+/* bit 5: if set, the short preamble is used
+ * bit 6: filter band - Japan channel 14 on, else off
+ */
+#define CR6 CTL_REG(0x0014)
+#define CR7 CTL_REG(0x0018)
+#define CR8 CTL_REG(0x001C)
+
+#define CR4 CTL_REG(0x0020)
+
+#define CR9 CTL_REG(0x0024)
+/* bit 2: antenna switch (together with CR10) */
+#define CR10 CTL_REG(0x0028)
+/* bit 1: antenna switch (together with CR9)
+ * for the RF2959, controls radio on/off together with CR11
+ */
+#define CR11 CTL_REG(0x002C)
+/* bit 6: TX power control for OFDM
+ * for the RF2959, controls radio on/off together with CR10
+ */
+#define CR12 CTL_REG(0x0030)
+#define CR13 CTL_REG(0x0034)
+#define CR14 CTL_REG(0x0038)
+#define CR15 CTL_REG(0x003C)
+#define CR16 CTL_REG(0x0040)
+#define CR17 CTL_REG(0x0044)
+#define CR18 CTL_REG(0x0048)
+#define CR19 CTL_REG(0x004C)
+#define CR20 CTL_REG(0x0050)
+#define CR21 CTL_REG(0x0054)
+#define CR22 CTL_REG(0x0058)
+#define CR23 CTL_REG(0x005C)
+#define CR24 CTL_REG(0x0060) /* CCA threshold */
+#define CR25 CTL_REG(0x0064)
+#define CR26 CTL_REG(0x0068)
+#define CR27 CTL_REG(0x006C)
+#define CR28 CTL_REG(0x0070)
+#define CR29 CTL_REG(0x0074)
+#define CR30 CTL_REG(0x0078)
+#define CR31 CTL_REG(0x007C) /* TX power control for RF in CCK mode */
+#define CR32 CTL_REG(0x0080)
+#define CR33 CTL_REG(0x0084)
+#define CR34 CTL_REG(0x0088)
+#define CR35 CTL_REG(0x008C)
+#define CR36 CTL_REG(0x0090)
+#define CR37 CTL_REG(0x0094)
+#define CR38 CTL_REG(0x0098)
+#define CR39 CTL_REG(0x009C)
+#define CR40 CTL_REG(0x00A0)
+#define CR41 CTL_REG(0x00A4)
+#define CR42 CTL_REG(0x00A8)
+#define CR43 CTL_REG(0x00AC)
+#define CR44 CTL_REG(0x00B0)
+#define CR45 CTL_REG(0x00B4)
+#define CR46 CTL_REG(0x00B8)
+#define CR47 CTL_REG(0x00BC) /* CCK baseband gain
+ * (patch value might be in EEPROM)
+ */
+#define CR48 CTL_REG(0x00C0)
+#define CR49 CTL_REG(0x00C4)
+#define CR50 CTL_REG(0x00C8)
+#define CR51 CTL_REG(0x00CC) /* TX power control for RF in 6-36M modes */
+#define CR52 CTL_REG(0x00D0) /* TX power control for RF in 48M mode */
+#define CR53 CTL_REG(0x00D4) /* TX power control for RF in 54M mode */
+#define CR54 CTL_REG(0x00D8)
+#define CR55 CTL_REG(0x00DC)
+#define CR56 CTL_REG(0x00E0)
+#define CR57 CTL_REG(0x00E4)
+#define CR58 CTL_REG(0x00E8)
+#define CR59 CTL_REG(0x00EC)
+#define CR60 CTL_REG(0x00F0)
+#define CR61 CTL_REG(0x00F4)
+#define CR62 CTL_REG(0x00F8)
+#define CR63 CTL_REG(0x00FC)
+#define CR64 CTL_REG(0x0100)
+#define CR65 CTL_REG(0x0104) /* OFDM 54M calibration */
+#define CR66 CTL_REG(0x0108) /* OFDM 48M calibration */
+#define CR67 CTL_REG(0x010C) /* OFDM 36M calibration */
+#define CR68 CTL_REG(0x0110) /* CCK calibration */
+#define CR69 CTL_REG(0x0114)
+#define CR70 CTL_REG(0x0118)
+#define CR71 CTL_REG(0x011C)
+#define CR72 CTL_REG(0x0120)
+#define CR73 CTL_REG(0x0124)
+#define CR74 CTL_REG(0x0128)
+#define CR75 CTL_REG(0x012C)
+#define CR76 CTL_REG(0x0130)
+#define CR77 CTL_REG(0x0134)
+#define CR78 CTL_REG(0x0138)
+#define CR79 CTL_REG(0x013C)
+#define CR80 CTL_REG(0x0140)
+#define CR81 CTL_REG(0x0144)
+#define CR82 CTL_REG(0x0148)
+#define CR83 CTL_REG(0x014C)
+#define CR84 CTL_REG(0x0150)
+#define CR85 CTL_REG(0x0154)
+#define CR86 CTL_REG(0x0158)
+#define CR87 CTL_REG(0x015C)
+#define CR88 CTL_REG(0x0160)
+#define CR89 CTL_REG(0x0164)
+#define CR90 CTL_REG(0x0168)
+#define CR91 CTL_REG(0x016C)
+#define CR92 CTL_REG(0x0170)
+#define CR93 CTL_REG(0x0174)
+#define CR94 CTL_REG(0x0178)
+#define CR95 CTL_REG(0x017C)
+#define CR96 CTL_REG(0x0180)
+#define CR97 CTL_REG(0x0184)
+#define CR98 CTL_REG(0x0188)
+#define CR99 CTL_REG(0x018C)
+#define CR100 CTL_REG(0x0190)
+#define CR101 CTL_REG(0x0194)
+#define CR102 CTL_REG(0x0198)
+#define CR103 CTL_REG(0x019C)
+#define CR104 CTL_REG(0x01A0)
+#define CR105 CTL_REG(0x01A4)
+#define CR106 CTL_REG(0x01A8)
+#define CR107 CTL_REG(0x01AC)
+#define CR108 CTL_REG(0x01B0)
+#define CR109 CTL_REG(0x01B4)
+#define CR110 CTL_REG(0x01B8)
+#define CR111 CTL_REG(0x01BC)
+#define CR112 CTL_REG(0x01C0)
+#define CR113 CTL_REG(0x01C4)
+#define CR114 CTL_REG(0x01C8)
+#define CR115 CTL_REG(0x01CC)
+#define CR116 CTL_REG(0x01D0)
+#define CR117 CTL_REG(0x01D4)
+#define CR118 CTL_REG(0x01D8)
+#define CR119 CTL_REG(0x01DC)
+#define CR120 CTL_REG(0x01E0)
+#define CR121 CTL_REG(0x01E4)
+#define CR122 CTL_REG(0x01E8)
+#define CR123 CTL_REG(0x01EC)
+#define CR124 CTL_REG(0x01F0)
+#define CR125 CTL_REG(0x01F4)
+#define CR126 CTL_REG(0x01F8)
+#define CR127 CTL_REG(0x01FC)
+#define CR128 CTL_REG(0x0200)
+#define CR129 CTL_REG(0x0204)
+#define CR130 CTL_REG(0x0208)
+#define CR131 CTL_REG(0x020C)
+#define CR132 CTL_REG(0x0210)
+#define CR133 CTL_REG(0x0214)
+#define CR134 CTL_REG(0x0218)
+#define CR135 CTL_REG(0x021C)
+#define CR136 CTL_REG(0x0220)
+#define CR137 CTL_REG(0x0224)
+#define CR138 CTL_REG(0x0228)
+#define CR139 CTL_REG(0x022C)
+#define CR140 CTL_REG(0x0230)
+#define CR141 CTL_REG(0x0234)
+#define CR142 CTL_REG(0x0238)
+#define CR143 CTL_REG(0x023C)
+#define CR144 CTL_REG(0x0240)
+#define CR145 CTL_REG(0x0244)
+#define CR146 CTL_REG(0x0248)
+#define CR147 CTL_REG(0x024C)
+#define CR148 CTL_REG(0x0250)
+#define CR149 CTL_REG(0x0254)
+#define CR150 CTL_REG(0x0258)
+#define CR151 CTL_REG(0x025C)
+#define CR152 CTL_REG(0x0260)
+#define CR153 CTL_REG(0x0264)
+#define CR154 CTL_REG(0x0268)
+#define CR155 CTL_REG(0x026C)
+#define CR156 CTL_REG(0x0270)
+#define CR157 CTL_REG(0x0274)
+#define CR158 CTL_REG(0x0278)
+#define CR159 CTL_REG(0x027C)
+#define CR160 CTL_REG(0x0280)
+#define CR161 CTL_REG(0x0284)
+#define CR162 CTL_REG(0x0288)
+#define CR163 CTL_REG(0x028C)
+#define CR164 CTL_REG(0x0290)
+#define CR165 CTL_REG(0x0294)
+#define CR166 CTL_REG(0x0298)
+#define CR167 CTL_REG(0x029C)
+#define CR168 CTL_REG(0x02A0)
+#define CR169 CTL_REG(0x02A4)
+#define CR170 CTL_REG(0x02A8)
+#define CR171 CTL_REG(0x02AC)
+#define CR172 CTL_REG(0x02B0)
+#define CR173 CTL_REG(0x02B4)
+#define CR174 CTL_REG(0x02B8)
+#define CR175 CTL_REG(0x02BC)
+#define CR176 CTL_REG(0x02C0)
+#define CR177 CTL_REG(0x02C4)
+#define CR178 CTL_REG(0x02C8)
+#define CR179 CTL_REG(0x02CC)
+#define CR180 CTL_REG(0x02D0)
+#define CR181 CTL_REG(0x02D4)
+#define CR182 CTL_REG(0x02D8)
+#define CR183 CTL_REG(0x02DC)
+#define CR184 CTL_REG(0x02E0)
+#define CR185 CTL_REG(0x02E4)
+#define CR186 CTL_REG(0x02E8)
+#define CR187 CTL_REG(0x02EC)
+#define CR188 CTL_REG(0x02F0)
+#define CR189 CTL_REG(0x02F4)
+#define CR190 CTL_REG(0x02F8)
+#define CR191 CTL_REG(0x02FC)
+#define CR192 CTL_REG(0x0300)
+#define CR193 CTL_REG(0x0304)
+#define CR194 CTL_REG(0x0308)
+#define CR195 CTL_REG(0x030C)
+#define CR196 CTL_REG(0x0310)
+#define CR197 CTL_REG(0x0314)
+#define CR198 CTL_REG(0x0318)
+#define CR199 CTL_REG(0x031C)
+#define CR200 CTL_REG(0x0320)
+#define CR201 CTL_REG(0x0324)
+#define CR202 CTL_REG(0x0328)
+#define CR203 CTL_REG(0x032C) /* I2C bus template value & flash control */
+#define CR204 CTL_REG(0x0330)
+#define CR205 CTL_REG(0x0334)
+#define CR206 CTL_REG(0x0338)
+#define CR207 CTL_REG(0x033C)
+#define CR208 CTL_REG(0x0340)
+#define CR209 CTL_REG(0x0344)
+#define CR210 CTL_REG(0x0348)
+#define CR211 CTL_REG(0x034C)
+#define CR212 CTL_REG(0x0350)
+#define CR213 CTL_REG(0x0354)
+#define CR214 CTL_REG(0x0358)
+#define CR215 CTL_REG(0x035C)
+#define CR216 CTL_REG(0x0360)
+#define CR217 CTL_REG(0x0364)
+#define CR218 CTL_REG(0x0368)
+#define CR219 CTL_REG(0x036C)
+#define CR220 CTL_REG(0x0370)
+#define CR221 CTL_REG(0x0374)
+#define CR222 CTL_REG(0x0378)
+#define CR223 CTL_REG(0x037C)
+#define CR224 CTL_REG(0x0380)
+#define CR225 CTL_REG(0x0384)
+#define CR226 CTL_REG(0x0388)
+#define CR227 CTL_REG(0x038C)
+#define CR228 CTL_REG(0x0390)
+#define CR229 CTL_REG(0x0394)
+#define CR230 CTL_REG(0x0398)
+#define CR231 CTL_REG(0x039C)
+#define CR232 CTL_REG(0x03A0)
+#define CR233 CTL_REG(0x03A4)
+#define CR234 CTL_REG(0x03A8)
+#define CR235 CTL_REG(0x03AC)
+#define CR236 CTL_REG(0x03B0)
+
+#define CR240 CTL_REG(0x03C0)
+/* bit 7: host-controlled RF register writes
+ * CR241-CR245: for hardware controlled writing of RF bits, not needed for
+ * USB
+ */
+#define CR241 CTL_REG(0x03C4)
+#define CR242 CTL_REG(0x03C8)
+#define CR243 CTL_REG(0x03CC)
+#define CR244 CTL_REG(0x03D0)
+#define CR245 CTL_REG(0x03D4)
+
+#define CR251 CTL_REG(0x03EC) /* only used for activation and deactivation of
+ * Airoha RFs AL2230 and AL7230B
+ */
+#define CR252 CTL_REG(0x03F0)
+#define CR253 CTL_REG(0x03F4)
+#define CR254 CTL_REG(0x03F8)
+#define CR255 CTL_REG(0x03FC)
+
+#define CR_MAX_PHY_REG 255
+
+/* Taken from the ZyDAS vendor driver; not all of these registers are
+ * relevant for the ZD1211 driver.
+ */
+
+#define CR_RF_IF_CLK CTL_REG(0x0400)
+#define CR_RF_IF_DATA CTL_REG(0x0404)
+#define CR_PE1_PE2 CTL_REG(0x0408)
+#define CR_PE2_DLY CTL_REG(0x040C)
+#define CR_LE1 CTL_REG(0x0410)
+#define CR_LE2 CTL_REG(0x0414)
+/* Seems to enable/disable GPI (General Purpose IO?) */
+#define CR_GPI_EN CTL_REG(0x0418)
+#define CR_RADIO_PD CTL_REG(0x042C)
+#define CR_RF2948_PD CTL_REG(0x042C)
+#define CR_ENABLE_PS_MANUAL_AGC CTL_REG(0x043C)
+#define CR_CONFIG_PHILIPS CTL_REG(0x0440)
+#define CR_SA2400_SER_AP CTL_REG(0x0444)
+#define CR_I2C_WRITE CTL_REG(0x0444)
+#define CR_SA2400_SER_RP CTL_REG(0x0448)
+#define CR_RADIO_PE CTL_REG(0x0458)
+#define CR_RST_BUS_MASTER CTL_REG(0x045C)
+#define CR_RFCFG CTL_REG(0x0464)
+#define CR_HSTSCHG CTL_REG(0x046C)
+#define CR_PHY_ON CTL_REG(0x0474)
+#define CR_RX_DELAY CTL_REG(0x0478)
+#define CR_RX_PE_DELAY CTL_REG(0x047C)
+#define CR_GPIO_1 CTL_REG(0x0490)
+#define CR_GPIO_2 CTL_REG(0x0494)
+#define CR_EncryBufMux CTL_REG(0x04A8)
+#define CR_PS_CTRL CTL_REG(0x0500)
+#define CR_ADDA_PWR_DWN CTL_REG(0x0504)
+#define CR_ADDA_MBIAS_WARMTIME CTL_REG(0x0508)
+#define CR_MAC_PS_STATE CTL_REG(0x050C)
+
+#define CR_INTERRUPT CTL_REG(0x0510)
+#define INT_TX_COMPLETE 0x00000001
+#define INT_RX_COMPLETE 0x00000002
+#define INT_RETRY_FAIL 0x00000004
+#define INT_WAKEUP 0x00000008
+#define INT_DTIM_NOTIFY 0x00000020
+#define INT_CFG_NEXT_BCN 0x00000040
+#define INT_BUS_ABORT 0x00000080
+#define INT_TX_FIFO_READY 0x00000100
+#define INT_UART 0x00000200
+#define INT_TX_COMPLETE_EN 0x00010000
+#define INT_RX_COMPLETE_EN 0x00020000
+#define INT_RETRY_FAIL_EN 0x00040000
+#define INT_WAKEUP_EN 0x00080000
+#define INT_DTIM_NOTIFY_EN 0x00200000
+#define INT_CFG_NEXT_BCN_EN 0x00400000
+#define INT_BUS_ABORT_EN 0x00800000
+#define INT_TX_FIFO_READY_EN 0x01000000
+#define INT_UART_EN 0x02000000
+
+#define CR_TSF_LOW_PART CTL_REG(0x0514)
+#define CR_TSF_HIGH_PART CTL_REG(0x0518)
+
+/* The following three values are in time units (TU, 1024 us).
+ * The following condition must be met:
+ * atim < tbtt < bcn
+ */
+#define CR_ATIM_WND_PERIOD CTL_REG(0x051C)
+#define CR_BCN_INTERVAL CTL_REG(0x0520)
+#define CR_PRE_TBTT CTL_REG(0x0524)
+/* in units of TU(1024us) */
+
+/* for UART support */
+#define CR_UART_RBR_THR_DLL CTL_REG(0x0540)
+#define CR_UART_DLM_IER CTL_REG(0x0544)
+#define CR_UART_IIR_FCR CTL_REG(0x0548)
+#define CR_UART_LCR CTL_REG(0x054c)
+#define CR_UART_MCR CTL_REG(0x0550)
+#define CR_UART_LSR CTL_REG(0x0554)
+#define CR_UART_MSR CTL_REG(0x0558)
+#define CR_UART_ECR CTL_REG(0x055c)
+#define CR_UART_STATUS CTL_REG(0x0560)
+
+#define CR_PCI_TX_ADDR_P1 CTL_REG(0x0600)
+#define CR_PCI_TX_AddR_P2 CTL_REG(0x0604)
+#define CR_PCI_RX_AddR_P1 CTL_REG(0x0608)
+#define CR_PCI_RX_AddR_P2 CTL_REG(0x060C)
+
+/* must be overwritten if custom MAC address will be used */
+#define CR_MAC_ADDR_P1 CTL_REG(0x0610)
+#define CR_MAC_ADDR_P2 CTL_REG(0x0614)
+#define CR_BSSID_P1 CTL_REG(0x0618)
+#define CR_BSSID_P2 CTL_REG(0x061C)
+#define CR_BCN_PLCP_CFG CTL_REG(0x0620)
+#define CR_GROUP_HASH_P1 CTL_REG(0x0624)
+#define CR_GROUP_HASH_P2 CTL_REG(0x0628)
+#define CR_RX_TIMEOUT CTL_REG(0x062C)
+
+/* Basic rates supported by the BSS. When producing ACK or CTS messages, the
+ * device will use a rate in this table that is less than or equal to the rate
+ * of the incoming frame which prompted the response */
+#define CR_BASIC_RATE_TBL CTL_REG(0x0630)
+#define CR_RATE_1M 0x0001 /* 802.11b */
+#define CR_RATE_2M 0x0002 /* 802.11b */
+#define CR_RATE_5_5M 0x0004 /* 802.11b */
+#define CR_RATE_11M 0x0008 /* 802.11b */
+#define CR_RATE_6M 0x0100 /* 802.11g */
+#define CR_RATE_9M 0x0200 /* 802.11g */
+#define CR_RATE_12M 0x0400 /* 802.11g */
+#define CR_RATE_18M 0x0800 /* 802.11g */
+#define CR_RATE_24M 0x1000 /* 802.11g */
+#define CR_RATE_36M 0x2000 /* 802.11g */
+#define CR_RATE_48M 0x4000 /* 802.11g */
+#define CR_RATE_54M 0x8000 /* 802.11g */
+#define CR_RATES_80211G 0xff00
+#define CR_RATES_80211B 0x000f
+
+/* Mandatory rates required in the BSS. When producing ACK or CTS messages, if
+ * the device could not find an appropriate rate in CR_BASIC_RATE_TBL, it will
+ * look for a rate in this table that is less than or equal to the rate of
+ * the incoming frame. */
+#define CR_MANDATORY_RATE_TBL CTL_REG(0x0634)
+#define CR_RTS_CTS_RATE CTL_REG(0x0638)
+
+#define CR_WEP_PROTECT CTL_REG(0x063C)
+#define CR_RX_THRESHOLD CTL_REG(0x0640)
+
+/* register for controlling the LEDS */
+#define CR_LED CTL_REG(0x0644)
+/* masks for controlling LEDs */
+#define LED1 0x0100
+#define LED2 0x0200
+
+/* Seems to indicate that the configuration is complete.
+ */
+#define CR_AFTER_PNP CTL_REG(0x0648)
+#define CR_ACK_TIME_80211 CTL_REG(0x0658)
+
+#define CR_RX_OFFSET CTL_REG(0x065c)
+
+#define CR_PHY_DELAY CTL_REG(0x066C)
+#define CR_BCN_FIFO CTL_REG(0x0670)
+#define CR_SNIFFER_ON CTL_REG(0x0674)
+
+#define CR_ENCRYPTION_TYPE CTL_REG(0x0678)
+#define NO_WEP 0
+#define WEP64 1
+#define WEP128 5
+#define WEP256 6
+#define ENC_SNIFFER 8
+
+#define CR_ZD1211_RETRY_MAX CTL_REG(0x067C)
+
+#define CR_REG1 CTL_REG(0x0680)
+/* Setting the UNLOCK_PHY_REGS bit disallows write access to the PHY
+ * registers, so one could argue it is really a LOCK bit. But calling it
+ * LOCK_PHY_REGS would be just as confusing.
+ */
+#define UNLOCK_PHY_REGS 0x0080
+
+#define CR_DEVICE_STATE CTL_REG(0x0684)
+#define CR_UNDERRUN_CNT CTL_REG(0x0688)
+
+#define CR_RX_FILTER CTL_REG(0x068c)
+#define RX_FILTER_ASSOC_RESPONSE 0x0002
+#define RX_FILTER_REASSOC_RESPONSE 0x0008
+#define RX_FILTER_PROBE_RESPONSE 0x0020
+#define RX_FILTER_BEACON 0x0100
+#define RX_FILTER_DISASSOC 0x0400
+#define RX_FILTER_AUTH 0x0800
+#define AP_RX_FILTER 0x0400feff
+#define STA_RX_FILTER 0x0000ffff
+
+/* Monitor mode sets the filter to 0xffffffff. */
+
+#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690)
+#define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694)
+#define CR_IFS_VALUE CTL_REG(0x0698)
+#define CR_RX_TIME_OUT CTL_REG(0x069C)
+#define CR_TOTAL_RX_FRM CTL_REG(0x06A0)
+#define CR_CRC32_CNT CTL_REG(0x06A4)
+#define CR_CRC16_CNT CTL_REG(0x06A8)
+#define CR_DECRYPTION_ERR_UNI CTL_REG(0x06AC)
+#define CR_RX_FIFO_OVERRUN CTL_REG(0x06B0)
+
+#define CR_DECRYPTION_ERR_MUL CTL_REG(0x06BC)
+
+#define CR_NAV_CNT CTL_REG(0x06C4)
+#define CR_NAV_CCA CTL_REG(0x06C8)
+#define CR_RETRY_CNT CTL_REG(0x06CC)
+
+#define CR_READ_TCB_ADDR CTL_REG(0x06E8)
+#define CR_READ_RFD_ADDR CTL_REG(0x06EC)
+#define CR_CWMIN_CWMAX CTL_REG(0x06F0)
+#define CR_TOTAL_TX_FRM CTL_REG(0x06F4)
+
+/* CAM: Continuous Access Mode (power management) */
+#define CR_CAM_MODE CTL_REG(0x0700)
+#define CR_CAM_ROLL_TB_LOW CTL_REG(0x0704)
+#define CR_CAM_ROLL_TB_HIGH CTL_REG(0x0708)
+#define CR_CAM_ADDRESS CTL_REG(0x070C)
+#define CR_CAM_DATA CTL_REG(0x0710)
+
+#define CR_ROMDIR CTL_REG(0x0714)
+
+#define CR_DECRY_ERR_FLG_LOW CTL_REG(0x0714)
+#define CR_DECRY_ERR_FLG_HIGH CTL_REG(0x0718)
+
+#define CR_WEPKEY0 CTL_REG(0x0720)
+#define CR_WEPKEY1 CTL_REG(0x0724)
+#define CR_WEPKEY2 CTL_REG(0x0728)
+#define CR_WEPKEY3 CTL_REG(0x072C)
+#define CR_WEPKEY4 CTL_REG(0x0730)
+#define CR_WEPKEY5 CTL_REG(0x0734)
+#define CR_WEPKEY6 CTL_REG(0x0738)
+#define CR_WEPKEY7 CTL_REG(0x073C)
+#define CR_WEPKEY8 CTL_REG(0x0740)
+#define CR_WEPKEY9 CTL_REG(0x0744)
+#define CR_WEPKEY10 CTL_REG(0x0748)
+#define CR_WEPKEY11 CTL_REG(0x074C)
+#define CR_WEPKEY12 CTL_REG(0x0750)
+#define CR_WEPKEY13 CTL_REG(0x0754)
+#define CR_WEPKEY14 CTL_REG(0x0758)
+#define CR_WEPKEY15 CTL_REG(0x075c)
+#define CR_TKIP_MODE CTL_REG(0x0760)
+
+#define CR_EEPROM_PROTECT0 CTL_REG(0x0758)
+#define CR_EEPROM_PROTECT1 CTL_REG(0x075C)
+
+#define CR_DBG_FIFO_RD CTL_REG(0x0800)
+#define CR_DBG_SELECT CTL_REG(0x0804)
+#define CR_FIFO_Length CTL_REG(0x0808)
+
+
+#define CR_RSSI_MGC CTL_REG(0x0810)
+
+#define CR_PON CTL_REG(0x0818)
+#define CR_RX_ON CTL_REG(0x081C)
+#define CR_TX_ON CTL_REG(0x0820)
+#define CR_CHIP_EN CTL_REG(0x0824)
+#define CR_LO_SW CTL_REG(0x0828)
+#define CR_TXRX_SW CTL_REG(0x082C)
+#define CR_S_MD CTL_REG(0x0830)
+
+#define CR_USB_DEBUG_PORT CTL_REG(0x0888)
+
+#define CR_ZD1211B_TX_PWR_CTL1 CTL_REG(0x0b00)
+#define CR_ZD1211B_TX_PWR_CTL2 CTL_REG(0x0b04)
+#define CR_ZD1211B_TX_PWR_CTL3 CTL_REG(0x0b08)
+#define CR_ZD1211B_TX_PWR_CTL4 CTL_REG(0x0b0c)
+#define CR_ZD1211B_AIFS_CTL1 CTL_REG(0x0b10)
+#define CR_ZD1211B_AIFS_CTL2 CTL_REG(0x0b14)
+#define CR_ZD1211B_TXOP CTL_REG(0x0b20)
+#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28)
+
+#define CWIN_SIZE 0x007f043f
+
+
+#define HWINT_ENABLED 0x004f0000
+#define HWINT_DISABLED 0
+
+#define E2P_PWR_INT_GUARD 8
+#define E2P_CHANNEL_COUNT 14
+
+/* If you compare these addresses with the original ZyDAS driver, please note
+ * that we use word mapping for the EEPROM.
+ */
+
+/*
+ * The upper 16 bits contain the regulatory domain.
+ */
+#define E2P_SUBID E2P_REG(0x00)
+#define E2P_POD E2P_REG(0x02)
+#define E2P_MAC_ADDR_P1 E2P_REG(0x04)
+#define E2P_MAC_ADDR_P2 E2P_REG(0x06)
+#define E2P_PWR_CAL_VALUE1 E2P_REG(0x08)
+#define E2P_PWR_CAL_VALUE2 E2P_REG(0x0a)
+#define E2P_PWR_CAL_VALUE3 E2P_REG(0x0c)
+#define E2P_PWR_CAL_VALUE4 E2P_REG(0x0e)
+#define E2P_PWR_INT_VALUE1 E2P_REG(0x10)
+#define E2P_PWR_INT_VALUE2 E2P_REG(0x12)
+#define E2P_PWR_INT_VALUE3 E2P_REG(0x14)
+#define E2P_PWR_INT_VALUE4 E2P_REG(0x16)
+
+/* Contains a bit for each allowed channel. Note that even for Europe
+ * (ETSI, 0x30) only 11 channels are allowed. */
+#define E2P_ALLOWED_CHANNEL E2P_REG(0x18)
+
+#define E2P_PHY_REG E2P_REG(0x1a)
+#define E2P_DEVICE_VER E2P_REG(0x20)
+#define E2P_36M_CAL_VALUE1 E2P_REG(0x28)
+#define E2P_36M_CAL_VALUE2 E2P_REG(0x2a)
+#define E2P_36M_CAL_VALUE3 E2P_REG(0x2c)
+#define E2P_36M_CAL_VALUE4 E2P_REG(0x2e)
+#define E2P_11A_INT_VALUE1 E2P_REG(0x30)
+#define E2P_11A_INT_VALUE2 E2P_REG(0x32)
+#define E2P_11A_INT_VALUE3 E2P_REG(0x34)
+#define E2P_11A_INT_VALUE4 E2P_REG(0x36)
+#define E2P_48M_CAL_VALUE1 E2P_REG(0x38)
+#define E2P_48M_CAL_VALUE2 E2P_REG(0x3a)
+#define E2P_48M_CAL_VALUE3 E2P_REG(0x3c)
+#define E2P_48M_CAL_VALUE4 E2P_REG(0x3e)
+#define E2P_48M_INT_VALUE1 E2P_REG(0x40)
+#define E2P_48M_INT_VALUE2 E2P_REG(0x42)
+#define E2P_48M_INT_VALUE3 E2P_REG(0x44)
+#define E2P_48M_INT_VALUE4 E2P_REG(0x46)
+#define E2P_54M_CAL_VALUE1 E2P_REG(0x48) /* ??? */
+#define E2P_54M_CAL_VALUE2 E2P_REG(0x4a)
+#define E2P_54M_CAL_VALUE3 E2P_REG(0x4c)
+#define E2P_54M_CAL_VALUE4 E2P_REG(0x4e)
+#define E2P_54M_INT_VALUE1 E2P_REG(0x50)
+#define E2P_54M_INT_VALUE2 E2P_REG(0x52)
+#define E2P_54M_INT_VALUE3 E2P_REG(0x54)
+#define E2P_54M_INT_VALUE4 E2P_REG(0x56)
+
+/* All 16 bit values */
+#define FW_FIRMWARE_VER FW_REG(0)
+/* non-zero if USB high speed connection */
+#define FW_USB_SPEED FW_REG(1)
+#define FW_FIX_TX_RATE FW_REG(2)
+/* Seems to allow controlling the LEDs via the firmware */
+#define FW_LINK_STATUS FW_REG(3)
+#define FW_SOFT_RESET FW_REG(4)
+#define FW_FLASH_CHK FW_REG(5)
+
+enum {
+ CR_BASE_OFFSET = 0x9000,
+ FW_START_OFFSET = 0xee00,
+ FW_BASE_ADDR_OFFSET = FW_START_OFFSET + 0x1d,
+ EEPROM_START_OFFSET = 0xf800,
+ EEPROM_SIZE = 0x800, /* words */
+ LOAD_CODE_SIZE = 0xe, /* words */
+ LOAD_VECT_SIZE = 0x10000 - 0xfff7, /* words */
+ EEPROM_REGS_OFFSET = LOAD_CODE_SIZE + LOAD_VECT_SIZE,
+ E2P_BASE_OFFSET = EEPROM_START_OFFSET +
+ EEPROM_REGS_OFFSET,
+};
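+/* With the values above, LOAD_VECT_SIZE evaluates to 9 words,
+ * EEPROM_REGS_OFFSET to 0x17 words, and E2P_BASE_OFFSET to 0xf817.
+ */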
+
+#define FW_REG_TABLE_ADDR USB_ADDR(FW_START_OFFSET + 0x1d)
+
+enum {
+ /* indices for ofdm_cal_values */
+ OFDM_36M_INDEX = 0,
+ OFDM_48M_INDEX = 1,
+ OFDM_54M_INDEX = 2,
+};
+
+struct zd_chip {
+ struct zd_usb usb;
+ struct zd_rf rf;
+ struct mutex mutex;
+ u8 e2p_mac[ETH_ALEN];
+ /* EepSetPoint in the vendor driver */
+ u8 pwr_cal_values[E2P_CHANNEL_COUNT];
+ /* integration values in the vendor driver */
+ u8 pwr_int_values[E2P_CHANNEL_COUNT];
+ /* SetPointOFDM in the vendor driver */
+ u8 ofdm_cal_values[3][E2P_CHANNEL_COUNT];
+ u8 pa_type:4, patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1,
+ is_zd1211b:1;
+};
+
+static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb)
+{
+ return container_of(usb, struct zd_chip, usb);
+}
+
+static inline struct zd_chip *zd_rf_to_chip(struct zd_rf *rf)
+{
+ return container_of(rf, struct zd_chip, rf);
+}
+
+#define zd_chip_dev(chip) (&(chip)->usb.intf->dev)
+
+void zd_chip_init(struct zd_chip *chip,
+ struct net_device *netdev,
+ struct usb_interface *intf);
+void zd_chip_clear(struct zd_chip *chip);
+int zd_chip_init_hw(struct zd_chip *chip, u8 device_type);
+int zd_chip_reset(struct zd_chip *chip);
+
+static inline int zd_ioread16v_locked(struct zd_chip *chip, u16 *values,
+ const zd_addr_t *addresses,
+ unsigned int count)
+{
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ return zd_usb_ioread16v(&chip->usb, values, addresses, count);
+}
+
+static inline int zd_ioread16_locked(struct zd_chip *chip, u16 *value,
+ const zd_addr_t addr)
+{
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ return zd_usb_ioread16(&chip->usb, value, addr);
+}
+
+int zd_ioread32v_locked(struct zd_chip *chip, u32 *values,
+ const zd_addr_t *addresses, unsigned int count);
+
+static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value,
+ const zd_addr_t addr)
+{
+ return zd_ioread32v_locked(chip, value, (const zd_addr_t *)&addr, 1);
+}
+
+static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value,
+ zd_addr_t addr)
+{
+ struct zd_ioreq16 ioreq;
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ ioreq.addr = addr;
+ ioreq.value = value;
+
+ return zd_usb_iowrite16v(&chip->usb, &ioreq, 1);
+}
+
+int zd_iowrite16a_locked(struct zd_chip *chip,
+ const struct zd_ioreq16 *ioreqs, unsigned int count);
+
+int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
+ unsigned int count);
+
+static inline int zd_iowrite32_locked(struct zd_chip *chip, u32 value,
+ zd_addr_t addr)
+{
+ struct zd_ioreq32 ioreq;
+
+ ioreq.addr = addr;
+ ioreq.value = value;
+
+ return _zd_iowrite32v_locked(chip, &ioreq, 1);
+}
+
+int zd_iowrite32a_locked(struct zd_chip *chip,
+ const struct zd_ioreq32 *ioreqs, unsigned int count);
+
+static inline int zd_rfwrite_locked(struct zd_chip *chip, u32 value, u8 bits)
+{
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ return zd_usb_rfwrite(&chip->usb, value, bits);
+}
+
+int zd_rfwritev_locked(struct zd_chip *chip,
+ const u32* values, unsigned int count, u8 bits);
+
+/* Locking wrappers for reading and writing registers. The different
+ * parameter order compared to the *_locked variants above is intentional.
+ */
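+/* Usage sketch: callers of the *_locked variants above must hold chip->mutex
+ * themselves (the variants assert this), e.g.
+ *
+ *	mutex_lock(&chip->mutex);
+ *	r = zd_ioread32v_locked(chip, values, addresses, count);
+ *	mutex_unlock(&chip->mutex);
+ *
+ * The wrappers below presumably take the mutex internally.
+ */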
+int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value);
+int zd_iowrite16(struct zd_chip *chip, zd_addr_t addr, u16 value);
+int zd_ioread32(struct zd_chip *chip, zd_addr_t addr, u32 *value);
+int zd_iowrite32(struct zd_chip *chip, zd_addr_t addr, u32 value);
+int zd_ioread32v(struct zd_chip *chip, const zd_addr_t *addresses,
+ u32 *values, unsigned int count);
+int zd_iowrite32a(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
+ unsigned int count);
+
+int zd_chip_set_channel(struct zd_chip *chip, u8 channel);
+static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
+{
+ return chip->rf.channel;
+}
+u8 zd_chip_get_channel(struct zd_chip *chip);
+int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
+void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr);
+int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr);
+int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
+int zd_chip_switch_radio_on(struct zd_chip *chip);
+int zd_chip_switch_radio_off(struct zd_chip *chip);
+int zd_chip_enable_int(struct zd_chip *chip);
+void zd_chip_disable_int(struct zd_chip *chip);
+int zd_chip_enable_rx(struct zd_chip *chip);
+void zd_chip_disable_rx(struct zd_chip *chip);
+int zd_chip_enable_hwint(struct zd_chip *chip);
+int zd_chip_disable_hwint(struct zd_chip *chip);
+
+static inline int zd_get_encryption_type(struct zd_chip *chip, u32 *type)
+{
+ return zd_ioread32(chip, CR_ENCRYPTION_TYPE, type);
+}
+
+static inline int zd_set_encryption_type(struct zd_chip *chip, u32 type)
+{
+ return zd_iowrite32(chip, CR_ENCRYPTION_TYPE, type);
+}
+
+static inline int zd_chip_get_basic_rates(struct zd_chip *chip, u16 *cr_rates)
+{
+ return zd_ioread16(chip, CR_BASIC_RATE_TBL, cr_rates);
+}
+
+int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates);
+
+static inline int zd_chip_set_rx_filter(struct zd_chip *chip, u32 filter)
+{
+ return zd_iowrite32(chip, CR_RX_FILTER, filter);
+}
+
+int zd_chip_lock_phy_regs(struct zd_chip *chip);
+int zd_chip_unlock_phy_regs(struct zd_chip *chip);
+
+enum led_status {
+ LED_OFF = 0,
+ LED_ON = 1,
+ LED_FLIP = 2,
+ LED_STATUS = 3,
+};
+
+int zd_chip_led_status(struct zd_chip *chip, int led, enum led_status status);
+int zd_chip_led_flip(struct zd_chip *chip, int led,
+ const unsigned int *phases_msecs, unsigned int count);
+
+int zd_set_beacon_interval(struct zd_chip *chip, u32 interval);
+
+static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
+{
+ return zd_ioread32(chip, CR_BCN_INTERVAL, interval);
+}
+
+struct rx_status;
+
+u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
+ const struct rx_status *status);
+u8 zd_rx_strength_percent(u8 rssi);
+
+u16 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
+
+#endif /* _ZD_CHIP_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
new file mode 100644
index 000000000000..465906812fc4
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -0,0 +1,48 @@
+/* zd_def.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ZD_DEF_H
+#define _ZD_DEF_H
+
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+#include <linux/device.h>
+
+#define dev_printk_f(level, dev, fmt, args...) \
+ dev_printk(level, dev, "%s() " fmt, __func__, ##args)
+
+#ifdef DEBUG
+# define dev_dbg_f(dev, fmt, args...) \
+ dev_printk_f(KERN_DEBUG, dev, fmt, ## args)
+#else
+# define dev_dbg_f(dev, fmt, args...) do { (void)(dev); } while (0)
+#endif /* DEBUG */
+
+#ifdef DEBUG
+# define ZD_ASSERT(x) \
+do { \
+ if (!(x)) { \
+ pr_debug("%s:%d ASSERT %s VIOLATED!\n", \
+ __FILE__, __LINE__, __stringify(x)); \
+ } \
+} while (0)
+#else
+# define ZD_ASSERT(x) do { } while (0)
+#endif
+
+#endif /* _ZD_DEF_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.c b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
new file mode 100644
index 000000000000..66905f7b61ff
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
@@ -0,0 +1,191 @@
+/* zd_ieee80211.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * A lot of this code is generic and should be moved into the upper layers
+ * at some point.
+ */
+
+#include <linux/errno.h>
+#include <linux/wireless.h>
+#include <linux/kernel.h>
+#include <net/ieee80211.h>
+
+#include "zd_def.h"
+#include "zd_ieee80211.h"
+#include "zd_mac.h"
+
+static const struct channel_range channel_ranges[] = {
+ [0] = { 0, 0},
+ [ZD_REGDOMAIN_FCC] = { 1, 12},
+ [ZD_REGDOMAIN_IC] = { 1, 12},
+ [ZD_REGDOMAIN_ETSI] = { 1, 14},
+ [ZD_REGDOMAIN_JAPAN] = { 1, 14},
+ [ZD_REGDOMAIN_SPAIN] = { 1, 14},
+ [ZD_REGDOMAIN_FRANCE] = { 1, 14},
+ [ZD_REGDOMAIN_JAPAN_ADD] = {14, 15},
+};
+
+const struct channel_range *zd_channel_range(u8 regdomain)
+{
+ if (regdomain >= ARRAY_SIZE(channel_ranges))
+ regdomain = 0;
+ return &channel_ranges[regdomain];
+}
+
+int zd_regdomain_supports_channel(u8 regdomain, u8 channel)
+{
+ const struct channel_range *range = zd_channel_range(regdomain);
+ return range->start <= channel && channel < range->end;
+}
+
+int zd_regdomain_supported(u8 regdomain)
+{
+ const struct channel_range *range = zd_channel_range(regdomain);
+ return range->start != 0;
+}
+
+/* Stores channel frequencies in MHz. */
+static const u16 channel_frequencies[] = {
+ 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447,
+ 2452, 2457, 2462, 2467, 2472, 2484,
+};
+
+#define NUM_CHANNELS ARRAY_SIZE(channel_frequencies)
+
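+/* Encodes a frequency given as MHz plus a sub-MHz remainder in Hz into the
+ * mantissa/exponent representation of struct iw_freq. For example,
+ * compute_freq(freq, 2412, 0) yields m = 241200000 and e = 1, i.e. 2.412 GHz.
+ */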
+static int compute_freq(struct iw_freq *freq, u32 mhz, u32 hz)
+{
+ u32 factor;
+
+ freq->e = 0;
+ if (mhz >= 1000000000U) {
+		pr_debug("zd1211 mhz %u too large\n", mhz);
+ freq->m = 0;
+ return -EINVAL;
+ }
+
+ factor = 1000;
+	while (mhz >= factor) {
+		freq->e += 1;
+		factor *= 10;
+	}
+
+ factor /= 1000U;
+ freq->m = mhz * (1000000U/factor) + hz/factor;
+
+ return 0;
+}
+
+int zd_channel_to_freq(struct iw_freq *freq, u8 channel)
+{
+ if (channel > NUM_CHANNELS) {
+ freq->m = 0;
+ freq->e = 0;
+ return -EINVAL;
+ }
+ if (!channel) {
+ freq->m = 0;
+ freq->e = 0;
+ return -EINVAL;
+ }
+ return compute_freq(freq, channel_frequencies[channel-1], 0);
+}
+
+static int freq_to_mhz(const struct iw_freq *freq)
+{
+ u32 factor;
+ int e;
+
+ /* Such high frequencies are not supported. */
+ if (freq->e > 6)
+ return -EINVAL;
+
+ factor = 1;
+ for (e = freq->e; e > 0; --e) {
+ factor *= 10;
+ }
+ factor = 1000000U / factor;
+
+ if (freq->m % factor) {
+ return -EINVAL;
+ }
+
+ return freq->m / factor;
+}
+
+int zd_find_channel(u8 *channel, const struct iw_freq *freq)
+{
+ int i, r;
+ u32 mhz;
+
+ if (!(freq->flags & IW_FREQ_FIXED))
+ return 0;
+
+ if (freq->m < 1000) {
+ if (freq->m > NUM_CHANNELS || freq->m == 0)
+ return -EINVAL;
+ *channel = freq->m;
+ return 1;
+ }
+
+ r = freq_to_mhz(freq);
+ if (r < 0)
+ return r;
+ mhz = r;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (mhz == channel_frequencies[i]) {
+ *channel = i+1;
+ return 1;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int zd_geo_init(struct ieee80211_device *ieee, u8 regdomain)
+{
+ struct ieee80211_geo geo;
+ const struct channel_range *range;
+ int i;
+ u8 channel;
+
+ dev_dbg(zd_mac_dev(zd_netdev_mac(ieee->dev)),
+ "regdomain %#04x\n", regdomain);
+
+ range = zd_channel_range(regdomain);
+ if (range->start == 0) {
+ dev_err(zd_mac_dev(zd_netdev_mac(ieee->dev)),
+ "zd1211 regdomain %#04x not supported\n",
+ regdomain);
+ return -EINVAL;
+ }
+
+ memset(&geo, 0, sizeof(geo));
+
+ for (i = 0, channel = range->start; channel < range->end; channel++) {
+ struct ieee80211_channel *chan = &geo.bg[i++];
+ chan->freq = channel_frequencies[channel - 1];
+ chan->channel = channel;
+ }
+
+ geo.bg_channels = i;
+ memcpy(geo.name, "XX ", 4);
+ ieee80211_set_geo(ieee, &geo);
+ return 0;
+}
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
new file mode 100644
index 000000000000..36329890dfec
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
@@ -0,0 +1,85 @@
+#ifndef _ZD_IEEE80211_H
+#define _ZD_IEEE80211_H
+
+#include <net/ieee80211.h>
+#include "zd_types.h"
+
+/* Additional definitions from the standards.
+ */
+
+#define ZD_REGDOMAIN_FCC 0x10
+#define ZD_REGDOMAIN_IC 0x20
+#define ZD_REGDOMAIN_ETSI 0x30
+#define ZD_REGDOMAIN_SPAIN 0x31
+#define ZD_REGDOMAIN_FRANCE 0x32
+#define ZD_REGDOMAIN_JAPAN_ADD 0x40
+#define ZD_REGDOMAIN_JAPAN 0x41
+
+enum {
+ MIN_CHANNEL24 = 1,
+ MAX_CHANNEL24 = 14,
+};
+
+struct channel_range {
+ u8 start;
+ u8 end; /* exclusive (channel must be less than end) */
+};
+
+struct iw_freq;
+
+int zd_geo_init(struct ieee80211_device *ieee, u8 regdomain);
+
+const struct channel_range *zd_channel_range(u8 regdomain);
+int zd_regdomain_supports_channel(u8 regdomain, u8 channel);
+int zd_regdomain_supported(u8 regdomain);
+
+/* for 2.4 GHz band */
+int zd_channel_to_freq(struct iw_freq *freq, u8 channel);
+int zd_find_channel(u8 *channel, const struct iw_freq *freq);
+
+#define ZD_PLCP_SERVICE_LENGTH_EXTENSION 0x80
+
+struct ofdm_plcp_header {
+ u8 prefix[3];
+ __le16 service;
+} __attribute__((packed));
+
+static inline u8 zd_ofdm_plcp_header_rate(
+ const struct ofdm_plcp_header *header)
+{
+ return header->prefix[0] & 0xf;
+}
+
+#define ZD_OFDM_RATE_6M 0xb
+#define ZD_OFDM_RATE_9M 0xf
+#define ZD_OFDM_RATE_12M 0xa
+#define ZD_OFDM_RATE_18M 0xe
+#define ZD_OFDM_RATE_24M 0x9
+#define ZD_OFDM_RATE_36M 0xd
+#define ZD_OFDM_RATE_48M 0x8
+#define ZD_OFDM_RATE_54M 0xc
+
+struct cck_plcp_header {
+ u8 signal;
+ u8 service;
+ __le16 length;
+ __le16 crc16;
+} __attribute__((packed));
+
+static inline u8 zd_cck_plcp_header_rate(const struct cck_plcp_header *header)
+{
+ return header->signal;
+}
+
+#define ZD_CCK_SIGNAL_1M 0x0a
+#define ZD_CCK_SIGNAL_2M 0x14
+#define ZD_CCK_SIGNAL_5M5 0x37
+#define ZD_CCK_SIGNAL_11M 0x6e
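+/* The SIGNAL values above encode the CCK rate in units of 100 kbit/s,
+ * e.g. 0x6e == 110 == 11 Mbit/s.
+ */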
+
+enum ieee80211_std {
+ IEEE80211B = 0x01,
+ IEEE80211A = 0x02,
+ IEEE80211G = 0x04,
+};
+
+#endif /* _ZD_IEEE80211_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
new file mode 100644
index 000000000000..a9bd80a08613
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -0,0 +1,1084 @@
+/* zd_mac.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/usb.h>
+#include <linux/jiffies.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "zd_def.h"
+#include "zd_chip.h"
+#include "zd_mac.h"
+#include "zd_ieee80211.h"
+#include "zd_netdev.h"
+#include "zd_rf.h"
+#include "zd_util.h"
+
+static void ieee_init(struct ieee80211_device *ieee);
+static void softmac_init(struct ieee80211softmac_device *sm);
+
+int zd_mac_init(struct zd_mac *mac,
+ struct net_device *netdev,
+ struct usb_interface *intf)
+{
+ struct ieee80211_device *ieee = zd_netdev_ieee80211(netdev);
+
+ memset(mac, 0, sizeof(*mac));
+ spin_lock_init(&mac->lock);
+ mac->netdev = netdev;
+
+ ieee_init(ieee);
+ softmac_init(ieee80211_priv(netdev));
+ zd_chip_init(&mac->chip, netdev, intf);
+ return 0;
+}
+
+static int reset_channel(struct zd_mac *mac)
+{
+ int r;
+ unsigned long flags;
+ const struct channel_range *range;
+
+ spin_lock_irqsave(&mac->lock, flags);
+ range = zd_channel_range(mac->regdomain);
+ if (!range->start) {
+ r = -EINVAL;
+ goto out;
+ }
+ mac->requested_channel = range->start;
+ r = 0;
+out:
+ spin_unlock_irqrestore(&mac->lock, flags);
+ return r;
+}
+
+int zd_mac_init_hw(struct zd_mac *mac, u8 device_type)
+{
+ int r;
+ struct zd_chip *chip = &mac->chip;
+ u8 addr[ETH_ALEN];
+ u8 default_regdomain;
+
+ r = zd_chip_enable_int(chip);
+ if (r)
+ goto out;
+ r = zd_chip_init_hw(chip, device_type);
+ if (r)
+ goto disable_int;
+
+ zd_get_e2p_mac_addr(chip, addr);
+ r = zd_write_mac_addr(chip, addr);
+ if (r)
+ goto disable_int;
+ ZD_ASSERT(!irqs_disabled());
+ spin_lock_irq(&mac->lock);
+ memcpy(mac->netdev->dev_addr, addr, ETH_ALEN);
+ spin_unlock_irq(&mac->lock);
+
+ r = zd_read_regdomain(chip, &default_regdomain);
+ if (r)
+ goto disable_int;
+ if (!zd_regdomain_supported(default_regdomain)) {
+ dev_dbg_f(zd_mac_dev(mac),
+ "Regulatory Domain %#04x is not supported.\n",
+ default_regdomain);
+ r = -EINVAL;
+ goto disable_int;
+ }
+ spin_lock_irq(&mac->lock);
+ mac->regdomain = mac->default_regdomain = default_regdomain;
+ spin_unlock_irq(&mac->lock);
+ r = reset_channel(mac);
+ if (r)
+ goto disable_int;
+
+ /* We must inform the device that we are doing encryption/decryption in
+ * software at the moment. */
+ r = zd_set_encryption_type(chip, ENC_SNIFFER);
+ if (r)
+ goto disable_int;
+
+ r = zd_geo_init(zd_mac_to_ieee80211(mac), mac->regdomain);
+ if (r)
+ goto disable_int;
+
+ r = 0;
+disable_int:
+ zd_chip_disable_int(chip);
+out:
+ return r;
+}
+
+void zd_mac_clear(struct zd_mac *mac)
+{
+	/* Acquire and release the lock to wait for anyone still holding it. */
+ spin_lock(&mac->lock);
+ spin_unlock(&mac->lock);
+ zd_chip_clear(&mac->chip);
+ memset(mac, 0, sizeof(*mac));
+}
+
+static int reset_mode(struct zd_mac *mac)
+{
+ struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+	struct zd_ioreq32 ioreqs[3] = {
+		{ CR_RX_FILTER, STA_RX_FILTER },
+		{ CR_SNIFFER_ON, 0U },
+		{ CR_ENCRYPTION_TYPE, NO_WEP },
+	};
+
+ if (ieee->iw_mode == IW_MODE_MONITOR) {
+ ioreqs[0].value = 0xffffffff;
+ ioreqs[1].value = 0x1;
+ ioreqs[2].value = ENC_SNIFFER;
+ }
+
+ return zd_iowrite32a(&mac->chip, ioreqs, 3);
+}
+
+int zd_mac_open(struct net_device *netdev)
+{
+ struct zd_mac *mac = zd_netdev_mac(netdev);
+ struct zd_chip *chip = &mac->chip;
+ int r;
+
+ r = zd_chip_enable_int(chip);
+ if (r < 0)
+ goto out;
+
+ r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G);
+ if (r < 0)
+ goto disable_int;
+ r = reset_mode(mac);
+ if (r)
+ goto disable_int;
+ r = zd_chip_switch_radio_on(chip);
+ if (r < 0)
+ goto disable_int;
+ r = zd_chip_set_channel(chip, mac->requested_channel);
+ if (r < 0)
+ goto disable_radio;
+ r = zd_chip_enable_rx(chip);
+ if (r < 0)
+ goto disable_radio;
+ r = zd_chip_enable_hwint(chip);
+ if (r < 0)
+ goto disable_rx;
+
+ ieee80211softmac_start(netdev);
+ return 0;
+disable_rx:
+ zd_chip_disable_rx(chip);
+disable_radio:
+ zd_chip_switch_radio_off(chip);
+disable_int:
+ zd_chip_disable_int(chip);
+out:
+ return r;
+}
+
+int zd_mac_stop(struct net_device *netdev)
+{
+ struct zd_mac *mac = zd_netdev_mac(netdev);
+ struct zd_chip *chip = &mac->chip;
+
+ netif_stop_queue(netdev);
+
+ /*
+ * The order here deliberately is a little different from the open()
+ * method, since we need to make sure there is no opportunity for RX
+ * frames to be processed by softmac after we have stopped it.
+ */
+
+ zd_chip_disable_rx(chip);
+ ieee80211softmac_stop(netdev);
+
+ zd_chip_disable_hwint(chip);
+ zd_chip_switch_radio_off(chip);
+ zd_chip_disable_int(chip);
+
+ return 0;
+}
+
+int zd_mac_set_mac_address(struct net_device *netdev, void *p)
+{
+ int r;
+ unsigned long flags;
+ struct sockaddr *addr = p;
+ struct zd_mac *mac = zd_netdev_mac(netdev);
+ struct zd_chip *chip = &mac->chip;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ dev_dbg_f(zd_mac_dev(mac),
+ "Setting MAC to " MAC_FMT "\n", MAC_ARG(addr->sa_data));
+
+ r = zd_write_mac_addr(chip, addr->sa_data);
+ if (r)
+ return r;
+
+ spin_lock_irqsave(&mac->lock, flags);
+ memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ return 0;
+}
+
+int zd_mac_set_regdomain(struct zd_mac *mac, u8 regdomain)
+{
+ int r;
+ u8 channel;
+
+ ZD_ASSERT(!irqs_disabled());
+ spin_lock_irq(&mac->lock);
+ if (regdomain == 0) {
+ regdomain = mac->default_regdomain;
+ }
+ if (!zd_regdomain_supported(regdomain)) {
+ spin_unlock_irq(&mac->lock);
+ return -EINVAL;
+ }
+ mac->regdomain = regdomain;
+ channel = mac->requested_channel;
+ spin_unlock_irq(&mac->lock);
+
+ r = zd_geo_init(zd_mac_to_ieee80211(mac), regdomain);
+ if (r)
+ return r;
+ if (!zd_regdomain_supports_channel(regdomain, channel)) {
+ r = reset_channel(mac);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+u8 zd_mac_get_regdomain(struct zd_mac *mac)
+{
+ unsigned long flags;
+ u8 regdomain;
+
+ spin_lock_irqsave(&mac->lock, flags);
+ regdomain = mac->regdomain;
+ spin_unlock_irqrestore(&mac->lock, flags);
+ return regdomain;
+}
+
+static void set_channel(struct net_device *netdev, u8 channel)
+{
+ struct zd_mac *mac = zd_netdev_mac(netdev);
+
+ dev_dbg_f(zd_mac_dev(mac), "channel %d\n", channel);
+
+ zd_chip_set_channel(&mac->chip, channel);
+}
+
+/* TODO: Should not work in Managed mode. */
+int zd_mac_request_channel(struct zd_mac *mac, u8 channel)
+{
+ unsigned long lock_flags;
+ struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+
+ if (ieee->iw_mode == IW_MODE_INFRA)
+ return -EPERM;
+
+ spin_lock_irqsave(&mac->lock, lock_flags);
+ if (!zd_regdomain_supports_channel(mac->regdomain, channel)) {
+ spin_unlock_irqrestore(&mac->lock, lock_flags);
+ return -EINVAL;
+ }
+ mac->requested_channel = channel;
+ spin_unlock_irqrestore(&mac->lock, lock_flags);
+ if (netif_running(mac->netdev))
+ return zd_chip_set_channel(&mac->chip, channel);
+ else
+ return 0;
+}
+
+int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags)
+{
+ struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+
+ *channel = zd_chip_get_channel(&mac->chip);
+ if (ieee->iw_mode != IW_MODE_INFRA) {
+ spin_lock_irq(&mac->lock);
+ *flags = *channel == mac->requested_channel ?
+ MAC_FIXED_CHANNEL : 0;
+		spin_unlock_irq(&mac->lock);
+ } else {
+ *flags = 0;
+ }
+ dev_dbg_f(zd_mac_dev(mac), "channel %u flags %u\n", *channel, *flags);
+ return 0;
+}
+
+/* If a wrong rate is given, we fall back to the slowest rate: 1 MBit/s */
+static u8 cs_typed_rate(u8 cs_rate)
+{
+ static const u8 typed_rates[16] = {
+ [ZD_CS_CCK_RATE_1M] = ZD_CS_CCK|ZD_CS_CCK_RATE_1M,
+ [ZD_CS_CCK_RATE_2M] = ZD_CS_CCK|ZD_CS_CCK_RATE_2M,
+ [ZD_CS_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CS_CCK_RATE_5_5M,
+ [ZD_CS_CCK_RATE_11M] = ZD_CS_CCK|ZD_CS_CCK_RATE_11M,
+ [ZD_OFDM_RATE_6M] = ZD_CS_OFDM|ZD_OFDM_RATE_6M,
+ [ZD_OFDM_RATE_9M] = ZD_CS_OFDM|ZD_OFDM_RATE_9M,
+ [ZD_OFDM_RATE_12M] = ZD_CS_OFDM|ZD_OFDM_RATE_12M,
+ [ZD_OFDM_RATE_18M] = ZD_CS_OFDM|ZD_OFDM_RATE_18M,
+ [ZD_OFDM_RATE_24M] = ZD_CS_OFDM|ZD_OFDM_RATE_24M,
+ [ZD_OFDM_RATE_36M] = ZD_CS_OFDM|ZD_OFDM_RATE_36M,
+ [ZD_OFDM_RATE_48M] = ZD_CS_OFDM|ZD_OFDM_RATE_48M,
+ [ZD_OFDM_RATE_54M] = ZD_CS_OFDM|ZD_OFDM_RATE_54M,
+ };
+
+ ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f);
+ return typed_rates[cs_rate & ZD_CS_RATE_MASK];
+}
+
+/* Fall back to the lowest rate if the rate is unknown. */
+static u8 rate_to_cs_rate(u8 rate)
+{
+ switch (rate) {
+ case IEEE80211_CCK_RATE_2MB:
+ return ZD_CS_CCK_RATE_2M;
+ case IEEE80211_CCK_RATE_5MB:
+ return ZD_CS_CCK_RATE_5_5M;
+ case IEEE80211_CCK_RATE_11MB:
+ return ZD_CS_CCK_RATE_11M;
+ case IEEE80211_OFDM_RATE_6MB:
+ return ZD_OFDM_RATE_6M;
+ case IEEE80211_OFDM_RATE_9MB:
+ return ZD_OFDM_RATE_9M;
+ case IEEE80211_OFDM_RATE_12MB:
+ return ZD_OFDM_RATE_12M;
+ case IEEE80211_OFDM_RATE_18MB:
+ return ZD_OFDM_RATE_18M;
+ case IEEE80211_OFDM_RATE_24MB:
+ return ZD_OFDM_RATE_24M;
+ case IEEE80211_OFDM_RATE_36MB:
+ return ZD_OFDM_RATE_36M;
+ case IEEE80211_OFDM_RATE_48MB:
+ return ZD_OFDM_RATE_48M;
+ case IEEE80211_OFDM_RATE_54MB:
+ return ZD_OFDM_RATE_54M;
+ }
+ return ZD_CS_CCK_RATE_1M;
+}
+
+int zd_mac_set_mode(struct zd_mac *mac, u32 mode)
+{
+ struct ieee80211_device *ieee;
+
+ switch (mode) {
+ case IW_MODE_AUTO:
+ case IW_MODE_ADHOC:
+ case IW_MODE_INFRA:
+ mac->netdev->type = ARPHRD_ETHER;
+ break;
+ case IW_MODE_MONITOR:
+ mac->netdev->type = ARPHRD_IEEE80211_RADIOTAP;
+ break;
+ default:
+ dev_dbg_f(zd_mac_dev(mac), "wrong mode %u\n", mode);
+ return -EINVAL;
+ }
+
+ ieee = zd_mac_to_ieee80211(mac);
+ ZD_ASSERT(!irqs_disabled());
+ spin_lock_irq(&ieee->lock);
+ ieee->iw_mode = mode;
+ spin_unlock_irq(&ieee->lock);
+
+ if (netif_running(mac->netdev))
+ return reset_mode(mac);
+
+ return 0;
+}
+
+int zd_mac_get_mode(struct zd_mac *mac, u32 *mode)
+{
+ unsigned long flags;
+ struct ieee80211_device *ieee;
+
+ ieee = zd_mac_to_ieee80211(mac);
+ spin_lock_irqsave(&ieee->lock, flags);
+ *mode = ieee->iw_mode;
+ spin_unlock_irqrestore(&ieee->lock, flags);
+ return 0;
+}
+
+int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range)
+{
+ int i;
+ const struct channel_range *channel_range;
+ u8 regdomain;
+
+ memset(range, 0, sizeof(*range));
+
+	/* FIXME: Not so important and depends on the mode. This value is
+	 * usually used for 802.11g. The number given here seems to be in
+	 * bit/s.
+	 */
+ range->throughput = 27 * 1000 * 1000;
+
+ range->max_qual.qual = 100;
+ range->max_qual.level = 100;
+
+ /* FIXME: Needs still to be tuned. */
+ range->avg_qual.qual = 71;
+ range->avg_qual.level = 80;
+
+ /* FIXME: depends on standard? */
+ range->min_rts = 256;
+ range->max_rts = 2346;
+
+ range->min_frag = MIN_FRAG_THRESHOLD;
+ range->max_frag = MAX_FRAG_THRESHOLD;
+
+ range->max_encoding_tokens = WEP_KEYS;
+ range->num_encoding_sizes = 2;
+ range->encoding_size[0] = 5;
+ range->encoding_size[1] = WEP_KEY_LEN;
+
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 20;
+
+ ZD_ASSERT(!irqs_disabled());
+ spin_lock_irq(&mac->lock);
+ regdomain = mac->regdomain;
+ spin_unlock_irq(&mac->lock);
+ channel_range = zd_channel_range(regdomain);
+
+ range->num_channels = channel_range->end - channel_range->start;
+ range->old_num_channels = range->num_channels;
+ range->num_frequency = range->num_channels;
+ range->old_num_frequency = range->num_frequency;
+
+ for (i = 0; i < range->num_frequency; i++) {
+ struct iw_freq *freq = &range->freq[i];
+ freq->i = channel_range->start + i;
+ zd_channel_to_freq(freq, freq->i);
+ }
+
+ return 0;
+}
+
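+/* Computes the on-air duration of the payload in microseconds and, for CCK
+ * at 11M, sets the PLCP length-extension bit in *service when required.
+ * Example: a 100-octet frame at 11M gives bits = 800, 800 % 11 == 8 (no
+ * length extension) and (800 + 10) / 11 == 73 us.
+ */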
+static int zd_calc_tx_length_us(u8 *service, u8 cs_rate, u16 tx_length)
+{
+ static const u8 rate_divisor[] = {
+ [ZD_CS_CCK_RATE_1M] = 1,
+ [ZD_CS_CCK_RATE_2M] = 2,
+ [ZD_CS_CCK_RATE_5_5M] = 11, /* bits must be doubled */
+ [ZD_CS_CCK_RATE_11M] = 11,
+ [ZD_OFDM_RATE_6M] = 6,
+ [ZD_OFDM_RATE_9M] = 9,
+ [ZD_OFDM_RATE_12M] = 12,
+ [ZD_OFDM_RATE_18M] = 18,
+ [ZD_OFDM_RATE_24M] = 24,
+ [ZD_OFDM_RATE_36M] = 36,
+ [ZD_OFDM_RATE_48M] = 48,
+ [ZD_OFDM_RATE_54M] = 54,
+ };
+
+ u32 bits = (u32)tx_length * 8;
+ u32 divisor;
+
+ divisor = rate_divisor[cs_rate];
+ if (divisor == 0)
+ return -EINVAL;
+
+ switch (cs_rate) {
+ case ZD_CS_CCK_RATE_5_5M:
+ bits = (2*bits) + 10; /* round up to the next integer */
+ break;
+ case ZD_CS_CCK_RATE_11M:
+ if (service) {
+ u32 t = bits % 11;
+ *service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION;
+ if (0 < t && t <= 3) {
+ *service |= ZD_PLCP_SERVICE_LENGTH_EXTENSION;
+ }
+ }
+ bits += 10; /* round up to the next integer */
+ break;
+ }
+
+ return bits/divisor;
+}
+
+enum {
+ R2M_SHORT_PREAMBLE = 0x01,
+ R2M_11A = 0x02,
+};
+
+static u8 cs_rate_to_modulation(u8 cs_rate, int flags)
+{
+ u8 modulation;
+
+ modulation = cs_typed_rate(cs_rate);
+ if (flags & R2M_SHORT_PREAMBLE) {
+ switch (ZD_CS_RATE(modulation)) {
+ case ZD_CS_CCK_RATE_2M:
+ case ZD_CS_CCK_RATE_5_5M:
+ case ZD_CS_CCK_RATE_11M:
+ modulation |= ZD_CS_CCK_PREA_SHORT;
+ return modulation;
+ }
+ }
+ if (flags & R2M_11A) {
+ if (ZD_CS_TYPE(modulation) == ZD_CS_OFDM)
+ modulation |= ZD_CS_OFDM_MODE_11A;
+ }
+ return modulation;
+}
+
+static void cs_set_modulation(struct zd_mac *mac, struct zd_ctrlset *cs,
+ struct ieee80211_hdr_4addr *hdr)
+{
+ struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev);
+ u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl));
+ u8 rate, cs_rate;
+ int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0;
+
+ /* FIXME: 802.11a? short preamble? */
+ rate = ieee80211softmac_suggest_txrate(softmac,
+ is_multicast_ether_addr(hdr->addr1), is_mgt);
+
+ cs_rate = rate_to_cs_rate(rate);
+ cs->modulation = cs_rate_to_modulation(cs_rate, 0);
+}
+
+static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
+ struct ieee80211_hdr_4addr *header)
+{
+ unsigned int tx_length = le16_to_cpu(cs->tx_length);
+ u16 fctl = le16_to_cpu(header->frame_ctl);
+ u16 ftype = WLAN_FC_GET_TYPE(fctl);
+ u16 stype = WLAN_FC_GET_STYPE(fctl);
+
+ /*
+ * CONTROL:
+ * - start at 0x00
+ * - if fragment 0, enable bit 0
+ * - if backoff needed, enable bit 0
+ * - if burst (backoff not needed) disable bit 0
+ * - if multicast, enable bit 1
+ * - if PS-POLL frame, enable bit 2
+ * - if in INDEPENDENT_BSS mode and zd1205_DestPowerSave, then enable
+ * bit 4 (FIXME: wtf)
+	 * - if frag_len > RTS threshold, set bit 5 as long as it is not a
+	 *   multicast or management frame
+ * - if bit 5 is set, and we are in OFDM mode, unset bit 5 and set bit
+ * 7
+ */
+
+ cs->control = 0;
+
+ /* First fragment */
+ if (WLAN_GET_SEQ_FRAG(le16_to_cpu(header->seq_ctl)) == 0)
+ cs->control |= ZD_CS_NEED_RANDOM_BACKOFF;
+
+ /* Multicast */
+ if (is_multicast_ether_addr(header->addr1))
+ cs->control |= ZD_CS_MULTICAST;
+
+ /* PS-POLL */
+ if (stype == IEEE80211_STYPE_PSPOLL)
+ cs->control |= ZD_CS_PS_POLL_FRAME;
+
+ if (!is_multicast_ether_addr(header->addr1) &&
+ ftype != IEEE80211_FTYPE_MGMT &&
+ tx_length > zd_netdev_ieee80211(mac->netdev)->rts)
+ {
+ /* FIXME: check the logic */
+ if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM) {
+ /* 802.11g */
+ cs->control |= ZD_CS_SELF_CTS;
+ } else { /* 802.11b */
+ cs->control |= ZD_CS_RTS;
+ }
+ }
+
+ /* FIXME: Management frame? */
+}
+
+static int fill_ctrlset(struct zd_mac *mac,
+ struct ieee80211_txb *txb,
+ int frag_num)
+{
+ int r;
+ struct sk_buff *skb = txb->fragments[frag_num];
+ struct ieee80211_hdr_4addr *hdr =
+ (struct ieee80211_hdr_4addr *) skb->data;
+ unsigned int frag_len = skb->len + IEEE80211_FCS_LEN;
+ unsigned int next_frag_len;
+ unsigned int packet_length;
+ struct zd_ctrlset *cs = (struct zd_ctrlset *)
+ skb_push(skb, sizeof(struct zd_ctrlset));
+
+ if (frag_num+1 < txb->nr_frags) {
+ next_frag_len = txb->fragments[frag_num+1]->len +
+ IEEE80211_FCS_LEN;
+ } else {
+ next_frag_len = 0;
+ }
+ ZD_ASSERT(frag_len <= 0xffff);
+ ZD_ASSERT(next_frag_len <= 0xffff);
+
+ cs_set_modulation(mac, cs, hdr);
+
+ cs->tx_length = cpu_to_le16(frag_len);
+
+ cs_set_control(mac, cs, hdr);
+
+ packet_length = frag_len + sizeof(struct zd_ctrlset) + 10;
+ ZD_ASSERT(packet_length <= 0xffff);
+	/* ZD1211B: Computing the length difference this way gives us the
+	 * flexibility to compute the packet length.
+	 */
+ cs->packet_length = cpu_to_le16(mac->chip.is_zd1211b ?
+ packet_length - frag_len : packet_length);
+
+ /*
+ * CURRENT LENGTH:
+ * - transmit frame length in microseconds
+ * - seems to be derived from frame length
+ * - see Cal_Us_Service() in zdinlinef.h
+ * - if macp->bTxBurstEnable is enabled, then multiply by 4
+ * - bTxBurstEnable is never set in the vendor driver
+ *
+ * SERVICE:
+ * - "for PLCP configuration"
+ * - always 0 except in some situations at 802.11b 11M
+ * - see line 53 of zdinlinef.h
+ */
+ cs->service = 0;
+ r = zd_calc_tx_length_us(&cs->service, ZD_CS_RATE(cs->modulation),
+ le16_to_cpu(cs->tx_length));
+ if (r < 0)
+ return r;
+ cs->current_length = cpu_to_le16(r);
+
+ if (next_frag_len == 0) {
+ cs->next_frame_length = 0;
+ } else {
+ r = zd_calc_tx_length_us(NULL, ZD_CS_RATE(cs->modulation),
+ next_frag_len);
+ if (r < 0)
+ return r;
+ cs->next_frame_length = cpu_to_le16(r);
+ }
+
+ return 0;
+}
+
+static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri)
+{
+ int i, r;
+
+ for (i = 0; i < txb->nr_frags; i++) {
+ struct sk_buff *skb = txb->fragments[i];
+
+ r = fill_ctrlset(mac, txb, i);
+ if (r)
+ return r;
+ r = zd_usb_tx(&mac->chip.usb, skb->data, skb->len);
+ if (r)
+ return r;
+ }
+
+ /* FIXME: shouldn't this be handled by the upper layers? */
+ mac->netdev->trans_start = jiffies;
+
+ ieee80211_txb_free(txb);
+ return 0;
+}
+
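+/* Radiotap header prepended to frames delivered in monitor mode. The field
+ * order matches the it_present bits set in fill_rt_header() below: FLAGS,
+ * RATE, then the two 16-bit halves of CHANNEL (frequency and flags).
+ */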
+struct zd_rt_hdr {
+ struct ieee80211_radiotap_header rt_hdr;
+ u8 rt_flags;
+ u8 rt_rate;
+ u16 rt_channel;
+ u16 rt_chbitmask;
+} __attribute__((packed));
+
+static void fill_rt_header(void *buffer, struct zd_mac *mac,
+ const struct ieee80211_rx_stats *stats,
+ const struct rx_status *status)
+{
+ struct zd_rt_hdr *hdr = buffer;
+
+ hdr->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
+ hdr->rt_hdr.it_pad = 0;
+ hdr->rt_hdr.it_len = cpu_to_le16(sizeof(struct zd_rt_hdr));
+ hdr->rt_hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_CHANNEL) |
+ (1 << IEEE80211_RADIOTAP_RATE));
+
+ hdr->rt_flags = 0;
+ if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256))
+ hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP;
+
+ hdr->rt_rate = stats->rate / 5;
+
+ /* FIXME: 802.11a */
+ hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz(
+ _zd_chip_get_channel(&mac->chip)));
+ hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ |
+ ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) ==
+ ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK));
+}
+
+/* Returns 1 if the data packet is for us and 0 otherwise. */
+static int is_data_packet_for_us(struct ieee80211_device *ieee,
+ struct ieee80211_hdr_4addr *hdr)
+{
+ struct net_device *netdev = ieee->dev;
+ u16 fc = le16_to_cpu(hdr->frame_ctl);
+
+ ZD_ASSERT(WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA);
+
+ switch (ieee->iw_mode) {
+ case IW_MODE_ADHOC:
+ if ((fc & (IEEE80211_FCTL_TODS|IEEE80211_FCTL_FROMDS)) != 0 ||
+ memcmp(hdr->addr3, ieee->bssid, ETH_ALEN) != 0)
+ return 0;
+ break;
+ case IW_MODE_AUTO:
+ case IW_MODE_INFRA:
+ if ((fc & (IEEE80211_FCTL_TODS|IEEE80211_FCTL_FROMDS)) !=
+ IEEE80211_FCTL_FROMDS ||
+ memcmp(hdr->addr2, ieee->bssid, ETH_ALEN) != 0)
+ return 0;
+ break;
+ default:
+ ZD_ASSERT(ieee->iw_mode != IW_MODE_MONITOR);
+ return 0;
+ }
+
+ return memcmp(hdr->addr1, netdev->dev_addr, ETH_ALEN) == 0 ||
+ is_multicast_ether_addr(hdr->addr1) ||
+ (netdev->flags & IFF_PROMISC);
+}
+
+/* Filters received packets. Returns 1 if the packet should be handed to
+ * ieee80211_rx(), 0 if it should be dropped, and -EINVAL if an error is
+ * detected. Management frames are passed to ieee80211_rx_mgt() here.
+ *
+ * It is based on ieee80211_rx_any.
+ */
+static int filter_rx(struct ieee80211_device *ieee,
+ const u8 *buffer, unsigned int length,
+ struct ieee80211_rx_stats *stats)
+{
+ struct ieee80211_hdr_4addr *hdr;
+ u16 fc;
+
+ if (ieee->iw_mode == IW_MODE_MONITOR)
+ return 1;
+
+ hdr = (struct ieee80211_hdr_4addr *)buffer;
+ fc = le16_to_cpu(hdr->frame_ctl);
+ if ((fc & IEEE80211_FCTL_VERS) != 0)
+ return -EINVAL;
+
+ switch (WLAN_FC_GET_TYPE(fc)) {
+ case IEEE80211_FTYPE_MGMT:
+ if (length < sizeof(struct ieee80211_hdr_3addr))
+ return -EINVAL;
+ ieee80211_rx_mgt(ieee, hdr, stats);
+ return 0;
+ case IEEE80211_FTYPE_CTL:
+		/* Control frames (which may be short) are simply ignored. */
+ return 0;
+ case IEEE80211_FTYPE_DATA:
+ if (length < sizeof(struct ieee80211_hdr_3addr))
+ return -EINVAL;
+ return is_data_packet_for_us(ieee, hdr);
+ }
+
+ return -EINVAL;
+}
+
+static void update_qual_rssi(struct zd_mac *mac,
+ const u8 *buffer, unsigned int length,
+ u8 qual_percent, u8 rssi_percent)
+{
+ unsigned long flags;
+ struct ieee80211_hdr_3addr *hdr;
+ int i;
+
+ hdr = (struct ieee80211_hdr_3addr *)buffer;
+ if (length < offsetof(struct ieee80211_hdr_3addr, addr3))
+ return;
+ if (memcmp(hdr->addr2, zd_mac_to_ieee80211(mac)->bssid, ETH_ALEN) != 0)
+ return;
+
+ spin_lock_irqsave(&mac->lock, flags);
+ i = mac->stats_count % ZD_MAC_STATS_BUFFER_SIZE;
+ mac->qual_buffer[i] = qual_percent;
+ mac->rssi_buffer[i] = rssi_percent;
+ mac->stats_count++;
+ spin_unlock_irqrestore(&mac->lock, flags);
+}
+
+static int fill_rx_stats(struct ieee80211_rx_stats *stats,
+ const struct rx_status **pstatus,
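+/* Maps an iw_freq request to a channel number, following the Wireless
+ * Extensions convention: a fixed frequency with m < 1000 is taken directly
+ * as a channel number, larger values are interpreted as a frequency.
+ * Returns 1 if a channel was found, 0 if the frequency is not fixed, and a
+ * negative error code otherwise.
+ */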
+ struct zd_mac *mac,
+ const u8 *buffer, unsigned int length)
+{
+ const struct rx_status *status;
+
+ *pstatus = status = zd_tail(buffer, length, sizeof(struct rx_status));
+ if (status->frame_status & ZD_RX_ERROR) {
+ /* FIXME: update? */
+ return -EINVAL;
+ }
+ memset(stats, 0, sizeof(struct ieee80211_rx_stats));
+	stats->len = length - (ZD_PLCP_HEADER_SIZE + IEEE80211_FCS_LEN +
+			       sizeof(struct rx_status));
+ /* FIXME: 802.11a */
+ stats->freq = IEEE80211_24GHZ_BAND;
+ stats->received_channel = _zd_chip_get_channel(&mac->chip);
+ stats->rssi = zd_rx_strength_percent(status->signal_strength);
+ stats->signal = zd_rx_qual_percent(buffer,
+ length - sizeof(struct rx_status),
+ status);
+ stats->mask = IEEE80211_STATMASK_RSSI | IEEE80211_STATMASK_SIGNAL;
+ stats->rate = zd_rx_rate(buffer, status);
+ if (stats->rate)
+ stats->mask |= IEEE80211_STATMASK_RATE;
+
+ return 0;
+}
+
+int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length)
+{
+ int r;
+ struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+ struct ieee80211_rx_stats stats;
+ const struct rx_status *status;
+ struct sk_buff *skb;
+
+ if (length < ZD_PLCP_HEADER_SIZE + IEEE80211_1ADDR_LEN +
+ IEEE80211_FCS_LEN + sizeof(struct rx_status))
+ return -EINVAL;
+
+ r = fill_rx_stats(&stats, &status, mac, buffer, length);
+ if (r)
+ return r;
+
+ length -= ZD_PLCP_HEADER_SIZE+IEEE80211_FCS_LEN+
+ sizeof(struct rx_status);
+ buffer += ZD_PLCP_HEADER_SIZE;
+
+ update_qual_rssi(mac, buffer, length, stats.signal, stats.rssi);
+
+ r = filter_rx(ieee, buffer, length, &stats);
+ if (r <= 0)
+ return r;
+
+ skb = dev_alloc_skb(sizeof(struct zd_rt_hdr) + length);
+ if (!skb)
+ return -ENOMEM;
+ if (ieee->iw_mode == IW_MODE_MONITOR)
+ fill_rt_header(skb_put(skb, sizeof(struct zd_rt_hdr)), mac,
+ &stats, status);
+ memcpy(skb_put(skb, length), buffer, length);
+
+ r = ieee80211_rx(ieee, skb, &stats);
+ if (!r) {
+ ZD_ASSERT(in_irq());
+ dev_kfree_skb_irq(skb);
+ }
+ return 0;
+}
+
+static int netdev_tx(struct ieee80211_txb *txb, struct net_device *netdev,
+ int pri)
+{
+ return zd_mac_tx(zd_netdev_mac(netdev), txb, pri);
+}
+
+static void set_security(struct net_device *netdev,
+ struct ieee80211_security *sec)
+{
+ struct ieee80211_device *ieee = zd_netdev_ieee80211(netdev);
+ struct ieee80211_security *secinfo = &ieee->sec;
+ int keyidx;
+
+ dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), "\n");
+
+ for (keyidx = 0; keyidx<WEP_KEYS; keyidx++)
+ if (sec->flags & (1<<keyidx)) {
+ secinfo->encode_alg[keyidx] = sec->encode_alg[keyidx];
+ secinfo->key_sizes[keyidx] = sec->key_sizes[keyidx];
+ memcpy(secinfo->keys[keyidx], sec->keys[keyidx],
+ SCM_KEY_LEN);
+ }
+
+ if (sec->flags & SEC_ACTIVE_KEY) {
+ secinfo->active_key = sec->active_key;
+ dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
+ " .active_key = %d\n", sec->active_key);
+ }
+ if (sec->flags & SEC_UNICAST_GROUP) {
+ secinfo->unicast_uses_group = sec->unicast_uses_group;
+ dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
+ " .unicast_uses_group = %d\n",
+ sec->unicast_uses_group);
+ }
+ if (sec->flags & SEC_LEVEL) {
+ secinfo->level = sec->level;
+ dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
+ " .level = %d\n", sec->level);
+ }
+ if (sec->flags & SEC_ENABLED) {
+ secinfo->enabled = sec->enabled;
+ dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
+ " .enabled = %d\n", sec->enabled);
+ }
+ if (sec->flags & SEC_ENCRYPT) {
+ secinfo->encrypt = sec->encrypt;
+ dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
+ " .encrypt = %d\n", sec->encrypt);
+ }
+ if (sec->flags & SEC_AUTH_MODE) {
+ secinfo->auth_mode = sec->auth_mode;
+ dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
+ " .auth_mode = %d\n", sec->auth_mode);
+ }
+}
+
+static void ieee_init(struct ieee80211_device *ieee)
+{
+ ieee->mode = IEEE_B | IEEE_G;
+ ieee->freq_band = IEEE80211_24GHZ_BAND;
+ ieee->modulation = IEEE80211_OFDM_MODULATION | IEEE80211_CCK_MODULATION;
+ ieee->tx_headroom = sizeof(struct zd_ctrlset);
+ ieee->set_security = set_security;
+ ieee->hard_start_xmit = netdev_tx;
+
+ /* Software encryption/decryption for now */
+ ieee->host_build_iv = 0;
+ ieee->host_encrypt = 1;
+ ieee->host_decrypt = 1;
+
+ /* FIXME: default to managed mode, until ieee80211 and zd1211rw can
+ * correctly support AUTO */
+ ieee->iw_mode = IW_MODE_INFRA;
+}
+
+static void softmac_init(struct ieee80211softmac_device *sm)
+{
+ sm->set_channel = set_channel;
+}
+
+struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
+{
+ struct zd_mac *mac = zd_netdev_mac(ndev);
+ struct iw_statistics *iw_stats = &mac->iw_stats;
+ unsigned int i, count, qual_total, rssi_total;
+
+ memset(iw_stats, 0, sizeof(struct iw_statistics));
+ /* We are not setting the status, because ieee->state is not updated
+ * at all and this driver doesn't track authentication state.
+ */
+ spin_lock_irq(&mac->lock);
+ count = mac->stats_count < ZD_MAC_STATS_BUFFER_SIZE ?
+ mac->stats_count : ZD_MAC_STATS_BUFFER_SIZE;
+ qual_total = rssi_total = 0;
+ for (i = 0; i < count; i++) {
+ qual_total += mac->qual_buffer[i];
+ rssi_total += mac->rssi_buffer[i];
+ }
+ spin_unlock_irq(&mac->lock);
+ iw_stats->qual.updated = IW_QUAL_NOISE_INVALID;
+ if (count > 0) {
+ iw_stats->qual.qual = qual_total / count;
+ iw_stats->qual.level = rssi_total / count;
+ iw_stats->qual.updated |=
+ IW_QUAL_QUAL_UPDATED|IW_QUAL_LEVEL_UPDATED;
+ } else {
+ iw_stats->qual.updated |=
+ IW_QUAL_QUAL_INVALID|IW_QUAL_LEVEL_INVALID;
+ }
+ /* TODO: update counter */
+ return iw_stats;
+}
+
+#ifdef DEBUG
+static const char* decryption_types[] = {
+ [ZD_RX_NO_WEP] = "none",
+ [ZD_RX_WEP64] = "WEP64",
+ [ZD_RX_TKIP] = "TKIP",
+ [ZD_RX_AES] = "AES",
+ [ZD_RX_WEP128] = "WEP128",
+ [ZD_RX_WEP256] = "WEP256",
+};
+
+static const char *decryption_type_string(u8 type)
+{
+ const char *s;
+
+ if (type < ARRAY_SIZE(decryption_types)) {
+ s = decryption_types[type];
+ } else {
+ s = NULL;
+ }
+ return s ? s : "unknown";
+}
+
+static int is_ofdm(u8 frame_status)
+{
+ return (frame_status & ZD_RX_OFDM);
+}
+
+void zd_dump_rx_status(const struct rx_status *status)
+{
+ const char* modulation;
+ u8 quality;
+
+ if (is_ofdm(status->frame_status)) {
+ modulation = "ofdm";
+ quality = status->signal_quality_ofdm;
+ } else {
+ modulation = "cck";
+ quality = status->signal_quality_cck;
+ }
+ pr_debug("rx status %s strength %#04x qual %#04x decryption %s\n",
+ modulation, status->signal_strength, quality,
+ decryption_type_string(status->decryption_type));
+ if (status->frame_status & ZD_RX_ERROR) {
+ pr_debug("rx error %s%s%s%s%s%s\n",
+ (status->frame_status & ZD_RX_TIMEOUT_ERROR) ?
+ "timeout " : "",
+ (status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR) ?
+ "fifo " : "",
+ (status->frame_status & ZD_RX_DECRYPTION_ERROR) ?
+ "decryption " : "",
+ (status->frame_status & ZD_RX_CRC32_ERROR) ?
+ "crc32 " : "",
+ (status->frame_status & ZD_RX_NO_ADDR1_MATCH_ERROR) ?
+ "addr1 " : "",
+ (status->frame_status & ZD_RX_CRC16_ERROR) ?
+ "crc16" : "");
+ }
+}
+#endif /* DEBUG */
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
new file mode 100644
index 000000000000..b3ba49b84634
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -0,0 +1,193 @@
+/* zd_mac.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ZD_MAC_H
+#define _ZD_MAC_H
+
+#include <linux/wireless.h>
+#include <linux/kernel.h>
+#include <net/ieee80211.h>
+#include <net/ieee80211softmac.h>
+
+#include "zd_chip.h"
+#include "zd_netdev.h"
+
+struct zd_ctrlset {
+ u8 modulation;
+ __le16 tx_length;
+ u8 control;
+ /* stores only the difference to tx_length on ZD1211B */
+ __le16 packet_length;
+ __le16 current_length;
+ u8 service;
+ __le16 next_frame_length;
+} __attribute__((packed));
+
+#define ZD_CS_RESERVED_SIZE 25
+
+/* zd_ctrlset field modulation */
+#define ZD_CS_RATE_MASK 0x0f
+#define ZD_CS_TYPE_MASK 0x10
+#define ZD_CS_RATE(modulation) ((modulation) & ZD_CS_RATE_MASK)
+#define ZD_CS_TYPE(modulation) ((modulation) & ZD_CS_TYPE_MASK)
+
+#define ZD_CS_CCK 0x00
+#define ZD_CS_OFDM 0x10
+
+#define ZD_CS_CCK_RATE_1M 0x00
+#define ZD_CS_CCK_RATE_2M 0x01
+#define ZD_CS_CCK_RATE_5_5M 0x02
+#define ZD_CS_CCK_RATE_11M 0x03
+/* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*.
+ */
+
+/* bit 5 is preamble (when in CCK mode), or a/g selection (when in OFDM mode) */
+#define ZD_CS_CCK_PREA_LONG 0x00
+#define ZD_CS_CCK_PREA_SHORT 0x20
+#define ZD_CS_OFDM_MODE_11G 0x00
+#define ZD_CS_OFDM_MODE_11A 0x20
+
+/* zd_ctrlset control field */
+#define ZD_CS_NEED_RANDOM_BACKOFF 0x01
+#define ZD_CS_MULTICAST 0x02
+
+#define ZD_CS_FRAME_TYPE_MASK 0x0c
+#define ZD_CS_DATA_FRAME 0x00
+#define ZD_CS_PS_POLL_FRAME 0x04
+#define ZD_CS_MANAGEMENT_FRAME 0x08
+#define ZD_CS_NO_SEQUENCE_CTL_FRAME 0x0c
+
+#define ZD_CS_WAKE_DESTINATION 0x10
+#define ZD_CS_RTS 0x20
+#define ZD_CS_ENCRYPT 0x40
+#define ZD_CS_SELF_CTS 0x80
+
+/* Incoming frames are prepended by a PLCP header */
+#define ZD_PLCP_HEADER_SIZE 5
+
+struct rx_length_info {
+ __le16 length[3];
+ __le16 tag;
+} __attribute__((packed));
+
+#define RX_LENGTH_INFO_TAG 0x697e
+
+struct rx_status {
+ u8 signal_quality_cck;
+ /* rssi */
+ u8 signal_strength;
+ u8 signal_quality_ofdm;
+ u8 decryption_type;
+ u8 frame_status;
+} __attribute__((packed));
+
+/* rx_status field decryption_type */
+#define ZD_RX_NO_WEP 0
+#define ZD_RX_WEP64 1
+#define ZD_RX_TKIP 2
+#define ZD_RX_AES 4
+#define ZD_RX_WEP128 5
+#define ZD_RX_WEP256 6
+
+/* rx_status field frame_status */
+#define ZD_RX_FRAME_MODULATION_MASK 0x01
+#define ZD_RX_CCK 0x00
+#define ZD_RX_OFDM 0x01
+
+#define ZD_RX_TIMEOUT_ERROR 0x02
+#define ZD_RX_FIFO_OVERRUN_ERROR 0x04
+#define ZD_RX_DECRYPTION_ERROR 0x08
+#define ZD_RX_CRC32_ERROR 0x10
+#define ZD_RX_NO_ADDR1_MATCH_ERROR 0x20
+#define ZD_RX_CRC16_ERROR 0x40
+#define ZD_RX_ERROR 0x80
+
+enum mac_flags {
+ MAC_FIXED_CHANNEL = 0x01,
+};
+
+#define ZD_MAC_STATS_BUFFER_SIZE 16
+
+struct zd_mac {
+ struct net_device *netdev;
+ struct zd_chip chip;
+ spinlock_t lock;
+ /* Unlocked reading possible */
+ struct iw_statistics iw_stats;
+ unsigned int stats_count;
+ u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
+ u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE];
+ u8 regdomain;
+ u8 default_regdomain;
+ u8 requested_channel;
+};
+
+static inline struct ieee80211_device *zd_mac_to_ieee80211(struct zd_mac *mac)
+{
+ return zd_netdev_ieee80211(mac->netdev);
+}
+
+static inline struct zd_mac *zd_netdev_mac(struct net_device *netdev)
+{
+ return ieee80211softmac_priv(netdev);
+}
+
+static inline struct zd_mac *zd_chip_to_mac(struct zd_chip *chip)
+{
+ return container_of(chip, struct zd_mac, chip);
+}
+
+static inline struct zd_mac *zd_usb_to_mac(struct zd_usb *usb)
+{
+ return zd_chip_to_mac(zd_usb_to_chip(usb));
+}
+
+#define zd_mac_dev(mac) (zd_chip_dev(&(mac)->chip))
+
+int zd_mac_init(struct zd_mac *mac,
+ struct net_device *netdev,
+ struct usb_interface *intf);
+void zd_mac_clear(struct zd_mac *mac);
+
+int zd_mac_init_hw(struct zd_mac *mac, u8 device_type);
+
+int zd_mac_open(struct net_device *netdev);
+int zd_mac_stop(struct net_device *netdev);
+int zd_mac_set_mac_address(struct net_device *dev, void *p);
+
+int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length);
+
+int zd_mac_set_regdomain(struct zd_mac *zd_mac, u8 regdomain);
+u8 zd_mac_get_regdomain(struct zd_mac *zd_mac);
+
+int zd_mac_request_channel(struct zd_mac *mac, u8 channel);
+int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags);
+
+int zd_mac_set_mode(struct zd_mac *mac, u32 mode);
+int zd_mac_get_mode(struct zd_mac *mac, u32 *mode);
+
+int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range);
+
+struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev);
+
+#ifdef DEBUG
+void zd_dump_rx_status(const struct rx_status *status);
+#else
+#define zd_dump_rx_status(status)
+#endif /* DEBUG */
+
+#endif /* _ZD_MAC_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c
new file mode 100644
index 000000000000..9df232c2c863
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_netdev.c
@@ -0,0 +1,267 @@
+/* zd_netdev.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/ieee80211.h>
+#include <net/ieee80211softmac.h>
+#include <net/ieee80211softmac_wx.h>
+#include <net/iw_handler.h>
+
+#include "zd_def.h"
+#include "zd_netdev.h"
+#include "zd_mac.h"
+#include "zd_ieee80211.h"
+
+/* Region 0 means reset regdomain to default. */
+static int zd_set_regdomain(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *req, char *extra)
+{
+ const u8 *regdomain = (u8 *)req;
+ return zd_mac_set_regdomain(zd_netdev_mac(netdev), *regdomain);
+}
+
+static int zd_get_regdomain(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *req, char *extra)
+{
+ u8 *regdomain = (u8 *)req;
+ if (!regdomain)
+ return -EINVAL;
+ *regdomain = zd_mac_get_regdomain(zd_netdev_mac(netdev));
+ return 0;
+}
+
+static const struct iw_priv_args zd_priv_args[] = {
+ {
+ .cmd = ZD_PRIV_SET_REGDOMAIN,
+ .set_args = IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
+ .name = "set_regdomain",
+ },
+ {
+ .cmd = ZD_PRIV_GET_REGDOMAIN,
+ .get_args = IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
+ .name = "get_regdomain",
+ },
+};
+
+#define PRIV_OFFSET(x) [(x)-SIOCIWFIRSTPRIV]
+
+static const iw_handler zd_priv_handler[] = {
+ PRIV_OFFSET(ZD_PRIV_SET_REGDOMAIN) = zd_set_regdomain,
+ PRIV_OFFSET(ZD_PRIV_GET_REGDOMAIN) = zd_get_regdomain,
+};
+
+static int iw_get_name(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *req, char *extra)
+{
+ /* FIXME: check whether 802.11a will also be supported; also add
+ * zd1211B, if we support it.
+ */
+ strlcpy(req->name, "802.11g zd1211", IFNAMSIZ);
+ return 0;
+}
+
+static int iw_set_freq(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *req, char *extra)
+{
+ int r;
+ struct zd_mac *mac = zd_netdev_mac(netdev);
+ struct iw_freq *freq = &req->freq;
+ u8 channel;
+
+ r = zd_find_channel(&channel, freq);
+ if (r < 0)
+ return r;
+ r = zd_mac_request_channel(mac, channel);
+ return r;
+}
+
+static int iw_get_freq(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *req, char *extra)
+{
+ int r;
+ struct zd_mac *mac = zd_netdev_mac(netdev);
+ struct iw_freq *freq = &req->freq;
+ u8 channel;
+ u8 flags;
+
+ r = zd_mac_get_channel(mac, &channel, &flags);
+ if (r)
+ return r;
+
+ freq->flags = (flags & MAC_FIXED_CHANNEL) ?
+ IW_FREQ_FIXED : IW_FREQ_AUTO;
+ dev_dbg_f(zd_mac_dev(mac), "channel %s\n",
+ (flags & MAC_FIXED_CHANNEL) ? "fixed" : "auto");
+ return zd_channel_to_freq(freq, channel);
+}
+
+static int iw_set_mode(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *req, char *extra)
+{
+ return zd_mac_set_mode(zd_netdev_mac(netdev), req->mode);
+}
+
+static int iw_get_mode(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *req, char *extra)
+{
+ return zd_mac_get_mode(zd_netdev_mac(netdev), &req->mode);
+}
+
+static int iw_get_range(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *req, char *extra)
+{
+ struct iw_range *range = (struct iw_range *)extra;
+
+ dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), "\n");
+ req->data.length = sizeof(*range);
+ return zd_mac_get_range(zd_netdev_mac(netdev), range);
+}
+
+static int iw_set_encode(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data,
+ char *extra)
+{
+ return ieee80211_wx_set_encode(zd_netdev_ieee80211(netdev), info,
+ data, extra);
+}
+
+static int iw_get_encode(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data,
+ char *extra)
+{
+ return ieee80211_wx_get_encode(zd_netdev_ieee80211(netdev), info,
+ data, extra);
+}
+
+static int iw_set_encodeext(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data,
+ char *extra)
+{
+ return ieee80211_wx_set_encodeext(zd_netdev_ieee80211(netdev), info,
+ data, extra);
+}
+
+static int iw_get_encodeext(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data,
+ char *extra)
+{
+ return ieee80211_wx_get_encodeext(zd_netdev_ieee80211(netdev), info,
+ data, extra);
+}
+
+#define WX(x) [(x)-SIOCIWFIRST]
+
+static const iw_handler zd_standard_iw_handlers[] = {
+ WX(SIOCGIWNAME) = iw_get_name,
+ WX(SIOCSIWFREQ) = iw_set_freq,
+ WX(SIOCGIWFREQ) = iw_get_freq,
+ WX(SIOCSIWMODE) = iw_set_mode,
+ WX(SIOCGIWMODE) = iw_get_mode,
+ WX(SIOCGIWRANGE) = iw_get_range,
+ WX(SIOCSIWENCODE) = iw_set_encode,
+ WX(SIOCGIWENCODE) = iw_get_encode,
+ WX(SIOCSIWENCODEEXT) = iw_set_encodeext,
+ WX(SIOCGIWENCODEEXT) = iw_get_encodeext,
+ WX(SIOCSIWAUTH) = ieee80211_wx_set_auth,
+ WX(SIOCGIWAUTH) = ieee80211_wx_get_auth,
+ WX(SIOCSIWSCAN) = ieee80211softmac_wx_trigger_scan,
+ WX(SIOCGIWSCAN) = ieee80211softmac_wx_get_scan_results,
+ WX(SIOCSIWESSID) = ieee80211softmac_wx_set_essid,
+ WX(SIOCGIWESSID) = ieee80211softmac_wx_get_essid,
+ WX(SIOCSIWAP) = ieee80211softmac_wx_set_wap,
+ WX(SIOCGIWAP) = ieee80211softmac_wx_get_wap,
+ WX(SIOCSIWRATE) = ieee80211softmac_wx_set_rate,
+ WX(SIOCGIWRATE) = ieee80211softmac_wx_get_rate,
+ WX(SIOCSIWGENIE) = ieee80211softmac_wx_set_genie,
+ WX(SIOCGIWGENIE) = ieee80211softmac_wx_get_genie,
+ WX(SIOCSIWMLME) = ieee80211softmac_wx_set_mlme,
+};
+
+static const struct iw_handler_def iw_handler_def = {
+ .standard = zd_standard_iw_handlers,
+ .num_standard = ARRAY_SIZE(zd_standard_iw_handlers),
+ .private = zd_priv_handler,
+ .num_private = ARRAY_SIZE(zd_priv_handler),
+ .private_args = zd_priv_args,
+ .num_private_args = ARRAY_SIZE(zd_priv_args),
+ .get_wireless_stats = zd_mac_get_wireless_stats,
+};
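For readers unfamiliar with the designated-initializer trick used above: WX() and PRIV_OFFSET() turn an ioctl number into a zero-based array index, which keeps both handler tables correctly ordered. Expanded by hand (symbolic values, taken from the macros and from zd_netdev.h):

        /* WX(SIOCGIWFREQ) = iw_get_freq
         *   expands to  [(SIOCGIWFREQ) - SIOCIWFIRST] = iw_get_freq,
         * i.e. the entry at index SIOCGIWFREQ - SIOCIWFIRST.
         * PRIV_OFFSET() does the same relative to SIOCIWFIRSTPRIV, so
         * ZD_PRIV_SET_REGDOMAIN (SIOCIWFIRSTPRIV)     lands at index 0,
         * ZD_PRIV_GET_REGDOMAIN (SIOCIWFIRSTPRIV + 1) lands at index 1.
         */
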
+
+struct net_device *zd_netdev_alloc(struct usb_interface *intf)
+{
+ int r;
+ struct net_device *netdev;
+ struct zd_mac *mac;
+
+ netdev = alloc_ieee80211softmac(sizeof(struct zd_mac));
+ if (!netdev) {
+ dev_dbg_f(&intf->dev, "out of memory\n");
+ return NULL;
+ }
+
+ mac = zd_netdev_mac(netdev);
+ r = zd_mac_init(mac, netdev, intf);
+ if (r) {
+ usb_set_intfdata(intf, NULL);
+ free_ieee80211(netdev);
+ return NULL;
+ }
+
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ dev_dbg_f(&intf->dev, "netdev->flags %#06hx\n", netdev->flags);
+ dev_dbg_f(&intf->dev, "netdev->features %#010lx\n", netdev->features);
+
+ netdev->open = zd_mac_open;
+ netdev->stop = zd_mac_stop;
+ /* netdev->get_stats = */
+ /* netdev->set_multicast_list = */
+ netdev->set_mac_address = zd_mac_set_mac_address;
+ netdev->wireless_handlers = &iw_handler_def;
+ /* netdev->ethtool_ops = */
+
+ return netdev;
+}
+
+void zd_netdev_free(struct net_device *netdev)
+{
+ if (!netdev)
+ return;
+
+ zd_mac_clear(zd_netdev_mac(netdev));
+ free_ieee80211(netdev);
+}
+
+void zd_netdev_disconnect(struct net_device *netdev)
+{
+ unregister_netdev(netdev);
+}
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.h b/drivers/net/wireless/zd1211rw/zd_netdev.h
new file mode 100644
index 000000000000..374a957073c1
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_netdev.h
@@ -0,0 +1,45 @@
+/* zd_netdev.h: Header for net device related functions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ZD_NETDEV_H
+#define _ZD_NETDEV_H
+
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <net/ieee80211.h>
+
+#define ZD_PRIV_SET_REGDOMAIN (SIOCIWFIRSTPRIV)
+#define ZD_PRIV_GET_REGDOMAIN (SIOCIWFIRSTPRIV+1)
+
+static inline struct ieee80211_device *zd_netdev_ieee80211(
+ struct net_device *ndev)
+{
+ return netdev_priv(ndev);
+}
+
+static inline struct net_device *zd_ieee80211_to_netdev(
+ struct ieee80211_device *ieee)
+{
+ return ieee->dev;
+}
+
+struct net_device *zd_netdev_alloc(struct usb_interface *intf);
+void zd_netdev_free(struct net_device *netdev);
+
+void zd_netdev_disconnect(struct net_device *netdev);
+
+#endif /* _ZD_NETDEV_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
new file mode 100644
index 000000000000..d3770d2c61bc
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -0,0 +1,151 @@
+/* zd_rf.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include "zd_def.h"
+#include "zd_rf.h"
+#include "zd_ieee80211.h"
+#include "zd_chip.h"
+
+static const char *rfs[] = {
+ [0] = "unknown RF0",
+ [1] = "unknown RF1",
+ [UW2451_RF] = "UW2451_RF",
+ [UCHIP_RF] = "UCHIP_RF",
+ [AL2230_RF] = "AL2230_RF",
+ [AL7230B_RF] = "AL7230B_RF",
+ [THETA_RF] = "THETA_RF",
+ [AL2210_RF] = "AL2210_RF",
+ [MAXIM_NEW_RF] = "MAXIM_NEW_RF",
+ [UW2453_RF] = "UW2453_RF",
+ [AL2230S_RF] = "AL2230S_RF",
+ [RALINK_RF] = "RALINK_RF",
+ [INTERSIL_RF] = "INTERSIL_RF",
+ [RF2959_RF] = "RF2959_RF",
+ [MAXIM_NEW2_RF] = "MAXIM_NEW2_RF",
+ [PHILIPS_RF] = "PHILIPS_RF",
+};
+
+const char *zd_rf_name(u8 type)
+{
+ if (type & 0xf0)
+ type = 0;
+ return rfs[type];
+}
+
+void zd_rf_init(struct zd_rf *rf)
+{
+ memset(rf, 0, sizeof(*rf));
+}
+
+void zd_rf_clear(struct zd_rf *rf)
+{
+ memset(rf, 0, sizeof(*rf));
+}
+
+int zd_rf_init_hw(struct zd_rf *rf, u8 type)
+{
+ int r, t;
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ switch (type) {
+ case RF2959_RF:
+ r = zd_rf_init_rf2959(rf);
+ if (r)
+ return r;
+ break;
+ case AL2230_RF:
+ r = zd_rf_init_al2230(rf);
+ if (r)
+ return r;
+ break;
+ default:
+ dev_err(zd_chip_dev(chip),
+ "RF %s %#x is not supported\n", zd_rf_name(type), type);
+ rf->type = 0;
+ return -ENODEV;
+ }
+
+ rf->type = type;
+
+ r = zd_chip_lock_phy_regs(chip);
+ if (r)
+ return r;
+ t = rf->init_hw(rf);
+ r = zd_chip_unlock_phy_regs(chip);
+ if (t)
+ r = t;
+ return r;
+}
+
+int zd_rf_scnprint_id(struct zd_rf *rf, char *buffer, size_t size)
+{
+ return scnprintf(buffer, size, "%s", zd_rf_name(rf->type));
+}
+
+int zd_rf_set_channel(struct zd_rf *rf, u8 channel)
+{
+ int r;
+
+ ZD_ASSERT(mutex_is_locked(&zd_rf_to_chip(rf)->mutex));
+ if (channel < MIN_CHANNEL24)
+ return -EINVAL;
+ if (channel > MAX_CHANNEL24)
+ return -EINVAL;
+ dev_dbg_f(zd_chip_dev(zd_rf_to_chip(rf)), "channel: %d\n", channel);
+
+ r = rf->set_channel(rf, channel);
+ if (r >= 0)
+ rf->channel = channel;
+ return r;
+}
+
+int zd_switch_radio_on(struct zd_rf *rf)
+{
+ int r, t;
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = zd_chip_lock_phy_regs(chip);
+ if (r)
+ return r;
+ t = rf->switch_radio_on(rf);
+ r = zd_chip_unlock_phy_regs(chip);
+ if (t)
+ r = t;
+ return r;
+}
+
+int zd_switch_radio_off(struct zd_rf *rf)
+{
+ int r, t;
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ /* TODO: move phy regs handling to zd_chip */
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = zd_chip_lock_phy_regs(chip);
+ if (r)
+ return r;
+ t = rf->switch_radio_off(rf);
+ r = zd_chip_unlock_phy_regs(chip);
+ if (t)
+ r = t;
+ return r;
+}
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
new file mode 100644
index 000000000000..ea30f693fcc8
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -0,0 +1,82 @@
+/* zd_rf.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ZD_RF_H
+#define _ZD_RF_H
+
+#include "zd_types.h"
+
+#define UW2451_RF 0x2
+#define UCHIP_RF 0x3
+#define AL2230_RF 0x4
+#define AL7230B_RF 0x5 /* a,b,g */
+#define THETA_RF 0x6
+#define AL2210_RF 0x7
+#define MAXIM_NEW_RF 0x8
+#define UW2453_RF 0x9
+#define AL2230S_RF 0xa
+#define RALINK_RF 0xb
+#define INTERSIL_RF 0xc
+#define RF2959_RF 0xd
+#define MAXIM_NEW2_RF 0xe
+#define PHILIPS_RF 0xf
+
+#define RF_CHANNEL(ch) [(ch)-1]
+
+/* Provides functions of the RF transceiver. */
+
+enum {
+ RF_REG_BITS = 6,
+ RF_VALUE_BITS = 18,
+ RF_RV_BITS = RF_REG_BITS + RF_VALUE_BITS,
+};
+
+struct zd_rf {
+ u8 type;
+
+ u8 channel;
+ /*
+ * Whether this RF should patch the 6M band edge
+ * (assuming E2P_POD agrees)
+ */
+ u8 patch_6m_band_edge:1;
+
+ /* RF-specific functions */
+ int (*init_hw)(struct zd_rf *rf);
+ int (*set_channel)(struct zd_rf *rf, u8 channel);
+ int (*switch_radio_on)(struct zd_rf *rf);
+ int (*switch_radio_off)(struct zd_rf *rf);
+};
+
+const char *zd_rf_name(u8 type);
+void zd_rf_init(struct zd_rf *rf);
+void zd_rf_clear(struct zd_rf *rf);
+int zd_rf_init_hw(struct zd_rf *rf, u8 type);
+
+int zd_rf_scnprint_id(struct zd_rf *rf, char *buffer, size_t size);
+
+int zd_rf_set_channel(struct zd_rf *rf, u8 channel);
+
+int zd_switch_radio_on(struct zd_rf *rf);
+int zd_switch_radio_off(struct zd_rf *rf);
+
+/* Functions for individual RF chips */
+
+int zd_rf_init_rf2959(struct zd_rf *rf);
+int zd_rf_init_al2230(struct zd_rf *rf);
+
+#endif /* _ZD_RF_H */
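The struct zd_rf above is effectively a small ops table: each supported transceiver fills in init_hw, set_channel and the two radio switches from its own init function, and zd_rf_init_hw() dispatches to that init function by RF type. A hypothetical additional chip would follow the same pattern as zd_rf_init_al2230() below; all example_* names here are invented placeholders:

        /* hypothetical sketch, mirroring zd_rf_init_al2230() */
        int zd_rf_init_example(struct zd_rf *rf)
        {
                rf->init_hw = example_init_hw;
                rf->set_channel = example_set_channel;
                rf->switch_radio_on = example_switch_radio_on;
                rf->switch_radio_off = example_switch_radio_off;
                return 0;
        }

zd_rf_init_hw() would also need a matching case in its switch statement.
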
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
new file mode 100644
index 000000000000..0948b25f660d
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -0,0 +1,308 @@
+/* zd_rf_al2230.c: Functions for the AL2230 RF controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+
+#include "zd_rf.h"
+#include "zd_usb.h"
+#include "zd_chip.h"
+
+static const u32 al2230_table[][3] = {
+ RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, },
+ RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, },
+ RF_CHANNEL( 3) = { 0x03e790, 0x033331, 0x00000d, },
+ RF_CHANNEL( 4) = { 0x03e790, 0x0b3331, 0x00000d, },
+ RF_CHANNEL( 5) = { 0x03f7a0, 0x033331, 0x00000d, },
+ RF_CHANNEL( 6) = { 0x03f7a0, 0x0b3331, 0x00000d, },
+ RF_CHANNEL( 7) = { 0x03e7a0, 0x033331, 0x00000d, },
+ RF_CHANNEL( 8) = { 0x03e7a0, 0x0b3331, 0x00000d, },
+ RF_CHANNEL( 9) = { 0x03f7b0, 0x033331, 0x00000d, },
+ RF_CHANNEL(10) = { 0x03f7b0, 0x0b3331, 0x00000d, },
+ RF_CHANNEL(11) = { 0x03e7b0, 0x033331, 0x00000d, },
+ RF_CHANNEL(12) = { 0x03e7b0, 0x0b3331, 0x00000d, },
+ RF_CHANNEL(13) = { 0x03f7c0, 0x033331, 0x00000d, },
+ RF_CHANNEL(14) = { 0x03e7c0, 0x066661, 0x00000d, },
+};
+
+static int zd1211_al2230_init_hw(struct zd_rf *rf)
+{
+ int r;
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ static const struct zd_ioreq16 ioreqs[] = {
+ { CR15, 0x20 }, { CR23, 0x40 }, { CR24, 0x20 },
+ { CR26, 0x11 }, { CR28, 0x3e }, { CR29, 0x00 },
+ { CR44, 0x33 }, { CR106, 0x2a }, { CR107, 0x1a },
+ { CR109, 0x09 }, { CR110, 0x27 }, { CR111, 0x2b },
+ { CR112, 0x2b }, { CR119, 0x0a }, { CR10, 0x89 },
+ /* for newest (3rd cut) AL2230 */
+ { CR17, 0x28 },
+ { CR26, 0x93 }, { CR34, 0x30 },
+ /* for newest (3rd cut) AL2230 */
+ { CR35, 0x3e },
+ { CR41, 0x24 }, { CR44, 0x32 },
+ /* for newest (3rd cut) AL2230 */
+ { CR46, 0x96 },
+ { CR47, 0x1e }, { CR79, 0x58 }, { CR80, 0x30 },
+ { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 },
+ { CR92, 0x0a }, { CR99, 0x28 }, { CR100, 0x00 },
+ { CR101, 0x13 }, { CR102, 0x27 }, { CR106, 0x24 },
+ { CR107, 0x2a }, { CR109, 0x09 }, { CR110, 0x13 },
+ { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 },
+ { CR114, 0x27 },
+ /* for newest (3rd cut) AL2230 */
+ { CR115, 0x24 },
+ { CR116, 0x24 }, { CR117, 0xf4 }, { CR118, 0xfc },
+ { CR119, 0x10 }, { CR120, 0x4f }, { CR121, 0x77 },
+ { CR122, 0xe0 }, { CR137, 0x88 }, { CR252, 0xff },
+ { CR253, 0xff },
+
+ /* The following writes happen separately in the vendor driver */
+ { },
+
+ /* shdnb(PLL_ON)=0 */
+ { CR251, 0x2f },
+ /* shdnb(PLL_ON)=1 */
+ { CR251, 0x3f },
+ { CR138, 0x28 }, { CR203, 0x06 },
+ };
+
+ static const u32 rv[] = {
+ /* Channel 1 */
+ 0x03f790,
+ 0x033331,
+ 0x00000d,
+
+ 0x0b3331,
+ 0x03b812,
+ 0x00fff3,
+ 0x000da4,
+ 0x0f4dc5, /* fix freq shift, 0x04edc5 */
+ 0x0805b6,
+ 0x011687,
+ 0x000688,
+ 0x0403b9, /* external control TX power (CR31) */
+ 0x00dbba,
+ 0x00099b,
+ 0x0bdffc,
+ 0x00000d,
+ 0x00500f,
+
+ /* These writes happen separately in the vendor driver */
+ 0x00d00f,
+ 0x004c0f,
+ 0x00540f,
+ 0x00700f,
+ 0x00500f,
+ };
+
+ r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+ if (r)
+ return r;
+
+ r = zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int zd1211b_al2230_init_hw(struct zd_rf *rf)
+{
+ int r;
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ static const struct zd_ioreq16 ioreqs1[] = {
+ { CR10, 0x89 }, { CR15, 0x20 },
+ { CR17, 0x2B }, /* for newest(3rd cut) AL2230 */
+ { CR23, 0x40 }, { CR24, 0x20 }, { CR26, 0x93 },
+ { CR28, 0x3e }, { CR29, 0x00 },
+ { CR33, 0x28 }, /* 5621 */
+ { CR34, 0x30 },
+ { CR35, 0x3e }, /* for newest(3rd cut) AL2230 */
+ { CR41, 0x24 }, { CR44, 0x32 },
+ { CR46, 0x99 }, /* for newest(3rd cut) AL2230 */
+ { CR47, 0x1e },
+
+ /* ZD1211B 05.06.10 */
+ { CR48, 0x00 }, { CR49, 0x00 }, { CR51, 0x01 },
+ { CR52, 0x80 }, { CR53, 0x7e }, { CR65, 0x00 },
+ { CR66, 0x00 }, { CR67, 0x00 }, { CR68, 0x00 },
+ { CR69, 0x28 },
+
+ { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 },
+ { CR87, 0x0a }, { CR89, 0x04 },
+ { CR91, 0x00 }, /* 5621 */
+ { CR92, 0x0a },
+ { CR98, 0x8d }, /* 4804, for 1212 new algorithm */
+ { CR99, 0x00 }, /* 5621 */
+ { CR101, 0x13 }, { CR102, 0x27 },
+ { CR106, 0x24 }, /* for newest(3rd cut) AL2230 */
+ { CR107, 0x2a },
+ { CR109, 0x13 }, /* 4804, for 1212 new algorithm */
+ { CR110, 0x1f }, /* 4804, for 1212 new algorithm */
+ { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 },
+ { CR114, 0x27 },
+ { CR115, 0x26 }, /* 24->26 at 4902 for newest(3rd cut) AL2230 */
+ { CR116, 0x24 },
+ { CR117, 0xfa }, /* for 1211b */
+ { CR118, 0xfa }, /* for 1211b */
+ { CR119, 0x10 },
+ { CR120, 0x4f },
+ { CR121, 0x6c }, /* for 1211b */
+ { CR122, 0xfc }, /* E0->FC at 4902 */
+ { CR123, 0x57 }, /* 5623 */
+ { CR125, 0xad }, /* 4804, for 1212 new algorithm */
+ { CR126, 0x6c }, /* 5614 */
+ { CR127, 0x03 }, /* 4804, for 1212 new algorithm */
+ { CR137, 0x50 }, /* 5614 */
+ { CR138, 0xa8 },
+ { CR144, 0xac }, /* 5621 */
+ { CR150, 0x0d }, { CR252, 0x00 }, { CR253, 0x00 },
+ };
+
+ static const u32 rv1[] = {
+ /* channel 1 */
+ 0x03f790,
+ 0x033331,
+ 0x00000d,
+
+ 0x0b3331,
+ 0x03b812,
+ 0x00fff3,
+ 0x0005a4,
+ 0x0f4dc5, /* fix freq shift 0x044dc5 */
+ 0x0805b6,
+ 0x0146c7,
+ 0x000688,
+ 0x0403b9, /* External control TX power (CR31) */
+ 0x00dbba,
+ 0x00099b,
+ 0x0bdffc,
+ 0x00000d,
+ 0x00580f,
+ };
+
+ static const struct zd_ioreq16 ioreqs2[] = {
+ { CR47, 0x1e }, { CR_RFCFG, 0x03 },
+ };
+
+ static const u32 rv2[] = {
+ 0x00880f,
+ 0x00080f,
+ };
+
+ static const struct zd_ioreq16 ioreqs3[] = {
+ { CR_RFCFG, 0x00 }, { CR47, 0x1e }, { CR251, 0x7f },
+ };
+
+ static const u32 rv3[] = {
+ 0x00d80f,
+ 0x00780f,
+ 0x00580f,
+ };
+
+ static const struct zd_ioreq16 ioreqs4[] = {
+ { CR138, 0x28 }, { CR203, 0x06 },
+ };
+
+ r = zd_iowrite16a_locked(chip, ioreqs1, ARRAY_SIZE(ioreqs1));
+ if (r)
+ return r;
+ r = zd_rfwritev_locked(chip, rv1, ARRAY_SIZE(rv1), RF_RV_BITS);
+ if (r)
+ return r;
+ r = zd_iowrite16a_locked(chip, ioreqs2, ARRAY_SIZE(ioreqs2));
+ if (r)
+ return r;
+ r = zd_rfwritev_locked(chip, rv2, ARRAY_SIZE(rv2), RF_RV_BITS);
+ if (r)
+ return r;
+ r = zd_iowrite16a_locked(chip, ioreqs3, ARRAY_SIZE(ioreqs3));
+ if (r)
+ return r;
+ r = zd_rfwritev_locked(chip, rv3, ARRAY_SIZE(rv3), RF_RV_BITS);
+ if (r)
+ return r;
+ return zd_iowrite16a_locked(chip, ioreqs4, ARRAY_SIZE(ioreqs4));
+}
+
+static int al2230_set_channel(struct zd_rf *rf, u8 channel)
+{
+ int r;
+ const u32 *rv = al2230_table[channel-1];
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+ static const struct zd_ioreq16 ioreqs[] = {
+ { CR138, 0x28 },
+ { CR203, 0x06 },
+ };
+
+ r = zd_rfwritev_locked(chip, rv, 3, RF_RV_BITS);
+ if (r)
+ return r;
+ return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+static int zd1211_al2230_switch_radio_on(struct zd_rf *rf)
+{
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+ static const struct zd_ioreq16 ioreqs[] = {
+ { CR11, 0x00 },
+ { CR251, 0x3f },
+ };
+
+ return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+static int zd1211b_al2230_switch_radio_on(struct zd_rf *rf)
+{
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+ static const struct zd_ioreq16 ioreqs[] = {
+ { CR11, 0x00 },
+ { CR251, 0x7f },
+ };
+
+ return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+static int al2230_switch_radio_off(struct zd_rf *rf)
+{
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+ static const struct zd_ioreq16 ioreqs[] = {
+ { CR11, 0x04 },
+ { CR251, 0x2f },
+ };
+
+ return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+int zd_rf_init_al2230(struct zd_rf *rf)
+{
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ rf->set_channel = al2230_set_channel;
+ rf->switch_radio_off = al2230_switch_radio_off;
+ if (chip->is_zd1211b) {
+ rf->init_hw = zd1211b_al2230_init_hw;
+ rf->switch_radio_on = zd1211b_al2230_switch_radio_on;
+ } else {
+ rf->init_hw = zd1211_al2230_init_hw;
+ rf->switch_radio_on = zd1211_al2230_switch_radio_on;
+ }
+ rf->patch_6m_band_edge = 1;
+ return 0;
+}
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
new file mode 100644
index 000000000000..58247271cc24
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
@@ -0,0 +1,279 @@
+/* zd_rf_rf2959.c: Functions for the RF2959 (RFMD) RF controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+
+#include "zd_rf.h"
+#include "zd_usb.h"
+#include "zd_chip.h"
+
+static u32 rf2959_table[][2] = {
+ RF_CHANNEL( 1) = { 0x181979, 0x1e6666 },
+ RF_CHANNEL( 2) = { 0x181989, 0x1e6666 },
+ RF_CHANNEL( 3) = { 0x181999, 0x1e6666 },
+ RF_CHANNEL( 4) = { 0x1819a9, 0x1e6666 },
+ RF_CHANNEL( 5) = { 0x1819b9, 0x1e6666 },
+ RF_CHANNEL( 6) = { 0x1819c9, 0x1e6666 },
+ RF_CHANNEL( 7) = { 0x1819d9, 0x1e6666 },
+ RF_CHANNEL( 8) = { 0x1819e9, 0x1e6666 },
+ RF_CHANNEL( 9) = { 0x1819f9, 0x1e6666 },
+ RF_CHANNEL(10) = { 0x181a09, 0x1e6666 },
+ RF_CHANNEL(11) = { 0x181a19, 0x1e6666 },
+ RF_CHANNEL(12) = { 0x181a29, 0x1e6666 },
+ RF_CHANNEL(13) = { 0x181a39, 0x1e6666 },
+ RF_CHANNEL(14) = { 0x181a60, 0x1c0000 },
+};
+
+#if 0
+static int bits(u32 rw, int from, int to)
+{
+ rw &= ~(0xffffffffU << (to+1));
+ rw >>= from;
+ return rw;
+}
+
+static int bit(u32 rw, int bit)
+{
+ return bits(rw, bit, bit);
+}
+
+static void dump_regwrite(u32 rw)
+{
+ int reg = bits(rw, 18, 22);
+ int rw_flag = bits(rw, 23, 23);
+ PDEBUG("rf2959 %#010x reg %d rw %d", rw, reg, rw_flag);
+
+ switch (reg) {
+ case 0:
+ PDEBUG("reg0 CFG1 ref_sel %d hybernate %d rf_vco_reg_en %d"
+ " if_vco_reg_en %d if_vga_en %d",
+ bits(rw, 14, 15), bit(rw, 3), bit(rw, 2), bit(rw, 1),
+ bit(rw, 0));
+ break;
+ case 1:
+ PDEBUG("reg1 IFPLL1 pll_en1 %d kv_en1 %d vtc_en1 %d lpf1 %d"
+ " cpl1 %d pdp1 %d autocal_en1 %d ld_en1 %d ifloopr %d"
+ " ifloopc %d dac1 %d",
+ bit(rw, 17), bit(rw, 16), bit(rw, 15), bit(rw, 14),
+ bit(rw, 13), bit(rw, 12), bit(rw, 11), bit(rw, 10),
+ bits(rw, 7, 9), bits(rw, 4, 6), bits(rw, 0, 3));
+ break;
+ case 2:
+ PDEBUG("reg2 IFPLL2 n1 %d num1 %d",
+ bits(rw, 6, 17), bits(rw, 0, 5));
+ break;
+ case 3:
+ PDEBUG("reg3 IFPLL3 num %d", bits(rw, 0, 17));
+ break;
+ case 4:
+ PDEBUG("reg4 IFPLL4 dn1 %#04x ct_def1 %d kv_def1 %d",
+ bits(rw, 8, 16), bits(rw, 4, 7), bits(rw, 0, 3));
+ break;
+ case 5:
+ PDEBUG("reg5 RFPLL1 pll_en %d kv_en %d vtc_en %d lpf %d cpl %d"
+ " pdp %d autocal_en %d ld_en %d rfloopr %d rfloopc %d"
+ " dac %d",
+ bit(rw, 17), bit(rw, 16), bit(rw, 15), bit(rw, 14),
+ bit(rw, 13), bit(rw, 12), bit(rw, 11), bit(rw, 10),
+ bits(rw, 7, 9), bits(rw, 4, 6), bits(rw, 0,3));
+ break;
+ case 6:
+ PDEBUG("reg6 RFPLL2 n %d num %d",
+ bits(rw, 6, 17), bits(rw, 0, 5));
+ break;
+ case 7:
+ PDEBUG("reg7 RFPLL3 num2 %d", bits(rw, 0, 17));
+ break;
+ case 8:
+ PDEBUG("reg8 RFPLL4 dn %#06x ct_def %d kv_def %d",
+ bits(rw, 8, 16), bits(rw, 4, 7), bits(rw, 0, 3));
+ break;
+ case 9:
+ PDEBUG("reg9 CAL1 tvco %d tlock %d m_ct_value %d ld_window %d",
+ bits(rw, 13, 17), bits(rw, 8, 12), bits(rw, 3, 7),
+ bits(rw, 0, 2));
+ break;
+ case 10:
+ PDEBUG("reg10 TXRX1 rxdcfbbyps %d pcontrol %d txvgc %d"
+ " rxlpfbw %d txlpfbw %d txdiffmode %d txenmode %d"
+ " intbiasen %d tybypass %d",
+ bit(rw, 17), bits(rw, 15, 16), bits(rw, 10, 14),
+ bits(rw, 7, 9), bits(rw, 4, 6), bit(rw, 3), bit(rw, 2),
+ bit(rw, 1), bit(rw, 0));
+ break;
+ case 11:
+ PDEBUG("reg11 PCNT1 mid_bias %d p_desired %d pc_offset %d"
+ " tx_delay %d",
+ bits(rw, 15, 17), bits(rw, 9, 14), bits(rw, 3, 8),
+ bits(rw, 0, 2));
+ break;
+ case 12:
+ PDEBUG("reg12 PCNT2 max_power %d mid_power %d min_power %d",
+ bits(rw, 12, 17), bits(rw, 6, 11), bits(rw, 0, 5));
+ break;
+ case 13:
+ PDEBUG("reg13 VCOT1 rfpll vco comp %d ifpll vco comp %d"
+ " lobias %d if_biasbuf %d if_biasvco %d rf_biasbuf %d"
+ " rf_biasvco %d",
+ bit(rw, 17), bit(rw, 16), bit(rw, 15),
+ bits(rw, 8, 9), bits(rw, 5, 7), bits(rw, 3, 4),
+ bits(rw, 0, 2));
+ break;
+ case 14:
+ PDEBUG("reg14 IQCAL rx_acal %d rx_pcal %d"
+ " tx_acal %d tx_pcal %d",
+ bits(rw, 13, 17), bits(rw, 9, 12), bits(rw, 4, 8),
+ bits(rw, 0, 3));
+ break;
+ }
+}
+#endif /* 0 */
+
+static int rf2959_init_hw(struct zd_rf *rf)
+{
+ int r;
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ static const struct zd_ioreq16 ioreqs[] = {
+ { CR2, 0x1E }, { CR9, 0x20 }, { CR10, 0x89 },
+ { CR11, 0x00 }, { CR15, 0xD0 }, { CR17, 0x68 },
+ { CR19, 0x4a }, { CR20, 0x0c }, { CR21, 0x0E },
+ { CR23, 0x48 },
+ /* normal size for cca threshold */
+ { CR24, 0x14 },
+ /* { CR24, 0x20 }, */
+ { CR26, 0x90 }, { CR27, 0x30 }, { CR29, 0x20 },
+ { CR31, 0xb2 }, { CR32, 0x43 }, { CR33, 0x28 },
+ { CR38, 0x30 }, { CR34, 0x0f }, { CR35, 0xF0 },
+ { CR41, 0x2a }, { CR46, 0x7F }, { CR47, 0x1E },
+ { CR51, 0xc5 }, { CR52, 0xc5 }, { CR53, 0xc5 },
+ { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 },
+ { CR82, 0x00 }, { CR83, 0x24 }, { CR84, 0x04 },
+ { CR85, 0x00 }, { CR86, 0x10 }, { CR87, 0x2A },
+ { CR88, 0x10 }, { CR89, 0x24 }, { CR90, 0x18 },
+ /* { CR91, 0x18 }, */
+ /* should solve continuous CTS frame problems */
+ { CR91, 0x00 },
+ { CR92, 0x0a }, { CR93, 0x00 }, { CR94, 0x01 },
+ { CR95, 0x00 }, { CR96, 0x40 }, { CR97, 0x37 },
+ { CR98, 0x05 }, { CR99, 0x28 }, { CR100, 0x00 },
+ { CR101, 0x13 }, { CR102, 0x27 }, { CR103, 0x27 },
+ { CR104, 0x18 }, { CR105, 0x12 },
+ /* normal size */
+ { CR106, 0x1a },
+ /* { CR106, 0x22 }, */
+ { CR107, 0x24 }, { CR108, 0x0a }, { CR109, 0x13 },
+ { CR110, 0x2F }, { CR111, 0x27 }, { CR112, 0x27 },
+ { CR113, 0x27 }, { CR114, 0x27 }, { CR115, 0x40 },
+ { CR116, 0x40 }, { CR117, 0xF0 }, { CR118, 0xF0 },
+ { CR119, 0x16 },
+ /* no TX continuation */
+ { CR122, 0x00 },
+ /* { CR122, 0xff }, */
+ { CR127, 0x03 }, { CR131, 0x08 }, { CR138, 0x28 },
+ { CR148, 0x44 }, { CR150, 0x10 }, { CR169, 0xBB },
+ { CR170, 0xBB },
+ };
+
+ static const u32 rv[] = {
+ 0x000007, /* REG0(CFG1) */
+ 0x07dd43, /* REG1(IFPLL1) */
+ 0x080959, /* REG2(IFPLL2) */
+ 0x0e6666,
+ 0x116a57, /* REG4 */
+ 0x17dd43, /* REG5 */
+ 0x1819f9, /* REG6 */
+ 0x1e6666,
+ 0x214554,
+ 0x25e7fa,
+ 0x27fffa,
+ /* The Zydas driver somehow forgets to set this value. It's
+ * only set for Japan. We are using internal power control
+ * for now.
+ */
+ 0x294128, /* internal power */
+ /* 0x28252c, */ /* External control TX power */
+ /* CR31_CCK, CR51_6-36M, CR52_48M, CR53_54M */
+ 0x2c0000,
+ 0x300000,
+ 0x340000, /* REG13(0xD) */
+ 0x381e0f, /* REG14(0xE) */
+ /* Bogus: the RF2959 data sheet does not document register 27, which
+ * is what is actually referenced here. The commented 0x11 is 17 decimal.
+ */
+ 0x6c180f, /* REG27(0x11) */
+ };
+
+ r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+ if (r)
+ return r;
+
+ return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
+}
+
+static int rf2959_set_channel(struct zd_rf *rf, u8 channel)
+{
+ int i, r;
+ u32 *rv = rf2959_table[channel-1];
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ for (i = 0; i < 2; i++) {
+ r = zd_rfwrite_locked(chip, rv[i], RF_RV_BITS);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+static int rf2959_switch_radio_on(struct zd_rf *rf)
+{
+ static const struct zd_ioreq16 ioreqs[] = {
+ { CR10, 0x89 },
+ { CR11, 0x00 },
+ };
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+static int rf2959_switch_radio_off(struct zd_rf *rf)
+{
+ static const struct zd_ioreq16 ioreqs[] = {
+ { CR10, 0x15 },
+ { CR11, 0x81 },
+ };
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+int zd_rf_init_rf2959(struct zd_rf *rf)
+{
+ struct zd_chip *chip = zd_rf_to_chip(rf);
+
+ if (chip->is_zd1211b) {
+ dev_err(zd_chip_dev(chip),
+ "RF2959 is currently not supported for ZD1211B"
+ " devices\n");
+ return -ENODEV;
+ }
+ rf->init_hw = rf2959_init_hw;
+ rf->set_channel = rf2959_set_channel;
+ rf->switch_radio_on = rf2959_switch_radio_on;
+ rf->switch_radio_off = rf2959_switch_radio_off;
+ return 0;
+}
diff --git a/drivers/net/wireless/zd1211rw/zd_types.h b/drivers/net/wireless/zd1211rw/zd_types.h
new file mode 100644
index 000000000000..0155a1584ed3
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_types.h
@@ -0,0 +1,71 @@
+/* zd_types.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ZD_TYPES_H
+#define _ZD_TYPES_H
+
+#include <linux/types.h>
+
+/* We have three register spaces mapped into the overall USB address space of
+ * 64K words (16-bit values). There is the control register space of
+ * double-word registers, the eeprom register space and the firmware register
+ * space. The control register space is byte mapped, the others are word
+ * mapped.
+ *
+ * For that reason, we are using byte offsets for control registers and word
+ * offsets for everything else.
+ */
+
+typedef u32 __nocast zd_addr_t;
+
+enum {
+ ADDR_BASE_MASK = 0xff000000,
+ ADDR_OFFSET_MASK = 0x0000ffff,
+ ADDR_ZERO_MASK = 0x00ff0000,
+ NULL_BASE = 0x00000000,
+ USB_BASE = 0x01000000,
+ CR_BASE = 0x02000000,
+ CR_MAX_OFFSET = 0x0b30,
+ E2P_BASE = 0x03000000,
+ E2P_MAX_OFFSET = 0x007e,
+ FW_BASE = 0x04000000,
+ FW_MAX_OFFSET = 0x0005,
+};
+
+#define ZD_ADDR_BASE(addr) ((u32)(addr) & ADDR_BASE_MASK)
+#define ZD_OFFSET(addr) ((u32)(addr) & ADDR_OFFSET_MASK)
+
+#define ZD_ADDR(base, offset) \
+ ((zd_addr_t)(((base) & ADDR_BASE_MASK) | ((offset) & ADDR_OFFSET_MASK)))
+
+#define ZD_NULL_ADDR ((zd_addr_t)0)
+#define USB_REG(offset) ZD_ADDR(USB_BASE, offset) /* word addressing */
+#define CTL_REG(offset) ZD_ADDR(CR_BASE, offset) /* byte addressing */
+#define E2P_REG(offset) ZD_ADDR(E2P_BASE, offset) /* word addressing */
+#define FW_REG(offset) ZD_ADDR(FW_BASE, offset) /* word addressing */
+
+static inline zd_addr_t zd_inc_word(zd_addr_t addr)
+{
+ u32 base = ZD_ADDR_BASE(addr);
+ u32 offset = ZD_OFFSET(addr);
+
+ offset += base == CR_BASE ? 2 : 1;
+
+ return base | offset;
+}
+
+#endif /* _ZD_TYPES_H */
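A short illustration of the address encoding above, using the macros exactly as defined (the 0x0010 offsets are arbitrary): the base identifier lives in the top byte, the offset in the low 16 bits, and zd_inc_word() advances by 2 for the byte-mapped CR space but by 1 for the word-mapped spaces.

        zd_addr_t cr  = CTL_REG(0x0010);   /* 0x02000010 */
        zd_addr_t e2p = E2P_REG(0x0010);   /* 0x03000010 */

        cr  = zd_inc_word(cr);             /* 0x02000012: byte addressing, +2 */
        e2p = zd_inc_word(e2p);            /* 0x03000011: word addressing, +1 */
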
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
new file mode 100644
index 000000000000..6320984126c7
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -0,0 +1,1309 @@
+/* zd_usb.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/unaligned.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <net/ieee80211.h>
+
+#include "zd_def.h"
+#include "zd_netdev.h"
+#include "zd_mac.h"
+#include "zd_usb.h"
+#include "zd_util.h"
+
+static struct usb_device_id usb_ids[] = {
+ /* ZD1211 */
+ { USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
+ /* ZD1211B */
+ { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
+ {}
+};
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB driver for devices with the ZD1211 chip.");
+MODULE_AUTHOR("Ulrich Kunitz");
+MODULE_AUTHOR("Daniel Drake");
+MODULE_VERSION("1.0");
+MODULE_DEVICE_TABLE(usb, usb_ids);
+
+#define FW_ZD1211_PREFIX "zd1211/zd1211_"
+#define FW_ZD1211B_PREFIX "zd1211/zd1211b_"
+
+/* register address handling */
+
+#ifdef DEBUG
+static int check_addr(struct zd_usb *usb, zd_addr_t addr)
+{
+ u32 base = ZD_ADDR_BASE(addr);
+ u32 offset = ZD_OFFSET(addr);
+
+ if ((u32)addr & ADDR_ZERO_MASK)
+ goto invalid_address;
+ switch (base) {
+ case USB_BASE:
+ break;
+ case CR_BASE:
+ if (offset > CR_MAX_OFFSET) {
+ dev_dbg(zd_usb_dev(usb),
+ "CR offset %#010x larger than"
+ " CR_MAX_OFFSET %#10x\n",
+ offset, CR_MAX_OFFSET);
+ goto invalid_address;
+ }
+ if (offset & 1) {
+ dev_dbg(zd_usb_dev(usb),
+ "CR offset %#010x is not a multiple of 2\n",
+ offset);
+ goto invalid_address;
+ }
+ break;
+ case E2P_BASE:
+ if (offset > E2P_MAX_OFFSET) {
+ dev_dbg(zd_usb_dev(usb),
+ "E2P offset %#010x larger than"
+ " E2P_MAX_OFFSET %#010x\n",
+ offset, E2P_MAX_OFFSET);
+ goto invalid_address;
+ }
+ break;
+ case FW_BASE:
+ if (!usb->fw_base_offset) {
+ dev_dbg(zd_usb_dev(usb),
+ "ERROR: fw base offset has not been set\n");
+ return -EAGAIN;
+ }
+ if (offset > FW_MAX_OFFSET) {
+ dev_dbg(zd_usb_dev(usb),
+ "FW offset %#10x is larger than"
+ " FW_MAX_OFFSET %#010x\n",
+ offset, FW_MAX_OFFSET);
+ goto invalid_address;
+ }
+ break;
+ default:
+ dev_dbg(zd_usb_dev(usb),
+ "address has unsupported base %#010x\n", addr);
+ goto invalid_address;
+ }
+
+ return 0;
+invalid_address:
+ dev_dbg(zd_usb_dev(usb),
+ "ERROR: invalid address: %#010x\n", addr);
+ return -EINVAL;
+}
+#endif /* DEBUG */
+
+static u16 usb_addr(struct zd_usb *usb, zd_addr_t addr)
+{
+ u32 base;
+ u16 offset;
+
+ base = ZD_ADDR_BASE(addr);
+ offset = ZD_OFFSET(addr);
+
+ ZD_ASSERT(check_addr(usb, addr) == 0);
+
+ switch (base) {
+ case CR_BASE:
+ offset += CR_BASE_OFFSET;
+ break;
+ case E2P_BASE:
+ offset += E2P_BASE_OFFSET;
+ break;
+ case FW_BASE:
+ offset += usb->fw_base_offset;
+ break;
+ }
+
+ return offset;
+}
+
+/* USB device initialization */
+
+static int request_fw_file(
+ const struct firmware **fw, const char *name, struct device *device)
+{
+ int r;
+
+ dev_dbg_f(device, "fw name %s\n", name);
+
+ r = request_firmware(fw, name, device);
+ if (r)
+ dev_err(device,
+ "Could not load firmware file %s. Error number %d\n",
+ name, r);
+ return r;
+}
+
+static inline u16 get_bcdDevice(const struct usb_device *udev)
+{
+ return le16_to_cpu(udev->descriptor.bcdDevice);
+}
+
+enum upload_code_flags {
+ REBOOT = 1,
+};
+
+/* Ensures that MAX_TRANSFER_SIZE is even. */
+#define MAX_TRANSFER_SIZE (USB_MAX_TRANSFER_SIZE & ~1)
+
+static int upload_code(struct usb_device *udev,
+ const u8 *data, size_t size, u16 code_offset, int flags)
+{
+ u8 *p;
+ int r;
+
+ /* USB request blocks need "kmalloced" buffers.
+ */
+ p = kmalloc(MAX_TRANSFER_SIZE, GFP_KERNEL);
+ if (!p) {
+ dev_err(&udev->dev, "out of memory\n");
+ r = -ENOMEM;
+ goto error;
+ }
+
+ size &= ~1;
+ while (size > 0) {
+ size_t transfer_size = size <= MAX_TRANSFER_SIZE ?
+ size : MAX_TRANSFER_SIZE;
+
+ dev_dbg_f(&udev->dev, "transfer size %zu\n", transfer_size);
+
+ memcpy(p, data, transfer_size);
+ r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_FIRMWARE_DOWNLOAD,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ code_offset, 0, p, transfer_size, 1000 /* ms */);
+ if (r < 0) {
+ dev_err(&udev->dev,
+ "USB control request for firmware upload"
+ " failed. Error number %d\n", r);
+ goto error;
+ }
+ transfer_size = r & ~1;
+
+ size -= transfer_size;
+ data += transfer_size;
+ code_offset += transfer_size/sizeof(u16);
+ }
+
+ if (flags & REBOOT) {
+ u8 ret;
+
+ r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ USB_REQ_FIRMWARE_CONFIRM,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, 0, &ret, sizeof(ret), 5000 /* ms */);
+ if (r != sizeof(ret)) {
+ dev_err(&udev->dev,
+ "control request firmeware confirmation failed."
+ " Return value %d\n", r);
+ if (r >= 0)
+ r = -ENODEV;
+ goto error;
+ }
+ if (ret & 0x80) {
+ dev_err(&udev->dev,
+ "Internal error while downloading."
+ " Firmware confirm return value %#04x\n",
+ (unsigned int)ret);
+ r = -ENODEV;
+ goto error;
+ }
+ dev_dbg_f(&udev->dev, "firmware confirm return value %#04x\n",
+ (unsigned int)ret);
+ }
+
+ r = 0;
+error:
+ kfree(p);
+ return r;
+}
+
+static u16 get_word(const void *data, u16 offset)
+{
+ const __le16 *p = data;
+ return le16_to_cpu(p[offset]);
+}
+
+static char *get_fw_name(char *buffer, size_t size, u8 device_type,
+ const char* postfix)
+{
+ scnprintf(buffer, size, "%s%s",
+ device_type == DEVICE_ZD1211B ?
+ FW_ZD1211B_PREFIX : FW_ZD1211_PREFIX,
+ postfix);
+ return buffer;
+}
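As a concrete check of the naming scheme, get_fw_name() simply glues a per-device prefix onto the caller's postfix, so the firmware files requested by upload_firmware() below resolve to:

        /* ZD1211:  "ub" -> "zd1211/zd1211_ub",   "uphr" -> "zd1211/zd1211_uphr"
         * ZD1211B: "ub" -> "zd1211/zd1211b_ub",  "uphr" -> "zd1211/zd1211b_uphr"
         */
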
+
+static int upload_firmware(struct usb_device *udev, u8 device_type)
+{
+ int r;
+ u16 fw_bcdDevice;
+ u16 bcdDevice;
+ const struct firmware *ub_fw = NULL;
+ const struct firmware *uph_fw = NULL;
+ char fw_name[128];
+
+ bcdDevice = get_bcdDevice(udev);
+
+ r = request_fw_file(&ub_fw,
+ get_fw_name(fw_name, sizeof(fw_name), device_type, "ub"),
+ &udev->dev);
+ if (r)
+ goto error;
+
+ fw_bcdDevice = get_word(ub_fw->data, EEPROM_REGS_OFFSET);
+
+ /* FIXME: do we have any reason to perform the kludge that the vendor
+ * driver does when there is a version mismatch? (their driver uploads
+ * different firmwares and stuff)
+ */
+ if (fw_bcdDevice != bcdDevice) {
+ dev_info(&udev->dev,
+ "firmware device id %#06x and actual device id "
+ "%#06x differ, continuing anyway\n",
+ fw_bcdDevice, bcdDevice);
+ } else {
+ dev_dbg_f(&udev->dev,
+ "firmware device id %#06x is equal to the "
+ "actual device id\n", fw_bcdDevice);
+ }
+
+
+ r = request_fw_file(&uph_fw,
+ get_fw_name(fw_name, sizeof(fw_name), device_type, "uphr"),
+ &udev->dev);
+ if (r)
+ goto error;
+
+ r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START_OFFSET,
+ REBOOT);
+ if (r) {
+ dev_err(&udev->dev,
+ "Could not upload firmware code uph. Error number %d\n",
+ r);
+ }
+
+ /* FALL-THROUGH */
+error:
+ release_firmware(ub_fw);
+ release_firmware(uph_fw);
+ return r;
+}
+
+static void disable_read_regs_int(struct zd_usb *usb)
+{
+ struct zd_usb_interrupt *intr = &usb->intr;
+
+ spin_lock(&intr->lock);
+ intr->read_regs_enabled = 0;
+ spin_unlock(&intr->lock);
+}
+
+#define urb_dev(urb) (&(urb)->dev->dev)
+
+static inline void handle_regs_int(struct urb *urb)
+{
+ struct zd_usb *usb = urb->context;
+ struct zd_usb_interrupt *intr = &usb->intr;
+ int len;
+
+ ZD_ASSERT(in_interrupt());
+ spin_lock(&intr->lock);
+
+ if (intr->read_regs_enabled) {
+ intr->read_regs.length = len = urb->actual_length;
+
+ if (len > sizeof(intr->read_regs.buffer))
+ len = sizeof(intr->read_regs.buffer);
+ memcpy(intr->read_regs.buffer, urb->transfer_buffer, len);
+ intr->read_regs_enabled = 0;
+ complete(&intr->read_regs.completion);
+ goto out;
+ }
+
+ dev_dbg_f(urb_dev(urb), "regs interrupt ignored\n");
+out:
+ spin_unlock(&intr->lock);
+}
+
+static inline void handle_retry_failed_int(struct urb *urb)
+{
+ dev_dbg_f(urb_dev(urb), "retry failed interrupt\n");
+}
+
+
+static void int_urb_complete(struct urb *urb, struct pt_regs *pt_regs)
+{
+ int r;
+ struct usb_int_header *hdr;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ESHUTDOWN:
+ case -EINVAL:
+ case -ENODEV:
+ case -ENOENT:
+ case -ECONNRESET:
+ case -EPIPE:
+ goto kfree;
+ default:
+ goto resubmit;
+ }
+
+ if (urb->actual_length < sizeof(*hdr)) {
+ dev_dbg_f(urb_dev(urb), "error: urb %p too small\n", urb);
+ goto resubmit;
+ }
+
+ hdr = urb->transfer_buffer;
+ if (hdr->type != USB_INT_TYPE) {
+ dev_dbg_f(urb_dev(urb), "error: urb %p wrong type\n", urb);
+ goto resubmit;
+ }
+
+ switch (hdr->id) {
+ case USB_INT_ID_REGS:
+ handle_regs_int(urb);
+ break;
+ case USB_INT_ID_RETRY_FAILED:
+ handle_retry_failed_int(urb);
+ break;
+ default:
+ dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb,
+ (unsigned int)hdr->id);
+ goto resubmit;
+ }
+
+resubmit:
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ if (r) {
+ dev_dbg_f(urb_dev(urb), "resubmit urb %p\n", urb);
+ goto kfree;
+ }
+ return;
+kfree:
+ kfree(urb->transfer_buffer);
+}
+
+static inline int int_urb_interval(struct usb_device *udev)
+{
+ switch (udev->speed) {
+ case USB_SPEED_HIGH:
+ return 4;
+ case USB_SPEED_LOW:
+ return 10;
+ case USB_SPEED_FULL:
+ default:
+ return 1;
+ }
+}
+
+static inline int usb_int_enabled(struct zd_usb *usb)
+{
+ unsigned long flags;
+ struct zd_usb_interrupt *intr = &usb->intr;
+ struct urb *urb;
+
+ spin_lock_irqsave(&intr->lock, flags);
+ urb = intr->urb;
+ spin_unlock_irqrestore(&intr->lock, flags);
+ return urb != NULL;
+}
+
+int zd_usb_enable_int(struct zd_usb *usb)
+{
+ int r;
+ struct usb_device *udev;
+ struct zd_usb_interrupt *intr = &usb->intr;
+ void *transfer_buffer = NULL;
+ struct urb *urb;
+
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ urb = usb_alloc_urb(0, GFP_NOFS);
+ if (!urb) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ ZD_ASSERT(!irqs_disabled());
+ spin_lock_irq(&intr->lock);
+ if (intr->urb) {
+ spin_unlock_irq(&intr->lock);
+ r = 0;
+ goto error_free_urb;
+ }
+ intr->urb = urb;
+ spin_unlock_irq(&intr->lock);
+
+ /* TODO: make it a DMA buffer */
+ r = -ENOMEM;
+ transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_NOFS);
+ if (!transfer_buffer) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "couldn't allocate transfer_buffer\n");
+ goto error_set_urb_null;
+ }
+
+ udev = zd_usb_to_usbdev(usb);
+ usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
+ transfer_buffer, USB_MAX_EP_INT_BUFFER,
+ int_urb_complete, usb,
+ intr->interval);
+
+ dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
+ r = usb_submit_urb(urb, GFP_NOFS);
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "Couldn't submit urb. Error number %d\n", r);
+ goto error;
+ }
+
+ return 0;
+error:
+ kfree(transfer_buffer);
+error_set_urb_null:
+ spin_lock_irq(&intr->lock);
+ intr->urb = NULL;
+ spin_unlock_irq(&intr->lock);
+error_free_urb:
+ usb_free_urb(urb);
+out:
+ return r;
+}
+
+void zd_usb_disable_int(struct zd_usb *usb)
+{
+ unsigned long flags;
+ struct zd_usb_interrupt *intr = &usb->intr;
+ struct urb *urb;
+
+ spin_lock_irqsave(&intr->lock, flags);
+ urb = intr->urb;
+ if (!urb) {
+ spin_unlock_irqrestore(&intr->lock, flags);
+ return;
+ }
+ intr->urb = NULL;
+ spin_unlock_irqrestore(&intr->lock, flags);
+
+ usb_kill_urb(urb);
+ dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
+ usb_free_urb(urb);
+}
+
+static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
+ unsigned int length)
+{
+ int i;
+ struct zd_mac *mac = zd_usb_to_mac(usb);
+ const struct rx_length_info *length_info;
+
+ if (length < sizeof(struct rx_length_info)) {
+ /* It's not a complete packet anyhow. */
+ return;
+ }
+ length_info = (struct rx_length_info *)
+ (buffer + length - sizeof(struct rx_length_info));
+
+ /* It might be that three frames are merged into a single URB
+ * transaction. We have to check for the length info tag.
+ *
+ * While testing we discovered that length_info might be unaligned,
+ * because if USB transactions are merged, the last packet will not
+ * be padded. Unaligned access might also happen if the length_info
+ * structure is not present.
+ */
+ if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG))
+ {
+ unsigned int l, k, n;
+ for (i = 0, l = 0;; i++) {
+ k = le16_to_cpu(get_unaligned(&length_info->length[i]));
+ n = l+k;
+ if (n > length)
+ return;
+ zd_mac_rx(mac, buffer+l, k);
+ if (i >= 2)
+ return;
+ l = (n+3) & ~3;
+ }
+ } else {
+ zd_mac_rx(mac, buffer, length);
+ }
+}
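To make the unpacking loop above concrete, here is one worked layout with illustrative frame lengths of 70, 24 and 36 bytes; each subsequent frame starts at the next 4-byte boundary and the length-info trailer sits at the very end of the transfer:

        /* length[] = { 70, 24, 36 }
         *
         * offset  0: frame 0, 70 bytes   next l = ( 70 + 3) & ~3 = 72
         * offset 72: frame 1, 24 bytes   next l = ( 96 + 3) & ~3 = 96
         * offset 96: frame 2, 36 bytes   loop returns after the third frame
         *
         * The rx_length_info trailer is read from
         * buffer + length - sizeof(struct rx_length_info).
         */
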
+
+static void rx_urb_complete(struct urb *urb, struct pt_regs *pt_regs)
+{
+ struct zd_usb *usb;
+ struct zd_usb_rx *rx;
+ const u8 *buffer;
+ unsigned int length;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ESHUTDOWN:
+ case -EINVAL:
+ case -ENODEV:
+ case -ENOENT:
+ case -ECONNRESET:
+ case -EPIPE:
+ return;
+ default:
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ goto resubmit;
+ }
+
+ buffer = urb->transfer_buffer;
+ length = urb->actual_length;
+ usb = urb->context;
+ rx = &usb->rx;
+
+ if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
+ /* If there is an old first fragment, we don't care. */
+ dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
+ ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment));
+ spin_lock(&rx->lock);
+ memcpy(rx->fragment, buffer, length);
+ rx->fragment_length = length;
+ spin_unlock(&rx->lock);
+ goto resubmit;
+ }
+
+ spin_lock(&rx->lock);
+ if (rx->fragment_length > 0) {
+ /* We are on a second fragment, we believe */
+ ZD_ASSERT(length + rx->fragment_length <=
+ ARRAY_SIZE(rx->fragment));
+ dev_dbg_f(urb_dev(urb), "*** second fragment ***\n");
+ memcpy(rx->fragment+rx->fragment_length, buffer, length);
+ handle_rx_packet(usb, rx->fragment,
+ rx->fragment_length + length);
+ rx->fragment_length = 0;
+ spin_unlock(&rx->lock);
+ } else {
+ spin_unlock(&rx->lock);
+ handle_rx_packet(usb, buffer, length);
+ }
+
+resubmit:
+ usb_submit_urb(urb, GFP_ATOMIC);
+}
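A quick arithmetic check of the first-fragment test above, assuming a high-speed device (rx->usb_packet_size == 512):

        /* length % 512 > 512 - 4
         * holds only when length % 512 is 509, 510 or 511, i.e. when the
         * transfer ends within three bytes of a completely filled USB packet.
         * The rest of the frame then arrives with the next bulk URB and is
         * reassembled from rx->fragment in the second-fragment path above.
         */
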
+
+struct urb *alloc_urb(struct zd_usb *usb)
+{
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
+ struct urb *urb;
+ void *buffer;
+
+ urb = usb_alloc_urb(0, GFP_NOFS);
+ if (!urb)
+ return NULL;
+ buffer = usb_buffer_alloc(udev, USB_MAX_RX_SIZE, GFP_NOFS,
+ &urb->transfer_dma);
+ if (!buffer) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN),
+ buffer, USB_MAX_RX_SIZE,
+ rx_urb_complete, usb);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ return urb;
+}
+
+void free_urb(struct urb *urb)
+{
+ if (!urb)
+ return;
+ usb_buffer_free(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+}
+
+int zd_usb_enable_rx(struct zd_usb *usb)
+{
+ int i, r;
+ struct zd_usb_rx *rx = &usb->rx;
+ struct urb **urbs;
+
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ r = -ENOMEM;
+ urbs = kcalloc(URBS_COUNT, sizeof(struct urb *), GFP_NOFS);
+ if (!urbs)
+ goto error;
+ for (i = 0; i < URBS_COUNT; i++) {
+ urbs[i] = alloc_urb(usb);
+ if (!urbs[i])
+ goto error;
+ }
+
+ ZD_ASSERT(!irqs_disabled());
+ spin_lock_irq(&rx->lock);
+ if (rx->urbs) {
+ spin_unlock_irq(&rx->lock);
+ r = 0;
+ goto error;
+ }
+ rx->urbs = urbs;
+ rx->urbs_count = URBS_COUNT;
+ spin_unlock_irq(&rx->lock);
+
+ for (i = 0; i < URBS_COUNT; i++) {
+ r = usb_submit_urb(urbs[i], GFP_NOFS);
+ if (r)
+ goto error_submit;
+ }
+
+ return 0;
+error_submit:
+ for (i = 0; i < URBS_COUNT; i++) {
+ usb_kill_urb(urbs[i]);
+ }
+ spin_lock_irq(&rx->lock);
+ rx->urbs = NULL;
+ rx->urbs_count = 0;
+ spin_unlock_irq(&rx->lock);
+error:
+ if (urbs) {
+ for (i = 0; i < URBS_COUNT; i++)
+ free_urb(urbs[i]);
+ }
+ return r;
+}
+
+void zd_usb_disable_rx(struct zd_usb *usb)
+{
+ int i;
+ unsigned long flags;
+ struct urb **urbs;
+ unsigned int count;
+ struct zd_usb_rx *rx = &usb->rx;
+
+ spin_lock_irqsave(&rx->lock, flags);
+ urbs = rx->urbs;
+ count = rx->urbs_count;
+ spin_unlock_irqrestore(&rx->lock, flags);
+ if (!urbs)
+ return;
+
+ for (i = 0; i < count; i++) {
+ usb_kill_urb(urbs[i]);
+ free_urb(urbs[i]);
+ }
+ kfree(urbs);
+
+ spin_lock_irqsave(&rx->lock, flags);
+ rx->urbs = NULL;
+ rx->urbs_count = 0;
+ spin_unlock_irqrestore(&rx->lock, flags);
+}
+
+static void tx_urb_complete(struct urb *urb, struct pt_regs *pt_regs)
+{
+ int r;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ESHUTDOWN:
+ case -EINVAL:
+ case -ENODEV:
+ case -ENOENT:
+ case -ECONNRESET:
+ case -EPIPE:
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ break;
+ default:
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ goto resubmit;
+ }
+free_urb:
+ usb_buffer_free(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+ return;
+resubmit:
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ if (r) {
+ dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
+ goto free_urb;
+ }
+}
+
+/* Puts the frame on the USB endpoint. It doesn't wait for
+ * completion. The frame must contain the control set.
+ */
+int zd_usb_tx(struct zd_usb *usb, const u8 *frame, unsigned int length)
+{
+ int r;
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
+ struct urb *urb;
+ void *buffer;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ buffer = usb_buffer_alloc(zd_usb_to_usbdev(usb), length, GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!buffer) {
+ r = -ENOMEM;
+ goto error_free_urb;
+ }
+ memcpy(buffer, frame, length);
+
+ usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
+ buffer, length, tx_urb_complete, NULL);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ if (r)
+ goto error;
+ return 0;
+error:
+ usb_buffer_free(zd_usb_to_usbdev(usb), length, buffer,
+ urb->transfer_dma);
+error_free_urb:
+ usb_free_urb(urb);
+out:
+ return r;
+}
+
+static inline void init_usb_interrupt(struct zd_usb *usb)
+{
+ struct zd_usb_interrupt *intr = &usb->intr;
+
+ spin_lock_init(&intr->lock);
+ intr->interval = int_urb_interval(zd_usb_to_usbdev(usb));
+ init_completion(&intr->read_regs.completion);
+ intr->read_regs.cr_int_addr = cpu_to_le16(usb_addr(usb, CR_INTERRUPT));
+}
+
+static inline void init_usb_rx(struct zd_usb *usb)
+{
+ struct zd_usb_rx *rx = &usb->rx;
+ spin_lock_init(&rx->lock);
+ if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
+ rx->usb_packet_size = 512;
+ } else {
+ rx->usb_packet_size = 64;
+ }
+ ZD_ASSERT(rx->fragment_length == 0);
+}
+
+static inline void init_usb_tx(struct zd_usb *usb)
+{
+ /* FIXME: at this point we will allocate a fixed number of urbs for
+ * use in a cyclic scheme */
+}
+
+void zd_usb_init(struct zd_usb *usb, struct net_device *netdev,
+ struct usb_interface *intf)
+{
+ memset(usb, 0, sizeof(*usb));
+ usb->intf = usb_get_intf(intf);
+ usb_set_intfdata(usb->intf, netdev);
+ init_usb_interrupt(usb);
+ init_usb_tx(usb);
+ init_usb_rx(usb);
+}
+
+int zd_usb_init_hw(struct zd_usb *usb)
+{
+ int r;
+ struct zd_chip *chip = zd_usb_to_chip(usb);
+
+ ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ r = zd_ioread16_locked(chip, &usb->fw_base_offset,
+ USB_REG((u16)FW_BASE_ADDR_OFFSET));
+ if (r)
+ return r;
+ dev_dbg_f(zd_usb_dev(usb), "fw_base_offset: %#06hx\n",
+ usb->fw_base_offset);
+
+ return 0;
+}
+
+void zd_usb_clear(struct zd_usb *usb)
+{
+ usb_set_intfdata(usb->intf, NULL);
+ usb_put_intf(usb->intf);
+ memset(usb, 0, sizeof(*usb));
+ /* FIXME: usb_interrupt, usb_tx, usb_rx? */
+}
+
+static const char *speed(enum usb_device_speed speed)
+{
+ switch (speed) {
+ case USB_SPEED_LOW:
+ return "low";
+ case USB_SPEED_FULL:
+ return "full";
+ case USB_SPEED_HIGH:
+ return "high";
+ default:
+ return "unknown speed";
+ }
+}
+
+static int scnprint_id(struct usb_device *udev, char *buffer, size_t size)
+{
+ return scnprintf(buffer, size, "%04hx:%04hx v%04hx %s",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct),
+ get_bcdDevice(udev),
+ speed(udev->speed));
+}
+
+int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size)
+{
+ struct usb_device *udev = interface_to_usbdev(usb->intf);
+ return scnprint_id(udev, buffer, size);
+}
+
+#ifdef DEBUG
+static void print_id(struct usb_device *udev)
+{
+ char buffer[40];
+
+ scnprint_id(udev, buffer, sizeof(buffer));
+ buffer[sizeof(buffer)-1] = 0;
+ dev_dbg_f(&udev->dev, "%s\n", buffer);
+}
+#else
+#define print_id(udev) do { } while (0)
+#endif
+
+static int probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ int r;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct net_device *netdev = NULL;
+
+ print_id(udev);
+
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ break;
+ default:
+ dev_dbg_f(&intf->dev, "Unknown USB speed\n");
+ r = -ENODEV;
+ goto error;
+ }
+
+ netdev = zd_netdev_alloc(intf);
+ if (netdev == NULL) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ r = upload_firmware(udev, id->driver_info);
+ if (r) {
+ dev_err(&intf->dev,
+ "couldn't load firmware. Error number %d\n", r);
+ goto error;
+ }
+
+ r = usb_reset_configuration(udev);
+ if (r) {
+ dev_dbg_f(&intf->dev,
+ "couldn't reset configuration. Error number %d\n", r);
+ goto error;
+ }
+
+ /* At this point the interrupt endpoint is not generally enabled; we
+ * save the USB bandwidth until the network device is opened. Note,
+ * however, that initializing the MAC requires the interrupts to be
+ * enabled temporarily.
+ */
+ r = zd_mac_init_hw(zd_netdev_mac(netdev), id->driver_info);
+ if (r) {
+ dev_dbg_f(&intf->dev,
+ "couldn't initialize mac. Error number %d\n", r);
+ goto error;
+ }
+
+ r = register_netdev(netdev);
+ if (r) {
+ dev_dbg_f(&intf->dev,
+ "couldn't register netdev. Error number %d\n", r);
+ goto error;
+ }
+
+ dev_dbg_f(&intf->dev, "successful\n");
+ dev_info(&intf->dev, "%s\n", netdev->name);
+ return 0;
+error:
+ usb_reset_device(interface_to_usbdev(intf));
+ zd_netdev_free(netdev);
+ return r;
+}
+
+static void disconnect(struct usb_interface *intf)
+{
+ struct net_device *netdev = zd_intf_to_netdev(intf);
+ struct zd_mac *mac = zd_netdev_mac(netdev);
+ struct zd_usb *usb = &mac->chip.usb;
+
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ zd_netdev_disconnect(netdev);
+
+ /* Just in case something has gone wrong! */
+ zd_usb_disable_rx(usb);
+ zd_usb_disable_int(usb);
+
+ /* If the disconnect was caused by removing the driver module, the
+ * reset allows the driver to be reloaded. Without this reset, the
+ * firmware upload in the probe function would fail when the driver
+ * is loaded again.
+ */
+ usb_reset_device(interface_to_usbdev(intf));
+
+ /* If somebody still waits on this lock now, this is an error. */
+ zd_netdev_free(netdev);
+ dev_dbg(&intf->dev, "disconnected\n");
+}
+
+static struct usb_driver driver = {
+ .name = "zd1211rw",
+ .id_table = usb_ids,
+ .probe = probe,
+ .disconnect = disconnect,
+};
+
+static int __init usb_init(void)
+{
+ int r;
+
+ pr_debug("usb_init()\n");
+
+ r = usb_register(&driver);
+ if (r) {
+ printk(KERN_ERR "usb_register() failed. Error number %d\n", r);
+ return r;
+ }
+
+ pr_debug("zd1211rw initialized\n");
+ return 0;
+}
+
+static void __exit usb_exit(void)
+{
+ pr_debug("usb_exit()\n");
+ usb_deregister(&driver);
+}
+
+module_init(usb_init);
+module_exit(usb_exit);
+
+static int usb_int_regs_length(unsigned int count)
+{
+ return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
+}
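+
+/* For example (illustrative only): a reply carrying two registers occupies
+ * usb_int_regs_length(2) = sizeof(struct usb_int_regs) +
+ * 2 * sizeof(struct reg_data) = 2 + 8 = 10 bytes, i.e. the two-byte
+ * interrupt header followed by one addr/value pair per register.
+ */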
+
+static void prepare_read_regs_int(struct zd_usb *usb)
+{
+ struct zd_usb_interrupt *intr = &usb->intr;
+
+ spin_lock(&intr->lock);
+ intr->read_regs_enabled = 1;
+ INIT_COMPLETION(intr->read_regs.completion);
+ spin_unlock(&intr->lock);
+}
+
+static int get_results(struct zd_usb *usb, u16 *values,
+ struct usb_req_read_regs *req, unsigned int count)
+{
+ int r;
+ int i;
+ struct zd_usb_interrupt *intr = &usb->intr;
+ struct read_regs_int *rr = &intr->read_regs;
+ struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer;
+
+ spin_lock(&intr->lock);
+
+ r = -EIO;
+ /* The returned block sometimes seems to be larger than expected.
+ * The results, however, appear to be correct.
+ */
+ if (rr->length < usb_int_regs_length(count)) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: actual length %d less than expected %d\n",
+ rr->length, usb_int_regs_length(count));
+ goto error_unlock;
+ }
+ if (rr->length > sizeof(rr->buffer)) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: actual length %d exceeds buffer size %zu\n",
+ rr->length, sizeof(rr->buffer));
+ goto error_unlock;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct reg_data *rd = &regs->regs[i];
+ if (rd->addr != req->addr[i]) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "rd[%d] addr %#06hx expected %#06hx\n", i,
+ le16_to_cpu(rd->addr),
+ le16_to_cpu(req->addr[i]));
+ goto error_unlock;
+ }
+ values[i] = le16_to_cpu(rd->value);
+ }
+
+ r = 0;
+error_unlock:
+ spin_unlock(&intr->lock);
+ return r;
+}
+
+int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
+ const zd_addr_t *addresses, unsigned int count)
+{
+ int r;
+ int i, req_len, actual_req_len;
+ struct usb_device *udev;
+ struct usb_req_read_regs *req = NULL;
+ unsigned long timeout;
+
+ if (count < 1) {
+ dev_dbg_f(zd_usb_dev(usb), "error: count is zero\n");
+ return -EINVAL;
+ }
+ if (count > USB_MAX_IOREAD16_COUNT) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: count %u exceeds possible max %u\n",
+ count, USB_MAX_IOREAD16_COUNT);
+ return -EINVAL;
+ }
+ if (in_atomic()) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: io in atomic context not supported\n");
+ return -EWOULDBLOCK;
+ }
+ if (!usb_int_enabled(usb)) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: usb interrupt not enabled\n");
+ return -EWOULDBLOCK;
+ }
+
+ req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16);
+ req = kmalloc(req_len, GFP_NOFS);
+ if (!req)
+ return -ENOMEM;
+ req->id = cpu_to_le16(USB_REQ_READ_REGS);
+ for (i = 0; i < count; i++)
+ req->addr[i] = cpu_to_le16(usb_addr(usb, addresses[i]));
+
+ udev = zd_usb_to_usbdev(usb);
+ prepare_read_regs_int(usb);
+ r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
+ req, req_len, &actual_req_len, 1000 /* ms */);
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error in usb_bulk_msg(). Error number %d\n", r);
+ goto error;
+ }
+ if (req_len != actual_req_len) {
+ dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n"
+ " req_len %d != actual_req_len %d\n",
+ req_len, actual_req_len);
+ r = -EIO;
+ goto error;
+ }
+
+ timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ disable_read_regs_int(usb);
+ dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
+ r = -ETIMEDOUT;
+ goto error;
+ }
+
+ r = get_results(usb, values, req, count);
+error:
+ kfree(req);
+ return r;
+}
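+
+/* A minimal usage sketch (hypothetical caller, not part of this driver):
+ *
+ *	static int example_read(struct zd_usb *usb, zd_addr_t second_addr)
+ *	{
+ *		const zd_addr_t addrs[2] = { CR203, second_addr };
+ *		u16 values[2];
+ *
+ *		return zd_usb_ioread16v(usb, values, addrs, 2);
+ *	}
+ *
+ * On success values[0] and values[1] hold the register contents in CPU order.
+ */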
+
+int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+ unsigned int count)
+{
+ int r;
+ struct usb_device *udev;
+ struct usb_req_write_regs *req = NULL;
+ int i, req_len, actual_req_len;
+
+ if (count == 0)
+ return 0;
+ if (count > USB_MAX_IOWRITE16_COUNT) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: count %u exceeds possible max %u\n",
+ count, USB_MAX_IOWRITE16_COUNT);
+ return -EINVAL;
+ }
+ if (in_atomic()) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: io in atomic context not supported\n");
+ return -EWOULDBLOCK;
+ }
+
+ req_len = sizeof(struct usb_req_write_regs) +
+ count * sizeof(struct reg_data);
+ req = kmalloc(req_len, GFP_NOFS);
+ if (!req)
+ return -ENOMEM;
+
+ req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
+ for (i = 0; i < count; i++) {
+ struct reg_data *rw = &req->reg_writes[i];
+ rw->addr = cpu_to_le16(usb_addr(usb, ioreqs[i].addr));
+ rw->value = cpu_to_le16(ioreqs[i].value);
+ }
+
+ udev = zd_usb_to_usbdev(usb);
+ r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
+ req, req_len, &actual_req_len, 1000 /* ms */);
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error in usb_bulk_msg(). Error number %d\n", r);
+ goto error;
+ }
+ if (req_len != actual_req_len) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error in usb_bulk_msg()"
+ " req_len %d != actual_req_len %d\n",
+ req_len, actual_req_len);
+ r = -EIO;
+ goto error;
+ }
+
+ /* FALL-THROUGH with r == 0 */
+error:
+ kfree(req);
+ return r;
+}
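+
+/* A minimal usage sketch (hypothetical caller): one struct zd_ioreq16 entry
+ * per register write, with CR203 used purely as an example address.
+ *
+ *	static int example_write(struct zd_usb *usb, u16 value)
+ *	{
+ *		struct zd_ioreq16 reqs[] = {
+ *			{ .addr = CR203, .value = value },
+ *		};
+ *
+ *		return zd_usb_iowrite16v(usb, reqs, ARRAY_SIZE(reqs));
+ *	}
+ */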
+
+int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
+{
+ int r;
+ struct usb_device *udev;
+ struct usb_req_rfwrite *req = NULL;
+ int i, req_len, actual_req_len;
+ u16 bit_value_template;
+
+ if (in_atomic()) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: io in atomic context not supported\n");
+ return -EWOULDBLOCK;
+ }
+ if (bits < USB_MIN_RFWRITE_BIT_COUNT) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: bits %d are smaller than"
+ " USB_MIN_RFWRITE_BIT_COUNT %d\n",
+ bits, USB_MIN_RFWRITE_BIT_COUNT);
+ return -EINVAL;
+ }
+ if (bits > USB_MAX_RFWRITE_BIT_COUNT) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: bits %d exceed USB_MAX_RFWRITE_BIT_COUNT %d\n",
+ bits, USB_MAX_RFWRITE_BIT_COUNT);
+ return -EINVAL;
+ }
+#ifdef DEBUG
+ if (value & (~0UL << bits)) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error: value %#09x has bits >= %d set\n",
+ value, bits);
+ return -EINVAL;
+ }
+#endif /* DEBUG */
+
+ dev_dbg_f(zd_usb_dev(usb), "value %#09x bits %d\n", value, bits);
+
+ r = zd_usb_ioread16(usb, &bit_value_template, CR203);
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error %d: Couldn't read CR203\n", r);
+ goto out;
+ }
+ bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
+
+ req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16);
+ req = kmalloc(req_len, GFP_NOFS);
+ if (!req)
+ return -ENOMEM;
+
+ req->id = cpu_to_le16(USB_REQ_WRITE_RF);
+ /* value 1 selects the 3683a variant, but the ZyDAS vendor driver doesn't use it */
+ req->value = cpu_to_le16(2);
+ req->bits = cpu_to_le16(bits);
+
+ for (i = 0; i < bits; i++) {
+ u16 bv = bit_value_template;
+ if (value & (1 << (bits-1-i)))
+ bv |= RF_DATA;
+ req->bit_values[i] = cpu_to_le16(bv);
+ }
+
+ udev = zd_usb_to_usbdev(usb);
+ r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
+ req, req_len, &actual_req_len, 1000 /* ms */);
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error in usb_bulk_msg(). Error number %d\n", r);
+ goto out;
+ }
+ if (req_len != actual_req_len) {
+ dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()"
+ " req_len %d != actual_req_len %d\n",
+ req_len, actual_req_len);
+ r = -EIO;
+ goto out;
+ }
+
+ /* FALL-THROUGH with r == 0 */
+out:
+ kfree(req);
+ return r;
+}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
new file mode 100644
index 000000000000..d6420283bd5a
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -0,0 +1,240 @@
+/* zd_usb.h: Header for USB interface implemented by ZD1211 chip
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ZD_USB_H
+#define _ZD_USB_H
+
+#include <linux/completion.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+
+#include "zd_def.h"
+#include "zd_types.h"
+
+enum devicetype {
+ DEVICE_ZD1211 = 0,
+ DEVICE_ZD1211B = 1,
+};
+
+enum endpoints {
+ EP_CTRL = 0,
+ EP_DATA_OUT = 1,
+ EP_DATA_IN = 2,
+ EP_INT_IN = 3,
+ EP_REGS_OUT = 4,
+};
+
+enum {
+ USB_MAX_TRANSFER_SIZE = 4096, /* bytes */
+ /* FIXME: The original driver uses this value. We have to check
+ * whether USB_MAX_TRANSFER_SIZE is sufficient and whether this value
+ * is only needed when one combined frame is split over two USB
+ * transactions.
+ */
+ USB_MAX_RX_SIZE = 4800, /* bytes */
+ USB_MAX_IOWRITE16_COUNT = 15,
+ USB_MAX_IOWRITE32_COUNT = USB_MAX_IOWRITE16_COUNT/2,
+ USB_MAX_IOREAD16_COUNT = 15,
+ USB_MAX_IOREAD32_COUNT = USB_MAX_IOREAD16_COUNT/2,
+ USB_MIN_RFWRITE_BIT_COUNT = 16,
+ USB_MAX_RFWRITE_BIT_COUNT = 28,
+ USB_MAX_EP_INT_BUFFER = 64,
+ USB_ZD1211B_BCD_DEVICE = 0x4810,
+};
+
+enum control_requests {
+ USB_REQ_WRITE_REGS = 0x21,
+ USB_REQ_READ_REGS = 0x22,
+ USB_REQ_WRITE_RF = 0x23,
+ USB_REQ_PROG_FLASH = 0x24,
+ USB_REQ_EEPROM_START = 0x0128, /* ? request is a byte */
+ USB_REQ_EEPROM_MID = 0x28,
+ USB_REQ_EEPROM_END = 0x0228, /* ? request is a byte */
+ USB_REQ_FIRMWARE_DOWNLOAD = 0x30,
+ USB_REQ_FIRMWARE_CONFIRM = 0x31,
+ USB_REQ_FIRMWARE_READ_DATA = 0x32,
+};
+
+struct usb_req_read_regs {
+ __le16 id;
+ __le16 addr[0];
+} __attribute__((packed));
+
+struct reg_data {
+ __le16 addr;
+ __le16 value;
+} __attribute__((packed));
+
+struct usb_req_write_regs {
+ __le16 id;
+ struct reg_data reg_writes[0];
+} __attribute__((packed));
+
+enum {
+ RF_IF_LE = 0x02,
+ RF_CLK = 0x04,
+ RF_DATA = 0x08,
+};
+
+struct usb_req_rfwrite {
+ __le16 id;
+ __le16 value;
+ /* 1: 3683a */
+ /* 2: other (default) */
+ __le16 bits;
+ /* RF2595: 24 */
+ __le16 bit_values[0];
+ /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
+} __attribute__((packed));
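+
+/* Shortened worked example (real writes use at least
+ * USB_MIN_RFWRITE_BIT_COUNT bits): for value 0b101 and bits = 3,
+ * zd_usb_rfwrite() serializes the value MSB first, so bit_values[] becomes
+ *	{ template | RF_DATA, template, template | RF_DATA }
+ * where template is CR203 with RF_IF_LE, RF_CLK and RF_DATA cleared.
+ */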
+
+/* USB interrupt */
+
+enum usb_int_id {
+ USB_INT_TYPE = 0x01,
+ USB_INT_ID_REGS = 0x90,
+ USB_INT_ID_RETRY_FAILED = 0xa0,
+};
+
+enum usb_int_flags {
+ USB_INT_READ_REGS_EN = 0x01,
+};
+
+struct usb_int_header {
+ u8 type; /* must always be 1 */
+ u8 id;
+} __attribute__((packed));
+
+struct usb_int_regs {
+ struct usb_int_header hdr;
+ struct reg_data regs[0];
+} __attribute__((packed));
+
+struct usb_int_retry_fail {
+ struct usb_int_header hdr;
+ u8 new_rate;
+ u8 _dummy;
+ u8 addr[ETH_ALEN];
+ u8 ibss_wakeup_dest;
+} __attribute__((packed));
+
+struct read_regs_int {
+ struct completion completion;
+ /* Stores the USB interrupt structure; before the request is sent it
+ * holds the USB address of the first requested register.
+ */
+ u8 buffer[USB_MAX_EP_INT_BUFFER];
+ int length;
+ __le16 cr_int_addr;
+};
+
+struct zd_ioreq16 {
+ zd_addr_t addr;
+ u16 value;
+};
+
+struct zd_ioreq32 {
+ zd_addr_t addr;
+ u32 value;
+};
+
+struct zd_usb_interrupt {
+ struct read_regs_int read_regs;
+ spinlock_t lock;
+ struct urb *urb;
+ int interval;
+ u8 read_regs_enabled:1;
+};
+
+static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr)
+{
+ return (struct usb_int_regs *)intr->read_regs.buffer;
+}
+
+#define URBS_COUNT 5
+
+struct zd_usb_rx {
+ spinlock_t lock;
+ u8 fragment[2*USB_MAX_RX_SIZE];
+ unsigned int fragment_length;
+ unsigned int usb_packet_size;
+ struct urb **urbs;
+ int urbs_count;
+};
+
+struct zd_usb_tx {
+ spinlock_t lock;
+};
+
+/* Contains the USB parts. The structure doesn't require a lock, because intf
+ * and fw_base_offset will not be changed after initialization.
+ */
+struct zd_usb {
+ struct zd_usb_interrupt intr;
+ struct zd_usb_rx rx;
+ struct zd_usb_tx tx;
+ struct usb_interface *intf;
+ u16 fw_base_offset;
+};
+
+#define zd_usb_dev(usb) (&usb->intf->dev)
+
+static inline struct usb_device *zd_usb_to_usbdev(struct zd_usb *usb)
+{
+ return interface_to_usbdev(usb->intf);
+}
+
+static inline struct net_device *zd_intf_to_netdev(struct usb_interface *intf)
+{
+ return usb_get_intfdata(intf);
+}
+
+static inline struct net_device *zd_usb_to_netdev(struct zd_usb *usb)
+{
+ return zd_intf_to_netdev(usb->intf);
+}
+
+void zd_usb_init(struct zd_usb *usb, struct net_device *netdev,
+ struct usb_interface *intf);
+int zd_usb_init_hw(struct zd_usb *usb);
+void zd_usb_clear(struct zd_usb *usb);
+
+int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size);
+
+int zd_usb_enable_int(struct zd_usb *usb);
+void zd_usb_disable_int(struct zd_usb *usb);
+
+int zd_usb_enable_rx(struct zd_usb *usb);
+void zd_usb_disable_rx(struct zd_usb *usb);
+
+int zd_usb_tx(struct zd_usb *usb, const u8 *frame, unsigned int length);
+
+int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
+ const zd_addr_t *addresses, unsigned int count);
+
+static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
+ const zd_addr_t addr)
+{
+ return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
+}
+
+int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+ unsigned int count);
+
+int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits);
+
+#endif /* _ZD_USB_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_util.c b/drivers/net/wireless/zd1211rw/zd_util.c
new file mode 100644
index 000000000000..d20036c15d11
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_util.c
@@ -0,0 +1,82 @@
+/* zd_util.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Utility functions
+ */
+
+#include "zd_def.h"
+#include "zd_util.h"
+
+#ifdef DEBUG
+static char hex(u8 v)
+{
+ v &= 0xf;
+ return (v < 10 ? '0' : 'a' - 10) + v;
+}
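+
+/* For example, hex(0x4) yields '0' + 4 = '4' and hex(0xb) yields
+ * ('a' - 10) + 11 = 'b'.
+ */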
+
+static char hex_print(u8 c)
+{
+ return (0x20 <= c && c < 0x7f) ? c : '.';
+}
+
+static void dump_line(const u8 *bytes, size_t size)
+{
+ char c;
+ size_t i;
+
+ size = size <= 8 ? size : 8;
+ printk(KERN_DEBUG "zd1211 %p ", bytes);
+ for (i = 0; i < 8; i++) {
+ switch (i) {
+ case 1:
+ case 5:
+ c = '.';
+ break;
+ case 3:
+ c = ':';
+ break;
+ default:
+ c = ' ';
+ }
+ if (i < size) {
+ printk("%c%c%c", hex(bytes[i] >> 4), hex(bytes[i]), c);
+ } else {
+ printk(" %c", c);
+ }
+ }
+
+ for (i = 0; i < size; i++)
+ printk("%c", hex_print(bytes[i]));
+ printk("\n");
+}
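+
+/* Illustrative output for the eight bytes "ABCDEFGH":
+ *	zd1211 <pointer> 41 42.43 44:45 46.47 48 ABCDEFGH
+ */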
+
+void zd_hexdump(const void *bytes, size_t size)
+{
+ size_t i = 0;
+
+ do {
+ dump_line((u8 *)bytes + i, size-i);
+ i += 8;
+ } while (i < size);
+}
+#endif /* DEBUG */
+
+void *zd_tail(const void *buffer, size_t buffer_size, size_t tail_size)
+{
+ if (buffer_size < tail_size)
+ return NULL;
+ return (u8 *)buffer + (buffer_size - tail_size);
+}
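+
+/* For example, zd_tail(buf, 10, 4) returns buf + 6, a pointer to the last
+ * four bytes; it returns NULL if tail_size exceeds buffer_size.
+ */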
diff --git a/drivers/net/wireless/zd1211rw/zd_util.h b/drivers/net/wireless/zd1211rw/zd_util.h
new file mode 100644
index 000000000000..ce26f7adea92
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_util.h
@@ -0,0 +1,29 @@
+/* zd_util.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ZD_UTIL_H
+#define _ZD_UTIL_H
+
+void *zd_tail(const void *buffer, size_t buffer_size, size_t tail_size);
+
+#ifdef DEBUG
+void zd_hexdump(const void *bytes, size_t size);
+#else
+#define zd_hexdump(bytes, size)
+#endif /* DEBUG */
+
+#endif /* _ZD_UTIL_H */
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index bbbf7e274a2a..8459a18254a4 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -19,37 +19,13 @@
Support and updates available at
http://www.scyld.com/network/yellowfin.html
+ [link no longer provides useful info -jgarzik]
-
- Linux kernel changelog:
- -----------------------
-
- LK1.1.1 (jgarzik): Port to 2.4 kernel
-
- LK1.1.2 (jgarzik):
- * Merge in becker version 1.05
-
- LK1.1.3 (jgarzik):
- * Various cleanups
- * Update yellowfin_timer to correctly calculate duplex.
- (suggested by Manfred Spraul)
-
- LK1.1.4 (val@nmt.edu):
- * Fix three endian-ness bugs
- * Support dual function SYM53C885E ethernet chip
-
- LK1.1.5 (val@nmt.edu):
- * Fix forced full-duplex bug I introduced
-
- LK1.1.6 (val@nmt.edu):
- * Only print warning on truly "oversized" packets
- * Fix theoretical bug on gigabit cards - return to 1.1.3 behavior
-
*/
#define DRV_NAME "yellowfin"
-#define DRV_VERSION "1.05+LK1.1.6"
-#define DRV_RELDATE "Feb 11, 2002"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "Jun 27, 2006"
#define PFX DRV_NAME ": "
@@ -239,8 +215,11 @@ enum capability_flags {
HasMACAddrBug=32, /* Only on early revs. */
DontUseEeprom=64, /* Don't read the MAC from the EEPROm. */
};
+
/* The PCI I/O space extent. */
-#define YELLOWFIN_SIZE 0x100
+enum {
+ YELLOWFIN_SIZE = 0x100,
+};
struct pci_id_info {
const char *name;
@@ -248,16 +227,14 @@ struct pci_id_info {
int pci, pci_mask, subsystem, subsystem_mask;
int revision, revision_mask; /* Only 8 bits. */
} id;
- int io_size; /* Needed for I/O region check or ioremap(). */
int drv_flags; /* Driver use, intended as capability flags. */
};
static const struct pci_id_info pci_id_tbl[] = {
{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
- YELLOWFIN_SIZE,
FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
- YELLOWFIN_SIZE, HasMII | DontUseEeprom },
+ HasMII | DontUseEeprom },
{ }
};