Diffstat (limited to 'drivers/net')
83 files changed, 8409 insertions, 1561 deletions
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index 4532b17e40ea..aedfddf20cb3 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c @@ -1003,7 +1003,8 @@ static int corkscrew_start_xmit(struct sk_buff *skb, /* Calculate the next Tx descriptor entry. */ int entry = vp->cur_tx % TX_RING_SIZE; struct boom_tx_desc *prev_entry; - unsigned long flags, i; + unsigned long flags; + int i; if (vp->tx_full) /* No room to transmit with */ return 1; diff --git a/drivers/net/82596.c b/drivers/net/82596.c index 7e2ca9571467..257d3bce3993 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c @@ -899,7 +899,7 @@ memory_squeeze: } -static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) +static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) { struct i596_cmd *ptr; @@ -932,7 +932,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private lp->scb.cmd = I596_NULL; } -static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) +static void i596_reset(struct net_device *dev, struct i596_private *lp, + int ioaddr) { unsigned long flags; @@ -1578,7 +1579,7 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "i82596 debug mask"); -int init_module(void) +int __init init_module(void) { if (debug >= 0) i596_debug = debug; @@ -1588,7 +1589,7 @@ int init_module(void) return 0; } -void cleanup_module(void) +void __exit cleanup_module(void) { unregister_netdev(dev_82596); #ifdef __mc68000__ diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 39189903e355..30b3671d833d 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1724,6 +1724,20 @@ config VIA_RHINE_MMIO If unsure, say Y. +config VIA_RHINE_NAPI + bool "Use Rx Polling (NAPI)" + depends on VIA_RHINE + help + NAPI is a new driver API designed to reduce CPU and interrupt load + when the driver is receiving lots of packets from the card. + + If your estimated Rx load is 10kpps or more, or if the card will be + deployed on potentially unfriendly networks (e.g. in a firewall), + then say Y here. + + See <file:Documentation/networking/NAPI_HOWTO.txt> for more + information. + config LAN_SAA9730 bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)" depends on NET_PCI && EXPERIMENTAL && MIPS @@ -2219,6 +2233,33 @@ config GFAR_NAPI bool "NAPI Support" depends on GIANFAR +config UCC_GETH + tristate "Freescale QE UCC GETH" + depends on QUICC_ENGINE && UCC_FAST + help + This driver supports the Gigabit Ethernet mode of QE UCC. + QE can be found on MPC836x CPUs. 
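The Rx-polling option added above switches the via-rhine driver to the 2.6.x NAPI model described in NAPI_HOWTO.txt: the interrupt handler masks further receive interrupts and schedules the device onto the poll list (netif_rx_schedule()), and a poll callback later drains the ring with interrupts still masked. A rough, illustrative sketch of that callback shape follows; rx_work_pending(), rx_one_packet() and enable_rx_irq() are hypothetical stand-ins for driver-specific code, not functions from this patch.

static int example_napi_poll(struct net_device *dev, int *budget)
{
	int limit = min(*budget, dev->quota);
	int done = 0;

	while (done < limit && rx_work_pending(dev)) {
		rx_one_packet(dev);		/* ends in netif_receive_skb() */
		done++;
	}
	*budget -= done;
	dev->quota -= done;

	if (!rx_work_pending(dev)) {
		netif_rx_complete(dev);		/* leave the poll list */
		enable_rx_irq(dev);		/* re-arm the rx interrupt */
		return 0;			/* done for now */
	}
	return 1;				/* more work: stay scheduled */
}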
+ +config UGETH_NAPI + bool "NAPI Support" + depends on UCC_GETH + +config UGETH_MAGIC_PACKET + bool "Magic Packet detection support" + depends on UCC_GETH + +config UGETH_FILTERING + bool "Mac address filtering support" + depends on UCC_GETH + +config UGETH_TX_ON_DEMOND + bool "Transmit on Demond support" + depends on UCC_GETH + +config UGETH_HAS_GIGA + bool + depends on UCC_GETH && MPC836x + config MV643XX_ETH tristate "MV-643XX Ethernet support" depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM diff --git a/drivers/net/Makefile b/drivers/net/Makefile index c91e95126f78..8427bf9dec9d 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -18,6 +18,9 @@ gianfar_driver-objs := gianfar.o \ gianfar_mii.o \ gianfar_sysfs.o +obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o +ucc_geth_driver-objs := ucc_geth.o ucc_geth_phy.o + # # link order important here # diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c index 7952dc6d77e3..0fbbcb75af69 100644 --- a/drivers/net/ac3200.c +++ b/drivers/net/ac3200.c @@ -370,8 +370,7 @@ MODULE_PARM_DESC(mem, "Memory base address(es)"); MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver"); MODULE_LICENSE("GPL"); -int -init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig index b14e89004c3a..0a0e0cd81a23 100644 --- a/drivers/net/appletalk/Kconfig +++ b/drivers/net/appletalk/Kconfig @@ -29,7 +29,7 @@ config ATALK even politically correct people are allowed to say Y here. config DEV_APPLETALK - bool "Appletalk interfaces support" + tristate "Appletalk interfaces support" depends on ATALK help AppleTalk is the protocol that Apple computers can use to communicate diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 1d01ac0000e4..ae7f828344d9 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -1030,7 +1030,7 @@ module_param(io, int, 0); module_param(irq, int, 0); module_param(board_type, int, 0); -int init_module(void) +int __init init_module(void) { if (io == 0) printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index 5d7929c79bce..4ca061c2d5b2 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c @@ -901,7 +901,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address"); MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number"); MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)"); -int init_module(void) +int __init init_module(void) { if (io == 0) printk("at1700: You should not use auto-probing with insmod!\n"); diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index db73de0d2511..652eb05a6c2d 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -56,8 +56,8 @@ #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.4.43" -#define DRV_MODULE_RELDATE "June 28, 2006" +#define DRV_MODULE_VERSION "1.4.44" +#define DRV_MODULE_RELDATE "August 10, 2006" #define RUN_AT(x) (jiffies + (x)) @@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); static inline u32 bnx2_tx_avail(struct bnx2 *bp) { - u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons); + u32 diff; + smp_mb(); + diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons); if (diff > MAX_TX_DESC_CNT) diff = (diff & MAX_TX_DESC_CNT) - 1; return (bp->tx_ring_size - diff); @@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index) 
struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; unsigned long align; - skb = dev_alloc_skb(bp->rx_buf_size); + skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); if (skb == NULL) { return -ENOMEM; } @@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index) skb_reserve(skb, 8 - align); } - skb->dev = bp->dev; mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); @@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp) } bp->tx_cons = sw_cons; + /* Need to make the tx_cons update visible to bnx2_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that bnx2_start_xmit() + * will miss it and cause the queue to be stopped forever. + */ + smp_mb(); - if (unlikely(netif_queue_stopped(bp->dev))) { - spin_lock(&bp->tx_lock); + if (unlikely(netif_queue_stopped(bp->dev)) && + (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) { + netif_tx_lock(bp->dev); if ((netif_queue_stopped(bp->dev)) && - (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) { - + (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) netif_wake_queue(bp->dev); - } - spin_unlock(&bp->tx_lock); + netif_tx_unlock(bp->dev); } } @@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget) if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) { struct sk_buff *new_skb; - new_skb = dev_alloc_skb(len + 2); + new_skb = netdev_alloc_skb(bp->dev, len + 2); if (new_skb == NULL) goto reuse_rx; @@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget) skb_reserve(new_skb, 2); skb_put(new_skb, len); - new_skb->dev = bp->dev; bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod); @@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp) struct tx_bd *txbd; u32 val; + bp->tx_wake_thresh = bp->tx_ring_size / 2; + txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT]; txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32; @@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) return -EINVAL; pkt_size = 1514; - skb = dev_alloc_skb(pkt_size); + skb = netdev_alloc_skb(bp->dev, pkt_size); if (!skb) return -ENOMEM; packet = skb_put(skb, pkt_size); @@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) #endif /* Called with netif_tx_lock. - * hard_start_xmit is pseudo-lockless - a lock is only required when - * the tx queue is full. This way, we get the benefit of lockless - * operations most of the time without the complexities to handle - * netif_stop_queue/wake_queue race conditions. + * bnx2_tx_int() runs without netif_tx_lock unless it needs to call + * netif_wake_queue(). 
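The stop/wake handshake that replaces bp->tx_lock is spread across three hunks; condensed into one place for readability (this is the patch's own logic, lightly trimmed, not additional code), the transmit path stops and then re-checks the queue, while bnx2_tx_int() publishes tx_cons with smp_mb() before testing netif_queue_stopped(), and bnx2_tx_avail() issues its own smp_mb() before reading the ring indices:

	/* bnx2_start_xmit(), called under netif_tx_lock */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		/* re-check: tx_int may have advanced tx_cons meanwhile */
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	/* bnx2_tx_int(), lockless on the fast path */
	bp->tx_cons = sw_cons;
	smp_mb();			/* publish tx_cons before the test */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if (netif_queue_stopped(bp->dev) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}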
*/ static int bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) { - spin_lock(&bp->tx_lock); netif_stop_queue(dev); - - if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS) + if (bnx2_tx_avail(bp) > bp->tx_wake_thresh) netif_wake_queue(dev); - spin_unlock(&bp->tx_lock); } return NETDEV_TX_OK; @@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->pdev = pdev; spin_lock_init(&bp->phy_lock); - spin_lock_init(&bp->tx_lock); INIT_WORK(&bp->reset_task, bnx2_reset_task, bp); dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); @@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->mac_addr[5] = (u8) reg; bp->tx_ring_size = MAX_TX_DESC_CNT; - bnx2_set_rx_ring_size(bp, 100); + bnx2_set_rx_ring_size(bp, 255); bp->rx_csum = 1; diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 658c5ee95c73..fe804763c607 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -3890,10 +3890,6 @@ struct bnx2 { u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES))); u16 tx_prod; - struct tx_bd *tx_desc_ring; - struct sw_bd *tx_buf_ring; - int tx_ring_size; - u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES))); u16 hw_tx_cons; @@ -3916,9 +3912,11 @@ struct bnx2 { struct sw_bd *rx_buf_ring; struct rx_bd *rx_desc_ring[MAX_RX_RINGS]; - /* Only used to synchronize netif_stop_queue/wake_queue when tx */ - /* ring is full */ - spinlock_t tx_lock; + /* TX constants */ + struct tx_bd *tx_desc_ring; + struct sw_bd *tx_buf_ring; + int tx_ring_size; + u32 tx_wake_thresh; /* End of fields used in the performance code paths. */ diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 47eecce35fa4..2dcca79b1f6a 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -1905,8 +1905,7 @@ MODULE_LICENSE("GPL"); */ -int -init_module(void) +int __init init_module(void) { struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); struct net_local *lp; diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 1b758b707134..3d76fa144c4f 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -339,6 +339,17 @@ static void dm9000_timeout(struct net_device *dev) spin_unlock_irqrestore(&db->lock,flags); } +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + *Used by netconsole + */ +static void dm9000_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + dm9000_interrupt(dev->irq,dev,NULL); + enable_irq(dev->irq); +} +#endif /* dm9000_release_board * @@ -538,6 +549,9 @@ dm9000_probe(struct platform_device *pdev) ndev->stop = &dm9000_stop; ndev->get_stats = &dm9000_get_stats; ndev->set_multicast_list = &dm9000_hash_table; +#ifdef CONFIG_NET_POLL_CONTROLLER + ndev->poll_controller = &dm9000_poll_controller; +#endif #ifdef DM9000_PROGRAM_EEPROM program_eeprom(db); diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 583518ae49ce..b3b919116e0f 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -105,6 +105,33 @@ static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex); static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); +static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, + uint32_t segment); +static int32_t e1000_get_software_flag(struct e1000_hw *hw); +static int32_t e1000_get_software_semaphore(struct e1000_hw *hw); +static int32_t 
e1000_init_lcd_from_nvm(struct e1000_hw *hw); +static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); +static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, + uint16_t words, uint16_t *data); +static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, + uint8_t* data); +static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, + uint16_t *data); +static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, + uint16_t *data); +static void e1000_release_software_flag(struct e1000_hw *hw); +static void e1000_release_software_semaphore(struct e1000_hw *hw); +static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, + uint32_t no_snoop); +static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, + uint32_t index, uint8_t byte); +static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, + uint16_t words, uint16_t *data); +static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, + uint8_t data); +static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, + uint16_t data); + /* IGP cable length table */ static const uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = @@ -3233,7 +3260,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw) return data; } -int32_t +static int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) { uint32_t swfw_sync = 0; @@ -3277,7 +3304,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) return E1000_SUCCESS; } -void +static void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) { uint32_t swfw_sync; @@ -3575,7 +3602,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, return E1000_SUCCESS; } -int32_t +static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data) @@ -3608,7 +3635,7 @@ e1000_read_kmrn_reg(struct e1000_hw *hw, return E1000_SUCCESS; } -int32_t +static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data) @@ -3839,7 +3866,7 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw) * * hw - struct containing variables accessed by shared code ******************************************************************************/ -int32_t +static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) { int32_t ret_val; @@ -4086,7 +4113,7 @@ e1000_phy_igp_get_info(struct e1000_hw *hw, * hw - Struct containing variables accessed by shared code * phy_info - PHY information structure ******************************************************************************/ -int32_t +static int32_t e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) { @@ -5643,6 +5670,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw) * for the first 15 multicast addresses, and hashes the rest into the * multicast table. 
*****************************************************************************/ +#if 0 void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t *mc_addr_list, @@ -5719,6 +5747,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, } DEBUGOUT("MC Update Complete\n"); } +#endif /* 0 */ /****************************************************************************** * Hashes an address to determine its location in the multicast table @@ -6587,6 +6616,7 @@ e1000_get_bus_info(struct e1000_hw *hw) * hw - Struct containing variables accessed by shared code * offset - offset to read from *****************************************************************************/ +#if 0 uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset) @@ -6597,6 +6627,7 @@ e1000_read_reg_io(struct e1000_hw *hw, e1000_io_write(hw, io_addr, offset); return e1000_io_read(hw, io_data); } +#endif /* 0 */ /****************************************************************************** * Writes a value to one of the devices registers using port I/O (as opposed to @@ -7909,6 +7940,7 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw) * returns: - none. * ***************************************************************************/ +#if 0 void e1000_enable_pciex_master(struct e1000_hw *hw) { @@ -7923,6 +7955,7 @@ e1000_enable_pciex_master(struct e1000_hw *hw) ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; E1000_WRITE_REG(hw, CTRL, ctrl); } +#endif /* 0 */ /******************************************************************************* * @@ -8148,7 +8181,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) * E1000_SUCCESS at any other case. * ***************************************************************************/ -int32_t +static int32_t e1000_get_software_semaphore(struct e1000_hw *hw) { int32_t timeout = hw->eeprom.word_size + 1; @@ -8183,7 +8216,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw) * hw: Struct containing variables accessed by shared code * ***************************************************************************/ -void +static void e1000_release_software_semaphore(struct e1000_hw *hw) { uint32_t swsm; @@ -8265,7 +8298,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) * returns: E1000_SUCCESS * *****************************************************************************/ -int32_t +static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) { uint32_t gcr_reg = 0; @@ -8306,7 +8339,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) * hw: Struct containing variables accessed by shared code * ***************************************************************************/ -int32_t +static int32_t e1000_get_software_flag(struct e1000_hw *hw) { int32_t timeout = PHY_CFG_TIMEOUT; @@ -8345,7 +8378,7 @@ e1000_get_software_flag(struct e1000_hw *hw) * hw: Struct containing variables accessed by shared code * ***************************************************************************/ -void +static void e1000_release_software_flag(struct e1000_hw *hw) { uint32_t extcnf_ctrl; @@ -8369,6 +8402,7 @@ e1000_release_software_flag(struct e1000_hw *hw) * hw: Struct containing variables accessed by shared code * ***************************************************************************/ +#if 0 int32_t e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) { @@ -8388,6 +8422,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) return ret_val; } +#endif /* 0 */ /*************************************************************************** * @@ -8397,6 +8432,7 @@ 
e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) * hw: Struct containing variables accessed by shared code * ***************************************************************************/ +#if 0 int32_t e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) { @@ -8416,6 +8452,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) return ret_val; } +#endif /* 0 */ /****************************************************************************** * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access @@ -8426,7 +8463,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) * data - word read from the EEPROM * words - number of words to read *****************************************************************************/ -int32_t +static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data) { @@ -8482,7 +8519,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, * words - number of words to write * data - words to write to the EEPROM *****************************************************************************/ -int32_t +static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data) { @@ -8529,7 +8566,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, * * hw - The pointer to the hw structure ****************************************************************************/ -int32_t +static int32_t e1000_ich8_cycle_init(struct e1000_hw *hw) { union ich8_hws_flash_status hsfsts; @@ -8596,7 +8633,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw) * * hw - The pointer to the hw structure ****************************************************************************/ -int32_t +static int32_t e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) { union ich8_hws_flash_ctrl hsflctl; @@ -8631,7 +8668,7 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) * size - Size of data to read, 1=byte 2=word * data - Pointer to the word to store the value read. *****************************************************************************/ -int32_t +static int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t* data) { @@ -8710,7 +8747,7 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, * size - Size of data to read, 1=byte 2=word * data - The byte(s) to write to the NVM. *****************************************************************************/ -int32_t +static int32_t e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t data) { @@ -8785,7 +8822,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, * index - The index of the byte to read. * data - Pointer to a byte to store the value read. *****************************************************************************/ -int32_t +static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) { int32_t status = E1000_SUCCESS; @@ -8808,7 +8845,7 @@ e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) * index - The index of the byte to write. * byte - The byte to write to the NVM. 
*****************************************************************************/ -int32_t +static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) { int32_t error = E1000_SUCCESS; @@ -8839,7 +8876,7 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) * index - The index of the byte to read. * data - The byte to write to the NVM. *****************************************************************************/ -int32_t +static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) { int32_t status = E1000_SUCCESS; @@ -8857,7 +8894,7 @@ e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) * index - The starting byte index of the word to read. * data - Pointer to a word to store the value read. *****************************************************************************/ -int32_t +static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) { int32_t status = E1000_SUCCESS; @@ -8872,6 +8909,7 @@ e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) * index - The starting byte index of the word to read. * data - The word to write to the NVM. *****************************************************************************/ +#if 0 int32_t e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) { @@ -8879,6 +8917,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) status = e1000_write_ich8_data(hw, index, 2, data); return status; } +#endif /* 0 */ /****************************************************************************** * Erases the bank specified. Each bank is a 4k block. Segments are 0 based. @@ -8887,7 +8926,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) * hw - pointer to e1000_hw structure * segment - 0 for first segment, 1 for second segment, etc. 
*****************************************************************************/ -int32_t +static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment) { union ich8_hws_flash_status hsfsts; @@ -8984,6 +9023,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment) * hw: Struct containing variables accessed by shared code * *****************************************************************************/ +#if 0 int32_t e1000_duplex_reversal(struct e1000_hw *hw) { @@ -9012,8 +9052,9 @@ e1000_duplex_reversal(struct e1000_hw *hw) return ret_val; } +#endif /* 0 */ -int32_t +static int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size) { @@ -9047,7 +9088,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, } -int32_t +static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw) { uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop; diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index f9341e3276b3..375b95518c31 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h @@ -323,13 +323,8 @@ int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t dat int32_t e1000_phy_hw_reset(struct e1000_hw *hw); int32_t e1000_phy_reset(struct e1000_hw *hw); void e1000_phy_powerdown_workaround(struct e1000_hw *hw); -int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); -int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size); -int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw); int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); -int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data); -int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); /* EEPROM Functions */ int32_t e1000_init_eeprom_params(struct e1000_hw *hw); @@ -400,13 +395,8 @@ int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); int32_t e1000_read_mac_addr(struct e1000_hw * hw); -int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); -void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); -void e1000_release_software_flag(struct e1000_hw *hw); -int32_t e1000_get_software_flag(struct e1000_hw *hw); /* Filters (multicast, vlan, receive) */ -void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count); uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); @@ -431,31 +421,9 @@ void e1000_pci_clear_mwi(struct e1000_hw *hw); void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); /* Port I/O is only supported on 82544 and newer */ -uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port); -uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset); void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); -void e1000_enable_pciex_master(struct e1000_hw *hw); int32_t e1000_disable_pciex_master(struct e1000_hw *hw); -int32_t 
e1000_get_software_semaphore(struct e1000_hw *hw); -void e1000_release_software_semaphore(struct e1000_hw *hw); int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); -int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop); - -int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, - uint8_t *data); -int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, - uint8_t byte); -int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, - uint8_t byte); -int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, - uint16_t *data); -int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, - uint32_t size, uint16_t *data); -int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, - uint16_t words, uint16_t *data); -int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, - uint16_t words, uint16_t *data); -int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment); #define E1000_READ_REG_IO(a, reg) \ diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index da62db897426..726f43d55937 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -3127,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) break; } - /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next * larger slab size * i.e. RXBUFFER_2048 --> size-4096 slab */ @@ -3708,7 +3708,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, #define E1000_CB_LENGTH 256 if (length < E1000_CB_LENGTH) { struct sk_buff *new_skb = - dev_alloc_skb(length + NET_IP_ALIGN); + netdev_alloc_skb(netdev, length + NET_IP_ALIGN); if (new_skb) { skb_reserve(new_skb, NET_IP_ALIGN); new_skb->dev = netdev; @@ -3979,7 +3979,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, while (cleaned_count--) { if (!(skb = buffer_info->skb)) - skb = dev_alloc_skb(bufsz); + skb = netdev_alloc_skb(netdev, bufsz); else { skb_trim(skb, 0); goto map_skb; @@ -3997,7 +3997,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " "at %p\n", bufsz, skb->data); /* Try again, without freeing the previous */ - skb = dev_alloc_skb(bufsz); + skb = netdev_alloc_skb(netdev, bufsz); /* Failed allocation, critical failure */ if (!skb) { dev_kfree_skb(oldskb); @@ -4121,7 +4121,8 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, rx_desc->read.buffer_addr[j+1] = ~0; } - skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); + skb = netdev_alloc_skb(netdev, + adapter->rx_ps_bsize0 + NET_IP_ALIGN); if (unlikely(!skb)) { adapter->alloc_rx_buff_failed++; @@ -4385,11 +4386,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) pci_write_config_word(adapter->pdev, reg, *value); } +#if 0 uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port) { return inl(port); } +#endif /* 0 */ void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c index e5c5cd2a2712..e4e733a380e3 100644 --- a/drivers/net/e2100.c +++ b/drivers/net/e2100.c @@ -425,8 +425,8 @@ MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. 
*/ -int -init_module(void) + +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 20d31430c74f..8dc61d65dd23 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -1807,8 +1807,7 @@ MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); -int -init_module(void) +int __init init_module(void) { struct net_device *dev; int i; diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 33291bcf6d4c..0701c1d810ca 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -1698,7 +1698,7 @@ MODULE_LICENSE("GPL"); * are specified, we verify and then use them. If no parameters are given, we * autoprobe for one card only. */ -int init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c index 6b0ab1eac3fb..fd7b32a24ea4 100644 --- a/drivers/net/es3210.c +++ b/drivers/net/es3210.c @@ -421,8 +421,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)"); MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); MODULE_LICENSE("GPL"); -int -init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 4bf76f86d8e9..ca42efa9143c 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c @@ -1434,7 +1434,7 @@ MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto, module_param(debug, int, 0); MODULE_PARM_DESC(debug, "eth16i debug level (0-6)"); -int init_module(void) +int __init init_module(void) { int this_dev, found = 0; struct net_device *dev; diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 97d34fee8c1f..567e27413cfd 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; #include <asm/uaccess.h> /* These identify the driver base version and may not be removed. */ -static char version[] __devinitdata = +static char version[] = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile index d6dd3f2fb43e..02d4dc18ba69 100644 --- a/drivers/net/fs_enet/Makefile +++ b/drivers/net/fs_enet/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_FS_ENET) += fs_enet.o -obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o -obj-$(CONFIG_8260) += mac-fcc.o +obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o +obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o -fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o +fs_enet-objs := fs_enet-main.o diff --git a/drivers/net/fs_enet/fec.h b/drivers/net/fs_enet/fec.h new file mode 100644 index 000000000000..e980527e2b99 --- /dev/null +++ b/drivers/net/fs_enet/fec.h @@ -0,0 +1,42 @@ +#ifndef FS_ENET_FEC_H +#define FS_ENET_FEC_H + +/* CRC polynomium used by the FEC for the multicast group filtering */ +#define FEC_CRC_POLY 0x04C11DB7 + +#define FEC_MAX_MULTICAST_ADDRS 64 + +/* Interrupt events/masks. 
+*/ +#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ +#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ +#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ +#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ +#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ +#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ +#define FEC_ENET_RXF 0x02000000U /* Full frame received */ +#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ +#define FEC_ENET_MII 0x00800000U /* MII interrupt */ +#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ + +#define FEC_ECNTRL_PINMUX 0x00000004 +#define FEC_ECNTRL_ETHER_EN 0x00000002 +#define FEC_ECNTRL_RESET 0x00000001 + +#define FEC_RCNTRL_BC_REJ 0x00000010 +#define FEC_RCNTRL_PROM 0x00000008 +#define FEC_RCNTRL_MII_MODE 0x00000004 +#define FEC_RCNTRL_DRT 0x00000002 +#define FEC_RCNTRL_LOOP 0x00000001 + +#define FEC_TCNTRL_FDEN 0x00000004 +#define FEC_TCNTRL_HBC 0x00000002 +#define FEC_TCNTRL_GTS 0x00000001 + + + +/* + * Delay to wait for FEC reset command to complete (in us) + */ +#define FEC_RESET_DELAY 50 +#endif diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index f6abff5846b3..df62506a1787 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -37,6 +37,7 @@ #include <linux/bitops.h> #include <linux/fs.h> #include <linux/platform_device.h> +#include <linux/phy.h> #include <linux/vmalloc.h> #include <asm/pgtable.h> @@ -682,35 +683,6 @@ static void fs_free_irq(struct net_device *dev, int irq) (*fep->ops->post_free_irq)(dev, irq); } -/**********************************************************************************/ - -/* This interrupt occurs when the PHY detects a link change. */ -static irqreturn_t -fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs) -{ - struct net_device *dev = dev_id; - struct fs_enet_private *fep; - const struct fs_platform_info *fpi; - - fep = netdev_priv(dev); - fpi = fep->fpi; - - /* - * Acknowledge the interrupt if possible. If we have not - * found the PHY yet we can't process or acknowledge the - * interrupt now. Instead we ignore this interrupt for now, - * which we can do since it is edge triggered. It will be - * acknowledged later by fs_enet_open(). 
- */ - if (!fep->phy) - return IRQ_NONE; - - fs_mii_ack_int(dev); - fs_mii_link_status_change_check(dev, 0); - - return IRQ_HANDLED; -} - static void fs_timeout(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); @@ -722,10 +694,13 @@ static void fs_timeout(struct net_device *dev) spin_lock_irqsave(&fep->lock, flags); if (dev->flags & IFF_UP) { + phy_stop(fep->phydev); (*fep->ops->stop)(dev); (*fep->ops->restart)(dev); + phy_start(fep->phydev); } + phy_start(fep->phydev); wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); spin_unlock_irqrestore(&fep->lock, flags); @@ -733,35 +708,112 @@ static void fs_timeout(struct net_device *dev) netif_wake_queue(dev); } +/*----------------------------------------------------------------------------- + * generic link-change handler - should be sufficient for most cases + *-----------------------------------------------------------------------------*/ +static void generic_adjust_link(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phydev; + int new_state = 0; + + if (phydev->link) { + + /* adjust to duplex mode */ + if (phydev->duplex != fep->oldduplex){ + new_state = 1; + fep->oldduplex = phydev->duplex; + } + + if (phydev->speed != fep->oldspeed) { + new_state = 1; + fep->oldspeed = phydev->speed; + } + + if (!fep->oldlink) { + new_state = 1; + fep->oldlink = 1; + netif_schedule(dev); + netif_carrier_on(dev); + netif_start_queue(dev); + } + + if (new_state) + fep->ops->restart(dev); + + } else if (fep->oldlink) { + new_state = 1; + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + netif_carrier_off(dev); + netif_stop_queue(dev); + } + + if (new_state && netif_msg_link(fep)) + phy_print_status(phydev); +} + + +static void fs_adjust_link(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&fep->lock, flags); + + if(fep->ops->adjust_link) + fep->ops->adjust_link(dev); + else + generic_adjust_link(dev); + + spin_unlock_irqrestore(&fep->lock, flags); +} + +static int fs_init_phy(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev; + + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + if(fep->fpi->bus_id) + phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0); + else { + printk("No phy bus ID specified in BSP code\n"); + return -EINVAL; + } + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + fep->phydev = phydev; + + return 0; +} + + static int fs_enet_open(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; int r; + int err; /* Install our interrupt handler. 
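The new fs_init_phy()/fs_adjust_link() pair above is the standard phylib attach pattern that replaces the driver's private MII polling. A condensed, illustrative sketch of the same shape follows; struct my_priv, my_open(), my_adjust_link() and the "0:00" bus id are made-up names, and the four-argument phy_connect() mirrors the call used in this patch.

struct my_priv {
	struct phy_device *phydev;
	int oldspeed, oldduplex;
};

static void my_adjust_link(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	/* phylib invokes this callback whenever link, speed or duplex
	 * changes; the driver reprograms the MAC only on a real change. */
	if (phydev->link &&
	    (phydev->speed != priv->oldspeed ||
	     phydev->duplex != priv->oldduplex)) {
		priv->oldspeed = phydev->speed;
		priv->oldduplex = phydev->duplex;
		/* restart/reprogram the MAC for the new link settings */
	}
}

static int my_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	priv->phydev = phy_connect(dev, "0:00", &my_adjust_link, 0);
	if (IS_ERR(priv->phydev))
		return PTR_ERR(priv->phydev);

	phy_start(priv->phydev);	/* start the PHY state machine */
	return 0;
}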
*/ r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); if (r != 0) { printk(KERN_ERR DRV_MODULE_NAME - ": %s Could not allocate FEC IRQ!", dev->name); + ": %s Could not allocate FS_ENET IRQ!", dev->name); return -EINVAL; } - /* Install our phy interrupt handler */ - if (fpi->phy_irq != -1) { - - r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt); - if (r != 0) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s Could not allocate PHY IRQ!", dev->name); - fs_free_irq(dev, fep->interrupt); - return -EINVAL; - } - } + err = fs_init_phy(dev); + if(err) + return err; - fs_mii_startup(dev); - netif_carrier_off(dev); - fs_mii_link_status_change_check(dev, 1); + phy_start(fep->phydev); return 0; } @@ -769,20 +821,19 @@ static int fs_enet_open(struct net_device *dev) static int fs_enet_close(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; unsigned long flags; netif_stop_queue(dev); netif_carrier_off(dev); - fs_mii_shutdown(dev); + phy_stop(fep->phydev); spin_lock_irqsave(&fep->lock, flags); (*fep->ops->stop)(dev); spin_unlock_irqrestore(&fep->lock, flags); /* release any irqs */ - if (fpi->phy_irq != -1) - fs_free_irq(dev, fpi->phy_irq); + phy_disconnect(fep->phydev); + fep->phydev = NULL; fs_free_irq(dev, fep->interrupt); return 0; @@ -830,33 +881,19 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct fs_enet_private *fep = netdev_priv(dev); - unsigned long flags; - int rc; - - spin_lock_irqsave(&fep->lock, flags); - rc = mii_ethtool_gset(&fep->mii_if, cmd); - spin_unlock_irqrestore(&fep->lock, flags); - - return rc; + return phy_ethtool_gset(fep->phydev, cmd); } static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct fs_enet_private *fep = netdev_priv(dev); - unsigned long flags; - int rc; - - spin_lock_irqsave(&fep->lock, flags); - rc = mii_ethtool_sset(&fep->mii_if, cmd); - spin_unlock_irqrestore(&fep->lock, flags); - - return rc; + phy_ethtool_sset(fep->phydev, cmd); + return 0; } static int fs_nway_reset(struct net_device *dev) { - struct fs_enet_private *fep = netdev_priv(dev); - return mii_nway_restart(&fep->mii_if); + return 0; } static u32 fs_get_msglevel(struct net_device *dev) @@ -898,7 +935,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EINVAL; spin_lock_irqsave(&fep->lock, flags); - rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); + rc = phy_mii_ioctl(fep->phydev, mii, cmd); spin_unlock_irqrestore(&fep->lock, flags); return rc; } @@ -1030,12 +1067,6 @@ static struct net_device *fs_init_instance(struct device *dev, } registered = 1; - err = fs_mii_connect(ndev); - if (err != 0) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s fs_mii_connect failed.\n", ndev->name); - goto err; - } return ndev; @@ -1073,8 +1104,6 @@ static int fs_cleanup_instance(struct net_device *ndev) fpi = fep->fpi; - fs_mii_disconnect(ndev); - unregister_netdev(ndev); dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), @@ -1196,17 +1225,39 @@ static int __init fs_init(void) r = setup_immap(); if (r != 0) return r; - r = driver_register(&fs_enet_fec_driver); + +#ifdef CONFIG_FS_ENET_HAS_FCC + /* let's insert mii stuff */ + r = fs_enet_mdio_bb_init(); + + if (r != 0) { + printk(KERN_ERR DRV_MODULE_NAME + "BB PHY init failed.\n"); + return r; + } + r = driver_register(&fs_enet_fcc_driver); if (r != 0) goto 
err; +#endif - r = driver_register(&fs_enet_fcc_driver); +#ifdef CONFIG_FS_ENET_HAS_FEC + r = fs_enet_mdio_fec_init(); + if (r != 0) { + printk(KERN_ERR DRV_MODULE_NAME + "FEC PHY init failed.\n"); + return r; + } + + r = driver_register(&fs_enet_fec_driver); if (r != 0) goto err; +#endif +#ifdef CONFIG_FS_ENET_HAS_SCC r = driver_register(&fs_enet_scc_driver); if (r != 0) goto err; +#endif return 0; err: diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c deleted file mode 100644 index b7e6e21725cb..000000000000 --- a/drivers/net/fs_enet/fs_enet-mii.c +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. - * - * Copyright (c) 2003 Intracom S.A. - * by Pantelis Antoniou <panto@intracom.gr> - * - * 2005 (c) MontaVista Software, Inc. - * Vitaly Bordug <vbordug@ru.mvista.com> - * - * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> - * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/mii.h> -#include <linux/ethtool.h> -#include <linux/bitops.h> - -#include <asm/pgtable.h> -#include <asm/irq.h> -#include <asm/uaccess.h> - -#include "fs_enet.h" - -/*************************************************/ - -/* - * Generic PHY support. - * Should work for all PHYs, but link change is detected by polling - */ - -static void generic_timer_callback(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct fs_enet_private *fep = netdev_priv(dev); - - fep->phy_timer_list.expires = jiffies + HZ / 2; - - add_timer(&fep->phy_timer_list); - - fs_mii_link_status_change_check(dev, 0); -} - -static void generic_startup(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */ - fep->phy_timer_list.data = (unsigned long)dev; - fep->phy_timer_list.function = generic_timer_callback; - add_timer(&fep->phy_timer_list); -} - -static void generic_shutdown(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - del_timer_sync(&fep->phy_timer_list); -} - -/* ------------------------------------------------------------------------- */ -/* The Davicom DM9161 is used on the NETTA board */ - -/* register definitions */ - -#define MII_DM9161_ANAR 4 /* Aux. Config Register */ -#define MII_DM9161_ACR 16 /* Aux. Config Register */ -#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */ -#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */ -#define MII_DM9161_INTR 21 /* Interrupt Register */ -#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. 
*/ -#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */ - -static void dm9161_startup(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000); - /* Start autonegotiation */ - fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200); - - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ*8); -} - -static void dm9161_ack_int(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR); -} - -static void dm9161_shutdown(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00); -} - -/**********************************************************************************/ - -static const struct phy_info phy_info[] = { - { - .id = 0x00181b88, - .name = "DM9161", - .startup = dm9161_startup, - .ack_int = dm9161_ack_int, - .shutdown = dm9161_shutdown, - }, { - .id = 0, - .name = "GENERIC", - .startup = generic_startup, - .shutdown = generic_shutdown, - }, -}; - -/**********************************************************************************/ - -static int phy_id_detect(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; - struct fs_enet_mii_bus *bus = fep->mii_bus; - int i, r, start, end, phytype, physubtype; - const struct phy_info *phy; - int phy_hwid, phy_id; - - phy_hwid = -1; - fep->phy = NULL; - - /* auto-detect? */ - if (fpi->phy_addr == -1) { - start = 1; - end = 32; - } else { /* direct */ - start = fpi->phy_addr; - end = start + 1; - } - - for (phy_id = start; phy_id < end; phy_id++) { - /* skip already used phy addresses on this bus */ - if (bus->usage_map & (1 << phy_id)) - continue; - r = fs_mii_read(dev, phy_id, MII_PHYSID1); - if (r == -1 || (phytype = (r & 0xffff)) == 0xffff) - continue; - r = fs_mii_read(dev, phy_id, MII_PHYSID2); - if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff) - continue; - phy_hwid = (phytype << 16) | physubtype; - if (phy_hwid != -1) - break; - } - - if (phy_hwid == -1) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s No PHY detected! range=0x%02x-0x%02x\n", - dev->name, start, end); - return -1; - } - - for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++) - if (phy->id == (phy_hwid >> 4) || phy->id == 0) - break; - - if (i >= ARRAY_SIZE(phy_info)) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s PHY id 0x%08x is not supported!\n", - dev->name, phy_hwid); - return -1; - } - - fep->phy = phy; - - /* mark this address as used */ - bus->usage_map |= (1 << phy_id); - - printk(KERN_INFO DRV_MODULE_NAME - ": %s Phy @ 0x%x, type %s (0x%08x)%s\n", - dev->name, phy_id, fep->phy->name, phy_hwid, - fpi->phy_addr == -1 ? 
" (auto-detected)" : ""); - - return phy_id; -} - -void fs_mii_startup(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (fep->phy->startup) - (*fep->phy->startup) (dev); -} - -void fs_mii_shutdown(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (fep->phy->shutdown) - (*fep->phy->shutdown) (dev); -} - -void fs_mii_ack_int(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (fep->phy->ack_int) - (*fep->phy->ack_int) (dev); -} - -#define MII_LINK 0x0001 -#define MII_HALF 0x0002 -#define MII_FULL 0x0004 -#define MII_BASE4 0x0008 -#define MII_10M 0x0010 -#define MII_100M 0x0020 -#define MII_1G 0x0040 -#define MII_10G 0x0080 - -/* return full mii info at one gulp, with a usable form */ -static unsigned int mii_full_status(struct mii_if_info *mii) -{ - unsigned int status; - int bmsr, adv, lpa, neg; - struct fs_enet_private* fep = netdev_priv(mii->dev); - - /* first, a dummy read, needed to latch some MII phys */ - (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); - bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); - - /* no link */ - if ((bmsr & BMSR_LSTATUS) == 0) - return 0; - - status = MII_LINK; - - /* Lets look what ANEG says if it's supported - otherwize we shall - take the right values from the platform info*/ - if(!mii->force_media) { - /* autoneg not completed; don't bother */ - if ((bmsr & BMSR_ANEGCOMPLETE) == 0) - return 0; - - adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE); - lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA); - - neg = lpa & adv; - } else { - neg = fep->fpi->bus_info->lpa; - } - - if (neg & LPA_100FULL) - status |= MII_FULL | MII_100M; - else if (neg & LPA_100BASE4) - status |= MII_FULL | MII_BASE4 | MII_100M; - else if (neg & LPA_100HALF) - status |= MII_HALF | MII_100M; - else if (neg & LPA_10FULL) - status |= MII_FULL | MII_10M; - else - status |= MII_HALF | MII_10M; - - return status; -} - -void fs_mii_link_status_change_check(struct net_device *dev, int init_media) -{ - struct fs_enet_private *fep = netdev_priv(dev); - struct mii_if_info *mii = &fep->mii_if; - unsigned int mii_status; - int ok_to_print, link, duplex, speed; - unsigned long flags; - - ok_to_print = netif_msg_link(fep); - - mii_status = mii_full_status(mii); - - if (!init_media && mii_status == fep->last_mii_status) - return; - - fep->last_mii_status = mii_status; - - link = !!(mii_status & MII_LINK); - duplex = !!(mii_status & MII_FULL); - speed = (mii_status & MII_100M) ? 100 : 10; - - if (link == 0) { - netif_carrier_off(mii->dev); - netif_stop_queue(dev); - if (!init_media) { - spin_lock_irqsave(&fep->lock, flags); - (*fep->ops->stop)(dev); - spin_unlock_irqrestore(&fep->lock, flags); - } - - if (ok_to_print) - printk(KERN_INFO "%s: link down\n", mii->dev->name); - - } else { - - mii->full_duplex = duplex; - - netif_carrier_on(mii->dev); - - spin_lock_irqsave(&fep->lock, flags); - fep->duplex = duplex; - fep->speed = speed; - (*fep->ops->restart)(dev); - spin_unlock_irqrestore(&fep->lock, flags); - - netif_start_queue(dev); - - if (ok_to_print) - printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n", - dev->name, speed, duplex ? 
"full" : "half"); - } -} - -/**********************************************************************************/ - -int fs_mii_read(struct net_device *dev, int phy_id, int location) -{ - struct fs_enet_private *fep = netdev_priv(dev); - struct fs_enet_mii_bus *bus = fep->mii_bus; - - unsigned long flags; - int ret; - - spin_lock_irqsave(&bus->mii_lock, flags); - ret = (*bus->mii_read)(bus, phy_id, location); - spin_unlock_irqrestore(&bus->mii_lock, flags); - - return ret; -} - -void fs_mii_write(struct net_device *dev, int phy_id, int location, int value) -{ - struct fs_enet_private *fep = netdev_priv(dev); - struct fs_enet_mii_bus *bus = fep->mii_bus; - unsigned long flags; - - spin_lock_irqsave(&bus->mii_lock, flags); - (*bus->mii_write)(bus, phy_id, location, value); - spin_unlock_irqrestore(&bus->mii_lock, flags); -} - -/*****************************************************************************/ - -/* list of all registered mii buses */ -static LIST_HEAD(fs_mii_bus_list); - -static struct fs_enet_mii_bus *lookup_bus(int method, int id) -{ - struct list_head *ptr; - struct fs_enet_mii_bus *bus; - - list_for_each(ptr, &fs_mii_bus_list) { - bus = list_entry(ptr, struct fs_enet_mii_bus, list); - if (bus->bus_info->method == method && - bus->bus_info->id == id) - return bus; - } - return NULL; -} - -static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi) -{ - struct fs_enet_mii_bus *bus; - int ret = 0; - - bus = kmalloc(sizeof(*bus), GFP_KERNEL); - if (bus == NULL) { - ret = -ENOMEM; - goto err; - } - memset(bus, 0, sizeof(*bus)); - spin_lock_init(&bus->mii_lock); - bus->bus_info = bi; - bus->refs = 0; - bus->usage_map = 0; - - /* perform initialization */ - switch (bi->method) { - - case fsmii_fixed: - ret = fs_mii_fixed_init(bus); - if (ret != 0) - goto err; - break; - - case fsmii_bitbang: - ret = fs_mii_bitbang_init(bus); - if (ret != 0) - goto err; - break; -#ifdef CONFIG_FS_ENET_HAS_FEC - case fsmii_fec: - ret = fs_mii_fec_init(bus); - if (ret != 0) - goto err; - break; -#endif - default: - ret = -EINVAL; - goto err; - } - - list_add(&bus->list, &fs_mii_bus_list); - - return bus; - -err: - kfree(bus); - return ERR_PTR(ret); -} - -static void destroy_bus(struct fs_enet_mii_bus *bus) -{ - /* remove from bus list */ - list_del(&bus->list); - - /* nothing more needed */ - kfree(bus); -} - -int fs_mii_connect(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; - struct fs_enet_mii_bus *bus = NULL; - - /* check method validity */ - switch (fpi->bus_info->method) { - case fsmii_fixed: - case fsmii_bitbang: - break; -#ifdef CONFIG_FS_ENET_HAS_FEC - case fsmii_fec: - break; -#endif - default: - printk(KERN_ERR DRV_MODULE_NAME - ": %s Unknown MII bus method (%d)!\n", - dev->name, fpi->bus_info->method); - return -EINVAL; - } - - bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id); - - /* if not found create new bus */ - if (bus == NULL) { - bus = create_bus(fpi->bus_info); - if (IS_ERR(bus)) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s MII bus creation failure!\n", dev->name); - return PTR_ERR(bus); - } - } - - bus->refs++; - - fep->mii_bus = bus; - - fep->mii_if.dev = dev; - fep->mii_if.phy_id_mask = 0x1f; - fep->mii_if.reg_num_mask = 0x1f; - fep->mii_if.mdio_read = fs_mii_read; - fep->mii_if.mdio_write = fs_mii_write; - fep->mii_if.force_media = fpi->bus_info->disable_aneg; - fep->mii_if.phy_id = phy_id_detect(dev); - - return 0; -} - -void fs_mii_disconnect(struct net_device *dev) -{ - 
struct fs_enet_private *fep = netdev_priv(dev); - struct fs_enet_mii_bus *bus = NULL; - - bus = fep->mii_bus; - fep->mii_bus = NULL; - - if (--bus->refs <= 0) - destroy_bus(bus); -} diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h index e7ec96c964a9..95022c005f75 100644 --- a/drivers/net/fs_enet/fs_enet.h +++ b/drivers/net/fs_enet/fs_enet.h @@ -5,6 +5,7 @@ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/list.h> +#include <linux/phy.h> #include <linux/fs_enet_pd.h> @@ -12,12 +13,30 @@ #ifdef CONFIG_CPM1 #include <asm/commproc.h> + +struct fec_info { + fec_t* fecp; + u32 mii_speed; +}; #endif #ifdef CONFIG_CPM2 #include <asm/cpm2.h> #endif +/* This is used to operate with pins. + Note that the actual port size may + be different; cpm(s) handle it OK */ +struct bb_info { + u8 mdio_dat_msk; + u8 mdio_dir_msk; + u8 *mdio_dir; + u8 *mdio_dat; + u8 mdc_msk; + u8 *mdc_dat; + int delay; +}; + /* hw driver ops */ struct fs_ops { int (*setup_data)(struct net_device *dev); @@ -25,6 +44,7 @@ struct fs_ops { void (*free_bd)(struct net_device *dev); void (*cleanup_data)(struct net_device *dev); void (*set_multicast_list)(struct net_device *dev); + void (*adjust_link)(struct net_device *dev); void (*restart)(struct net_device *dev); void (*stop)(struct net_device *dev); void (*pre_request_irq)(struct net_device *dev, int irq); @@ -100,10 +120,6 @@ struct fs_enet_mii_bus { }; }; -int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus); -int fs_mii_fixed_init(struct fs_enet_mii_bus *bus); -int fs_mii_fec_init(struct fs_enet_mii_bus *bus); - struct fs_enet_private { struct device *dev; /* pointer back to the device (must be initialized first) */ spinlock_t lock; /* during all ops except TX pckt processing */ @@ -130,7 +146,8 @@ struct fs_enet_private { struct fs_enet_mii_bus *mii_bus; int interrupt; - int duplex, speed; /* current settings */ + struct phy_device *phydev; + int oldduplex, oldspeed, oldlink; /* current settings */ /* event masks */ u32 ev_napi_rx; /* mask of NAPI rx events */ @@ -168,15 +185,9 @@ struct fs_enet_private { }; /***************************************************************************/ - -int fs_mii_read(struct net_device *dev, int phy_id, int location); -void fs_mii_write(struct net_device *dev, int phy_id, int location, int value); - -void fs_mii_startup(struct net_device *dev); -void fs_mii_shutdown(struct net_device *dev); -void fs_mii_ack_int(struct net_device *dev); - -void fs_mii_link_status_change_check(struct net_device *dev, int init_media); +int fs_enet_mdio_bb_init(void); +int fs_mii_fixed_init(struct fs_enet_mii_bus *bus); +int fs_enet_mdio_fec_init(void); void fs_init_bds(struct net_device *dev); void fs_cleanup_bds(struct net_device *dev); @@ -194,7 +205,6 @@ int fs_enet_platform_init(void); void fs_enet_platform_cleanup(void); /***************************************************************************/ - /* buffer descriptor access macros */ /* access macros */ diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index 64e20982c1fe..1ff2597b8495 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -34,6 +34,7 @@ #include <linux/bitops.h> #include <linux/fs.h> #include <linux/platform_device.h> +#include <linux/phy.h> #include <asm/immap_cpm2.h> #include <asm/mpc8260.h> @@ -122,22 +123,32 @@ static int do_pd_setup(struct fs_enet_private *fep) /* Attach the memory for the FCC Parameter RAM */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); - 
fep->fcc.ep = (void *)r->start; - + fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1); if (fep->fcc.ep == NULL) return -EINVAL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); - fep->fcc.fccp = (void *)r->start; - + fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1); if (fep->fcc.fccp == NULL) return -EINVAL; - fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; + if (fep->fpi->fcc_regs_c) { + + fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; + } else { + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "fcc_regs_c"); + fep->fcc.fcccp = (void *)ioremap(r->start, + r->end - r->start + 1); + } if (fep->fcc.fcccp == NULL) return -EINVAL; + fep->fcc.mem = (void *)fep->fpi->mem_offset; + if (fep->fcc.mem == NULL) + return -EINVAL; + return 0; } @@ -155,8 +166,6 @@ static int setup_data(struct net_device *dev) if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ return -EINVAL; - fep->fcc.mem = (void *)fpi->mem_offset; - if (do_pd_setup(fep) != 0) return -EINVAL; @@ -394,7 +403,7 @@ static void restart(struct net_device *dev) /* adjust to speed (for RMII mode) */ if (fpi->use_rmii) { - if (fep->speed == 100) + if (fep->phydev->speed == 100) C8(fcccp, fcc_gfemr, 0x20); else S8(fcccp, fcc_gfemr, 0x20); @@ -420,7 +429,7 @@ static void restart(struct net_device *dev) S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); /* adjust to duplex mode */ - if (fep->duplex) + if (fep->phydev->duplex) S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); else C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); @@ -486,7 +495,10 @@ static void rx_bd_done(struct net_device *dev) static void tx_kickstart(struct net_device *dev) { - /* nothing */ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t *fccp = fep->fcc.fccp; + + S32(fccp, fcc_ftodr, 0x80); } static u32 get_int_events(struct net_device *dev) diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index e09547077529..c2c5fd419bd0 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c @@ -46,6 +46,7 @@ #endif #include "fs_enet.h" +#include "fec.h" /*************************************************/ @@ -75,50 +76,8 @@ /* clear bits */ #define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) - -/* CRC polynomium used by the FEC for the multicast group filtering */ -#define FEC_CRC_POLY 0x04C11DB7 - -#define FEC_MAX_MULTICAST_ADDRS 64 - -/* Interrupt events/masks. -*/ -#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ -#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ -#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ -#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ -#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ -#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ -#define FEC_ENET_RXF 0x02000000U /* Full frame received */ -#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ -#define FEC_ENET_MII 0x00800000U /* MII interrupt */ -#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ - -#define FEC_ECNTRL_PINMUX 0x00000004 -#define FEC_ECNTRL_ETHER_EN 0x00000002 -#define FEC_ECNTRL_RESET 0x00000001 - -#define FEC_RCNTRL_BC_REJ 0x00000010 -#define FEC_RCNTRL_PROM 0x00000008 -#define FEC_RCNTRL_MII_MODE 0x00000004 -#define FEC_RCNTRL_DRT 0x00000002 -#define FEC_RCNTRL_LOOP 0x00000001 - -#define FEC_TCNTRL_FDEN 0x00000004 -#define FEC_TCNTRL_HBC 0x00000002 -#define FEC_TCNTRL_GTS 0x00000001 - - -/* Make MII read/write commands for the FEC. 
-*/ -#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) -#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) -#define mk_mii_end 0 - -#define FEC_MII_LOOPS 10000 - /* - * Delay to wait for FEC reset command to complete (in us) + * Delay to wait for FEC reset command to complete (in us) */ #define FEC_RESET_DELAY 50 @@ -303,13 +262,15 @@ static void restart(struct net_device *dev) int r; u32 addrhi, addrlo; + struct mii_bus* mii = fep->phydev->bus; + struct fec_info* fec_inf = mii->priv; + r = whack_reset(fep->fec.fecp); if (r != 0) printk(KERN_ERR DRV_MODULE_NAME ": %s FEC Reset FAILED!\n", dev->name); - /* - * Set station address. + * Set station address. */ addrhi = ((u32) dev->dev_addr[0] << 24) | ((u32) dev->dev_addr[1] << 16) | @@ -350,12 +311,12 @@ static void restart(struct net_device *dev) FW(fecp, fun_code, 0x78000000); /* - * Set MII speed. + * Set MII speed. */ - FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed); + FW(fecp, mii_speed, fec_inf->mii_speed); /* - * Clear any outstanding interrupt. + * Clear any outstanding interrupt. */ FW(fecp, ievent, 0xffc0); FW(fecp, ivec, (fep->interrupt / 2) << 29); @@ -390,11 +351,12 @@ static void restart(struct net_device *dev) } #endif + FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ /* - * adjust to duplex mode + * adjust to duplex mode */ - if (fep->duplex) { + if (fep->phydev->duplex) { FC(fecp, r_cntrl, FEC_RCNTRL_DRT); FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ } else { @@ -418,9 +380,11 @@ static void restart(struct net_device *dev) static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; fec_t *fecp = fep->fec.fecp; - struct fs_enet_mii_bus *bus = fep->mii_bus; - const struct fs_mii_bus_info *bi = bus->bus_info; + + struct fec_info* feci= fep->phydev->bus->priv; + int i; if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) @@ -444,11 +408,11 @@ static void stop(struct net_device *dev) fs_cleanup_bds(dev); /* shut down FEC1? that's where the mii bus is */ - if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) { + if (fpi->has_phy) { FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); FW(fecp, ievent, FEC_ENET_MII); - FW(fecp, mii_speed, bus->fec.mii_speed); + FW(fecp, mii_speed, feci->mii_speed); } } @@ -583,73 +547,3 @@ const struct fs_ops fs_fec_ops = { .free_bd = free_bd, }; -/***********************************************************************/ - -static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) -{ - fec_t *fecp = bus->fec.fecp; - int i, ret = -1; - - if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) - BUG(); - - /* Add PHY address to register command. */ - FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location)); - - for (i = 0; i < FEC_MII_LOOPS; i++) - if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) - break; - - if (i < FEC_MII_LOOPS) { - FW(fecp, ievent, FEC_ENET_MII); - ret = FR(fecp, mii_data) & 0xffff; - } - - return ret; -} - -static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value) -{ - fec_t *fecp = bus->fec.fecp; - int i; - - /* this must never happen */ - if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) - BUG(); - - /* Add PHY address to register command. 
*/ - FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value)); - - for (i = 0; i < FEC_MII_LOOPS; i++) - if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) - break; - - if (i < FEC_MII_LOOPS) - FW(fecp, ievent, FEC_ENET_MII); -} - -int fs_mii_fec_init(struct fs_enet_mii_bus *bus) -{ - bd_t *bd = (bd_t *)__res; - const struct fs_mii_bus_info *bi = bus->bus_info; - fec_t *fecp; - - if (bi->id != 0) - return -1; - - bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec; - bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) - & 0x3F) << 1; - - fecp = bus->fec.fecp; - - FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ - FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); - FW(fecp, ievent, FEC_ENET_MII); - FW(fecp, mii_speed, bus->fec.mii_speed); - - bus->mii_read = mii_read; - bus->mii_write = mii_write; - - return 0; -} diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index eaa24fab645f..95ec5872c507 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c @@ -369,7 +369,7 @@ static void restart(struct net_device *dev) W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); /* Set full duplex mode if needed */ - if (fep->duplex) + if (fep->phydev->duplex) S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); @@ -500,6 +500,8 @@ static void tx_restart(struct net_device *dev) scc_cr_cmd(fep, CPM_CR_RESTART_TX); } + + /*************************************************************************/ const struct fs_ops fs_scc_ops = { diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c index 48f9cf83ab6f..0b9b8b5c847c 100644 --- a/drivers/net/fs_enet/mii-bitbang.c +++ b/drivers/net/fs_enet/mii-bitbang.c @@ -33,6 +33,7 @@ #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/bitops.h> +#include <linux/platform_device.h> #include <asm/pgtable.h> #include <asm/irq.h> @@ -40,129 +41,25 @@ #include "fs_enet.h" -#ifdef CONFIG_8xx -static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) +static int bitbang_prep_bit(u8 **datp, u8 *mskp, + struct fs_mii_bit *mii_bit) { - immap_t *im = (immap_t *)fs_enet_immap; - void *dir, *dat, *ppar; + void *dat; int adv; u8 msk; - switch (port) { - case fsiop_porta: - dir = &im->im_ioport.iop_padir; - dat = &im->im_ioport.iop_padat; - ppar = &im->im_ioport.iop_papar; - break; - - case fsiop_portb: - dir = &im->im_cpm.cp_pbdir; - dat = &im->im_cpm.cp_pbdat; - ppar = &im->im_cpm.cp_pbpar; - break; - - case fsiop_portc: - dir = &im->im_ioport.iop_pcdir; - dat = &im->im_ioport.iop_pcdat; - ppar = &im->im_ioport.iop_pcpar; - break; - - case fsiop_portd: - dir = &im->im_ioport.iop_pddir; - dat = &im->im_ioport.iop_pddat; - ppar = &im->im_ioport.iop_pdpar; - break; - - case fsiop_porte: - dir = &im->im_cpm.cp_pedir; - dat = &im->im_cpm.cp_pedat; - ppar = &im->im_cpm.cp_pepar; - break; - - default: - printk(KERN_ERR DRV_MODULE_NAME - "Illegal port value %d!\n", port); - return -EINVAL; - } - - adv = bit >> 3; - dir = (char *)dir + adv; - dat = (char *)dat + adv; - ppar = (char *)ppar + adv; - - msk = 1 << (7 - (bit & 7)); - if ((in_8(ppar) & msk) != 0) { - printk(KERN_ERR DRV_MODULE_NAME - "pin %d on port %d is not general purpose!\n", bit, port); - return -EINVAL; - } - - *dirp = dir; - *datp = dat; - *mskp = msk; - - return 0; -} -#endif - -#ifdef CONFIG_8260 -static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) -{ - iop_cpm2_t *io = &((cpm2_map_t 
*)fs_enet_immap)->im_ioport; - void *dir, *dat, *ppar; - int adv; - u8 msk; - - switch (port) { - case fsiop_porta: - dir = &io->iop_pdira; - dat = &io->iop_pdata; - ppar = &io->iop_ppara; - break; - - case fsiop_portb: - dir = &io->iop_pdirb; - dat = &io->iop_pdatb; - ppar = &io->iop_pparb; - break; - - case fsiop_portc: - dir = &io->iop_pdirc; - dat = &io->iop_pdatc; - ppar = &io->iop_pparc; - break; - - case fsiop_portd: - dir = &io->iop_pdird; - dat = &io->iop_pdatd; - ppar = &io->iop_ppard; - break; - - default: - printk(KERN_ERR DRV_MODULE_NAME - "Illegal port value %d!\n", port); - return -EINVAL; - } + dat = (void*) mii_bit->offset; - adv = bit >> 3; - dir = (char *)dir + adv; + adv = mii_bit->bit >> 3; dat = (char *)dat + adv; - ppar = (char *)ppar + adv; - msk = 1 << (7 - (bit & 7)); - if ((in_8(ppar) & msk) != 0) { - printk(KERN_ERR DRV_MODULE_NAME - "pin %d on port %d is not general purpose!\n", bit, port); - return -EINVAL; - } + msk = 1 << (7 - (mii_bit->bit & 7)); - *dirp = dir; *datp = dat; *mskp = msk; return 0; } -#endif static inline void bb_set(u8 *p, u8 m) { @@ -179,44 +76,44 @@ static inline int bb_read(u8 *p, u8 m) return (in_8(p) & m) != 0; } -static inline void mdio_active(struct fs_enet_mii_bus *bus) +static inline void mdio_active(struct bb_info *bitbang) { - bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); + bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk); } -static inline void mdio_tristate(struct fs_enet_mii_bus *bus) +static inline void mdio_tristate(struct bb_info *bitbang ) { - bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); + bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk); } -static inline int mdio_read(struct fs_enet_mii_bus *bus) +static inline int mdio_read(struct bb_info *bitbang ) { - return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); + return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk); } -static inline void mdio(struct fs_enet_mii_bus *bus, int what) +static inline void mdio(struct bb_info *bitbang , int what) { if (what) - bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); + bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk); else - bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); + bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk); } -static inline void mdc(struct fs_enet_mii_bus *bus, int what) +static inline void mdc(struct bb_info *bitbang , int what) { if (what) - bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); + bb_set(bitbang->mdc_dat, bitbang->mdc_msk); else - bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); + bb_clr(bitbang->mdc_dat, bitbang->mdc_msk); } -static inline void mii_delay(struct fs_enet_mii_bus *bus) +static inline void mii_delay(struct bb_info *bitbang ) { - udelay(bus->bus_info->i.bitbang.delay); + udelay(bitbang->delay); } /* Utility to send the preamble, address, and register (common to read and write). */ -static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) +static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg) { int j; @@ -228,177 +125,284 @@ static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) * but it is safer and will be much more robust. 
*/ - mdio_active(bus); - mdio(bus, 1); + mdio_active(bitbang); + mdio(bitbang, 1); for (j = 0; j < 32; j++) { - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); } /* send the start bit (01) and the read opcode (10) or write (10) */ - mdc(bus, 0); - mdio(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, 1); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, read); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, !read); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, read); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, !read); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); /* send the PHY address */ for (j = 0; j < 5; j++) { - mdc(bus, 0); - mdio(bus, (addr & 0x10) != 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, (addr & 0x10) != 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); addr <<= 1; } /* send the register address */ for (j = 0; j < 5; j++) { - mdc(bus, 0); - mdio(bus, (reg & 0x10) != 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, (reg & 0x10) != 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); reg <<= 1; } } -static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) +static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location) { u16 rdreg; int ret, j; u8 addr = phy_id & 0xff; u8 reg = location & 0xff; + struct bb_info* bitbang = bus->priv; - bitbang_pre(bus, 1, addr, reg); + bitbang_pre(bitbang, 1, addr, reg); /* tri-state our MDIO I/O pin so we can read */ - mdc(bus, 0); - mdio_tristate(bus); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio_tristate(bitbang); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); /* check the turnaround bit: the PHY should be driving it to zero */ - if (mdio_read(bus) != 0) { + if (mdio_read(bitbang) != 0) { /* PHY didn't drive TA low */ for (j = 0; j < 32; j++) { - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); } ret = -1; goto out; } - mdc(bus, 0); - mii_delay(bus); + mdc(bitbang, 0); + mii_delay(bitbang); /* read 16 bits of register data, MSB first */ rdreg = 0; for (j = 0; j < 16; j++) { - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 1); + mii_delay(bitbang); rdreg <<= 1; - rdreg |= mdio_read(bus); - mdc(bus, 0); - mii_delay(bus); + rdreg |= mdio_read(bitbang); + mdc(bitbang, 0); + mii_delay(bitbang); } - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); ret = rdreg; out: return ret; } -static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) +static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val) { int j; + struct bb_info* bitbang = bus->priv; + u8 addr = phy_id & 0xff; u8 reg = location & 0xff; u16 value = val & 0xffff; - 
bitbang_pre(bus, 0, addr, reg); + bitbang_pre(bitbang, 0, addr, reg); /* send the turnaround (10) */ - mdc(bus, 0); - mdio(bus, 1); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); /* write 16 bits of register data, MSB first */ for (j = 0; j < 16; j++) { - mdc(bus, 0); - mdio(bus, (value & 0x8000) != 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, (value & 0x8000) != 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); value <<= 1; } /* * Tri-state the MDIO line. */ - mdio_tristate(bus); - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdio_tristate(bitbang); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + return 0; } -int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus) +static int fs_enet_mii_bb_reset(struct mii_bus *bus) +{ + /*nothing here - dunno how to reset it*/ + return 0; +} + +static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi) { - const struct fs_mii_bus_info *bi = bus->bus_info; int r; - r = bitbang_prep_bit(&bus->bitbang.mdio_dir, - &bus->bitbang.mdio_dat, - &bus->bitbang.mdio_msk, - bi->i.bitbang.mdio_port, - bi->i.bitbang.mdio_bit); + bitbang->delay = fmpi->delay; + + r = bitbang_prep_bit(&bitbang->mdio_dir, + &bitbang->mdio_dir_msk, + &fmpi->mdio_dir); if (r != 0) return r; - r = bitbang_prep_bit(&bus->bitbang.mdc_dir, - &bus->bitbang.mdc_dat, - &bus->bitbang.mdc_msk, - bi->i.bitbang.mdc_port, - bi->i.bitbang.mdc_bit); + r = bitbang_prep_bit(&bitbang->mdio_dat, + &bitbang->mdio_dat_msk, + &fmpi->mdio_dat); if (r != 0) return r; - bus->mii_read = mii_read; - bus->mii_write = mii_write; + r = bitbang_prep_bit(&bitbang->mdc_dat, + &bitbang->mdc_msk, + &fmpi->mdc_dat); + if (r != 0) + return r; return 0; } + + +static int __devinit fs_enet_mdio_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fs_mii_bb_platform_info *pdata; + struct mii_bus *new_bus; + struct bb_info *bitbang; + int err = 0; + + if (NULL == dev) + return -EINVAL; + + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + + if (NULL == new_bus) + return -ENOMEM; + + bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + + if (NULL == bitbang) + return -ENOMEM; + + new_bus->name = "BB MII Bus", + new_bus->read = &fs_enet_mii_bb_read, + new_bus->write = &fs_enet_mii_bb_write, + new_bus->reset = &fs_enet_mii_bb_reset, + new_bus->id = pdev->id; + + new_bus->phy_mask = ~0x9; + pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data; + + if (NULL == pdata) { + printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id); + return -ENODEV; + } + + /*set up workspace*/ + fs_mii_bitbang_init(bitbang, pdata); + + new_bus->priv = bitbang; + + new_bus->irq = pdata->irq; + + new_bus->dev = dev; + dev_set_drvdata(dev, new_bus); + + err = mdiobus_register(new_bus); + + if (0 != err) { + printk (KERN_ERR "%s: Cannot register as MDIO bus\n", + new_bus->name); + goto bus_register_fail; + } + + return 0; + +bus_register_fail: + kfree(bitbang); + kfree(new_bus); + + return err; +} + + +static int fs_enet_mdio_remove(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_unregister(bus); + + 
dev_set_drvdata(dev, NULL); + + iounmap((void *) (&bus->priv)); + bus->priv = NULL; + kfree(bus); + + return 0; +} + +static struct device_driver fs_enet_bb_mdio_driver = { + .name = "fsl-bb-mdio", + .bus = &platform_bus_type, + .probe = fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; + +int fs_enet_mdio_bb_init(void) +{ + return driver_register(&fs_enet_bb_mdio_driver); +} + +void fs_enet_mdio_bb_exit(void) +{ + driver_unregister(&fs_enet_bb_mdio_driver); +} + diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c new file mode 100644 index 000000000000..1328e10caa35 --- /dev/null +++ b/drivers/net/fs_enet/mii-fec.c @@ -0,0 +1,243 @@ +/* + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou <panto@intracom.gr> + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/bitops.h> +#include <linux/platform_device.h> + +#include <asm/pgtable.h> +#include <asm/irq.h> +#include <asm/uaccess.h> + +#include "fs_enet.h" +#include "fec.h" + +/* Make MII read/write commands for the FEC. 
+*/ +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) +#define mk_mii_end 0 + +#define FEC_MII_LOOPS 10000 + +static int match_has_phy (struct device *dev, void* data) +{ + struct platform_device* pdev = container_of(dev, struct platform_device, dev); + struct fs_platform_info* fpi; + if(strcmp(pdev->name, (char*)data)) + { + return 0; + } + + fpi = pdev->dev.platform_data; + if((fpi)&&(fpi->has_phy)) + return 1; + return 0; +} + +static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi) +{ + struct resource *r; + fec_t *fecp; + char* name = "fsl-cpm-fec"; + + /* we need fec in order to be useful */ + struct platform_device *fec_pdev = + container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy), + struct platform_device, dev); + + if(fec_pdev == NULL) { + printk(KERN_ERR"Unable to find PHY for %s", name); + return -ENODEV; + } + + r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs"); + + fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t)); + fec->mii_speed = fmpi->mii_speed; + + setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ + setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + out_be32(&fecp->fec_mii_speed, fec->mii_speed); + + return 0; +} + +static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) +{ + struct fec_info* fec = bus->priv; + fec_t *fecp = fec->fecp; + int i, ret = -1; + + if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) + BUG(); + + /* Add PHY address to register command. */ + out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) { + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + ret = in_be32(&fecp->fec_mii_data) & 0xffff; + } + + return ret; + +} + +static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) +{ + struct fec_info* fec = bus->priv; + fec_t *fecp = fec->fecp; + int i; + + /* this must never happen */ + if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) + BUG(); + + /* Add PHY address to register command. 
*/ + out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + + return 0; + +} + +static int fs_enet_fec_mii_reset(struct mii_bus *bus) +{ + /* nothing here - for now */ + return 0; +} + +static int __devinit fs_enet_fec_mdio_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fs_mii_fec_platform_info *pdata; + struct mii_bus *new_bus; + struct fec_info *fec; + int err = 0; + if (NULL == dev) + return -EINVAL; + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + + if (NULL == new_bus) + return -ENOMEM; + + fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL); + + if (NULL == fec) + return -ENOMEM; + + new_bus->name = "FEC MII Bus", + new_bus->read = &fs_enet_fec_mii_read, + new_bus->write = &fs_enet_fec_mii_write, + new_bus->reset = &fs_enet_fec_mii_reset, + new_bus->id = pdev->id; + + pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data; + + if (NULL == pdata) { + printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id); + return -ENODEV; + } + + /*set up workspace*/ + + fs_mii_fec_init(fec, pdata); + new_bus->priv = fec; + + new_bus->irq = pdata->irq; + + new_bus->dev = dev; + dev_set_drvdata(dev, new_bus); + + err = mdiobus_register(new_bus); + + if (0 != err) { + printk (KERN_ERR "%s: Cannot register as MDIO bus\n", + new_bus->name); + goto bus_register_fail; + } + + return 0; + +bus_register_fail: + kfree(new_bus); + + return err; +} + + +static int fs_enet_fec_mdio_remove(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_unregister(bus); + + dev_set_drvdata(dev, NULL); + kfree(bus->priv); + + bus->priv = NULL; + kfree(bus); + + return 0; +} + +static struct device_driver fs_enet_fec_mdio_driver = { + .name = "fsl-cpm-fec-mdio", + .bus = &platform_bus_type, + .probe = fs_enet_fec_mdio_probe, + .remove = fs_enet_fec_mdio_remove, +}; + +int fs_enet_mdio_fec_init(void) +{ + return driver_register(&fs_enet_fec_mdio_driver); +} + +void fs_enet_mdio_fec_exit(void) +{ + driver_unregister(&fs_enet_fec_mdio_driver); +} + diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c deleted file mode 100644 index ae4a9c3bb393..000000000000 --- a/drivers/net/fs_enet/mii-fixed.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. - * - * Copyright (c) 2003 Intracom S.A. - * by Pantelis Antoniou <panto@intracom.gr> - * - * 2005 (c) MontaVista Software, Inc. - * Vitaly Bordug <vbordug@ru.mvista.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/mii.h> -#include <linux/ethtool.h> -#include <linux/bitops.h> - -#include <asm/pgtable.h> -#include <asm/irq.h> -#include <asm/uaccess.h> - -#include "fs_enet.h" - -static const u16 mii_regs[7] = { - 0x3100, - 0x786d, - 0x0fff, - 0x0fff, - 0x01e1, - 0x45e1, - 0x0003, -}; - -static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) -{ - int ret = 0; - - if ((unsigned int)location >= ARRAY_SIZE(mii_regs)) - return -1; - - if (location != 5) - ret = mii_regs[location]; - else - ret = bus->fixed.lpa; - - return ret; -} - -static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) -{ - /* do nothing */ -} - -int fs_mii_fixed_init(struct fs_enet_mii_bus *bus) -{ - const struct fs_mii_bus_info *bi = bus->bus_info; - - bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */ - - /* if speed is fixed at 10Mb, remove 100Mb modes */ - if (bi->i.fixed.speed == 10) - bus->fixed.lpa &= ~LPA_100; - - /* if duplex is half, remove full duplex modes */ - if (bi->i.fixed.duplex == 0) - bus->fixed.lpa &= ~LPA_DUPLEX; - - bus->mii_read = mii_read; - bus->mii_write = mii_write; - - return 0; -} diff --git a/drivers/net/lance.c b/drivers/net/lance.c index c1c3452c90ca..5b4dbfe5fb77 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c @@ -326,7 +326,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)"); MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); -int init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c index 646e89fc3562..c0ec7f6abcb2 100644 --- a/drivers/net/lne390.c +++ b/drivers/net/lne390.c @@ -406,7 +406,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)"); MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); MODULE_LICENSE("GPL"); -int init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 07ca9480a6fe..9bdd43ab3573 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -177,6 +177,7 @@ struct myri10ge_priv { struct work_struct watchdog_work; struct timer_list watchdog_timer; int watchdog_tx_done; + int watchdog_tx_req; int watchdog_resets; int tx_linearized; int pause; @@ -448,6 +449,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) struct mcp_gen_header *hdr; size_t hdr_offset; int status; + unsigned i; if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { dev_err(dev, "Unable to load %s firmware image via hotplug\n", @@ -479,18 +481,12 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) goto abort_with_fw; crc = crc32(~0, fw->data, fw->size); - if (mgp->tx.boundary == 2048) { - /* Avoid PCI burst on chipset with unaligned completions. 
*/ - int i; - __iomem u32 *ptr = (__iomem u32 *) (mgp->sram + - MYRI10GE_FW_OFFSET); - for (i = 0; i < fw->size / 4; i++) { - __raw_writel(((u32 *) fw->data)[i], ptr + i); - wmb(); - } - } else { - myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data, - fw->size); + for (i = 0; i < fw->size; i += 256) { + myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i, + fw->data + i, + min(256U, (unsigned)(fw->size - i))); + mb(); + readb(mgp->sram); } /* corruption checking is good for parity recovery and buggy chipset */ memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); @@ -620,7 +616,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) return -ENXIO; } dev_info(&mgp->pdev->dev, "handoff confirmed\n"); - myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); + myri10ge_dummy_rdma(mgp, 1); return 0; } @@ -2429,7 +2425,7 @@ static int myri10ge_resume(struct pci_dev *pdev) } myri10ge_reset(mgp); - myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); + myri10ge_dummy_rdma(mgp, 1); /* Save configuration space to be restored if the * nic resets due to a parity error */ @@ -2547,7 +2543,8 @@ static void myri10ge_watchdog_timer(unsigned long arg) mgp = (struct myri10ge_priv *)arg; if (mgp->tx.req != mgp->tx.done && - mgp->tx.done == mgp->watchdog_tx_done) + mgp->tx.done == mgp->watchdog_tx_done && + mgp->watchdog_tx_req != mgp->watchdog_tx_done) /* nic seems like it might be stuck.. */ schedule_work(&mgp->watchdog_work); else @@ -2556,6 +2553,7 @@ static void myri10ge_watchdog_timer(unsigned long arg) jiffies + myri10ge_watchdog_timeout * HZ); mgp->watchdog_tx_done = mgp->tx.done; + mgp->watchdog_tx_req = mgp->tx.req; } static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index fa854c8fde75..4d52ecf8af56 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -1323,7 +1323,7 @@ MODULE_PARM_DESC(irq, "NI5210 IRQ number,required"); MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); -int init_module(void) +int __init init_module(void) { if(io <= 0x0 || !memend || !memstart || irq < 2) { printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index bb42ff218484..810cc572f5f7 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c @@ -1253,7 +1253,7 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); MODULE_PARM_DESC(io, "ni6510 I/O base address"); MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); -int init_module(void) +int __init init_module(void) { dev_ni65 = ni65_probe(-1); return IS_ERR(dev_ni65) ? 
PTR_ERR(dev_ni65) : 0; diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 9bae77ce1314..4122bb46f5ff 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -345,6 +345,7 @@ typedef struct local_info_t { void __iomem *dingo_ccr; /* only used for CEM56 cards */ unsigned last_ptr_value; /* last packets transmitted value */ const char *manf_str; + struct work_struct tx_timeout_task; } local_info_t; /**************** @@ -352,6 +353,7 @@ typedef struct local_info_t { */ static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); static void do_tx_timeout(struct net_device *dev); +static void xirc2ps_tx_timeout_task(void *data); static struct net_device_stats *do_get_stats(struct net_device *dev); static void set_addresses(struct net_device *dev); static void set_multicast_list(struct net_device *dev); @@ -589,6 +591,7 @@ xirc2ps_probe(struct pcmcia_device *link) #ifdef HAVE_TX_TIMEOUT dev->tx_timeout = do_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; + INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev); #endif return xirc2ps_config(link); @@ -1341,17 +1344,24 @@ xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs) /*====================================================================*/ static void -do_tx_timeout(struct net_device *dev) +xirc2ps_tx_timeout_task(void *data) { - local_info_t *lp = netdev_priv(dev); - printk(KERN_NOTICE "%s: transmit timed out\n", dev->name); - lp->stats.tx_errors++; + struct net_device *dev = data; /* reset the card */ do_reset(dev,1); dev->trans_start = jiffies; netif_wake_queue(dev); } +static void +do_tx_timeout(struct net_device *dev) +{ + local_info_t *lp = netdev_priv(dev); + lp->stats.tx_errors++; + printk(KERN_NOTICE "%s: transmit timed out\n", dev->name); + schedule_work(&lp->tx_timeout_task); +} + static int do_start_xmit(struct sk_buff *skb, struct net_device *dev) { diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 4daafe303358..d50bcb89dd28 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -202,6 +202,8 @@ static int homepna[MAX_UNITS]; #define CSR15 15 #define PCNET32_MC_FILTER 8 +#define PCNET32_79C970A 0x2621 + /* The PCNET32 Rx and Tx ring descriptors. */ struct pcnet32_rx_head { u32 base; @@ -289,6 +291,7 @@ struct pcnet32_private { /* each bit indicates an available PHY */ u32 phymask; + unsigned short chip_version; /* which variant this is */ }; static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); @@ -724,9 +727,11 @@ static u32 pcnet32_get_link(struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { r = mii_link_ok(&lp->mii_if); - } else { + } else if (lp->chip_version >= PCNET32_79C970A) { ulong ioaddr = dev->base_addr; /* card base I/O address */ r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); + } else { /* can not detect link on really old chips */ + r = 1; } spin_unlock_irqrestore(&lp->lock, flags); @@ -1091,6 +1096,10 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, ulong ioaddr = dev->base_addr; int ticks; + /* really old chips have to be stopped. 
*/ + if (lp->chip_version < PCNET32_79C970A) + return 0; + /* set SUSPEND (SPND) - CSR5 bit 0 */ csr5 = a->read_csr(ioaddr, CSR5); a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); @@ -1529,6 +1538,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->mii_if.reg_num_mask = 0x1f; lp->dxsuflo = dxsuflo; lp->mii = mii; + lp->chip_version = chip_version; lp->msg_enable = pcnet32_debug; if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping))) @@ -1839,10 +1849,7 @@ static int pcnet32_open(struct net_device *dev) val |= 2; } else if (lp->options & PCNET32_PORT_ASEL) { /* workaround of xSeries250, turn on for 79C975 only */ - i = ((lp->a.read_csr(ioaddr, 88) | - (lp->a. - read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff; - if (i == 0x2627) + if (lp->chip_version == 0x2627) val |= 3; } lp->a.write_bcr(ioaddr, 9, val); @@ -1986,9 +1993,11 @@ static int pcnet32_open(struct net_device *dev) netif_start_queue(dev); - /* Print the link status and start the watchdog */ - pcnet32_check_media(dev, 1); - mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); + if (lp->chip_version >= PCNET32_79C970A) { + /* Print the link status and start the watchdog */ + pcnet32_check_media(dev, 1); + mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); + } i = 0; while (i++ < 100) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 2ba6d3a40e2e..b79ec0d7480f 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -56,5 +56,22 @@ config SMSC_PHY ---help--- Currently supports the LAN83C185 PHY +config FIXED_PHY + tristate "Drivers for PHY emulation on fixed speed/link" + depends on PHYLIB + ---help--- + Adds the driver to the PHY layer to cover boards that do not have any PHY bound, + but where the speed/link can be manipulated in software. The relevant MII + speed/duplex parameters can be handled in a user-specified function. + Currently tested with mpc866ads. + +config FIXED_MII_10_FDX + bool "Emulation for 10M Fdx fixed PHY behavior" + depends on FIXED_PHY + +config FIXED_MII_100_FDX + bool "Emulation for 100M Fdx fixed PHY behavior" + depends on FIXED_PHY + endmenu diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index a00e61942525..320f8323123f 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o obj-$(CONFIG_SMSC_PHY) += smsc.o obj-$(CONFIG_VITESSE_PHY) += vitesse.o +obj-$(CONFIG_FIXED_PHY) += fixed.o diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c new file mode 100644 index 000000000000..341036df4710 --- /dev/null +++ b/drivers/net/phy/fixed.c @@ -0,0 +1,358 @@ +/* + * drivers/net/phy/fixed.c + * + * Driver for fixed PHYs, when the transceiver is able to operate in one fixed mode. + * + * Author: Vitaly Bordug + * + * Copyright (c) 2006 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version.
+ * + */ +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/unistd.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/phy.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/uaccess.h> + +#define MII_REGS_NUM 7 + +/* + The idea is to emulate normal phy behavior by responding with + pre-defined values to mii BMCR read, so that read_status hook could + take all the needed info. +*/ + +struct fixed_phy_status { + u8 link; + u16 speed; + u8 duplex; +}; + +/*----------------------------------------------------------------------------- + * Private information holder for mii_bus + *-----------------------------------------------------------------------------*/ +struct fixed_info { + u16 *regs; + u8 regs_num; + struct fixed_phy_status phy_status; + struct phy_device *phydev; /* pointer to the container */ + /* link & speed cb */ + int(*link_update)(struct net_device*, struct fixed_phy_status*); + +}; + +/*----------------------------------------------------------------------------- + * If something weird is required to be done with link/speed, + * network driver is able to assign a function to implement this. + * May be useful for PHY's that need to be software-driven. + *-----------------------------------------------------------------------------*/ +int fixed_mdio_set_link_update(struct phy_device* phydev, + int(*link_update)(struct net_device*, struct fixed_phy_status*)) +{ + struct fixed_info *fixed; + + if(link_update == NULL) + return -EINVAL; + + if(phydev) { + if(phydev->bus) { + fixed = phydev->bus->priv; + fixed->link_update = link_update; + return 0; + } + } + return -EINVAL; +} +EXPORT_SYMBOL(fixed_mdio_set_link_update); + +/*----------------------------------------------------------------------------- + * This is used for updating internal mii regs from the status + *-----------------------------------------------------------------------------*/ +static int fixed_mdio_update_regs(struct fixed_info *fixed) +{ + u16 *regs = fixed->regs; + u16 bmsr = 0; + u16 bmcr = 0; + + if(!regs) { + printk(KERN_ERR "%s: regs not set up", __FUNCTION__); + return -EINVAL; + } + + if(fixed->phy_status.link) + bmsr |= BMSR_LSTATUS; + + if(fixed->phy_status.duplex) { + bmcr |= BMCR_FULLDPLX; + + switch ( fixed->phy_status.speed ) { + case 100: + bmsr |= BMSR_100FULL; + bmcr |= BMCR_SPEED100; + break; + + case 10: + bmsr |= BMSR_10FULL; + break; + } + } else { + switch ( fixed->phy_status.speed ) { + case 100: + bmsr |= BMSR_100HALF; + bmcr |= BMCR_SPEED100; + break; + + case 10: + bmsr |= BMSR_10HALF; + break; + } + } + + regs[MII_BMCR] = bmcr; + regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx*/ + + return 0; +} + +static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location) +{ + struct fixed_info *fixed = bus->priv; + + /* if user has registered link update callback, use it */ + if(fixed->phydev) + if(fixed->phydev->attached_dev) { + if(fixed->link_update) { + fixed->link_update(fixed->phydev->attached_dev, + &fixed->phy_status); + fixed_mdio_update_regs(fixed); + } + } + + if ((unsigned int)location >= fixed->regs_num) + return -1; + return fixed->regs[location]; +} + +static
int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) +{ + /* do nothing for now*/ + return 0; +} + +static int fixed_mii_reset(struct mii_bus *bus) +{ + /*nothing here - no way/need to reset it*/ + return 0; +} + +static int fixed_config_aneg(struct phy_device *phydev) +{ + /* :TODO:03/13/2006 09:45:37 PM:: + The full autoneg functionality can be emulated, + but no need to have anything here for now + */ + return 0; +} + +/*----------------------------------------------------------------------------- + * the manual bind will do the magic - with phy_id_mask == 0 + * match will never return true... + *-----------------------------------------------------------------------------*/ +static struct phy_driver fixed_mdio_driver = { + .name = "Fixed PHY", + .features = PHY_BASIC_FEATURES, + .config_aneg = fixed_config_aneg, + .read_status = genphy_read_status, + .driver = { .owner = THIS_MODULE,}, +}; + +/*----------------------------------------------------------------------------- + * This func is used to create all the necessary stuff, bind + * the fixed phy driver and register it on the mdio_bus_type. + * speed is either 10 or 100, duplex is boolean. + * number is used to create multiple fixed PHYs, so that several devices can + * utilize them simultaneously. + *-----------------------------------------------------------------------------*/ +static int fixed_mdio_register_device(int number, int speed, int duplex) +{ + struct mii_bus *new_bus; + struct fixed_info *fixed; + struct phy_device *phydev; + int err = 0; + + struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL); + + if (NULL == dev) + return -ENOMEM; + + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + + if (NULL == new_bus) { + kfree(dev); + return -ENOMEM; + } + fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL); + + if (NULL == fixed) { + kfree(dev); + kfree(new_bus); + return -ENOMEM; + } + + fixed->regs = kzalloc(MII_REGS_NUM*sizeof(int), GFP_KERNEL); + fixed->regs_num = MII_REGS_NUM; + fixed->phy_status.speed = speed; + fixed->phy_status.duplex = duplex; + fixed->phy_status.link = 1; + + new_bus->name = "Fixed MII Bus", + new_bus->read = &fixed_mii_read, + new_bus->write = &fixed_mii_write, + new_bus->reset = &fixed_mii_reset, + + /*set up workspace*/ + fixed_mdio_update_regs(fixed); + new_bus->priv = fixed; + + new_bus->dev = dev; + dev_set_drvdata(dev, new_bus); + + /* create phy_device and register it on the mdio bus */ + phydev = phy_device_create(new_bus, 0, 0); + + /* + Put the phydev pointer into the fixed_info so that the bus read/write code + can access the attached netdev, for instance. It only has to do + so when a user-specified link-update callback is utilized... + */ + fixed->phydev = phydev; + + if(NULL == phydev) { + err = -ENOMEM; + goto device_create_fail; + } + + phydev->irq = -1; + phydev->dev.bus = &mdio_bus_type; + + if(number) + snprintf(phydev->dev.bus_id, BUS_ID_SIZE, + "fixed_%d@%d:%d", number, speed, duplex); + else + snprintf(phydev->dev.bus_id, BUS_ID_SIZE, + "fixed@%d:%d", speed, duplex); + phydev->bus = new_bus; + + err = device_register(&phydev->dev); + if(err) { + printk(KERN_ERR "Phy %s failed to register\n", + phydev->dev.bus_id); + goto bus_register_fail; + } + + /* + the mdio bus has phy_id match... In order not to do it + artificially, we are binding the driver here by hand; + it will be the same for all the fixed phys anyway.
+ */ + down_write(&phydev->dev.bus->subsys.rwsem); + + phydev->dev.driver = &fixed_mdio_driver.driver; + + err = phydev->dev.driver->probe(&phydev->dev); + if(err < 0) { + printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id); + up_write(&phydev->dev.bus->subsys.rwsem); + goto probe_fail; + } + + device_bind_driver(&phydev->dev); + up_write(&phydev->dev.bus->subsys.rwsem); + + return 0; + +probe_fail: + device_unregister(&phydev->dev); +bus_register_fail: + kfree(phydev); +device_create_fail: + kfree(dev); + kfree(new_bus); + kfree(fixed); + + return err; +} + + +MODULE_DESCRIPTION("Fixed PHY device & driver for PAL"); +MODULE_AUTHOR("Vitaly Bordug"); +MODULE_LICENSE("GPL"); + +static int __init fixed_init(void) +{ + int ret; + int duplex = 0; + + /* register on the bus... Not expected to be matched with anything there... */ + phy_driver_register(&fixed_mdio_driver); + + /* So let the fun begin... + We will create several mdio devices here, and will bind the upper + driver to them. + + Then the external software can look up the phy bus by searching + fixed@speed:duplex, e.g. fixed@100:1, to be connected to the + virtual 100M Fdx phy. + + In case several virtual PHYs are required, the bus_id will be in the form + fixed_<num>@<speed>:<duplex>, which even makes it possible to define a + driver-specific link control callback, if for instance the PHY is completely + SW-driven. + + */ + +#ifdef CONFIG_FIXED_MII_DUPLEX + duplex = 1; +#endif + +#ifdef CONFIG_FIXED_MII_100_FDX + fixed_mdio_register_device(0, 100, 1); +#endif + +#ifdef CONFIG_FIXED_MII_10_FDX + fixed_mdio_register_device(0, 10, 1); +#endif + return 0; +} + +static void __exit fixed_exit(void) +{ + phy_driver_unregister(&fixed_mdio_driver); + /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */ +} + +module_init(fixed_init); +module_exit(fixed_exit); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 1dde390c164d..cf6660c93ffa 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -159,6 +159,7 @@ struct bus_type mdio_bus_type = { .suspend = mdio_bus_suspend, .resume = mdio_bus_resume, }; +EXPORT_SYMBOL(mdio_bus_type); int __init mdio_bus_init(void) { diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7d5c2233c252..f5aad77288f9 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -419,9 +419,8 @@ void phy_start_machine(struct phy_device *phydev, /* phy_stop_machine * - * description: Stops the state machine timer, sets the state to - * UP (unless it wasn't up yet), and then frees the interrupt, - * if it is in use. This function must be called BEFORE + * description: Stops the state machine timer, sets the state to UP + * (unless it wasn't up yet). This function must be called BEFORE + * phy_detach.
*/ void phy_stop_machine(struct phy_device *phydev) @@ -433,9 +432,6 @@ void phy_stop_machine(struct phy_device *phydev) phydev->state = PHY_UP; spin_unlock(&phydev->lock); - if (phydev->irq != PHY_POLL) - phy_stop_interrupts(phydev); - phydev->adjust_state = NULL; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1bc1e032c5d6..2d1ecfdc80db 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -45,6 +45,35 @@ static struct phy_driver genphy_driver; extern int mdio_bus_init(void); extern void mdio_bus_exit(void); +struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) +{ + struct phy_device *dev; + /* We allocate the device, and initialize the + * default values */ + dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); + + if (NULL == dev) + return (struct phy_device*) PTR_ERR((void*)-ENOMEM); + + dev->speed = 0; + dev->duplex = -1; + dev->pause = dev->asym_pause = 0; + dev->link = 1; + + dev->autoneg = AUTONEG_ENABLE; + + dev->addr = addr; + dev->phy_id = phy_id; + dev->bus = bus; + + dev->state = PHY_DOWN; + + spin_lock_init(&dev->lock); + + return dev; +} +EXPORT_SYMBOL(phy_device_create); + /* get_phy_device * * description: Reads the ID registers of the PHY at addr on the @@ -78,27 +107,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr) if (0xffffffff == phy_id) return NULL; - /* Otherwise, we allocate the device, and initialize the - * default values */ - dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); - - if (NULL == dev) - return ERR_PTR(-ENOMEM); - - dev->speed = 0; - dev->duplex = -1; - dev->pause = dev->asym_pause = 0; - dev->link = 1; - - dev->autoneg = AUTONEG_ENABLE; - - dev->addr = addr; - dev->phy_id = phy_id; - dev->bus = bus; - - dev->state = PHY_DOWN; - - spin_lock_init(&dev->lock); + dev = phy_device_create(bus, addr, phy_id); return dev; } diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 0ec6e9d57b94..c872f7c6cce3 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -192,7 +192,7 @@ struct cardmap { void *ptr[CARDMAP_WIDTH]; }; static void *cardmap_get(struct cardmap *map, unsigned int nr); -static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); +static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); static unsigned int cardmap_find_first_free(struct cardmap *map); static void cardmap_destroy(struct cardmap **map); @@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan) { struct channel *pch; - pch = kmalloc(sizeof(struct channel), GFP_KERNEL); + pch = kzalloc(sizeof(struct channel), GFP_KERNEL); if (pch == 0) return -ENOMEM; - memset(pch, 0, sizeof(struct channel)); pch->ppp = NULL; pch->chan = chan; chan->ppp = pch; @@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp) int ret = -ENOMEM; int i; - ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL); + ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL); if (!ppp) goto out; dev = alloc_netdev(0, "", ppp_setup); if (!dev) goto out1; - memset(ppp, 0, sizeof(struct ppp)); ppp->mru = PPP_MRU; init_ppp_file(&ppp->file, INTERFACE); @@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp) } atomic_inc(&ppp_unit_count); - cardmap_set(&all_ppp_units, unit, ppp); + ret = cardmap_set(&all_ppp_units, unit, ppp); + if (ret != 0) + goto out3; + mutex_unlock(&all_ppp_mutex); *retp = 0; return ppp; +out3: + atomic_dec(&ppp_unit_count); out2: mutex_unlock(&all_ppp_mutex); free_netdev(dev); @@ -2695,7 +2698,7 @@ static void *cardmap_get(struct 
cardmap *map, unsigned int nr) return NULL; } -static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) +static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) { struct cardmap *p; int i; @@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) { do { /* need a new top level */ - struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); - memset(np, 0, sizeof(*np)); + struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL); + if (!np) + goto enomem; np->ptr[0] = p; if (p != NULL) { np->shift = p->shift + CARDMAP_ORDER; @@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) while (p->shift > 0) { i = (nr >> p->shift) & CARDMAP_MASK; if (p->ptr[i] == NULL) { - struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); - memset(np, 0, sizeof(*np)); + struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL); + if (!np) + goto enomem; np->shift = p->shift - CARDMAP_ORDER; np->parent = p; p->ptr[i] = np; @@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) set_bit(i, &p->inuse); else clear_bit(i, &p->inuse); + return 0; + enomem: + return -ENOMEM; } static unsigned int cardmap_find_first_free(struct cardmap *map) diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index e1fe3a0a7b0b..e72e0e099060 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -71,12 +71,13 @@ #include <asm/uaccess.h> #include <asm/io.h> #include <asm/div64.h> +#include <asm/irq.h> /* local include */ #include "s2io.h" #include "s2io-regs.h" -#define DRV_VERSION "2.0.14.2" +#define DRV_VERSION "2.0.15.2" /* S2io Driver name & version. */ static char s2io_driver_name[] = "Neterion"; @@ -370,38 +371,50 @@ static const u64 fix_mac[] = { END_SIGN }; +MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + + /* Module Loadable parameters. */ -static unsigned int tx_fifo_num = 1; -static unsigned int tx_fifo_len[MAX_TX_FIFOS] = - {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; -static unsigned int rx_ring_num = 1; -static unsigned int rx_ring_sz[MAX_RX_RINGS] = - {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; -static unsigned int rts_frm_len[MAX_RX_RINGS] = - {[0 ...(MAX_RX_RINGS - 1)] = 0 }; -static unsigned int rx_ring_mode = 1; -static unsigned int use_continuous_tx_intrs = 1; -static unsigned int rmac_pause_time = 0x100; -static unsigned int mc_pause_threshold_q0q3 = 187; -static unsigned int mc_pause_threshold_q4q7 = 187; -static unsigned int shared_splits; -static unsigned int tmac_util_period = 5; -static unsigned int rmac_util_period = 5; -static unsigned int bimodal = 0; -static unsigned int l3l4hdr_size = 128; -#ifndef CONFIG_S2IO_NAPI -static unsigned int indicate_max_pkts; -#endif +S2IO_PARM_INT(tx_fifo_num, 1); +S2IO_PARM_INT(rx_ring_num, 1); + + +S2IO_PARM_INT(rx_ring_mode, 1); +S2IO_PARM_INT(use_continuous_tx_intrs, 1); +S2IO_PARM_INT(rmac_pause_time, 0x100); +S2IO_PARM_INT(mc_pause_threshold_q0q3, 187); +S2IO_PARM_INT(mc_pause_threshold_q4q7, 187); +S2IO_PARM_INT(shared_splits, 0); +S2IO_PARM_INT(tmac_util_period, 5); +S2IO_PARM_INT(rmac_util_period, 5); +S2IO_PARM_INT(bimodal, 0); +S2IO_PARM_INT(l3l4hdr_size, 128); /* Frequency of Rx desc syncs expressed as power of 2 */ -static unsigned int rxsync_frequency = 3; +S2IO_PARM_INT(rxsync_frequency, 3); /* Interrupt type. 
Values can be 0(INTA), 1(MSI), 2(MSI_X) */ -static unsigned int intr_type = 0; +S2IO_PARM_INT(intr_type, 0); /* Large receive offload feature */ -static unsigned int lro = 0; +S2IO_PARM_INT(lro, 0); /* Max pkts to be aggregated by LRO at one time. If not specified, * aggregation happens until we hit max IP pkt size(64K) */ -static unsigned int lro_max_pkts = 0xFFFF; +S2IO_PARM_INT(lro_max_pkts, 0xFFFF); +#ifndef CONFIG_S2IO_NAPI +S2IO_PARM_INT(indicate_max_pkts, 0); +#endif + +static unsigned int tx_fifo_len[MAX_TX_FIFOS] = + {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; +static unsigned int rx_ring_sz[MAX_RX_RINGS] = + {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; +static unsigned int rts_frm_len[MAX_RX_RINGS] = + {[0 ...(MAX_RX_RINGS - 1)] = 0 }; + +module_param_array(tx_fifo_len, uint, NULL, 0); +module_param_array(rx_ring_sz, uint, NULL, 0); +module_param_array(rts_frm_len, uint, NULL, 0); /* * S2IO device table. @@ -464,10 +477,9 @@ static int init_shared_mem(struct s2io_nic *nic) size += config->tx_cfg[i].fifo_len; } if (size > MAX_AVAILABLE_TXDS) { - DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ", - __FUNCTION__); + DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, "); DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); - return FAILURE; + return -EINVAL; } lst_size = (sizeof(TxD_t) * config->max_txds); @@ -547,6 +559,7 @@ static int init_shared_mem(struct s2io_nic *nic) nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); if (!nic->ufo_in_band_v) return -ENOMEM; + memset(nic->ufo_in_band_v, 0, size); /* Allocation and initialization of RXDs in Rings */ size = 0; @@ -1213,7 +1226,7 @@ static int init_nic(struct s2io_nic *nic) break; } - /* Enable Tx FIFO partition 0. */ + /* Enable all configured Tx FIFO partitions */ val64 = readq(&bar0->tx_fifo_partition_0); val64 |= (TX_FIFO_PARTITION_EN); writeq(val64, &bar0->tx_fifo_partition_0); @@ -1650,7 +1663,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) writeq(temp64, &bar0->general_int_mask); /* * If Hercules adapter enable GPIO otherwise - * disabled all PCIX, Flash, MDIO, IIC and GPIO + * disable all PCIX, Flash, MDIO, IIC and GPIO * interrupts for now. * TODO */ @@ -2119,7 +2132,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in frag->size, PCI_DMA_TODEVICE); } } - txdlp->Host_Control = 0; + memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds)); return(skb); } @@ -2371,9 +2384,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) skb->data = (void *) (unsigned long)tmp; skb->tail = (void *) (unsigned long)tmp; - ((RxD3_t*)rxdp)->Buffer0_ptr = - pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, + if (!(((RxD3_t*)rxdp)->Buffer0_ptr)) + ((RxD3_t*)rxdp)->Buffer0_ptr = + pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); + else + pci_dma_sync_single_for_device(nic->pdev, + (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, + BUF0_LEN, PCI_DMA_FROMDEVICE); rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); if (nic->rxd_mode == RXD_MODE_3B) { /* Two buffer mode */ @@ -2386,10 +2404,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) (nic->pdev, skb->data, dev->mtu + 4, PCI_DMA_FROMDEVICE); - /* Buffer-1 will be dummy buffer not used */ - ((RxD3_t*)rxdp)->Buffer1_ptr = - pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, - PCI_DMA_FROMDEVICE); + /* Buffer-1 will be dummy buffer. 
Not used */ + if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) { + ((RxD3_t*)rxdp)->Buffer1_ptr = + pci_map_single(nic->pdev, + ba->ba_1, BUF1_LEN, + PCI_DMA_FROMDEVICE); + } rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); rxdp->Control_2 |= SET_BUFFER2_SIZE_3 (dev->mtu + 4); @@ -2614,23 +2635,23 @@ no_rx: } #endif +#ifdef CONFIG_NET_POLL_CONTROLLER /** - * s2io_netpoll - Rx interrupt service handler for netpoll support + * s2io_netpoll - netpoll event handler entry point * @dev : pointer to the device structure. * Description: - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. + * This function will be called by upper layer to check for events on the + * interface in situations where interrupts are disabled. It is used for + * specific in-kernel networking tasks, such as remote consoles and kernel + * debugging over the network (example netdump in RedHat). */ - -#ifdef CONFIG_NET_POLL_CONTROLLER static void s2io_netpoll(struct net_device *dev) { nic_t *nic = dev->priv; mac_info_t *mac_control; struct config_param *config; XENA_dev_config_t __iomem *bar0 = nic->bar0; - u64 val64; + u64 val64 = 0xFFFFFFFFFFFFFFFFULL; int i; disable_irq(dev->irq); @@ -2639,9 +2660,17 @@ static void s2io_netpoll(struct net_device *dev) mac_control = &nic->mac_control; config = &nic->config; - val64 = readq(&bar0->rx_traffic_int); writeq(val64, &bar0->rx_traffic_int); + writeq(val64, &bar0->tx_traffic_int); + /* we need to free up the transmitted skbufs or else netpoll will + * run out of skbs and will fail and eventually netpoll application such + * as netdump will fail. + */ + for (i = 0; i < config->tx_fifo_num; i++) + tx_intr_handler(&mac_control->fifos[i]); + + /* check for received packet and indicate up to network */ for (i = 0; i < config->rx_ring_num; i++) rx_intr_handler(&mac_control->rings[i]); @@ -2708,7 +2737,7 @@ static void rx_intr_handler(ring_info_t *ring_data) /* If your are next to put index then it's FIFO full condition */ if ((get_block == put_block) && (get_info.offset + 1) == put_info.offset) { - DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name); + DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); break; } skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); @@ -2728,18 +2757,15 @@ static void rx_intr_handler(ring_info_t *ring_data) HEADER_SNAP_SIZE, PCI_DMA_FROMDEVICE); } else if (nic->rxd_mode == RXD_MODE_3B) { - pci_unmap_single(nic->pdev, (dma_addr_t) + pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); pci_unmap_single(nic->pdev, (dma_addr_t) - ((RxD3_t*)rxdp)->Buffer1_ptr, - BUF1_LEN, PCI_DMA_FROMDEVICE); - pci_unmap_single(nic->pdev, (dma_addr_t) ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu + 4, PCI_DMA_FROMDEVICE); } else { - pci_unmap_single(nic->pdev, (dma_addr_t) + pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); pci_unmap_single(nic->pdev, (dma_addr_t) @@ -3327,7 +3353,7 @@ static void s2io_reset(nic_t * sp) /* Clear certain PCI/PCI-X fields after reset */ if (sp->device_type == XFRAME_II_DEVICE) { - /* Clear parity err detect bit */ + /* Clear "detected parity error" bit */ pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); /* Clearing PCIX Ecc status register */ @@ -3528,7 +3554,7 @@ static void restore_xmsi_data(nic_t *nic) u64 val64; int i; - for (i=0; i< nic->avail_msix_vectors; i++) { + for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 
writeq(nic->msix_info[i].addr, &bar0->xmsi_address); writeq(nic->msix_info[i].data, &bar0->xmsi_data); val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); @@ -3547,7 +3573,7 @@ static void store_xmsi_data(nic_t *nic) int i; /* Store and display */ - for (i=0; i< nic->avail_msix_vectors; i++) { + for (i=0; i < MAX_REQUESTED_MSI_X; i++) { val64 = (BIT(15) | vBIT(i, 26, 6)); writeq(val64, &bar0->xmsi_access); if (wait_for_msix_trans(nic, i)) { @@ -3808,13 +3834,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) TxD_t *txdp; TxFIFO_element_t __iomem *tx_fifo; unsigned long flags; -#ifdef NETIF_F_TSO - int mss; -#endif u16 vlan_tag = 0; int vlan_priority = 0; mac_info_t *mac_control; struct config_param *config; + int offload_type; mac_control = &sp->mac_control; config = &sp->config; @@ -3862,13 +3886,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } - txdp->Control_1 = 0; - txdp->Control_2 = 0; + offload_type = s2io_offload_type(skb); #ifdef NETIF_F_TSO - mss = skb_shinfo(skb)->gso_size; - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { txdp->Control_1 |= TXD_TCP_LSO_EN; - txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); + txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); } #endif if (skb->ip_summed == CHECKSUM_HW) { @@ -3886,10 +3908,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) } frg_len = skb->len - skb->data_len; - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) { + if (offload_type == SKB_GSO_UDP) { int ufo_size; - ufo_size = skb_shinfo(skb)->gso_size; + ufo_size = s2io_udp_mss(skb); ufo_size &= ~7; txdp->Control_1 |= TXD_UFO_EN; txdp->Control_1 |= TXD_UFO_MSS(ufo_size); @@ -3906,16 +3928,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) sp->ufo_in_band_v, sizeof(u64), PCI_DMA_TODEVICE); txdp++; - txdp->Control_1 = 0; - txdp->Control_2 = 0; } txdp->Buffer_Pointer = pci_map_single (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); txdp->Host_Control = (unsigned long) skb; txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); - - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) + if (offload_type == SKB_GSO_UDP) txdp->Control_1 |= TXD_UFO_EN; frg_cnt = skb_shinfo(skb)->nr_frags; @@ -3930,12 +3949,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) (sp->pdev, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE); txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) + if (offload_type == SKB_GSO_UDP) txdp->Control_1 |= TXD_UFO_EN; } txdp->Control_1 |= TXD_GATHER_CODE_LAST; - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) + if (offload_type == SKB_GSO_UDP) frg_cnt++; /* as Txd0 was used for inband header */ tx_fifo = mac_control->tx_FIFO_start[queue]; @@ -3944,13 +3963,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | TX_FIFO_LAST_LIST); - -#ifdef NETIF_F_TSO - if (mss) - val64 |= TX_FIFO_SPECIAL_FUNC; -#endif - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) + if (offload_type) val64 |= TX_FIFO_SPECIAL_FUNC; + writeq(val64, &tx_fifo->List_Control); mmiowb(); @@ -3984,13 +3999,41 @@ s2io_alarm_handle(unsigned long data) mod_timer(&sp->alarm_timer, jiffies + HZ / 2); } +static int s2io_chk_rx_buffers(nic_t *sp, int rng_n) +{ + int rxb_size, level; + + if (!sp->lro) { + rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); + level = rx_buffer_level(sp, rxb_size, rng_n); + + if ((level == PANIC) && 
(!TASKLET_IN_USE)) { + int ret; + DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); + DBG_PRINT(INTR_DBG, "PANIC levels\n"); + if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { + DBG_PRINT(ERR_DBG, "Out of memory in %s", + __FUNCTION__); + clear_bit(0, (&sp->tasklet_status)); + return -1; + } + clear_bit(0, (&sp->tasklet_status)); + } else if (level == LOW) + tasklet_schedule(&sp->task); + + } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { + DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name); + DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); + } + return 0; +} + static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) { struct net_device *dev = (struct net_device *) dev_id; nic_t *sp = dev->priv; int i; - int ret; mac_info_t *mac_control; struct config_param *config; @@ -4012,35 +4055,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) * reallocate the buffers from the interrupt handler itself, * else schedule a tasklet to reallocate the buffers. */ - for (i = 0; i < config->rx_ring_num; i++) { - if (!sp->lro) { - int rxb_size = atomic_read(&sp->rx_bufs_left[i]); - int level = rx_buffer_level(sp, rxb_size, i); - - if ((level == PANIC) && (!TASKLET_IN_USE)) { - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", - dev->name); - DBG_PRINT(INTR_DBG, "PANIC levels\n"); - if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "%s:Out of memory", - dev->name); - DBG_PRINT(ERR_DBG, " in ISR!!\n"); - clear_bit(0, (&sp->tasklet_status)); - atomic_dec(&sp->isr_cnt); - return IRQ_HANDLED; - } - clear_bit(0, (&sp->tasklet_status)); - } else if (level == LOW) { - tasklet_schedule(&sp->task); - } - } - else if (fill_rx_buffers(sp, i) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "%s:Out of memory", - dev->name); - DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); - break; - } - } + for (i = 0; i < config->rx_ring_num; i++) + s2io_chk_rx_buffers(sp, i); atomic_dec(&sp->isr_cnt); return IRQ_HANDLED; @@ -4051,39 +4067,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs) { ring_info_t *ring = (ring_info_t *)dev_id; nic_t *sp = ring->nic; - struct net_device *dev = (struct net_device *) dev_id; - int rxb_size, level, rng_n; atomic_inc(&sp->isr_cnt); - rx_intr_handler(ring); - - rng_n = ring->ring_no; - if (!sp->lro) { - rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); - level = rx_buffer_level(sp, rxb_size, rng_n); - if ((level == PANIC) && (!TASKLET_IN_USE)) { - int ret; - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); - DBG_PRINT(INTR_DBG, "PANIC levels\n"); - if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "Out of memory in %s", - __FUNCTION__); - clear_bit(0, (&sp->tasklet_status)); - return IRQ_HANDLED; - } - clear_bit(0, (&sp->tasklet_status)); - } else if (level == LOW) { - tasklet_schedule(&sp->task); - } - } - else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name); - DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); - } + rx_intr_handler(ring); + s2io_chk_rx_buffers(sp, ring->ring_no); atomic_dec(&sp->isr_cnt); - return IRQ_HANDLED; } @@ -4248,37 +4238,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) * else schedule a tasklet to reallocate the buffers. 
*/ #ifndef CONFIG_S2IO_NAPI - for (i = 0; i < config->rx_ring_num; i++) { - if (!sp->lro) { - int ret; - int rxb_size = atomic_read(&sp->rx_bufs_left[i]); - int level = rx_buffer_level(sp, rxb_size, i); - - if ((level == PANIC) && (!TASKLET_IN_USE)) { - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", - dev->name); - DBG_PRINT(INTR_DBG, "PANIC levels\n"); - if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "%s:Out of memory", - dev->name); - DBG_PRINT(ERR_DBG, " in ISR!!\n"); - clear_bit(0, (&sp->tasklet_status)); - atomic_dec(&sp->isr_cnt); - writeq(org_mask, &bar0->general_int_mask); - return IRQ_HANDLED; - } - clear_bit(0, (&sp->tasklet_status)); - } else if (level == LOW) { - tasklet_schedule(&sp->task); - } - } - else if (fill_rx_buffers(sp, i) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "%s:Out of memory", - dev->name); - DBG_PRINT(ERR_DBG, " in Rx intr!!\n"); - break; - } - } + for (i = 0; i < config->rx_ring_num; i++) + s2io_chk_rx_buffers(sp, i); #endif writeq(org_mask, &bar0->general_int_mask); atomic_dec(&sp->isr_cnt); @@ -4308,6 +4269,8 @@ static void s2io_updt_stats(nic_t *sp) if (cnt == 5) break; /* Updt failed */ } while(1); + } else { + memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t)); } } @@ -4942,7 +4905,8 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt) } static void s2io_vpd_read(nic_t *nic) { - u8 vpd_data[256],data; + u8 *vpd_data; + u8 data; int i=0, cnt, fail = 0; int vpd_addr = 0x80; @@ -4955,6 +4919,10 @@ static void s2io_vpd_read(nic_t *nic) vpd_addr = 0x50; } + vpd_data = kmalloc(256, GFP_KERNEL); + if (!vpd_data) + return; + for (i = 0; i < 256; i +=4 ) { pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); @@ -4977,6 +4945,7 @@ static void s2io_vpd_read(nic_t *nic) memset(nic->product_name, 0, vpd_data[1]); memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); } + kfree(vpd_data); } /** @@ -5295,7 +5264,7 @@ static int s2io_link_test(nic_t * sp, uint64_t * data) else *data = 0; - return 0; + return *data; } /** @@ -5753,6 +5722,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) return 0; } +static u32 s2io_ethtool_op_get_tso(struct net_device *dev) +{ + return (dev->features & NETIF_F_TSO) != 0; +} +static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); + else + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + return 0; +} static struct ethtool_ops netdev_ethtool_ops = { .get_settings = s2io_ethtool_gset, @@ -5773,8 +5755,8 @@ static struct ethtool_ops netdev_ethtool_ops = { .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, #ifdef NETIF_F_TSO - .get_tso = ethtool_op_get_tso, - .set_tso = ethtool_op_set_tso, + .get_tso = s2io_ethtool_op_get_tso, + .set_tso = s2io_ethtool_op_set_tso, #endif .get_ufo = ethtool_op_get_ufo, .set_ufo = ethtool_op_set_ufo, @@ -6337,7 +6319,7 @@ static int s2io_card_up(nic_t * sp) s2io_set_multicast(dev); if (sp->lro) { - /* Initialize max aggregatable pkts based on MTU */ + /* Initialize max aggregatable pkts per session based on MTU */ sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; /* Check if we can use(if specified) user provided value */ if (lro_max_pkts < sp->lro_max_aggr_per_sess) @@ -6438,7 +6420,7 @@ static void s2io_tx_watchdog(struct net_device *dev) * @cksum : FCS checksum of the frame. * @ring_no : the ring from which this RxD was extracted. 
* Description: - * This function is called by the Tx interrupt serivce routine to perform + * This function is called by the Rx interrupt service routine to perform * some OS related operations on the SKB before passing it to the upper * layers. It mainly checks if the checksum is OK, if so adds it to the * SKBs cksum variable, increments the Rx packet count and passes the SKB @@ -6698,33 +6680,6 @@ static void s2io_init_pci(nic_t * sp) pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); } -MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -module_param(tx_fifo_num, int, 0); -module_param(rx_ring_num, int, 0); -module_param(rx_ring_mode, int, 0); -module_param_array(tx_fifo_len, uint, NULL, 0); -module_param_array(rx_ring_sz, uint, NULL, 0); -module_param_array(rts_frm_len, uint, NULL, 0); -module_param(use_continuous_tx_intrs, int, 1); -module_param(rmac_pause_time, int, 0); -module_param(mc_pause_threshold_q0q3, int, 0); -module_param(mc_pause_threshold_q4q7, int, 0); -module_param(shared_splits, int, 0); -module_param(tmac_util_period, int, 0); -module_param(rmac_util_period, int, 0); -module_param(bimodal, bool, 0); -module_param(l3l4hdr_size, int , 0); -#ifndef CONFIG_S2IO_NAPI -module_param(indicate_max_pkts, int, 0); -#endif -module_param(rxsync_frequency, int, 0); -module_param(intr_type, int, 0); -module_param(lro, int, 0); -module_param(lro_max_pkts, int, 0); - static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) { if ( tx_fifo_num > 8) { @@ -6832,8 +6787,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) } if (dev_intr_type != MSI_X) { if (pci_request_regions(pdev, s2io_driver_name)) { - DBG_PRINT(ERR_DBG, "Request Regions failed\n"), - pci_disable_device(pdev); + DBG_PRINT(ERR_DBG, "Request Regions failed\n"); + pci_disable_device(pdev); return -ENODEV; } } @@ -6957,7 +6912,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* initialize the shared memory used by the NIC and the host */ if (init_shared_mem(sp)) { DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", - __FUNCTION__); + dev->name); ret = -ENOMEM; goto mem_alloc_failed; } @@ -7094,6 +7049,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) dev->addr_len = ETH_ALEN; memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); + /* reset Nic and bring it to known state */ + s2io_reset(sp); + /* * Initialize the tasklet status and link state flags * and the card state parameter */ @@ -7131,11 +7089,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) goto register_failed; } s2io_vpd_read(sp); - DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name); - DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n", - get_xena_rev_id(sp->pdev), - s2io_driver_version); DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); + DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name, + sp->product_name, get_xena_rev_id(sp->pdev)); + DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, + s2io_driver_version); DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, sp->def_mac_addr[0].mac_addr[0], @@ -7436,8 +7394,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip, if (ip->ihl != 5) /* IP has options */ return -1; + /* If we see CE codepoint in IP header, packet is not mergeable */ + if (INET_ECN_is_ce(ipv4_get_dsfield(ip))) + return -1; + + /* If we see ECE or CWR flags in TCP header, packet is 
not mergeable */ if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || - !tcp->ack) { + tcp->ece || tcp->cwr || !tcp->ack) { /* * Currently recognize only the ack control word and * any other control field being set would result in @@ -7591,18 +7554,16 @@ static void queue_rx_frame(struct sk_buff *skb) static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len) { - struct sk_buff *tmp, *first = lro->parent; + struct sk_buff *first = lro->parent; first->len += tcp_len; first->data_len = lro->frags_len; skb_pull(skb, (skb->len - tcp_len)); - if ((tmp = skb_shinfo(first)->frag_list)) { - while (tmp->next) - tmp = tmp->next; - tmp->next = skb; - } + if (skb_shinfo(first)->frag_list) + lro->last_frag->next = skb; else skb_shinfo(first)->frag_list = skb; + lro->last_frag = skb; sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; return; } diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 217097bc22f1..5ed49c3be1e9 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -719,6 +719,7 @@ struct msix_info_st { /* Data structure to represent a LRO session */ typedef struct lro { struct sk_buff *parent; + struct sk_buff *last_frag; u8 *l2h; struct iphdr *iph; struct tcphdr *tcph; @@ -1011,4 +1012,13 @@ static void clear_lro_session(lro_t *lro); static void queue_rx_frame(struct sk_buff *skb); static void update_L3L4_header(nic_t *sp, lro_t *lro); static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); + +#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size +#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size +#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type + +#define S2IO_PARM_INT(X, def_val) \ + static unsigned int X = def_val;\ + module_param(X , uint, 0); + #endif /* _S2IO_H */ diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index efd0f235020f..01392bca0223 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c @@ -742,7 +742,7 @@ module_param(irq, int, 0); MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address"); MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number"); -int init_module(void) +int __init init_module(void) { dev_seeq = seeq8005_probe(-1); if (IS_ERR(dev_seeq)) diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 82200bfaa8ed..ad878dfddef4 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev, /* Chip internal frequency for clock calculations */ static inline u32 hwkhz(const struct skge_hw *hw) { - if (hw->chip_id == CHIP_ID_GENESIS) - return 53215; /* or: 53.125 MHz */ - else - return 78215; /* or: 78.125 MHz */ + return (hw->chip_id == CHIP_ID_GENESIS) ? 
53125 : 78125; } /* Chip HZ to microseconds */ @@ -2214,6 +2211,7 @@ static int skge_up(struct net_device *dev) skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); skge_led(skge, LED_MODE_ON); + netif_poll_enable(dev); return 0; free_rx_ring: @@ -2282,6 +2280,7 @@ static int skge_down(struct net_device *dev) skge_led(skge, LED_MODE_OFF); + netif_poll_disable(dev); skge_tx_clean(skge); skge_rx_clean(skge); diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index de91609ca112..933e87f1cc68 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -233,6 +233,8 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) if (hw->ports > 1) reg1 |= PCI_Y2_PHY2_COMA; } + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + udelay(100); if (hw->chip_id == CHIP_ID_YUKON_EC_U) { sky2_pci_write32(hw, PCI_DEV_REG3, 0); @@ -242,9 +244,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) sky2_pci_write32(hw, PCI_DEV_REG5, 0); } - sky2_pci_write32(hw, PCI_DEV_REG1, reg1); - udelay(100); - break; case PCI_D3hot: diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index d37bd860b336..0b15290df278 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c @@ -1092,6 +1092,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs /* Spurious interrupt check */ if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { + spin_unlock_irqrestore(&lp->lock, flags); return IRQ_NONE; } diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 3d8dcb6c8758..cf62373b808b 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -321,12 +321,12 @@ static void smc_reset(struct net_device *dev) DBG(2, "%s: %s\n", dev->name, __FUNCTION__); /* Disable all interrupts, block TX tasklet */ - spin_lock(&lp->lock); + spin_lock_irq(&lp->lock); SMC_SELECT_BANK(2); SMC_SET_INT_MASK(0); pending_skb = lp->pending_tx_skb; lp->pending_tx_skb = NULL; - spin_unlock(&lp->lock); + spin_unlock_irq(&lp->lock); /* free any pending tx skb */ if (pending_skb) { @@ -448,12 +448,12 @@ static void smc_shutdown(struct net_device *dev) DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); /* no more interrupts for me */ - spin_lock(&lp->lock); + spin_lock_irq(&lp->lock); SMC_SELECT_BANK(2); SMC_SET_INT_MASK(0); pending_skb = lp->pending_tx_skb; lp->pending_tx_skb = NULL; - spin_unlock(&lp->lock); + spin_unlock_irq(&lp->lock); if (pending_skb) dev_kfree_skb(pending_skb); diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 4ec4b4d23ae5..7aa7fbac8224 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -136,14 +136,9 @@ #define SMC_CAN_USE_32BIT 0 #define SMC_IO_SHIFT 0 #define SMC_NOWAIT 1 -#define SMC_USE_PXA_DMA 1 -#define SMC_inb(a, r) readb((a) + (r)) #define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) #define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) @@ -189,16 +184,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define SMC_IO_SHIFT 0 #define SMC_NOWAIT 1 -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) #define SMC_inw(a, r) readw((a) + (r)) #define SMC_outw(v, a, r) writew(v, (a) + (r)) #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#define 
SMC_inl(a, r) readl((a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) #include <asm/mach-types.h> #include <asm/arch/cpu.h> @@ -372,6 +361,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, #define SMC_IRQ_FLAGS (0) +#elif defined(CONFIG_ARCH_VERSATILE) + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_NOWAIT 1 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) + +#define SMC_IRQ_FLAGS (0) + #else #define SMC_CAN_USE_8BIT 1 diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 647f62e9707d..88907218457a 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -1611,13 +1611,12 @@ spider_net_open(struct net_device *netdev) int result; result = -ENOMEM; - if (spider_net_init_chain(card, &card->tx_chain, - card->descr, - PCI_DMA_TODEVICE, tx_descriptors)) + if (spider_net_init_chain(card, &card->tx_chain, card->descr, + PCI_DMA_TODEVICE, card->tx_desc)) goto alloc_tx_failed; if (spider_net_init_chain(card, &card->rx_chain, - card->descr + tx_descriptors, - PCI_DMA_FROMDEVICE, rx_descriptors)) + card->descr + card->rx_desc, + PCI_DMA_FROMDEVICE, card->rx_desc)) goto alloc_rx_failed; /* allocate rx skbs */ @@ -2005,6 +2004,9 @@ spider_net_setup_netdev(struct spider_net_card *card) card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; + card->tx_desc = tx_descriptors; + card->rx_desc = rx_descriptors; + spider_net_setup_netdev_ops(netdev); netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index f6dcf180ae3d..30407cdf0892 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h @@ -440,6 +440,9 @@ struct spider_net_card { /* for ethtool */ int msg_enable; + int rx_desc; + int tx_desc; + struct spider_net_descr descr[0]; }; diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c index a5bb0b7633af..02209222b8c9 100644 --- a/drivers/net/spider_net_ethtool.c +++ b/drivers/net/spider_net_ethtool.c @@ -130,6 +130,18 @@ spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data) return 0; } +static void +spider_net_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering) +{ + struct spider_net_card *card = netdev->priv; + + ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; + ering->tx_pending = card->tx_desc; + ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX; + ering->rx_pending = card->rx_desc; +} + struct ethtool_ops spider_net_ethtool_ops = { .get_settings = spider_net_ethtool_get_settings, .get_drvinfo = spider_net_ethtool_get_drvinfo, @@ -141,5 +153,6 @@ struct ethtool_ops spider_net_ethtool_ops = { .set_rx_csum = spider_net_ethtool_set_rx_csum, .get_tx_csum = spider_net_ethtool_get_tx_csum, .set_tx_csum = spider_net_ethtool_set_tx_csum, + .get_ringparam = spider_net_ethtool_get_ringparam, }; diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index ac17377b3e9f..698568e751da 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -107,7 +107,7 @@ static char *media[MAX_UNITS]; #endif /* These 
identify the driver base version and may not be removed. */ -static char version[] __devinitdata = +static char version[] = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" KERN_INFO " http://www.scyld.com/network/sundance.html\n"; diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 1ef9fd39a79a..0e3fdf7c6dd3 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void) { if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || (idprom->id_machtype == (SM_SUN4|SM_4_470))) { - memset(&sun4_sdev, 0, sizeof(sdev)); + memset(&sun4_sdev, 0, sizeof(struct sbus_dev)); sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; sun4_sdev.irqs[0] = 6; return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); @@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void) static int __exit sunlance_sun4_remove(void) { - struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); + struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev); struct net_device *net_dev = lp->dev; unregister_netdevice(net_dev); - lance_free_hwresources(root_lance_dev); + lance_free_hwresources(lp); free_netdev(net_dev); - dev_set_drvdata(&sun4_sdev->dev, NULL); + dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL); return 0; } diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 1b8138f641e3..eafabb253f08 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -68,8 +68,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.63" -#define DRV_MODULE_RELDATE "July 25, 2006" +#define DRV_MODULE_VERSION "3.65" +#define DRV_MODULE_RELDATE "August 07, 2006" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -123,9 +123,6 @@ TG3_RX_RCB_RING_SIZE(tp)) #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ TG3_TX_RING_SIZE) -#define TX_BUFFS_AVAIL(TP) \ - ((TP)->tx_pending - \ - (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1))) #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) @@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp) spin_unlock(&tp->lock); } +static inline u32 tg3_tx_avail(struct tg3 *tp) +{ + smp_mb(); + return (tp->tx_pending - + ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1))); +} + /* Tigon3 never reports partial packet sends. So we do not * need special logic to handle SKBs that have not had all * of their frags sent yet, like SunGEM does. @@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp) tp->tx_cons = sw_idx; - if (unlikely(netif_queue_stopped(tp->dev))) { - spin_lock(&tp->tx_lock); + /* Need to make the tx_cons update visible to tg3_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that tg3_start_xmit() + * will miss it and cause the queue to be stopped forever. + */ + smp_mb(); + + if (unlikely(netif_queue_stopped(tp->dev) && + (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) { + netif_tx_lock(tp->dev); if (netif_queue_stopped(tp->dev) && - (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) + (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); + netif_tx_unlock(tp->dev); } } @@ -3097,11 +3109,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, * Callers depend upon this behavior and assume that * we leave everything unchanged if we fail. 
*/ - skb = dev_alloc_skb(skb_size); + skb = netdev_alloc_skb(tp->dev, skb_size); if (skb == NULL) return -ENOMEM; - skb->dev = tp->dev; skb_reserve(skb, tp->rx_offset); mapping = pci_map_single(tp->pdev, skb->data, @@ -3270,11 +3281,10 @@ static int tg3_rx(struct tg3 *tp, int budget) tg3_recycle_rx(tp, opaque_key, desc_idx, *post_ptr); - copy_skb = dev_alloc_skb(len + 2); + copy_skb = netdev_alloc_skb(tp->dev, len + 2); if (copy_skb == NULL) goto drop_it_no_recycle; - copy_skb->dev = tp->dev; skb_reserve(copy_skb, 2); skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); @@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); tp->tx_prod = entry; - if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { - spin_lock(&tp->tx_lock); + if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); - if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) + if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); } out_unlock: @@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) struct sk_buff *segs, *nskb; /* Estimate the number of fragments in the worst case */ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { netif_stop_queue(tp->dev); return NETDEV_TX_BUSY; } @@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! 
*/ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); tp->tx_prod = entry; - if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { - spin_lock(&tp->tx_lock); + if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); - if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) + if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); } out_unlock: @@ -8618,7 +8624,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) err = -EIO; tx_len = 1514; - skb = dev_alloc_skb(tx_len); + skb = netdev_alloc_skb(tp->dev, tx_len); if (!skb) return -ENOMEM; @@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; #endif spin_lock_init(&tp->lock); - spin_lock_init(&tp->tx_lock); spin_lock_init(&tp->indirect_lock); INIT_WORK(&tp->reset_task, tg3_reset_task, tp); diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index ba2c98711c88..3ecf356cfb08 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -2079,9 +2079,9 @@ struct tg3 { * lock: Held during reset, PHY access, timer, and when * updating tg3_flags and tg3_flags2. * - * tx_lock: Held during tg3_start_xmit and tg3_tx only - * when calling netif_[start|stop]_queue. - * tg3_start_xmit is protected by netif_tx_lock. + * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds + * netif_tx_lock when it needs to call + * netif_wake_queue. * * Both of these locks are to be held with BH safety. 
* @@ -2118,8 +2118,6 @@ struct tg3 { u32 tx_cons; u32 tx_pending; - spinlock_t tx_lock; - struct tg3_tx_buffer_desc *tx_ring; struct tx_ring_info *tx_buffers; dma_addr_t tx_desc_mapping; diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 9f491563944e..4470025ff7f8 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -140,7 +140,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */ /* version and credits */ #ifndef PCMCIA -static char version[] __initdata = +static char version[] __devinitdata = "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n" " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n" " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n" @@ -216,7 +216,7 @@ static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0}; static int __devinitdata turbo_searched = 0; #ifndef PCMCIA -static __u32 ibmtr_mem_base __initdata = 0xd0000; +static __u32 ibmtr_mem_base __devinitdata = 0xd0000; #endif static void __devinit PrtChanID(char *pcid, short stride) diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index cd2e0251e2bc..85a7f797d343 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -5666,7 +5666,7 @@ module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param(ringspeed, int, 0); -static struct net_device *setup_card(int n) +static struct net_device * __init setup_card(int n) { struct net_device *dev = alloc_trdev(sizeof(struct net_local)); int err; @@ -5696,9 +5696,8 @@ out: free_netdev(dev); return ERR_PTR(err); } - -int init_module(void) +int __init init_module(void) { int i, found = 0; struct net_device *dev; diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 7f414815cc62..eba9083da146 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -138,7 +138,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; #include <asm/irq.h> /* These identify the driver base version and may not be removed. */ -static char version[] __devinitdata = +static char version[] = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" KERN_INFO " http://www.scyld.com/network/drivers.html\n"; diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index f874e4f6ccf6..cf43390d2c80 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c @@ -1264,8 +1264,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p static int __init xircom_init(void) { - pci_register_driver(&xircom_ops); - return 0; + return pci_register_driver(&xircom_ops); } static void __exit xircom_exit(void) diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c new file mode 100644 index 000000000000..47f49ef72bdc --- /dev/null +++ b/drivers/net/ucc_geth.c @@ -0,0 +1,4278 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish <gridish@freescale.com> + * + * Description: + * QE UCC Gigabit Ethernet Driver + * + * Changelog: + * Jul 6, 2006 Li Yang <LeoLi@freescale.com> + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/ethtool.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/fsl_devices.h> +#include <linux/ethtool.h> +#include <linux/platform_device.h> +#include <linux/mii.h> + +#include <asm/uaccess.h> +#include <asm/irq.h> +#include <asm/io.h> +#include <asm/immap_qe.h> +#include <asm/qe.h> +#include <asm/ucc.h> +#include <asm/ucc_fast.h> + +#include "ucc_geth.h" +#include "ucc_geth_phy.h" + +#undef DEBUG + +#define DRV_DESC "QE UCC Gigabit Ethernet Controller version:June 20, 2006" +#define DRV_NAME "ucc_geth" + +#define ugeth_printk(level, format, arg...) \ + printk(level format "\n", ## arg) + +#define ugeth_dbg(format, arg...) \ + ugeth_printk(KERN_DEBUG , format , ## arg) +#define ugeth_err(format, arg...) \ + ugeth_printk(KERN_ERR , format , ## arg) +#define ugeth_info(format, arg...) \ + ugeth_printk(KERN_INFO , format , ## arg) +#define ugeth_warn(format, arg...) \ + ugeth_printk(KERN_WARNING , format , ## arg) + +#ifdef UGETH_VERBOSE_DEBUG +#define ugeth_vdbg ugeth_dbg +#else +#define ugeth_vdbg(fmt, args...) do { } while (0) +#endif /* UGETH_VERBOSE_DEBUG */ + +static DEFINE_SPINLOCK(ugeth_lock); + +static ucc_geth_info_t ugeth_primary_info = { + .uf_info = { + .bd_mem_part = MEM_PART_SYSTEM, + .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, + .max_rx_buf_length = 1536, +/* FIXME: should be changed in run time for 1G and 100M */ +#ifdef CONFIG_UGETH_HAS_GIGA + .urfs = UCC_GETH_URFS_GIGA_INIT, + .urfet = UCC_GETH_URFET_GIGA_INIT, + .urfset = UCC_GETH_URFSET_GIGA_INIT, + .utfs = UCC_GETH_UTFS_GIGA_INIT, + .utfet = UCC_GETH_UTFET_GIGA_INIT, + .utftt = UCC_GETH_UTFTT_GIGA_INIT, +#else + .urfs = UCC_GETH_URFS_INIT, + .urfet = UCC_GETH_URFET_INIT, + .urfset = UCC_GETH_URFSET_INIT, + .utfs = UCC_GETH_UTFS_INIT, + .utfet = UCC_GETH_UTFET_INIT, + .utftt = UCC_GETH_UTFTT_INIT, +#endif + .ufpt = 256, + .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, + .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, + .tenc = UCC_FAST_TX_ENCODING_NRZ, + .renc = UCC_FAST_RX_ENCODING_NRZ, + .tcrc = UCC_FAST_16_BIT_CRC, + .synl = UCC_FAST_SYNC_LEN_NOT_USED, + }, + .numQueuesTx = 1, + .numQueuesRx = 1, + .extendedFilteringChainPointer = ((uint32_t) NULL), + .typeorlen = 3072 /*1536 */ , + .nonBackToBackIfgPart1 = 0x40, + .nonBackToBackIfgPart2 = 0x60, + .miminumInterFrameGapEnforcement = 0x50, + .backToBackInterFrameGap = 0x60, + .mblinterval = 128, + .nortsrbytetime = 5, + .fracsiz = 1, + .strictpriorityq = 0xff, + .altBebTruncation = 0xa, + .excessDefer = 1, + .maxRetransmission = 0xf, + .collisionWindow = 0x37, + .receiveFlowControl = 1, + .maxGroupAddrInHash = 4, + .maxIndAddrInHash = 4, + .prel = 7, + .maxFrameLength = 1518, + .minFrameLength = 64, + .maxD1Length = 1520, + .maxD2Length = 1520, + .vlantype = 0x8100, + .ecamptr = ((uint32_t) NULL), + .eventRegMask = UCCE_OTHER, + .pausePeriod = 0xf000, + .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, + .bdRingLenTx = { + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN}, + + .bdRingLenRx = { + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + 
RX_BD_RING_LEN}, + + .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, + .largestexternallookupkeysize = + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, + .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE, + .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, + .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, + .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, + .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, + .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, + .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4, + .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4, + .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, + .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, +}; + +static ucc_geth_info_t ugeth_info[8]; + +#ifdef DEBUG +static void mem_disp(u8 *addr, int size) +{ + u8 *i; + int size16Aling = (size >> 4) << 4; + int size4Aling = (size >> 2) << 2; + int notAlign = 0; + if (size % 16) + notAlign = 1; + + for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16) + printk("0x%08x: %08x %08x %08x %08x\r\n", + (u32) i, + *((u32 *) (i)), + *((u32 *) (i + 4)), + *((u32 *) (i + 8)), *((u32 *) (i + 12))); + if (notAlign == 1) + printk("0x%08x: ", (u32) i); + for (; (u32) i < (u32) addr + size4Aling; i += 4) + printk("%08x ", *((u32 *) (i))); + for (; (u32) i < (u32) addr + size; i++) + printk("%02x", *((u8 *) (i))); + if (notAlign == 1) + printk("\r\n"); +} +#endif /* DEBUG */ + +#ifdef CONFIG_UGETH_FILTERING +static void enqueue(struct list_head *node, struct list_head *lh) +{ + unsigned long flags; + + spin_lock_irqsave(ugeth_lock, flags); + list_add_tail(node, lh); + spin_unlock_irqrestore(ugeth_lock, flags); +} +#endif /* CONFIG_UGETH_FILTERING */ + +static struct list_head *dequeue(struct list_head *lh) +{ + unsigned long flags; + + spin_lock_irqsave(ugeth_lock, flags); + if (!list_empty(lh)) { + struct list_head *node = lh->next; + list_del(node); + spin_unlock_irqrestore(ugeth_lock, flags); + return node; + } else { + spin_unlock_irqrestore(ugeth_lock, flags); + return NULL; + } +} + +static int get_interface_details(enet_interface_e enet_interface, + enet_speed_e *speed, + int *r10m, + int *rmm, + int *rpm, + int *tbi, int *limited_to_full_duplex) +{ + /* Analyze enet_interface according to Interface Mode + Configuration table */ + switch (enet_interface) { + case ENET_10_MII: + *speed = ENET_SPEED_10BT; + break; + case ENET_10_RMII: + *speed = ENET_SPEED_10BT; + *r10m = 1; + *rmm = 1; + break; + case ENET_10_RGMII: + *speed = ENET_SPEED_10BT; + *rpm = 1; + *r10m = 1; + *limited_to_full_duplex = 1; + break; + case ENET_100_MII: + *speed = ENET_SPEED_100BT; + break; + case ENET_100_RMII: + *speed = ENET_SPEED_100BT; + *rmm = 1; + break; + case ENET_100_RGMII: + *speed = ENET_SPEED_100BT; + *rpm = 1; + *limited_to_full_duplex = 1; + break; + case ENET_1000_GMII: + *speed = ENET_SPEED_1000BT; + *limited_to_full_duplex = 1; + break; + case ENET_1000_RGMII: + *speed = ENET_SPEED_1000BT; + *rpm = 1; + *limited_to_full_duplex = 1; + break; + case ENET_1000_TBI: + *speed = ENET_SPEED_1000BT; + *tbi = 1; + *limited_to_full_duplex = 1; + break; + case ENET_1000_RTBI: + *speed = ENET_SPEED_1000BT; + *rpm = 1; + *tbi = 1; + *limited_to_full_duplex = 1; + break; + default: + return -EINVAL; + break; + } + + return 0; +} + +static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd) +{ + struct sk_buff *skb = NULL; + + skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT); + + if (skb == NULL) + return NULL; + + /* We need the 
data buffer to be aligned properly. We will reserve + * as many bytes as needed to align the data properly + */ + skb_reserve(skb, + UCC_GETH_RX_DATA_BUF_ALIGNMENT - + (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - + 1))); + + skb->dev = ugeth->dev; + + BD_BUFFER_SET(bd, + dma_map_single(NULL, + skb->data, + ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT, + DMA_FROM_DEVICE)); + + BD_STATUS_AND_LENGTH_SET(bd, + (R_E | R_I | + (BD_STATUS_AND_LENGTH(bd) & R_W))); + + return skb; +} + +static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ) +{ + u8 *bd; + u32 bd_status; + struct sk_buff *skb; + int i; + + bd = ugeth->p_rx_bd_ring[rxQ]; + i = 0; + + do { + bd_status = BD_STATUS_AND_LENGTH(bd); + skb = get_new_skb(ugeth, bd); + + if (!skb) /* If can not allocate data buffer, + abort. Cleanup will be elsewhere */ + return -ENOMEM; + + ugeth->rx_skbuff[rxQ][i] = skb; + + /* advance the BD pointer */ + bd += UCC_GETH_SIZE_OF_BD; + i++; + } while (!(bd_status & R_W)); + + return 0; +} + +static int fill_init_enet_entries(ucc_geth_private_t *ugeth, + volatile u32 *p_start, + u8 num_entries, + u32 thread_size, + u32 thread_alignment, + qe_risc_allocation_e risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + if ((snum = qe_get_snum()) < 0) { + ugeth_err("fill_init_enet_entries: Can not get SNUM."); + return snum; + } + if ((i == 0) && skip_page_for_first_entry) + /* First entry of Rx does not have page */ + init_enet_offset = 0; + else { + init_enet_offset = + qe_muram_alloc(thread_size, thread_alignment); + if (IS_MURAM_ERR(init_enet_offset)) { + ugeth_err + ("fill_init_enet_entries: Can not allocate DPRAM memory."); + qe_put_snum((u8) snum); + return -ENOMEM; + } + } + *(p_start++) = + ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset + | risc; + } + + return 0; +} + +static int return_init_enet_entries(ucc_geth_private_t *ugeth, + volatile u32 *p_start, + u8 num_entries, + qe_risc_allocation_e risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + /* Check that this entry was actually valid -- + needed in case failed in allocations */ + if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { + snum = + (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> + ENET_INIT_PARAM_SNUM_SHIFT; + qe_put_snum((u8) snum); + if (!((i == 0) && skip_page_for_first_entry)) { + /* First entry of Rx does not have page */ + init_enet_offset = + (in_be32(p_start) & + ENET_INIT_PARAM_PTR_MASK); + qe_muram_free(init_enet_offset); + } + *(p_start++) = 0; /* Just for cosmetics */ + } + } + + return 0; +} + +#ifdef DEBUG +static int dump_init_enet_entries(ucc_geth_private_t *ugeth, + volatile u32 *p_start, + u8 num_entries, + u32 thread_size, + qe_risc_allocation_e risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + /* Check that this entry was actually valid -- + needed in case failed in allocations */ + if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { + snum = + (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> + ENET_INIT_PARAM_SNUM_SHIFT; + qe_put_snum((u8) snum); + if (!((i == 0) && skip_page_for_first_entry)) { + /* First entry of Rx does not have page */ + init_enet_offset = + (in_be32(p_start) & + ENET_INIT_PARAM_PTR_MASK); + ugeth_info("Init enet entry %d:", i); + ugeth_info("Base address: 0x%08x", + (u32) + 
qe_muram_addr(init_enet_offset)); + mem_disp(qe_muram_addr(init_enet_offset), + thread_size); + } + p_start++; + } + } + + return 0; +} +#endif + +#ifdef CONFIG_UGETH_FILTERING +static enet_addr_container_t *get_enet_addr_container(void) +{ + enet_addr_container_t *enet_addr_cont; + + /* allocate memory */ + enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL); + if (!enet_addr_cont) { + ugeth_err("%s: No memory for enet_addr_container_t object.", + __FUNCTION__); + return NULL; + } + + return enet_addr_cont; +} +#endif /* CONFIG_UGETH_FILTERING */ + +static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont) +{ + kfree(enet_addr_cont); +} + +#ifdef CONFIG_UGETH_FILTERING +static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr, u8 paddr_num) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + + if (!(paddr_num < NUM_OF_PADDRS)) { + ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); + return -EINVAL; + } + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + /* Ethernet frames are defined in Little Endian mode, */ + /* therefore to insert the address we reverse the bytes. */ + out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, + (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) | + (u16) (*p_enet_addr)[4])); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, + (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) | + (u16) (*p_enet_addr)[2])); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, + (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) | + (u16) (*p_enet_addr)[0])); + + return 0; +} +#endif /* CONFIG_UGETH_FILTERING */ + +static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + + if (!(paddr_num < NUM_OF_PADDRS)) { + ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); + return -EINVAL; + } + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + /* Writing address ff.ff.ff.ff.ff.ff disables address + recognition for this register */ + out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); + + return 0; +} + +static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + u32 cecr_subblock; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + + /* Ethernet frames are defined in Little Endian mode, + therefor to insert */ + /* the address to the hash (Big Endian mode), we reverse the bytes.*/ + out_be16(&p_82xx_addr_filt->taddr.h, + (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) | + (u16) (*p_enet_addr)[4])); + out_be16(&p_82xx_addr_filt->taddr.m, + (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) | + (u16) (*p_enet_addr)[2])); + out_be16(&p_82xx_addr_filt->taddr.l, + (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) | + (u16) (*p_enet_addr)[0])); + + qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, + (u8) QE_CR_PROTOCOL_ETHERNET, 0); +} + +#ifdef CONFIG_UGETH_MAGIC_PACKET +static void magic_packet_detection_enable(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + ucc_geth_t *ug_regs; + u32 maccfg2, uccm; + + uccf = ugeth->uccf; + ug_regs = ugeth->ug_regs; + 
+ /* Enable interrupts for magic packet detection */ + uccm = in_be32(uccf->p_uccm); + uccm |= UCCE_MPD; + out_be32(uccf->p_uccm, uccm); + + /* Enable magic packet detection */ + maccfg2 = in_be32(&ug_regs->maccfg2); + maccfg2 |= MACCFG2_MPE; + out_be32(&ug_regs->maccfg2, maccfg2); +} + +static void magic_packet_detection_disable(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + ucc_geth_t *ug_regs; + u32 maccfg2, uccm; + + uccf = ugeth->uccf; + ug_regs = ugeth->ug_regs; + + /* Disable interrupts for magic packet detection */ + uccm = in_be32(uccf->p_uccm); + uccm &= ~UCCE_MPD; + out_be32(uccf->p_uccm, uccm); + + /* Disable magic packet detection */ + maccfg2 = in_be32(&ug_regs->maccfg2); + maccfg2 &= ~MACCFG2_MPE; + out_be32(&ug_regs->maccfg2, maccfg2); +} +#endif /* MAGIC_PACKET */ + +static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2) +{ + return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); +} + +#ifdef DEBUG +static void get_statistics(ucc_geth_private_t *ugeth, + ucc_geth_tx_firmware_statistics_t * + tx_firmware_statistics, + ucc_geth_rx_firmware_statistics_t * + rx_firmware_statistics, + ucc_geth_hardware_statistics_t *hardware_statistics) +{ + ucc_fast_t *uf_regs; + ucc_geth_t *ug_regs; + ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram; + ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram; + + ug_regs = ugeth->ug_regs; + uf_regs = (ucc_fast_t *) ug_regs; + p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; + p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; + + /* Tx firmware only if user handed pointer and driver actually + gathers Tx firmware statistics */ + if (tx_firmware_statistics && p_tx_fw_statistics_pram) { + tx_firmware_statistics->sicoltx = + in_be32(&p_tx_fw_statistics_pram->sicoltx); + tx_firmware_statistics->mulcoltx = + in_be32(&p_tx_fw_statistics_pram->mulcoltx); + tx_firmware_statistics->latecoltxfr = + in_be32(&p_tx_fw_statistics_pram->latecoltxfr); + tx_firmware_statistics->frabortduecol = + in_be32(&p_tx_fw_statistics_pram->frabortduecol); + tx_firmware_statistics->frlostinmactxer = + in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); + tx_firmware_statistics->carriersenseertx = + in_be32(&p_tx_fw_statistics_pram->carriersenseertx); + tx_firmware_statistics->frtxok = + in_be32(&p_tx_fw_statistics_pram->frtxok); + tx_firmware_statistics->txfrexcessivedefer = + in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); + tx_firmware_statistics->txpkts256 = + in_be32(&p_tx_fw_statistics_pram->txpkts256); + tx_firmware_statistics->txpkts512 = + in_be32(&p_tx_fw_statistics_pram->txpkts512); + tx_firmware_statistics->txpkts1024 = + in_be32(&p_tx_fw_statistics_pram->txpkts1024); + tx_firmware_statistics->txpktsjumbo = + in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); + } + + /* Rx firmware only if user handed pointer and driver actually + * gathers Rx firmware statistics */ + if (rx_firmware_statistics && p_rx_fw_statistics_pram) { + int i; + rx_firmware_statistics->frrxfcser = + in_be32(&p_rx_fw_statistics_pram->frrxfcser); + rx_firmware_statistics->fraligner = + in_be32(&p_rx_fw_statistics_pram->fraligner); + rx_firmware_statistics->inrangelenrxer = + in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); + rx_firmware_statistics->outrangelenrxer = + in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); + rx_firmware_statistics->frtoolong = + in_be32(&p_rx_fw_statistics_pram->frtoolong); + rx_firmware_statistics->runt = + in_be32(&p_rx_fw_statistics_pram->runt); + 
rx_firmware_statistics->verylongevent = + in_be32(&p_rx_fw_statistics_pram->verylongevent); + rx_firmware_statistics->symbolerror = + in_be32(&p_rx_fw_statistics_pram->symbolerror); + rx_firmware_statistics->dropbsy = + in_be32(&p_rx_fw_statistics_pram->dropbsy); + for (i = 0; i < 0x8; i++) + rx_firmware_statistics->res0[i] = + p_rx_fw_statistics_pram->res0[i]; + rx_firmware_statistics->mismatchdrop = + in_be32(&p_rx_fw_statistics_pram->mismatchdrop); + rx_firmware_statistics->underpkts = + in_be32(&p_rx_fw_statistics_pram->underpkts); + rx_firmware_statistics->pkts256 = + in_be32(&p_rx_fw_statistics_pram->pkts256); + rx_firmware_statistics->pkts512 = + in_be32(&p_rx_fw_statistics_pram->pkts512); + rx_firmware_statistics->pkts1024 = + in_be32(&p_rx_fw_statistics_pram->pkts1024); + rx_firmware_statistics->pktsjumbo = + in_be32(&p_rx_fw_statistics_pram->pktsjumbo); + rx_firmware_statistics->frlossinmacer = + in_be32(&p_rx_fw_statistics_pram->frlossinmacer); + rx_firmware_statistics->pausefr = + in_be32(&p_rx_fw_statistics_pram->pausefr); + for (i = 0; i < 0x4; i++) + rx_firmware_statistics->res1[i] = + p_rx_fw_statistics_pram->res1[i]; + rx_firmware_statistics->removevlan = + in_be32(&p_rx_fw_statistics_pram->removevlan); + rx_firmware_statistics->replacevlan = + in_be32(&p_rx_fw_statistics_pram->replacevlan); + rx_firmware_statistics->insertvlan = + in_be32(&p_rx_fw_statistics_pram->insertvlan); + } + + /* Hardware only if user handed pointer and driver actually + gathers hardware statistics */ + if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) { + hardware_statistics->tx64 = in_be32(&ug_regs->tx64); + hardware_statistics->tx127 = in_be32(&ug_regs->tx127); + hardware_statistics->tx255 = in_be32(&ug_regs->tx255); + hardware_statistics->rx64 = in_be32(&ug_regs->rx64); + hardware_statistics->rx127 = in_be32(&ug_regs->rx127); + hardware_statistics->rx255 = in_be32(&ug_regs->rx255); + hardware_statistics->txok = in_be32(&ug_regs->txok); + hardware_statistics->txcf = in_be16(&ug_regs->txcf); + hardware_statistics->tmca = in_be32(&ug_regs->tmca); + hardware_statistics->tbca = in_be32(&ug_regs->tbca); + hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); + hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); + hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); + hardware_statistics->rmca = in_be32(&ug_regs->rmca); + hardware_statistics->rbca = in_be32(&ug_regs->rbca); + } +} + +static void dump_bds(ucc_geth_private_t *ugeth) +{ + int i; + int length; + + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + if (ugeth->p_tx_bd_ring[i]) { + length = + (ugeth->ug_info->bdRingLenTx[i] * + UCC_GETH_SIZE_OF_BD); + ugeth_info("TX BDs[%d]", i); + mem_disp(ugeth->p_tx_bd_ring[i], length); + } + } + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + if (ugeth->p_rx_bd_ring[i]) { + length = + (ugeth->ug_info->bdRingLenRx[i] * + UCC_GETH_SIZE_OF_BD); + ugeth_info("RX BDs[%d]", i); + mem_disp(ugeth->p_rx_bd_ring[i], length); + } + } +} + +static void dump_regs(ucc_geth_private_t *ugeth) +{ + int i; + + ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num); + ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); + + ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->maccfg1, + in_be32(&ugeth->ug_regs->maccfg1)); + ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->maccfg2, + in_be32(&ugeth->ug_regs->maccfg2)); + ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->ipgifg, + 
in_be32(&ugeth->ug_regs->ipgifg)); + ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->hafdup, + in_be32(&ugeth->ug_regs->hafdup)); + ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimcfg, + in_be32(&ugeth->ug_regs->miimng.miimcfg)); + ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimcom, + in_be32(&ugeth->ug_regs->miimng.miimcom)); + ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimadd, + in_be32(&ugeth->ug_regs->miimng.miimadd)); + ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimcon, + in_be32(&ugeth->ug_regs->miimng.miimcon)); + ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimstat, + in_be32(&ugeth->ug_regs->miimng.miimstat)); + ugeth_info("miimmind : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimind, + in_be32(&ugeth->ug_regs->miimng.miimind)); + ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->ifctl, + in_be32(&ugeth->ug_regs->ifctl)); + ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->ifstat, + in_be32(&ugeth->ug_regs->ifstat)); + ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->macstnaddr1, + in_be32(&ugeth->ug_regs->macstnaddr1)); + ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->macstnaddr2, + in_be32(&ugeth->ug_regs->macstnaddr2)); + ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->uempr, + in_be32(&ugeth->ug_regs->uempr)); + ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->utbipar, + in_be32(&ugeth->ug_regs->utbipar)); + ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->ug_regs->uescr, + in_be16(&ugeth->ug_regs->uescr)); + ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tx64, + in_be32(&ugeth->ug_regs->tx64)); + ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tx127, + in_be32(&ugeth->ug_regs->tx127)); + ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tx255, + in_be32(&ugeth->ug_regs->tx255)); + ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rx64, + in_be32(&ugeth->ug_regs->rx64)); + ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rx127, + in_be32(&ugeth->ug_regs->rx127)); + ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rx255, + in_be32(&ugeth->ug_regs->rx255)); + ugeth_info("txok : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->txok, + in_be32(&ugeth->ug_regs->txok)); + ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->ug_regs->txcf, + in_be16(&ugeth->ug_regs->txcf)); + ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tmca, + in_be32(&ugeth->ug_regs->tmca)); + ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tbca, + in_be32(&ugeth->ug_regs->tbca)); + ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rxfok, + in_be32(&ugeth->ug_regs->rxfok)); + ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rxbok, + in_be32(&ugeth->ug_regs->rxbok)); + ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rbyt, + in_be32(&ugeth->ug_regs->rbyt)); + ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rmca, + 
in_be32(&ugeth->ug_regs->rmca)); + ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rbca, + in_be32(&ugeth->ug_regs->rbca)); + ugeth_info("scar : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->scar, + in_be32(&ugeth->ug_regs->scar)); + ugeth_info("scam : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->scam, + in_be32(&ugeth->ug_regs->scam)); + + if (ugeth->p_thread_data_tx) { + int numThreadsTxNumerical; + switch (ugeth->ug_info->numThreadsTx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsTxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsTxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsTxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsTxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsTxNumerical = 8; + break; + default: + numThreadsTxNumerical = 0; + break; + } + + ugeth_info("Thread data TXs:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_thread_data_tx); + for (i = 0; i < numThreadsTxNumerical; i++) { + ugeth_info("Thread data TX[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_thread_data_tx[i]); + mem_disp((u8 *) & ugeth->p_thread_data_tx[i], + sizeof(ucc_geth_thread_data_tx_t)); + } + } + if (ugeth->p_thread_data_rx) { + int numThreadsRxNumerical; + switch (ugeth->ug_info->numThreadsRx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsRxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsRxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsRxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsRxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsRxNumerical = 8; + break; + default: + numThreadsRxNumerical = 0; + break; + } + + ugeth_info("Thread data RX:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_thread_data_rx); + for (i = 0; i < numThreadsRxNumerical; i++) { + ugeth_info("Thread data RX[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_thread_data_rx[i]); + mem_disp((u8 *) & ugeth->p_thread_data_rx[i], + sizeof(ucc_geth_thread_data_rx_t)); + } + } + if (ugeth->p_exf_glbl_param) { + ugeth_info("EXF global param:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_exf_glbl_param); + mem_disp((u8 *) ugeth->p_exf_glbl_param, + sizeof(*ugeth->p_exf_glbl_param)); + } + if (ugeth->p_tx_glbl_pram) { + ugeth_info("TX global param:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); + ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_tx_glbl_pram->temoder, + in_be16(&ugeth->p_tx_glbl_pram->temoder)); + ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->sqptr, + in_be32(&ugeth->p_tx_glbl_pram->sqptr)); + ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, + in_be32(&ugeth->p_tx_glbl_pram-> + schedulerbasepointer)); + ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, + in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); + ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->tstate, + in_be32(&ugeth->p_tx_glbl_pram->tstate)); + ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], + ugeth->p_tx_glbl_pram->iphoffset[0]); + ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], + ugeth->p_tx_glbl_pram->iphoffset[1]); + 
ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], + ugeth->p_tx_glbl_pram->iphoffset[2]); + ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], + ugeth->p_tx_glbl_pram->iphoffset[3]); + ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], + ugeth->p_tx_glbl_pram->iphoffset[4]); + ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], + ugeth->p_tx_glbl_pram->iphoffset[5]); + ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], + ugeth->p_tx_glbl_pram->iphoffset[6]); + ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], + ugeth->p_tx_glbl_pram->iphoffset[7]); + ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); + ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); + ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); + ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); + ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); + ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); + ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); + ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); + ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->tqptr, + in_be32(&ugeth->p_tx_glbl_pram->tqptr)); + } + if (ugeth->p_rx_glbl_pram) { + ugeth_info("RX global param:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); + ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->remoder, + in_be32(&ugeth->p_rx_glbl_pram->remoder)); + ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->rqptr, + in_be32(&ugeth->p_rx_glbl_pram->rqptr)); + ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->typeorlen, + in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); + ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_rx_glbl_pram->rxgstpack, + ugeth->p_rx_glbl_pram->rxgstpack); + ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, + in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); + ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, + in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); + ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_rx_glbl_pram->rstate, + ugeth->p_rx_glbl_pram->rstate); + ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->mrblr, + in_be16(&ugeth->p_rx_glbl_pram->mrblr)); + 
ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->rbdqptr, + in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); + ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->mflr, + in_be16(&ugeth->p_rx_glbl_pram->mflr)); + ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->minflr, + in_be16(&ugeth->p_rx_glbl_pram->minflr)); + ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->maxd1, + in_be16(&ugeth->p_rx_glbl_pram->maxd1)); + ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->maxd2, + in_be16(&ugeth->p_rx_glbl_pram->maxd2)); + ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->ecamptr, + in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); + ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l2qt, + in_be32(&ugeth->p_rx_glbl_pram->l2qt)); + ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[0], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); + ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[1], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); + ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[2], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); + ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[3], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); + ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[4], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); + ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[5], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); + ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[6], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); + ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[7], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); + ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->vlantype, + in_be16(&ugeth->p_rx_glbl_pram->vlantype)); + ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->vlantci, + in_be16(&ugeth->p_rx_glbl_pram->vlantci)); + for (i = 0; i < 64; i++) + ugeth_info + ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", + i, + (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], + ugeth->p_rx_glbl_pram->addressfiltering[i]); + ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, + in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); + } + if (ugeth->p_send_q_mem_reg) { + ugeth_info("Send Q memory registers:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_send_q_mem_reg); + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + ugeth_info("SQQD[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); + mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], + sizeof(ucc_geth_send_queue_qd_t)); + } + } + if (ugeth->p_scheduler) { + ugeth_info("Scheduler:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); + mem_disp((u8 *) ugeth->p_scheduler, + sizeof(*ugeth->p_scheduler)); + } + if (ugeth->p_tx_fw_statistics_pram) { + ugeth_info("TX FW statistics pram:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_tx_fw_statistics_pram); + mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, + 
sizeof(*ugeth->p_tx_fw_statistics_pram)); + } + if (ugeth->p_rx_fw_statistics_pram) { + ugeth_info("RX FW statistics pram:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_rx_fw_statistics_pram); + mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, + sizeof(*ugeth->p_rx_fw_statistics_pram)); + } + if (ugeth->p_rx_irq_coalescing_tbl) { + ugeth_info("RX IRQ coalescing tables:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_rx_irq_coalescing_tbl); + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + ugeth_info("RX IRQ coalescing table entry[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]); + ugeth_info + ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i].interruptcoalescingmaxvalue, + in_be32(&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]. + interruptcoalescingmaxvalue)); + ugeth_info + ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i].interruptcoalescingcounter, + in_be32(&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]. + interruptcoalescingcounter)); + } + } + if (ugeth->p_rx_bd_qs_tbl) { + ugeth_info("RX BD QS tables:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + ugeth_info("RX BD QS table[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i]); + ugeth_info + ("bdbaseptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); + ugeth_info + ("bdptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); + ugeth_info + ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i]. + externalbdbaseptr)); + ugeth_info + ("externalbdptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); + ugeth_info("ucode RX Prefetched BDs:"); + ugeth_info("Base address: 0x%08x", + (u32) + qe_muram_addr(in_be32 + (&ugeth->p_rx_bd_qs_tbl[i]. + bdbaseptr))); + mem_disp((u8 *) + qe_muram_addr(in_be32 + (&ugeth->p_rx_bd_qs_tbl[i]. 
+ bdbaseptr)), + sizeof(ucc_geth_rx_prefetched_bds_t)); + } + } + if (ugeth->p_init_enet_param_shadow) { + int size; + ugeth_info("Init enet param shadow:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_init_enet_param_shadow); + mem_disp((u8 *) ugeth->p_init_enet_param_shadow, + sizeof(*ugeth->p_init_enet_param_shadow)); + + size = sizeof(ucc_geth_thread_rx_pram_t); + if (ugeth->ug_info->rxExtendedFiltering) { + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; + if (ugeth->ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; + if (ugeth->ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; + } + + dump_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_TX, + sizeof(ucc_geth_thread_tx_pram_t), + ugeth->ug_info->riscTx, 0); + dump_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + rxthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_RX, size, + ugeth->ug_info->riscRx, 1); + } +} +#endif /* DEBUG */ + +static void init_default_reg_vals(volatile u32 *upsmr_register, + volatile u32 *maccfg1_register, + volatile u32 *maccfg2_register) +{ + out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); + out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); + out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); +} + +static int init_half_duplex_params(int alt_beb, + int back_pressure_no_backoff, + int no_backoff, + int excess_defer, + u8 alt_beb_truncation, + u8 max_retransmissions, + u8 collision_window, + volatile u32 *hafdup_register) +{ + u32 value = 0; + + if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || + (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || + (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) + return -EINVAL; + + value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); + + if (alt_beb) + value |= HALFDUP_ALT_BEB; + if (back_pressure_no_backoff) + value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; + if (no_backoff) + value |= HALFDUP_NO_BACKOFF; + if (excess_defer) + value |= HALFDUP_EXCESSIVE_DEFER; + + value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); + + value |= collision_window; + + out_be32(hafdup_register, value); + return 0; +} + +static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, + u8 non_btb_ipg, + u8 min_ifg, + u8 btb_ipg, + volatile u32 *ipgifg_register) +{ + u32 value = 0; + + /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back + IPG part 2 */ + if (non_btb_cs_ipg > non_btb_ipg) + return -EINVAL; + + if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || + (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || + /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ + (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) + return -EINVAL; + + value |= + ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & + IPGIFG_NBTB_CS_IPG_MASK); + value |= + ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & + IPGIFG_NBTB_IPG_MASK); + value |= + ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & + IPGIFG_MIN_IFG_MASK); + value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); + + out_be32(ipgifg_register, value); + return 0; +} + +static int init_flow_control_params(u32 automatic_flow_control_mode, + int rx_flow_control_enable, + int tx_flow_control_enable, + u16 pause_period, + u16 extension_field, + volatile u32 *upsmr_register, + volatile u32 *uempr_register, + 
volatile u32 *maccfg1_register) +{ + u32 value = 0; + + /* Set UEMPR register */ + value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; + value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; + out_be32(uempr_register, value); + + /* Set UPSMR register */ + value = in_be32(upsmr_register); + value |= automatic_flow_control_mode; + out_be32(upsmr_register, value); + + value = in_be32(maccfg1_register); + if (rx_flow_control_enable) + value |= MACCFG1_FLOW_RX; + if (tx_flow_control_enable) + value |= MACCFG1_FLOW_TX; + out_be32(maccfg1_register, value); + + return 0; +} + +static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, + int auto_zero_hardware_statistics, + volatile u32 *upsmr_register, + volatile u16 *uescr_register) +{ + u32 upsmr_value = 0; + u16 uescr_value = 0; + /* Enable hardware statistics gathering if requested */ + if (enable_hardware_statistics) { + upsmr_value = in_be32(upsmr_register); + upsmr_value |= UPSMR_HSE; + out_be32(upsmr_register, upsmr_value); + } + + /* Clear hardware statistics counters */ + uescr_value = in_be16(uescr_register); + uescr_value |= UESCR_CLRCNT; + /* Automatically zero hardware statistics counters on read, + if requested */ + if (auto_zero_hardware_statistics) + uescr_value |= UESCR_AUTOZ; + out_be16(uescr_register, uescr_value); + + return 0; +} + +static int init_firmware_statistics_gathering_mode(int + enable_tx_firmware_statistics, + int enable_rx_firmware_statistics, + volatile u32 *tx_rmon_base_ptr, + u32 tx_firmware_statistics_structure_address, + volatile u32 *rx_rmon_base_ptr, + u32 rx_firmware_statistics_structure_address, + volatile u16 *temoder_register, + volatile u32 *remoder_register) +{ + /* Note: this function does not check if */ + /* the parameters it receives are NULL */ + u16 temoder_value; + u32 remoder_value; + + if (enable_tx_firmware_statistics) { + out_be32(tx_rmon_base_ptr, + tx_firmware_statistics_structure_address); + temoder_value = in_be16(temoder_register); + temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE; + out_be16(temoder_register, temoder_value); + } + + if (enable_rx_firmware_statistics) { + out_be32(rx_rmon_base_ptr, + rx_firmware_statistics_structure_address); + remoder_value = in_be32(remoder_register); + remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE; + out_be32(remoder_register, remoder_value); + } + + return 0; +} + +static int init_mac_station_addr_regs(u8 address_byte_0, + u8 address_byte_1, + u8 address_byte_2, + u8 address_byte_3, + u8 address_byte_4, + u8 address_byte_5, + volatile u32 *macstnaddr1_register, + volatile u32 *macstnaddr2_register) +{ + u32 value = 0; + + /* Example: for a station address of 0x12345678ABCD, */ + /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ + + /* MACSTNADDR1 Register: */ + + /* 0 7 8 15 */ + /* station address byte 5 station address byte 4 */ + /* 16 23 24 31 */ + /* station address byte 3 station address byte 2 */ + value |= (u32) ((address_byte_2 << 0) & 0x000000FF); + value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); + value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); + value |= (u32) ((address_byte_5 << 24) & 0xFF000000); + + out_be32(macstnaddr1_register, value); + + /* MACSTNADDR2 Register: */ + + /* 0 7 8 15 */ + /* station address byte 1 station address byte 0 */ + /* 16 23 24 31 */ + /* reserved reserved */ + value = 0; + value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); + value |= (u32) ((address_byte_1 << 24) & 0xFF000000); + + out_be32(macstnaddr2_register, value); + + 
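	/*
	 * Worked example (illustrative): for the station address
	 * 0x12345678ABCD used in the comment above, the packing yields
	 *
	 *	MACSTNADDR1 = 0xCDAB7856   (bytes 5, 4, 3, 2)
	 *	MACSTNADDR2 = 0x34120000   (bytes 1, 0, reserved)
	 */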
return 0; +} + +static int init_mac_duplex_mode(int full_duplex, + int limited_to_full_duplex, + volatile u32 *maccfg2_register) +{ + u32 value = 0; + + /* some interfaces must work in full duplex mode */ + if ((full_duplex == 0) && (limited_to_full_duplex == 1)) + return -EINVAL; + + value = in_be32(maccfg2_register); + + if (full_duplex) + value |= MACCFG2_FDX; + else + value &= ~MACCFG2_FDX; + + out_be32(maccfg2_register, value); + return 0; +} + +static int init_check_frame_length_mode(int length_check, + volatile u32 *maccfg2_register) +{ + u32 value = 0; + + value = in_be32(maccfg2_register); + + if (length_check) + value |= MACCFG2_LC; + else + value &= ~MACCFG2_LC; + + out_be32(maccfg2_register, value); + return 0; +} + +static int init_preamble_length(u8 preamble_length, + volatile u32 *maccfg2_register) +{ + u32 value = 0; + + if ((preamble_length < 3) || (preamble_length > 7)) + return -EINVAL; + + value = in_be32(maccfg2_register); + value &= ~MACCFG2_PREL_MASK; + value |= (preamble_length << MACCFG2_PREL_SHIFT); + out_be32(maccfg2_register, value); + return 0; +} + +static int init_mii_management_configuration(int reset_mgmt, + int preamble_supress, + volatile u32 *miimcfg_register, + volatile u32 *miimind_register) +{ + unsigned int timeout = PHY_INIT_TIMEOUT; + u32 value = 0; + + value = in_be32(miimcfg_register); + if (reset_mgmt) { + value |= MIIMCFG_RESET_MANAGEMENT; + out_be32(miimcfg_register, value); + } + + value = 0; + + if (preamble_supress) + value |= MIIMCFG_NO_PREAMBLE; + + value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT; + out_be32(miimcfg_register, value); + + /* Wait until the bus is free */ + while ((in_be32(miimind_register) & MIIMIND_BUSY) && timeout--) + cpu_relax(); + + if (timeout <= 0) { + ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__); + return -ETIMEDOUT; + } + + return 0; +} + +static int init_rx_parameters(int reject_broadcast, + int receive_short_frames, + int promiscuous, volatile u32 *upsmr_register) +{ + u32 value = 0; + + value = in_be32(upsmr_register); + + if (reject_broadcast) + value |= UPSMR_BRO; + else + value &= ~UPSMR_BRO; + + if (receive_short_frames) + value |= UPSMR_RSH; + else + value &= ~UPSMR_RSH; + + if (promiscuous) + value |= UPSMR_PRO; + else + value &= ~UPSMR_PRO; + + out_be32(upsmr_register, value); + + return 0; +} + +static int init_max_rx_buff_len(u16 max_rx_buf_len, + volatile u16 *mrblr_register) +{ + /* max_rx_buf_len value must be a multiple of 128 */ + if ((max_rx_buf_len == 0) + || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) + return -EINVAL; + + out_be16(mrblr_register, max_rx_buf_len); + return 0; +} + +static int init_min_frame_len(u16 min_frame_length, + volatile u16 *minflr_register, + volatile u16 *mrblr_register) +{ + u16 mrblr_value = 0; + + mrblr_value = in_be16(mrblr_register); + if (min_frame_length >= (mrblr_value - 4)) + return -EINVAL; + + out_be16(minflr_register, min_frame_length); + return 0; +} + +static int adjust_enet_interface(ucc_geth_private_t *ugeth) +{ + ucc_geth_info_t *ug_info; + ucc_geth_t *ug_regs; + ucc_fast_t *uf_regs; + enet_speed_e speed; + int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm = + 0, limited_to_full_duplex = 0; + u32 upsmr, maccfg2, utbipar, tbiBaseAddress; + u16 value; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ug_info = ugeth->ug_info; + ug_regs = ugeth->ug_regs; + uf_regs = ugeth->uccf->uf_regs; + + /* Analyze enet_interface according to Interface Mode Configuration + table */ + ret_val = + get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm, + &rpm, 
&tbi, &limited_to_full_duplex); + if (ret_val != 0) { + ugeth_err + ("%s: half duplex not supported in requested configuration.", + __FUNCTION__); + return ret_val; + } + + /* Set MACCFG2 */ + maccfg2 = in_be32(&ug_regs->maccfg2); + maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; + if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT)) + maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; + else if (speed == ENET_SPEED_1000BT) + maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; + maccfg2 |= ug_info->padAndCrc; + out_be32(&ug_regs->maccfg2, maccfg2); + + /* Set UPSMR */ + upsmr = in_be32(&uf_regs->upsmr); + upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM); + if (rpm) + upsmr |= UPSMR_RPM; + if (r10m) + upsmr |= UPSMR_R10M; + if (tbi) + upsmr |= UPSMR_TBIM; + if (rmm) + upsmr |= UPSMR_RMM; + out_be32(&uf_regs->upsmr, upsmr); + + /* Set UTBIPAR */ + utbipar = in_be32(&ug_regs->utbipar); + utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK; + if (tbi) + utbipar |= + (ug_info->phy_address + + ugeth->ug_info->uf_info. + ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT; + else + utbipar |= + (0x10 + + ugeth->ug_info->uf_info. + ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT; + out_be32(&ug_regs->utbipar, utbipar); + + /* Disable autonegotiation in tbi mode, because by default it + comes up in autonegotiation mode. */ + /* Note that this depends on proper setting in utbipar register. */ + if (tbi) { + tbiBaseAddress = in_be32(&ug_regs->utbipar); + tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; + tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; + value = + ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress, + ENET_TBI_MII_CR); + value &= ~0x1000; /* Turn off autonegotiation */ + ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress, + ENET_TBI_MII_CR, value); + } + + ret_val = init_mac_duplex_mode(1, + limited_to_full_duplex, + &ug_regs->maccfg2); + if (ret_val != 0) { + ugeth_err + ("%s: half duplex not supported in requested configuration.", + __FUNCTION__); + return ret_val; + } + + init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); + + ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); + if (ret_val != 0) { + ugeth_err + ("%s: Preamble length must be between 3 and 7 inclusive.", + __FUNCTION__); + return ret_val; + } + + return 0; +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the ugeth structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. + */ +static void adjust_link(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_geth_t *ug_regs; + u32 tempval; + struct ugeth_mii_info *mii_info = ugeth->mii_info; + + ug_regs = ugeth->ug_regs; + + if (mii_info->link) { + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. 
*/ + if (mii_info->duplex != ugeth->oldduplex) { + if (!(mii_info->duplex)) { + tempval = in_be32(&ug_regs->maccfg2); + tempval &= ~(MACCFG2_FDX); + out_be32(&ug_regs->maccfg2, tempval); + + ugeth_info("%s: Half Duplex", dev->name); + } else { + tempval = in_be32(&ug_regs->maccfg2); + tempval |= MACCFG2_FDX; + out_be32(&ug_regs->maccfg2, tempval); + + ugeth_info("%s: Full Duplex", dev->name); + } + + ugeth->oldduplex = mii_info->duplex; + } + + if (mii_info->speed != ugeth->oldspeed) { + switch (mii_info->speed) { + case 1000: +#ifdef CONFIG_MPC836x +/* FIXME: This code is for 100Mbs BUG fixing, +remove this when it is fixed!!! */ + if (ugeth->ug_info->enet_interface == + ENET_1000_GMII) + /* Run the commands which initialize the PHY */ + { + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, 0x1b); + tempval |= 0x000f; + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, 0x1b, + (u16) tempval); + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, + MII_BMCR); + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, MII_BMCR, + (u16) (tempval | BMCR_RESET)); + } else if (ugeth->ug_info->enet_interface == + ENET_1000_RGMII) + /* Run the commands which initialize the PHY */ + { + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, 0x1b); + tempval = (tempval & ~0x000f) | 0x000b; + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, 0x1b, + (u16) tempval); + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, + MII_BMCR); + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, MII_BMCR, + (u16) (tempval | BMCR_RESET)); + } + msleep(4000); +#endif /* CONFIG_MPC8360 */ + adjust_enet_interface(ugeth); + break; + case 100: + case 10: +#ifdef CONFIG_MPC836x +/* FIXME: This code is for 100Mbs BUG fixing, +remove this lines when it will be fixed!!! */ + ugeth->ug_info->enet_interface = ENET_100_RGMII; + tempval = + (u32) mii_info->mdio_read(ugeth->dev, + mii_info->mii_id, + 0x1b); + tempval = (tempval & ~0x000f) | 0x000b; + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, 0x1b, + (u16) tempval); + tempval = + (u32) mii_info->mdio_read(ugeth->dev, + mii_info->mii_id, + MII_BMCR); + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, MII_BMCR, + (u16) (tempval | + BMCR_RESET)); + msleep(4000); +#endif /* CONFIG_MPC8360 */ + adjust_enet_interface(ugeth); + break; + default: + ugeth_warn + ("%s: Ack! Speed (%d) is not 10/100/1000!", + dev->name, mii_info->speed); + break; + } + + ugeth_info("%s: Speed %dBT", dev->name, + mii_info->speed); + + ugeth->oldspeed = mii_info->speed; + } + + if (!ugeth->oldlink) { + ugeth_info("%s: Link is up", dev->name); + ugeth->oldlink = 1; + netif_carrier_on(dev); + netif_schedule(dev); + } + } else { + if (ugeth->oldlink) { + ugeth_info("%s: Link is down", dev->name); + ugeth->oldlink = 0; + ugeth->oldspeed = 0; + ugeth->oldduplex = -1; + netif_carrier_off(dev); + } + } +} + +/* Configure the PHY for dev. + * returns 0 if success. 
-1 if failure + */ +static int init_phy(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + struct phy_info *curphy; + ucc_mii_mng_t *mii_regs; + struct ugeth_mii_info *mii_info; + int err; + + mii_regs = &ugeth->ug_regs->miimng; + + ugeth->oldlink = 0; + ugeth->oldspeed = 0; + ugeth->oldduplex = -1; + + mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL); + + if (NULL == mii_info) { + ugeth_err("%s: Could not allocate mii_info", dev->name); + return -ENOMEM; + } + + mii_info->mii_regs = mii_regs; + mii_info->speed = SPEED_1000; + mii_info->duplex = DUPLEX_FULL; + mii_info->pause = 0; + mii_info->link = 0; + + mii_info->advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + mii_info->autoneg = 1; + + mii_info->mii_id = ugeth->ug_info->phy_address; + + mii_info->dev = dev; + + mii_info->mdio_read = &read_phy_reg; + mii_info->mdio_write = &write_phy_reg; + + ugeth->mii_info = mii_info; + + spin_lock_irq(&ugeth->lock); + + /* Set this UCC to be the master of the MII managment */ + ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num); + + if (init_mii_management_configuration(1, + ugeth->ug_info-> + miiPreambleSupress, + &mii_regs->miimcfg, + &mii_regs->miimind)) { + ugeth_err("%s: The MII Bus is stuck!", dev->name); + err = -1; + goto bus_fail; + } + + spin_unlock_irq(&ugeth->lock); + + /* get info for this PHY */ + curphy = get_phy_info(ugeth->mii_info); + + if (curphy == NULL) { + ugeth_err("%s: No PHY found", dev->name); + err = -1; + goto no_phy; + } + + mii_info->phyinfo = curphy; + + /* Run the commands which initialize the PHY */ + if (curphy->init) { + err = curphy->init(ugeth->mii_info); + if (err) + goto phy_init_fail; + } + + return 0; + + phy_init_fail: + no_phy: + bus_fail: + kfree(mii_info); + + return err; +} + +#ifdef CONFIG_UGETH_TX_ON_DEMOND +static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth) +{ + ucc_fast_transmit_on_demand(ugeth->uccf); + + return 0; +} +#endif + +static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + u32 temp; + + uccf = ugeth->uccf; + + /* Mask GRACEFUL STOP TX interrupt bit and clear it */ + temp = in_be32(uccf->p_uccm); + temp &= ~UCCE_GRA; + out_be32(uccf->p_uccm, temp); + out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */ + + /* Issue host command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, + (u8) QE_CR_PROTOCOL_ETHERNET, 0); + + /* Wait for command to complete */ + do { + temp = in_be32(uccf->p_ucce); + } while (!(temp & UCCE_GRA)); + + uccf->stopped_tx = 1; + + return 0; +} + +static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + u8 temp; + + uccf = ugeth->uccf; + + /* Clear acknowledge bit */ + temp = ugeth->p_rx_glbl_pram->rxgstpack; + temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; + ugeth->p_rx_glbl_pram->rxgstpack = temp; + + /* Keep issuing command and checking acknowledge bit until + it is asserted, according to spec */ + do { + /* Issue host command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. 
+ ucc_num); + qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, + (u8) QE_CR_PROTOCOL_ETHERNET, 0); + + temp = ugeth->p_rx_glbl_pram->rxgstpack; + } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); + + uccf->stopped_rx = 1; + + return 0; +} + +static int ugeth_restart_tx(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + + uccf = ugeth->uccf; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, + 0); + uccf->stopped_tx = 0; + + return 0; +} + +static int ugeth_restart_rx(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + + uccf = ugeth->uccf; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, + 0); + uccf->stopped_rx = 0; + + return 0; +} + +static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode) +{ + ucc_fast_private_t *uccf; + int enabled_tx, enabled_rx; + + uccf = ugeth->uccf; + + /* check if the UCC number is in range. */ + if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { + ugeth_err("%s: ucc_num out of range.", __FUNCTION__); + return -EINVAL; + } + + enabled_tx = uccf->enabled_tx; + enabled_rx = uccf->enabled_rx; + + /* Get Tx and Rx going again, in case this channel was actively + disabled. */ + if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) + ugeth_restart_tx(ugeth); + if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) + ugeth_restart_rx(ugeth); + + ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ + + return 0; + +} + +static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode) +{ + ucc_fast_private_t *uccf; + + uccf = ugeth->uccf; + + /* check if the UCC number is in range. 
*/ + if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { + ugeth_err("%s: ucc_num out of range.", __FUNCTION__); + return -EINVAL; + } + + /* Stop any transmissions */ + if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) + ugeth_graceful_stop_tx(ugeth); + + /* Stop any receptions */ + if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) + ugeth_graceful_stop_rx(ugeth); + + ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ + + return 0; +} + +static void ugeth_dump_regs(ucc_geth_private_t *ugeth) +{ +#ifdef DEBUG + ucc_fast_dump_regs(ugeth->uccf); + dump_regs(ugeth); + dump_bds(ugeth); +#endif +} + +#ifdef CONFIG_UGETH_FILTERING +static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t * + p_UccGethTadParams, + qe_fltr_tad_t *qe_fltr_tad) +{ + u16 temp; + + /* Zero serialized TAD */ + memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE); + + qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */ + if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode || + (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) + || (p_UccGethTadParams->vnontag_op != + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP) + ) + qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF; + if (p_UccGethTadParams->reject_frame) + qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ; + temp = + (u16) (((u16) p_UccGethTadParams-> + vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT); + qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */ + + qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */ + if (p_UccGethTadParams->vnontag_op == + UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT) + qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP; + qe_fltr_tad->serialized[1] |= + p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT; + + qe_fltr_tad->serialized[2] |= + p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT; + /* upper bits */ + qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8); + /* lower bits */ + qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff); + + return 0; +} + +static enet_addr_container_t + *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + enet_addr_container_t *enet_addr_cont; + struct list_head *p_lh; + u16 i, num; + int32_t j; + u8 *p_counter; + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { + p_lh = &ugeth->group_hash_q; + p_counter = &(ugeth->numGroupAddrInHash); + } else { + p_lh = &ugeth->ind_hash_q; + p_counter = &(ugeth->numIndAddrInHash); + } + + if (!p_lh) + return NULL; + + num = *p_counter; + + for (i = 0; i < num; i++) { + enet_addr_cont = + (enet_addr_container_t *) + ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); + for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) { + if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j]) + break; + if (j == 0) + return enet_addr_cont; /* Found */ + } + enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ + } + return NULL; +} + +static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + ucc_geth_enet_address_recognition_location_e location; + enet_addr_container_t *enet_addr_cont; + struct list_head *p_lh; + u8 i; + u32 limit; + u8 *p_counter; + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { + p_lh = &ugeth->group_hash_q; + limit = ugeth->ug_info->maxGroupAddrInHash; + location = + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH; + p_counter = &(ugeth->numGroupAddrInHash); + } else { + p_lh = &ugeth->ind_hash_q; + limit = 
ugeth->ug_info->maxIndAddrInHash; + location = + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH; + p_counter = &(ugeth->numIndAddrInHash); + } + + if ((enet_addr_cont = + ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) { + list_add(p_lh, &enet_addr_cont->node); /* Put it back */ + return 0; + } + if ((!p_lh) || (!(*p_counter < limit))) + return -EBUSY; + if (!(enet_addr_cont = get_enet_addr_container())) + return -ENOMEM; + for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) + (enet_addr_cont->address)[i] = (*p_enet_addr)[i]; + enet_addr_cont->location = location; + enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ + ++(*p_counter); + + hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address)); + + return 0; +} + +static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + enet_addr_container_t *enet_addr_cont; + ucc_fast_private_t *uccf; + comm_dir_e comm_dir; + u16 i, num; + struct list_head *p_lh; + u32 *addr_h, *addr_l; + u8 *p_counter; + + uccf = ugeth->uccf; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + if (! + (enet_addr_cont = + ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) + return -ENOENT; + + /* It's been found and removed from the CQ. */ + /* Now destroy its container */ + put_enet_addr_container(enet_addr_cont); + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { + addr_h = &(p_82xx_addr_filt->gaddr_h); + addr_l = &(p_82xx_addr_filt->gaddr_l); + p_lh = &ugeth->group_hash_q; + p_counter = &(ugeth->numGroupAddrInHash); + } else { + addr_h = &(p_82xx_addr_filt->iaddr_h); + addr_l = &(p_82xx_addr_filt->iaddr_l); + p_lh = &ugeth->ind_hash_q; + p_counter = &(ugeth->numIndAddrInHash); + } + + comm_dir = 0; + if (uccf->enabled_tx) + comm_dir |= COMM_DIR_TX; + if (uccf->enabled_rx) + comm_dir |= COMM_DIR_RX; + if (comm_dir) + ugeth_disable(ugeth, comm_dir); + + /* Clear the hash table. 
*/ + out_be32(addr_h, 0x00000000); + out_be32(addr_l, 0x00000000); + + /* Add all remaining CQ elements back into hash */ + num = --(*p_counter); + for (i = 0; i < num; i++) { + enet_addr_cont = + (enet_addr_container_t *) + ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); + hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address)); + enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ + } + + if (comm_dir) + ugeth_enable(ugeth, comm_dir); + + return 0; +} +#endif /* CONFIG_UGETH_FILTERING */ + +static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t * + ugeth, + enet_addr_type_e + enet_addr_type) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + ucc_fast_private_t *uccf; + comm_dir_e comm_dir; + struct list_head *p_lh; + u16 i, num; + u32 *addr_h, *addr_l; + u8 *p_counter; + + uccf = ugeth->uccf; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { + addr_h = &(p_82xx_addr_filt->gaddr_h); + addr_l = &(p_82xx_addr_filt->gaddr_l); + p_lh = &ugeth->group_hash_q; + p_counter = &(ugeth->numGroupAddrInHash); + } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { + addr_h = &(p_82xx_addr_filt->iaddr_h); + addr_l = &(p_82xx_addr_filt->iaddr_l); + p_lh = &ugeth->ind_hash_q; + p_counter = &(ugeth->numIndAddrInHash); + } else + return -EINVAL; + + comm_dir = 0; + if (uccf->enabled_tx) + comm_dir |= COMM_DIR_TX; + if (uccf->enabled_rx) + comm_dir |= COMM_DIR_RX; + if (comm_dir) + ugeth_disable(ugeth, comm_dir); + + /* Clear the hash table. */ + out_be32(addr_h, 0x00000000); + out_be32(addr_l, 0x00000000); + + if (!p_lh) + return 0; + + num = *p_counter; + + /* Delete all remaining CQ elements */ + for (i = 0; i < num; i++) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); + + *p_counter = 0; + + if (comm_dir) + ugeth_enable(ugeth, comm_dir); + + return 0; +} + +#ifdef CONFIG_UGETH_FILTERING +static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr, + u8 paddr_num) +{ + int i; + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) + ugeth_warn + ("%s: multicast address added to paddr will have no " + "effect - is this what you wanted?", + __FUNCTION__); + + ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */ + /* store address in our database */ + for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) + ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i]; + /* put in hardware */ + return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num); +} +#endif /* CONFIG_UGETH_FILTERING */ + +static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth, + u8 paddr_num) +{ + ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ + return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ +} + +static void ucc_geth_memclean(ucc_geth_private_t *ugeth) +{ + u16 i, j; + u8 *bd; + + if (!ugeth) + return; + + if (ugeth->uccf) + ucc_fast_free(ugeth->uccf); + + if (ugeth->p_thread_data_tx) { + qe_muram_free(ugeth->thread_dat_tx_offset); + ugeth->p_thread_data_tx = NULL; + } + if (ugeth->p_thread_data_rx) { + qe_muram_free(ugeth->thread_dat_rx_offset); + ugeth->p_thread_data_rx = NULL; + } + if (ugeth->p_exf_glbl_param) { + qe_muram_free(ugeth->exf_glbl_param_offset); + ugeth->p_exf_glbl_param = NULL; + } + if (ugeth->p_rx_glbl_pram) { + qe_muram_free(ugeth->rx_glbl_pram_offset); + ugeth->p_rx_glbl_pram = NULL; + } + if (ugeth->p_tx_glbl_pram) { + 
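	/*
	 * Illustrative note (not from the patch): each MURAM parameter area
	 * in ucc_geth_memclean() is freed by its qe_muram offset and the
	 * corresponding virtual pointer is then cleared, so the function can
	 * be called from every error path in ucc_geth_startup() below
	 * without double-freeing.
	 */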
qe_muram_free(ugeth->tx_glbl_pram_offset); + ugeth->p_tx_glbl_pram = NULL; + } + if (ugeth->p_send_q_mem_reg) { + qe_muram_free(ugeth->send_q_mem_reg_offset); + ugeth->p_send_q_mem_reg = NULL; + } + if (ugeth->p_scheduler) { + qe_muram_free(ugeth->scheduler_offset); + ugeth->p_scheduler = NULL; + } + if (ugeth->p_tx_fw_statistics_pram) { + qe_muram_free(ugeth->tx_fw_statistics_pram_offset); + ugeth->p_tx_fw_statistics_pram = NULL; + } + if (ugeth->p_rx_fw_statistics_pram) { + qe_muram_free(ugeth->rx_fw_statistics_pram_offset); + ugeth->p_rx_fw_statistics_pram = NULL; + } + if (ugeth->p_rx_irq_coalescing_tbl) { + qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); + ugeth->p_rx_irq_coalescing_tbl = NULL; + } + if (ugeth->p_rx_bd_qs_tbl) { + qe_muram_free(ugeth->rx_bd_qs_tbl_offset); + ugeth->p_rx_bd_qs_tbl = NULL; + } + if (ugeth->p_init_enet_param_shadow) { + return_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + rxthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_RX, + ugeth->ug_info->riscRx, 1); + return_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_TX, + ugeth->ug_info->riscTx, 0); + kfree(ugeth->p_init_enet_param_shadow); + ugeth->p_init_enet_param_shadow = NULL; + } + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + bd = ugeth->p_tx_bd_ring[i]; + for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { + if (ugeth->tx_skbuff[i][j]) { + dma_unmap_single(NULL, + BD_BUFFER_ARG(bd), + (BD_STATUS_AND_LENGTH(bd) & + BD_LENGTH_MASK), + DMA_TO_DEVICE); + dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); + ugeth->tx_skbuff[i][j] = NULL; + } + } + + kfree(ugeth->tx_skbuff[i]); + + if (ugeth->p_tx_bd_ring[i]) { + if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_SYSTEM) + kfree((void *)ugeth->tx_bd_ring_offset[i]); + else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) + qe_muram_free(ugeth->tx_bd_ring_offset[i]); + ugeth->p_tx_bd_ring[i] = NULL; + } + } + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + if (ugeth->p_rx_bd_ring[i]) { + /* Return existing data buffers in ring */ + bd = ugeth->p_rx_bd_ring[i]; + for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { + if (ugeth->rx_skbuff[i][j]) { + dma_unmap_single(NULL, BD_BUFFER(bd), + ugeth->ug_info-> + uf_info. + max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT, + DMA_FROM_DEVICE); + + dev_kfree_skb_any(ugeth-> + rx_skbuff[i][j]); + ugeth->rx_skbuff[i][j] = NULL; + } + bd += UCC_GETH_SIZE_OF_BD; + } + + kfree(ugeth->rx_skbuff[i]); + + if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_SYSTEM) + kfree((void *)ugeth->rx_bd_ring_offset[i]); + else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) + qe_muram_free(ugeth->rx_bd_ring_offset[i]); + ugeth->p_rx_bd_ring[i] = NULL; + } + } + while (!list_empty(&ugeth->group_hash_q)) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY + (dequeue(&ugeth->group_hash_q))); + while (!list_empty(&ugeth->ind_hash_q)) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY + (dequeue(&ugeth->ind_hash_q))); + +} + +static void ucc_geth_set_multi(struct net_device *dev) +{ + ucc_geth_private_t *ugeth; + struct dev_mc_list *dmi; + ucc_fast_t *uf_regs; + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + enet_addr_t tempaddr; + u8 *mcptr, *tdptr; + int i, j; + + ugeth = netdev_priv(dev); + + uf_regs = ugeth->uccf->uf_regs; + + if (dev->flags & IFF_PROMISC) { + + /* Log any net taps. 
*/ + printk("%s: Promiscuous mode enabled.\n", dev->name); + uf_regs->upsmr |= UPSMR_PRO; + + } else { + + uf_regs->upsmr &= ~UPSMR_PRO; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth-> + p_rx_glbl_pram->addressfiltering; + + if (dev->flags & IFF_ALLMULTI) { + /* Catch all multicast addresses, so set the + * filter to all 1's. + */ + out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); + out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); + } else { + /* Clear filter and add the addresses in the list. + */ + out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); + out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); + + dmi = dev->mc_list; + + for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) { + + /* Only support group multicast for now. + */ + if (!(dmi->dmi_addr[0] & 1)) + continue; + + /* The address in dmi_addr is LSB first, + * and taddr is MSB first. We have to + * copy bytes MSB first from dmi_addr. + */ + mcptr = (u8 *) dmi->dmi_addr + 5; + tdptr = (u8 *) & tempaddr; + for (j = 0; j < 6; j++) + *tdptr++ = *mcptr--; + + /* Ask CPM to run CRC and set bit in + * filter mask. + */ + hw_add_addr_in_hash(ugeth, &tempaddr); + + } + } + } +} + +static void ucc_geth_stop(ucc_geth_private_t *ugeth) +{ + ucc_geth_t *ug_regs = ugeth->ug_regs; + u32 tempval; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + /* Disable the controller */ + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); + + /* Tell the kernel the link is down */ + ugeth->mii_info->link = 0; + adjust_link(ugeth->dev); + + /* Mask all interrupts */ + out_be32(ugeth->uccf->p_ucce, 0x00000000); + + /* Clear all interrupts */ + out_be32(ugeth->uccf->p_ucce, 0xffffffff); + + /* Disable Rx and Tx */ + tempval = in_be32(&ug_regs->maccfg1); + tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); + out_be32(&ug_regs->maccfg1, tempval); + + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { + /* Clear any pending interrupts */ + mii_clear_phy_interrupt(ugeth->mii_info); + + /* Disable PHY Interrupts */ + mii_configure_phy_interrupt(ugeth->mii_info, + MII_INTERRUPT_DISABLED); + } + + free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); + + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { + free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev); + } else { + del_timer_sync(&ugeth->phy_info_timer); + } + + ucc_geth_memclean(ugeth); +} + +static int ucc_geth_startup(ucc_geth_private_t *ugeth) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + ucc_geth_init_pram_t *p_init_enet_pram; + ucc_fast_private_t *uccf; + ucc_geth_info_t *ug_info; + ucc_fast_info_t *uf_info; + ucc_fast_t *uf_regs; + ucc_geth_t *ug_regs; + int ret_val = -EINVAL; + u32 remoder = UCC_GETH_REMODER_INIT; + u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; + u32 ifstat, i, j, size, l2qt, l3qt, length; + u16 temoder = UCC_GETH_TEMODER_INIT; + u16 test; + u8 function_code = 0; + u8 *bd, *endOfRing; + u8 numThreadsRxNumerical, numThreadsTxNumerical; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || + (uf_info->bd_mem_part == MEM_PART_MURAM))) { + ugeth_err("%s: Bad memory partition value.", __FUNCTION__); + return -EINVAL; + } + + /* Rx BD lengths */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || + (ug_info->bdRingLenRx[i] % + UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { + ugeth_err + ("%s: Rx BD ring length must be multiple of 4," + " no smaller than 8.", __FUNCTION__); + return 
-EINVAL; + } + } + + /* Tx BD lengths */ + for (i = 0; i < ug_info->numQueuesTx; i++) { + if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { + ugeth_err + ("%s: Tx BD ring length must be no smaller than 2.", + __FUNCTION__); + return -EINVAL; + } + } + + /* mrblr */ + if ((uf_info->max_rx_buf_length == 0) || + (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { + ugeth_err + ("%s: max_rx_buf_length must be non-zero multiple of 128.", + __FUNCTION__); + return -EINVAL; + } + + /* num Tx queues */ + if (ug_info->numQueuesTx > NUM_TX_QUEUES) { + ugeth_err("%s: number of tx queues too large.", __FUNCTION__); + return -EINVAL; + } + + /* num Rx queues */ + if (ug_info->numQueuesRx > NUM_RX_QUEUES) { + ugeth_err("%s: number of rx queues too large.", __FUNCTION__); + return -EINVAL; + } + + /* l2qt */ + for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { + if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { + ugeth_err + ("%s: VLAN priority table entry must not be" + " larger than number of Rx queues.", + __FUNCTION__); + return -EINVAL; + } + } + + /* l3qt */ + for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { + if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { + ugeth_err + ("%s: IP priority table entry must not be" + " larger than number of Rx queues.", + __FUNCTION__); + return -EINVAL; + } + } + + if (ug_info->cam && !ug_info->ecamptr) { + ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", + __FUNCTION__); + return -EINVAL; + } + + if ((ug_info->numStationAddresses != + UCC_GETH_NUM_OF_STATION_ADDRESSES_1) + && ug_info->rxExtendedFiltering) { + ugeth_err("%s: Number of station addresses greater than 1 " + "not allowed in extended parsing mode.", + __FUNCTION__); + return -EINVAL; + } + + /* Generate uccm_mask for receive */ + uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ + for (i = 0; i < ug_info->numQueuesRx; i++) + uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i); + + for (i = 0; i < ug_info->numQueuesTx; i++) + uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i); + /* Initialize the general fast UCC block. 
*/ + if (ucc_fast_init(uf_info, &uccf)) { + ugeth_err("%s: Failed to init uccf.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->uccf = uccf; + + switch (ug_info->numThreadsRx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsRxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsRxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsRxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsRxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsRxNumerical = 8; + break; + default: + ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + break; + } + + switch (ug_info->numThreadsTx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsTxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsTxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsTxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsTxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsTxNumerical = 8; + break; + default: + ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + break; + } + + /* Calculate rx_extended_features */ + ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || + ug_info->ipAddressAlignment || + (ug_info->numStationAddresses != + UCC_GETH_NUM_OF_STATION_ADDRESSES_1); + + ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || + (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) + || (ug_info->vlanOperationNonTagged != + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); + + uf_regs = uccf->uf_regs; + ug_regs = (ucc_geth_t *) (uccf->uf_regs); + ugeth->ug_regs = ug_regs; + + init_default_reg_vals(&uf_regs->upsmr, + &ug_regs->maccfg1, &ug_regs->maccfg2); + + /* Set UPSMR */ + /* For more details see the hardware spec. */ + init_rx_parameters(ug_info->bro, + ug_info->rsh, ug_info->pro, &uf_regs->upsmr); + + /* We're going to ignore other registers for now, */ + /* except as needed to get up and running */ + + /* Set MACCFG1 */ + /* For more details see the hardware spec. */ + init_flow_control_params(ug_info->aufc, + ug_info->receiveFlowControl, + 1, + ug_info->pausePeriod, + ug_info->extensionField, + &uf_regs->upsmr, + &ug_regs->uempr, &ug_regs->maccfg1); + + maccfg1 = in_be32(&ug_regs->maccfg1); + maccfg1 |= MACCFG1_ENABLE_RX; + maccfg1 |= MACCFG1_ENABLE_TX; + out_be32(&ug_regs->maccfg1, maccfg1); + + /* Set IPGIFG */ + /* For more details see the hardware spec. */ + ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, + ug_info->nonBackToBackIfgPart2, + ug_info-> + miminumInterFrameGapEnforcement, + ug_info->backToBackInterFrameGap, + &ug_regs->ipgifg); + if (ret_val != 0) { + ugeth_err("%s: IPGIFG initialization parameter too large.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + /* Set HAFDUP */ + /* For more details see the hardware spec. */ + ret_val = init_half_duplex_params(ug_info->altBeb, + ug_info->backPressureNoBackoff, + ug_info->noBackoff, + ug_info->excessDefer, + ug_info->altBebTruncation, + ug_info->maxRetransmission, + ug_info->collisionWindow, + &ug_regs->hafdup); + if (ret_val != 0) { + ugeth_err("%s: Half Duplex initialization parameter too large.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + /* Set IFSTAT */ + /* For more details see the hardware spec. 
*/ + /* Read only - resets upon read */ + ifstat = in_be32(&ug_regs->ifstat); + + /* Clear UEMPR */ + /* For more details see the hardware spec. */ + out_be32(&ug_regs->uempr, 0); + + /* Set UESCR */ + /* For more details see the hardware spec. */ + init_hw_statistics_gathering_mode((ug_info->statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), + 0, &uf_regs->upsmr, &ug_regs->uescr); + + /* Allocate Tx bds */ + for (j = 0; j < ug_info->numQueuesTx; j++) { + /* Allocate in multiple of + UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, + according to spec */ + length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) + / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) + * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; + if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) % + UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) + length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; + if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { + u32 align = 4; + if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) + align = UCC_GETH_TX_BD_RING_ALIGNMENT; + ugeth->tx_bd_ring_offset[j] = + (u32) (kmalloc((u32) (length + align), + GFP_KERNEL)); + if (ugeth->tx_bd_ring_offset[j] != 0) + ugeth->p_tx_bd_ring[j] = + (void*)((ugeth->tx_bd_ring_offset[j] + + align) & ~(align - 1)); + } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { + ugeth->tx_bd_ring_offset[j] = + qe_muram_alloc(length, + UCC_GETH_TX_BD_RING_ALIGNMENT); + if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j])) + ugeth->p_tx_bd_ring[j] = + (u8 *) qe_muram_addr(ugeth-> + tx_bd_ring_offset[j]); + } + if (!ugeth->p_tx_bd_ring[j]) { + ugeth_err + ("%s: Can not allocate memory for Tx bd rings.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + /* Zero unused end of bd ring, according to spec */ + memset(ugeth->p_tx_bd_ring[j] + + ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0, + length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD); + } + + /* Allocate Rx bds */ + for (j = 0; j < ug_info->numQueuesRx; j++) { + length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD; + if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { + u32 align = 4; + if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) + align = UCC_GETH_RX_BD_RING_ALIGNMENT; + ugeth->rx_bd_ring_offset[j] = + (u32) (kmalloc((u32) (length + align), GFP_KERNEL)); + if (ugeth->rx_bd_ring_offset[j] != 0) + ugeth->p_rx_bd_ring[j] = + (void*)((ugeth->rx_bd_ring_offset[j] + + align) & ~(align - 1)); + } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { + ugeth->rx_bd_ring_offset[j] = + qe_muram_alloc(length, + UCC_GETH_RX_BD_RING_ALIGNMENT); + if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j])) + ugeth->p_rx_bd_ring[j] = + (u8 *) qe_muram_addr(ugeth-> + rx_bd_ring_offset[j]); + } + if (!ugeth->p_rx_bd_ring[j]) { + ugeth_err + ("%s: Can not allocate memory for Rx bd rings.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + } + + /* Init Tx bds */ + for (j = 0; j < ug_info->numQueuesTx; j++) { + /* Setup the skbuff rings */ + ugeth->tx_skbuff[j] = + (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * + ugeth->ug_info->bdRingLenTx[j], + GFP_KERNEL); + + if (ugeth->tx_skbuff[j] == NULL) { + ugeth_err("%s: Could not allocate tx_skbuff", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) + ugeth->tx_skbuff[j][i] = NULL; + + ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; + bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; + for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { + BD_BUFFER_CLEAR(bd); + BD_STATUS_AND_LENGTH_SET(bd, 0); + bd 
+= UCC_GETH_SIZE_OF_BD; + } + bd -= UCC_GETH_SIZE_OF_BD; + BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */ + } + + /* Init Rx bds */ + for (j = 0; j < ug_info->numQueuesRx; j++) { + /* Setup the skbuff rings */ + ugeth->rx_skbuff[j] = + (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * + ugeth->ug_info->bdRingLenRx[j], + GFP_KERNEL); + + if (ugeth->rx_skbuff[j] == NULL) { + ugeth_err("%s: Could not allocate rx_skbuff", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) + ugeth->rx_skbuff[j][i] = NULL; + + ugeth->skb_currx[j] = 0; + bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; + for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { + BD_STATUS_AND_LENGTH_SET(bd, R_I); + BD_BUFFER_CLEAR(bd); + bd += UCC_GETH_SIZE_OF_BD; + } + bd -= UCC_GETH_SIZE_OF_BD; + BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */ + } + + /* + * Global PRAM + */ + /* Tx global PRAM */ + /* Allocate global tx parameter RAM page */ + ugeth->tx_glbl_pram_offset = + qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t), + UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_tx_glbl_pram = + (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth-> + tx_glbl_pram_offset); + /* Zero out p_tx_glbl_pram */ + memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t)); + + /* Fill global PRAM */ + + /* TQPTR */ + /* Size varies with number of Tx threads */ + ugeth->thread_dat_tx_offset = + qe_muram_alloc(numThreadsTxNumerical * + sizeof(ucc_geth_thread_data_tx_t) + + 32 * (numThreadsTxNumerical == 1), + UCC_GETH_THREAD_DATA_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_thread_data_tx = + (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth-> + thread_dat_tx_offset); + out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); + + /* vtagtable */ + for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) + out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], + ug_info->vtagtable[i]); + + /* iphoffset */ + for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) + ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i]; + + /* SQPTR */ + /* Size varies with number of Tx queues */ + ugeth->send_q_mem_reg_offset = + qe_muram_alloc(ug_info->numQueuesTx * + sizeof(ucc_geth_send_queue_qd_t), + UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_send_q_mem_reg = + (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth-> + send_q_mem_reg_offset); + out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); + + /* Setup the table */ + /* Assume BD rings are already established */ + for (i = 0; i < ug_info->numQueuesTx; i++) { + endOfRing = + ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - + 1) * UCC_GETH_SIZE_OF_BD; + if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, + (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 
+ last_bd_completed_address, + (u32) virt_to_phys(endOfRing)); + } else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) { + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, + (u32) immrbar_virt_to_phys(ugeth-> + p_tx_bd_ring[i])); + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. + last_bd_completed_address, + (u32) immrbar_virt_to_phys(endOfRing)); + } + } + + /* schedulerbasepointer */ + + if (ug_info->numQueuesTx > 1) { + /* scheduler exists only if more than 1 tx queue */ + ugeth->scheduler_offset = + qe_muram_alloc(sizeof(ucc_geth_scheduler_t), + UCC_GETH_SCHEDULER_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->scheduler_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_scheduler.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_scheduler = + (ucc_geth_scheduler_t *) qe_muram_addr(ugeth-> + scheduler_offset); + out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, + ugeth->scheduler_offset); + /* Zero out p_scheduler */ + memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t)); + + /* Set values in scheduler */ + out_be32(&ugeth->p_scheduler->mblinterval, + ug_info->mblinterval); + out_be16(&ugeth->p_scheduler->nortsrbytetime, + ug_info->nortsrbytetime); + ugeth->p_scheduler->fracsiz = ug_info->fracsiz; + ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq; + ugeth->p_scheduler->txasap = ug_info->txasap; + ugeth->p_scheduler->extrabw = ug_info->extrabw; + for (i = 0; i < NUM_TX_QUEUES; i++) + ugeth->p_scheduler->weightfactor[i] = + ug_info->weightfactor[i]; + + /* Set pointers to cpucount registers in scheduler */ + ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); + ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); + ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); + ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); + ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); + ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); + ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); + ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); + } + + /* schedulerbasepointer */ + /* TxRMON_PTR (statistics) */ + if (ug_info-> + statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { + ugeth->tx_fw_statistics_pram_offset = + qe_muram_alloc(sizeof + (ucc_geth_tx_firmware_statistics_pram_t), + UCC_GETH_TX_STATISTICS_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_tx_fw_statistics_pram.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_tx_fw_statistics_pram = + (ucc_geth_tx_firmware_statistics_pram_t *) + qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); + /* Zero out p_tx_fw_statistics_pram */ + memset(ugeth->p_tx_fw_statistics_pram, + 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t)); + } + + /* temoder */ + /* Already has speed set */ + + if (ug_info->numQueuesTx > 1) + temoder |= TEMODER_SCHEDULER_ENABLE; + if (ug_info->ipCheckSumGenerate) + temoder |= TEMODER_IP_CHECKSUM_GENERATE; + temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); + out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); + + test = in_be16(&ugeth->p_tx_glbl_pram->temoder); + + /* Function code register value to be used later */ + function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL; + /* Required for QE */ + + /* function code register */ + out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); + + /* Rx global PRAM */ + /* Allocate 
global rx parameter RAM page */ + ugeth->rx_glbl_pram_offset = + qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t), + UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_rx_glbl_pram = + (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth-> + rx_glbl_pram_offset); + /* Zero out p_rx_glbl_pram */ + memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t)); + + /* Fill global PRAM */ + + /* RQPTR */ + /* Size varies with number of Rx threads */ + ugeth->thread_dat_rx_offset = + qe_muram_alloc(numThreadsRxNumerical * + sizeof(ucc_geth_thread_data_rx_t), + UCC_GETH_THREAD_DATA_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_thread_data_rx = + (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth-> + thread_dat_rx_offset); + out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); + + /* typeorlen */ + out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); + + /* rxrmonbaseptr (statistics) */ + if (ug_info-> + statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { + ugeth->rx_fw_statistics_pram_offset = + qe_muram_alloc(sizeof + (ucc_geth_rx_firmware_statistics_pram_t), + UCC_GETH_RX_STATISTICS_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_rx_fw_statistics_pram.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_rx_fw_statistics_pram = + (ucc_geth_rx_firmware_statistics_pram_t *) + qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); + /* Zero out p_rx_fw_statistics_pram */ + memset(ugeth->p_rx_fw_statistics_pram, 0, + sizeof(ucc_geth_rx_firmware_statistics_pram_t)); + } + + /* intCoalescingPtr */ + + /* Size varies with number of Rx queues */ + ugeth->rx_irq_coalescing_tbl_offset = + qe_muram_alloc(ug_info->numQueuesRx * + sizeof(ucc_geth_rx_interrupt_coalescing_entry_t), + UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_rx_irq_coalescing_tbl.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_rx_irq_coalescing_tbl = + (ucc_geth_rx_interrupt_coalescing_table_t *) + qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); + out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, + ugeth->rx_irq_coalescing_tbl_offset); + + /* Fill interrupt coalescing table */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. + interruptcoalescingmaxvalue, + ug_info->interruptcoalescingmaxvalue[i]); + out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 
+ interruptcoalescingcounter, + ug_info->interruptcoalescingmaxvalue[i]); + } + + /* MRBLR */ + init_max_rx_buff_len(uf_info->max_rx_buf_length, + &ugeth->p_rx_glbl_pram->mrblr); + /* MFLR */ + out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); + /* MINFLR */ + init_min_frame_len(ug_info->minFrameLength, + &ugeth->p_rx_glbl_pram->minflr, + &ugeth->p_rx_glbl_pram->mrblr); + /* MAXD1 */ + out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); + /* MAXD2 */ + out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); + + /* l2qt */ + l2qt = 0; + for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) + l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); + out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); + + /* l3qt */ + for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { + l3qt = 0; + for (i = 0; i < 8; i++) + l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); + out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt); + } + + /* vlantype */ + out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); + + /* vlantci */ + out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); + + /* ecamptr */ + out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); + + /* RBDQPTR */ + /* Size varies with number of Rx queues */ + ugeth->rx_bd_qs_tbl_offset = + qe_muram_alloc(ug_info->numQueuesRx * + (sizeof(ucc_geth_rx_bd_queues_entry_t) + + sizeof(ucc_geth_rx_prefetched_bds_t)), + UCC_GETH_RX_BD_QUEUES_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_rx_bd_qs_tbl = + (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth-> + rx_bd_qs_tbl_offset); + out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); + /* Zero out p_rx_bd_qs_tbl */ + memset(ugeth->p_rx_bd_qs_tbl, + 0, + ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) + + sizeof(ucc_geth_rx_prefetched_bds_t))); + + /* Setup the table */ + /* Assume BD rings are already established */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { + out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); + } else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) { + out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + (u32) immrbar_virt_to_phys(ugeth-> + p_rx_bd_ring[i])); + } + /* rest of fields handled by QE */ + } + + /* remoder */ + /* Already has speed set */ + + if (ugeth->rx_extended_features) + remoder |= REMODER_RX_EXTENDED_FEATURES; + if (ug_info->rxExtendedFiltering) + remoder |= REMODER_RX_EXTENDED_FILTERING; + if (ug_info->dynamicMaxFrameLength) + remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; + if (ug_info->dynamicMinFrameLength) + remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; + remoder |= + ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; + remoder |= + ug_info-> + vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; + remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; + remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); + if (ug_info->ipCheckSumCheck) + remoder |= REMODER_IP_CHECKSUM_CHECK; + if (ug_info->ipAddressAlignment) + remoder |= REMODER_IP_ADDRESS_ALIGNMENT; + out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); + + /* Note that this function must be called */ + /* ONLY AFTER p_tx_fw_statistics_pram */ + /* andp_UccGethRxFirmwareStatisticsPram are allocated ! 
*/ + init_firmware_statistics_gathering_mode((ug_info-> + statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), + (ug_info->statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), + &ugeth->p_tx_glbl_pram->txrmonbaseptr, + ugeth->tx_fw_statistics_pram_offset, + &ugeth->p_rx_glbl_pram->rxrmonbaseptr, + ugeth->rx_fw_statistics_pram_offset, + &ugeth->p_tx_glbl_pram->temoder, + &ugeth->p_rx_glbl_pram->remoder); + + /* function code register */ + ugeth->p_rx_glbl_pram->rstate = function_code; + + /* initialize extended filtering */ + if (ug_info->rxExtendedFiltering) { + if (!ug_info->extendedFilteringChainPointer) { + ugeth_err("%s: Null Extended Filtering Chain Pointer.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + } + + /* Allocate memory for extended filtering Mode Global + Parameters */ + ugeth->exf_glbl_param_offset = + qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t), + UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_exf_glbl_param.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_exf_glbl_param = + (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth-> + exf_glbl_param_offset); + out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, + ugeth->exf_glbl_param_offset); + out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, + (u32) ug_info->extendedFilteringChainPointer); + + } else { /* initialize 82xx style address filtering */ + + /* Init individual address recognition registers to disabled */ + + for (j = 0; j < NUM_OF_PADDRS; j++) + ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); + + /* Create CQs for hash tables */ + if (ug_info->maxGroupAddrInHash > 0) { + INIT_LIST_HEAD(&ugeth->group_hash_q); + } + if (ug_info->maxIndAddrInHash > 0) { + INIT_LIST_HEAD(&ugeth->ind_hash_q); + } + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth-> + p_rx_glbl_pram->addressfiltering; + + ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, + ENET_ADDR_TYPE_GROUP); + ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, + ENET_ADDR_TYPE_INDIVIDUAL); + } + + /* + * Initialize UCC at QE level + */ + + command = QE_INIT_TX_RX; + + /* Allocate shadow InitEnet command parameter structure. + * This is needed because after the InitEnet command is executed, + * the structure in DPRAM is released, because DPRAM is a premium + * resource. + * This shadow structure keeps a copy of what was done so that the + * allocated resources can be released when the channel is freed. 
+ */ + if (!(ugeth->p_init_enet_param_shadow = + (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t), + GFP_KERNEL))) { + ugeth_err + ("%s: Can not allocate memory for" + " p_UccInitEnetParamShadows.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + /* Zero out *p_init_enet_param_shadow */ + memset((char *)ugeth->p_init_enet_param_shadow, + 0, sizeof(ucc_geth_init_pram_t)); + + /* Fill shadow InitEnet command parameter structure */ + + ugeth->p_init_enet_param_shadow->resinit1 = + ENET_INIT_PARAM_MAGIC_RES_INIT1; + ugeth->p_init_enet_param_shadow->resinit2 = + ENET_INIT_PARAM_MAGIC_RES_INIT2; + ugeth->p_init_enet_param_shadow->resinit3 = + ENET_INIT_PARAM_MAGIC_RES_INIT3; + ugeth->p_init_enet_param_shadow->resinit4 = + ENET_INIT_PARAM_MAGIC_RES_INIT4; + ugeth->p_init_enet_param_shadow->resinit5 = + ENET_INIT_PARAM_MAGIC_RES_INIT5; + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; + + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ugeth->rx_glbl_pram_offset | ug_info->riscRx; + if ((ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) + && (ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + && (ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { + ugeth_err("%s: Invalid largest External Lookup Key Size.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + } + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = + ug_info->largestexternallookupkeysize; + size = sizeof(ucc_geth_thread_rx_pram_t); + if (ug_info->rxExtendedFiltering) { + size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; + if (ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; + if (ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; + } + + if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> + p_init_enet_param_shadow->rxthread[0]), + (u8) (numThreadsRxNumerical + 1) + /* Rx needs one extra for terminator */ + , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, + ug_info->riscRx, 1)) != 0) { + ugeth_err("%s: Can not fill p_init_enet_param_shadow.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + ugeth->p_init_enet_param_shadow->txglobal = + ugeth->tx_glbl_pram_offset | ug_info->riscTx; + if ((ret_val = + fill_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), numThreadsTxNumerical, + sizeof(ucc_geth_thread_tx_pram_t), + UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, + ug_info->riscTx, 0)) != 0) { + ugeth_err("%s: Can not fill p_init_enet_param_shadow.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + /* Load Rx bds with buffers */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { + ugeth_err("%s: Can not fill Rx bds with buffers.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + } + + /* Allocate InitEnet command parameter structure */ + init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4); + if (IS_MURAM_ERR(init_enet_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 
+ __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + p_init_enet_pram = + (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset); + + /* Copy shadow InitEnet command parameter structure into PRAM */ + p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1; + p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2; + p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3; + p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4; + out_be16(&p_init_enet_pram->resinit5, + ugeth->p_init_enet_param_shadow->resinit5); + p_init_enet_pram->largestexternallookupkeysize = + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize; + out_be32(&p_init_enet_pram->rgftgfrxglobal, + ugeth->p_init_enet_param_shadow->rgftgfrxglobal); + for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) + out_be32(&p_init_enet_pram->rxthread[i], + ugeth->p_init_enet_param_shadow->rxthread[i]); + out_be32(&p_init_enet_pram->txglobal, + ugeth->p_init_enet_param_shadow->txglobal); + for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) + out_be32(&p_init_enet_pram->txthread[i], + ugeth->p_init_enet_param_shadow->txthread[i]); + + /* Issue QE command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, + init_enet_pram_offset); + + /* Free InitEnet command parameter */ + qe_muram_free(init_enet_pram_offset); + + return 0; +} + +/* returns a net_device_stats structure pointer */ +static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + + return &(ugeth->stats); +} + +/* ucc_geth_timeout gets called when a packet has not been + * transmitted after a set amount of time. + * For now, assume that clearing out all the structures, and + * starting over will fix the problem. */ +static void ucc_geth_timeout(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ugeth->stats.tx_errors++; + + ugeth_dump_regs(ugeth); + + if (dev->flags & IFF_UP) { + ucc_geth_stop(ugeth); + ucc_geth_startup(ugeth); + } + + netif_schedule(dev); +} + +/* This is called by the kernel when a frame is ready for transmission. 
*/ +/* It is pointed to by the dev->hard_start_xmit function pointer */ +static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + u8 *bd; /* BD pointer */ + u32 bd_status; + u8 txQ = 0; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irq(&ugeth->lock); + + ugeth->stats.tx_bytes += skb->len; + + /* Start from the next BD that should be filled */ + bd = ugeth->txBd[txQ]; + bd_status = BD_STATUS_AND_LENGTH(bd); + /* Save the skb pointer so we can free it later */ + ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; + + /* Update the current skb pointer (wrapping if this was the last) */ + ugeth->skb_curtx[txQ] = + (ugeth->skb_curtx[txQ] + + 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); + + /* set up the buffer descriptor */ + BD_BUFFER_SET(bd, + dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); + + //printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); + + bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; + + BD_STATUS_AND_LENGTH_SET(bd, bd_status); + + dev->trans_start = jiffies; + + /* Move to next BD in the ring */ + if (!(bd_status & T_W)) + ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD; + else + ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ]; + + /* If the next BD still needs to be cleaned up, then the bds + are full. We need to tell the kernel to stop sending us stuff. */ + if (bd == ugeth->confBd[txQ]) { + if (!netif_queue_stopped(dev)) + netif_stop_queue(dev); + } + + if (ugeth->p_scheduler) { + ugeth->cpucount[txQ]++; + /* Indicate to QE that there are more Tx bds ready for + transmission */ + /* This is done by writing a running counter of the bd + count to the scheduler PRAM. */ + out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); + } + + spin_unlock_irq(&ugeth->lock); + + return 0; +} + +static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit) +{ + struct sk_buff *skb; + u8 *bd; + u16 length, howmany = 0; + u32 bd_status; + u8 *bdBuffer; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + spin_lock(&ugeth->lock); + /* collect received buffers */ + bd = ugeth->rxBd[rxQ]; + + bd_status = BD_STATUS_AND_LENGTH(bd); + + /* while there are received buffers and BD is full (~R_E) */ + while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { + bdBuffer = (u8 *) BD_BUFFER(bd); + length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); + skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; + + /* determine whether buffer is first, last, first and last + (single buffer frame) or middle (not first and not last) */ + if (!skb || + (!(bd_status & (R_F | R_L))) || + (bd_status & R_ERRORS_FATAL)) { + ugeth_vdbg("%s, %d: ERROR!!! 
skb - 0x%08x", + __FUNCTION__, __LINE__, (u32) skb); + if (skb) + dev_kfree_skb_any(skb); + + ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; + ugeth->stats.rx_dropped++; + } else { + ugeth->stats.rx_packets++; + howmany++; + + /* Prep the skb for the packet */ + skb_put(skb, length); + + /* Tell the skb what kind of packet this is */ + skb->protocol = eth_type_trans(skb, ugeth->dev); + + ugeth->stats.rx_bytes += length; + /* Send the packet up the stack */ +#ifdef CONFIG_UGETH_NAPI + netif_receive_skb(skb); +#else + netif_rx(skb); +#endif /* CONFIG_UGETH_NAPI */ + } + + ugeth->dev->last_rx = jiffies; + + skb = get_new_skb(ugeth, bd); + if (!skb) { + ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); + spin_unlock(&ugeth->lock); + ugeth->stats.rx_dropped++; + break; + } + + ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; + + /* update to point at the next skb */ + ugeth->skb_currx[rxQ] = + (ugeth->skb_currx[rxQ] + + 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); + + if (bd_status & R_W) + bd = ugeth->p_rx_bd_ring[rxQ]; + else + bd += UCC_GETH_SIZE_OF_BD; + + bd_status = BD_STATUS_AND_LENGTH(bd); + } + + ugeth->rxBd[rxQ] = bd; + spin_unlock(&ugeth->lock); + return howmany; +} + +static int ucc_geth_tx(struct net_device *dev, u8 txQ) +{ + /* Start from the next BD that should be filled */ + ucc_geth_private_t *ugeth = netdev_priv(dev); + u8 *bd; /* BD pointer */ + u32 bd_status; + + bd = ugeth->confBd[txQ]; + bd_status = BD_STATUS_AND_LENGTH(bd); + + /* Normal processing. */ + while ((bd_status & T_R) == 0) { + /* BD contains already transmitted buffer. */ + /* Handle the transmitted buffer and release */ + /* the BD to be used with the current frame */ + + if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) + break; + + ugeth->stats.tx_packets++; + + /* Free the sk buffer associated with this TxBD */ + dev_kfree_skb_irq(ugeth-> + tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]); + ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; + ugeth->skb_dirtytx[txQ] = + (ugeth->skb_dirtytx[txQ] + + 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); + + /* We freed a buffer, so now we can restart transmission */ + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + /* Advance the confirmation BD pointer */ + if (!(bd_status & T_W)) + ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD; + else + ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ]; + } + return 0; +} + +#ifdef CONFIG_UGETH_NAPI +static int ucc_geth_poll(struct net_device *dev, int *budget) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + int howmany; + int rx_work_limit = *budget; + u8 rxQ = 0; + + if (rx_work_limit > dev->quota) + rx_work_limit = dev->quota; + + howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit); + + dev->quota -= howmany; + rx_work_limit -= howmany; + *budget -= howmany; + + if (rx_work_limit >= 0) + netif_rx_complete(dev); + + return (rx_work_limit < 0) ? 
1 : 0; +} +#endif /* CONFIG_UGETH_NAPI */ + +static irqreturn_t ucc_geth_irq_handler(int irq, void *info, + struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *)info; + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_fast_private_t *uccf; + ucc_geth_info_t *ug_info; + register u32 ucce = 0; + register u32 bit_mask = UCCE_RXBF_SINGLE_MASK; + register u32 tx_mask = UCCE_TXBF_SINGLE_MASK; + register u8 i; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + if (!ugeth) + return IRQ_NONE; + + uccf = ugeth->uccf; + ug_info = ugeth->ug_info; + + do { + ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm)); + + /* clear event bits for next time */ + /* Side effect here is to mask ucce variable + for future processing below. */ + out_be32(uccf->p_ucce, ucce); /* Clear with ones, + but only bits in UCCM */ + + /* We ignore Tx interrupts because Tx confirmation is + done inside Tx routine */ + + for (i = 0; i < ug_info->numQueuesRx; i++) { + if (ucce & bit_mask) + ucc_geth_rx(ugeth, i, + (int)ugeth->ug_info-> + bdRingLenRx[i]); + ucce &= ~bit_mask; + bit_mask <<= 1; + } + + for (i = 0; i < ug_info->numQueuesTx; i++) { + if (ucce & tx_mask) + ucc_geth_tx(dev, i); + ucce &= ~tx_mask; + tx_mask <<= 1; + } + + /* Exceptions */ + if (ucce & UCCE_BSY) { + ugeth_vdbg("Got BUSY irq!!!!"); + ugeth->stats.rx_errors++; + ucce &= ~UCCE_BSY; + } + if (ucce & UCCE_OTHER) { + ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!", + ucce); + ugeth->stats.rx_errors++; + ucce &= ~ucce; + } + } + while (ucce); + + return IRQ_HANDLED; +} + +static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *)dev_id; + ucc_geth_private_t *ugeth = netdev_priv(dev); + + ugeth_vdbg("%s: IN", __FUNCTION__); + + /* Clear the interrupt */ + mii_clear_phy_interrupt(ugeth->mii_info); + + /* Disable PHY interrupts */ + mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED); + + /* Schedule the phy change */ + schedule_work(&ugeth->tq); + + return IRQ_HANDLED; +} + +/* Scheduled by the phy_interrupt/timer to handle PHY changes */ +static void ugeth_phy_change(void *data) +{ + struct net_device *dev = (struct net_device *)data; + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_geth_t *ug_regs; + int result = 0; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ug_regs = ugeth->ug_regs; + + /* Delay to give the PHY a chance to change the + * register state */ + msleep(1); + + /* Update the link, speed, duplex */ + result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info); + + /* Adjust the known status as long as the link + * isn't still coming up */ + if ((0 == result) || (ugeth->mii_info->link == 0)) + adjust_link(dev); + + /* Reenable interrupts, if needed */ + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) + mii_configure_phy_interrupt(ugeth->mii_info, + MII_INTERRUPT_ENABLED); +} + +/* Called every so often on systems that don't interrupt + * the core for PHY changes */ +static void ugeth_phy_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + ucc_geth_private_t *ugeth = netdev_priv(dev); + + schedule_work(&ugeth->tq); + + mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ); +} + +/* Keep trying aneg for some time + * If, after GFAR_AN_TIMEOUT seconds, it has not + * finished, we switch to forced. 
+ * Either way, once the process has completed, we either + * request the interrupt, or switch the timer over to + * using ugeth_phy_timer to check status */ +static void ugeth_phy_startup_timer(unsigned long data) +{ + struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data; + ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev); + static int secondary = UGETH_AN_TIMEOUT; + int result; + + /* Configure the Auto-negotiation */ + result = mii_info->phyinfo->config_aneg(mii_info); + + /* If autonegotiation failed to start, and + * we haven't timed out, reset the timer, and return */ + if (result && secondary--) { + mod_timer(&ugeth->phy_info_timer, jiffies + HZ); + return; + } else if (result) { + /* Couldn't start autonegotiation. + * Try switching to forced */ + mii_info->autoneg = 0; + result = mii_info->phyinfo->config_aneg(mii_info); + + /* Forcing failed! Give up */ + if (result) { + ugeth_err("%s: Forcing failed!", mii_info->dev->name); + return; + } + } + + /* Kill the timer so it can be restarted */ + del_timer_sync(&ugeth->phy_info_timer); + + /* Grab the PHY interrupt, if necessary/possible */ + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { + if (request_irq(ugeth->ug_info->phy_interrupt, + phy_interrupt, + SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) { + ugeth_err("%s: Can't get IRQ %d (PHY)", + mii_info->dev->name, + ugeth->ug_info->phy_interrupt); + } else { + mii_configure_phy_interrupt(ugeth->mii_info, + MII_INTERRUPT_ENABLED); + return; + } + } + + /* Start the timer again, this time in order to + * handle a change in status */ + init_timer(&ugeth->phy_info_timer); + ugeth->phy_info_timer.function = &ugeth_phy_timer; + ugeth->phy_info_timer.data = (unsigned long)mii_info->dev; + mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ); +} + +/* Called when something needs to use the ethernet device */ +/* Returns 0 for success. */ +static int ucc_geth_open(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + int err; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + /* Test station address */ + if (dev->dev_addr[0] & ENET_GROUP_ADDR) { + ugeth_err("%s: Multicast address used for station address" + " - is this what you wanted?", __FUNCTION__); + return -EINVAL; + } + + err = ucc_geth_startup(ugeth); + if (err) { + ugeth_err("%s: Cannot configure net device, aborting.", + dev->name); + return err; + } + + err = adjust_enet_interface(ugeth); + if (err) { + ugeth_err("%s: Cannot configure net device, aborting.", + dev->name); + return err; + } + + /* Set MACSTNADDR1, MACSTNADDR2 */ + /* For more details see the hardware spec. 
*/ + init_mac_station_addr_regs(dev->dev_addr[0], + dev->dev_addr[1], + dev->dev_addr[2], + dev->dev_addr[3], + dev->dev_addr[4], + dev->dev_addr[5], + &ugeth->ug_regs->macstnaddr1, + &ugeth->ug_regs->macstnaddr2); + + err = init_phy(dev); + if (err) { + ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name); + return err; + } +#ifndef CONFIG_UGETH_NAPI + err = + request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0, + "UCC Geth", dev); + if (err) { + ugeth_err("%s: Cannot get IRQ for net device, aborting.", + dev->name); + ucc_geth_stop(ugeth); + return err; + } +#endif /* CONFIG_UGETH_NAPI */ + + /* Set up the PHY change work queue */ + INIT_WORK(&ugeth->tq, ugeth_phy_change, dev); + + init_timer(&ugeth->phy_info_timer); + ugeth->phy_info_timer.function = &ugeth_phy_startup_timer; + ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info; + mod_timer(&ugeth->phy_info_timer, jiffies + HZ); + + err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); + if (err) { + ugeth_err("%s: Cannot enable net device, aborting.", dev->name); + ucc_geth_stop(ugeth); + return err; + } + + netif_start_queue(dev); + + return err; +} + +/* Stops the kernel queue, and halts the controller */ +static int ucc_geth_close(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ucc_geth_stop(ugeth); + + /* Shutdown the PHY */ + if (ugeth->mii_info->phyinfo->close) + ugeth->mii_info->phyinfo->close(ugeth->mii_info); + + kfree(ugeth->mii_info); + + netif_stop_queue(dev); + + return 0; +} + +struct ethtool_ops ucc_geth_ethtool_ops = { + .get_settings = NULL, + .get_drvinfo = NULL, + .get_regs_len = NULL, + .get_regs = NULL, + .get_link = NULL, + .get_coalesce = NULL, + .set_coalesce = NULL, + .get_ringparam = NULL, + .set_ringparam = NULL, + .get_strings = NULL, + .get_stats_count = NULL, + .get_ethtool_stats = NULL, +}; + +static int ucc_geth_probe(struct device *device) +{ + struct platform_device *pdev = to_platform_device(device); + struct ucc_geth_platform_data *ugeth_pdata; + struct net_device *dev = NULL; + struct ucc_geth_private *ugeth = NULL; + struct ucc_geth_info *ug_info; + int err; + static int mii_mng_configured = 0; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data; + + ug_info = &ugeth_info[pdev->id]; + ug_info->uf_info.ucc_num = pdev->id; + ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock; + ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock; + ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr; + ug_info->uf_info.irq = platform_get_irq(pdev, 0); + ug_info->phy_address = ugeth_pdata->phy_id; + ug_info->enet_interface = ugeth_pdata->phy_interface; + ug_info->board_flags = ugeth_pdata->board_flags; + ug_info->phy_interrupt = ugeth_pdata->phy_interrupt; + + printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", + ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, + ug_info->uf_info.irq); + + if (ug_info == NULL) { + ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__, + pdev->id); + return -ENODEV; + } + + if (!mii_mng_configured) { + ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num); + mii_mng_configured = 1; + } + + /* Create an ethernet device instance */ + dev = alloc_etherdev(sizeof(*ugeth)); + + if (dev == NULL) + return -ENOMEM; + + ugeth = netdev_priv(dev); + spin_lock_init(&ugeth->lock); + + dev_set_drvdata(device, dev); + + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long)(ug_info->uf_info.regs); + + 
SET_MODULE_OWNER(dev); + SET_NETDEV_DEV(dev, device); + + /* Fill in the dev structure */ + dev->open = ucc_geth_open; + dev->hard_start_xmit = ucc_geth_start_xmit; + dev->tx_timeout = ucc_geth_timeout; + dev->watchdog_timeo = TX_TIMEOUT; +#ifdef CONFIG_UGETH_NAPI + dev->poll = ucc_geth_poll; + dev->weight = UCC_GETH_DEV_WEIGHT; +#endif /* CONFIG_UGETH_NAPI */ + dev->stop = ucc_geth_close; + dev->get_stats = ucc_geth_get_stats; +// dev->change_mtu = ucc_geth_change_mtu; + dev->mtu = 1500; + dev->set_multicast_list = ucc_geth_set_multi; + dev->ethtool_ops = &ucc_geth_ethtool_ops; + + err = register_netdev(dev); + if (err) { + ugeth_err("%s: Cannot register net device, aborting.", + dev->name); + free_netdev(dev); + return err; + } + + ugeth->ug_info = ug_info; + ugeth->dev = dev; + memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6); + + return 0; +} + +static int ucc_geth_remove(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct ucc_geth_private *ugeth = netdev_priv(dev); + + dev_set_drvdata(device, NULL); + ucc_geth_memclean(ugeth); + free_netdev(dev); + + return 0; +} + +/* Structure for a device driver */ +static struct device_driver ucc_geth_driver = { + .name = DRV_NAME, + .bus = &platform_bus_type, + .probe = ucc_geth_probe, + .remove = ucc_geth_remove, +}; + +static int __init ucc_geth_init(void) +{ + int i; + printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); + for (i = 0; i < 8; i++) + memcpy(&(ugeth_info[i]), &ugeth_primary_info, + sizeof(ugeth_primary_info)); + + return driver_register(&ucc_geth_driver); +} + +static void __exit ucc_geth_exit(void) +{ + driver_unregister(&ucc_geth_driver); +} + +module_init(ucc_geth_init); +module_exit(ucc_geth_exit); + +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h new file mode 100644 index 000000000000..005965f5dd9b --- /dev/null +++ b/drivers/net/ucc_geth.h @@ -0,0 +1,1339 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish <gridish@freescale.com> + * + * Description: + * Internal header file for UCC Gigabit Ethernet unit routines. + * + * Changelog: + * Jun 28, 2006 Li Yang <LeoLi@freescale.com> + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __UCC_GETH_H__ +#define __UCC_GETH_H__ + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/fsl_devices.h> + +#include <asm/immap_qe.h> +#include <asm/qe.h> + +#include <asm/ucc.h> +#include <asm/ucc_fast.h> + +#define NUM_TX_QUEUES 8 +#define NUM_RX_QUEUES 8 +#define NUM_BDS_IN_PREFETCHED_BDS 4 +#define TX_IP_OFFSET_ENTRY_MAX 8 +#define NUM_OF_PADDRS 4 +#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9 +#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8 + +typedef struct ucc_mii_mng { + u32 miimcfg; /* MII management configuration reg */ + u32 miimcom; /* MII management command reg */ + u32 miimadd; /* MII management address reg */ + u32 miimcon; /* MII management control reg */ + u32 miimstat; /* MII management status reg */ + u32 miimind; /* MII management indication reg */ +} __attribute__ ((packed)) ucc_mii_mng_t; + +typedef struct ucc_geth { + ucc_fast_t uccf; + + u32 maccfg1; /* mac configuration reg. 
1 */ + u32 maccfg2; /* mac configuration reg. 2 */ + u32 ipgifg; /* interframe gap reg. */ + u32 hafdup; /* half-duplex reg. */ + u8 res1[0x10]; + ucc_mii_mng_t miimng; /* MII management structure */ + u32 ifctl; /* interface control reg */ + u32 ifstat; /* interface status reg */ + u32 macstnaddr1; /* mac station address part 1 reg */ + u32 macstnaddr2; /* mac station address part 2 reg */ + u8 res2[0x8]; + u32 uempr; /* UCC Ethernet Mac parameter reg */ + u32 utbipar; /* UCC tbi address reg */ + u16 uescr; /* UCC Ethernet statistics control reg */ + u8 res3[0x180 - 0x15A]; + u32 tx64; /* Total number of frames (including bad + frames) transmitted that were exactly of the + minimal length (64 for untagged, 68 for + tagged, or with length exactly equal to the + parameter MINLength) */ + u32 tx127; /* Total number of frames (including bad + frames) transmitted that were between + MINLength (Including FCS length==4) and 127 + octets */ + u32 tx255; /* Total number of frames (including bad + frames) transmitted that were between 128 + (Including FCS length==4) and 255 octets */ + u32 rx64; /* Total number of frames received including + bad frames that were exactly of the minimal + length (64 bytes) */ + u32 rx127; /* Total number of frames (including bad + frames) received that were between MINLength + (Including FCS length==4) and 127 octets */ + u32 rx255; /* Total number of frames (including bad + frames) received that were between 128 + (Including FCS length==4) and 255 octets */ + u32 txok; /* Total number of octets residing in frames + that were involved in successful + transmission */ + u16 txcf; /* Total number of PAUSE control frames + transmitted by this MAC */ + u8 res4[0x2]; + u32 tmca; /* Total number of frames that were transmitted + successfully with the group address bit set + that are not broadcast frames */ + u32 tbca; /* Total number of frames transmitted + successfully that had destination address + field equal to the broadcast address */ + u32 rxfok; /* Total number of frames received OK */ + u32 rxbok; /* Total number of octets received OK */ + u32 rbyt; /* Total number of octets received including + octets in bad frames. 
Must be implemented in + HW because it includes octets in frames that + never even reach the UCC */ + u32 rmca; /* Total number of frames that were received + successfully with the group address bit set + that are not broadcast frames */ + u32 rbca; /* Total number of frames received successfully + that had destination address equal to the + broadcast address */ + u32 scar; /* Statistics carry register */ + u32 scam; /* Statistics carry mask register */ + u8 res5[0x200 - 0x1c4]; +} __attribute__ ((packed)) ucc_geth_t; + +/* UCC GETH TEMODER Register */ +#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics + */ +#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */ +#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4 + checksums */ +#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance + optimization + enhancement (mode1) */ +#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics + */ +#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues << + shift */ + +/* UCC GETH REMODER Register */ +#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx + statistics */ +#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable + extended + features */ +#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 ) /* vlan operation + tagged << shift */ +#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non + tagged << shift */ +#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift + */ +#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx + statistics */ +#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended + filtering + vs. + mpc82xx-like + filtering */ +#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues << + shift */ +#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable + dynamic max + frame length + */ +#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable + dynamic min + frame length + */ +#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4 + checksums */ +#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip + address to + 4-byte + boundary */ + +/* UCC GETH Event Register */ +#define UCCE_MPD 0x80000000 /* Magic packet + detection */ +#define UCCE_SCAR 0x40000000 +#define UCCE_GRA 0x20000000 /* Tx graceful + stop + complete */ +#define UCCE_CBPR 0x10000000 +#define UCCE_BSY 0x08000000 +#define UCCE_RXC 0x04000000 +#define UCCE_TXC 0x02000000 +#define UCCE_TXE 0x01000000 +#define UCCE_TXB7 0x00800000 +#define UCCE_TXB6 0x00400000 +#define UCCE_TXB5 0x00200000 +#define UCCE_TXB4 0x00100000 +#define UCCE_TXB3 0x00080000 +#define UCCE_TXB2 0x00040000 +#define UCCE_TXB1 0x00020000 +#define UCCE_TXB0 0x00010000 +#define UCCE_RXB7 0x00008000 +#define UCCE_RXB6 0x00004000 +#define UCCE_RXB5 0x00002000 +#define UCCE_RXB4 0x00001000 +#define UCCE_RXB3 0x00000800 +#define UCCE_RXB2 0x00000400 +#define UCCE_RXB1 0x00000200 +#define UCCE_RXB0 0x00000100 +#define UCCE_RXF7 0x00000080 +#define UCCE_RXF6 0x00000040 +#define UCCE_RXF5 0x00000020 +#define UCCE_RXF4 0x00000010 +#define UCCE_RXF3 0x00000008 +#define UCCE_RXF2 0x00000004 +#define UCCE_RXF1 0x00000002 +#define UCCE_RXF0 0x00000001 + +#define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0) +#define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0) + +#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\ + UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0) +#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\ + UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0) +#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | 
UCCE_RXF5 | UCCE_RXF4 |\ + UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0) +#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\ + UCCE_RXC | UCCE_TXC | UCCE_TXE) + +/* UCC GETH UPSMR (Protocol Specific Mode Register) */ +#define UPSMR_ECM 0x04000000 /* Enable CAM + Miss or + Enable + Filtering + Miss */ +#define UPSMR_HSE 0x02000000 /* Hardware + Statistics + Enable */ +#define UPSMR_PRO 0x00400000 /* Promiscuous*/ +#define UPSMR_CAP 0x00200000 /* CAM polarity + */ +#define UPSMR_RSH 0x00100000 /* Receive + Short Frames + */ +#define UPSMR_RPM 0x00080000 /* Reduced Pin + Mode + interfaces */ +#define UPSMR_R10M 0x00040000 /* RGMII/RMII + 10 Mode */ +#define UPSMR_RLPB 0x00020000 /* RMII + Loopback + Mode */ +#define UPSMR_TBIM 0x00010000 /* Ten-bit + Interface + Mode */ +#define UPSMR_RMM 0x00001000 /* RMII/RGMII + Mode */ +#define UPSMR_CAM 0x00000400 /* CAM Address + Matching */ +#define UPSMR_BRO 0x00000200 /* Broadcast + Address */ +#define UPSMR_RES1 0x00002000 /* Reserved + feild - must + be 1 */ + +/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ +#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control + Rx */ +#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control + Tx */ +#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable + synchronized + to Rx stream + */ +#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */ +#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable + synchronized + to Tx stream + */ +#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */ + +/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */ +#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble + Length << + shift */ +#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble + Length mask */ +#define MACCFG2_SRP 0x00000080 /* Soft Receive + Preamble */ +#define MACCFG2_STP 0x00000040 /* Soft + Transmit + Preamble */ +#define MACCFG2_RESERVED_1 0x00000020 /* Reserved - + must be set + to 1 */ +#define MACCFG2_LC 0x00000010 /* Length Check + */ +#define MACCFG2_MPE 0x00000008 /* Magic packet + detect */ +#define MACCFG2_FDX 0x00000001 /* Full Duplex */ +#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex + mask */ +#define MACCFG2_PAD_CRC 0x00000004 +#define MACCFG2_CRC_EN 0x00000002 +#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither + Padding + short frames + nor CRC */ +#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC + only */ +#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004 +#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode + (MII/RMII/RGMII + 10/100bps) */ +#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode + (GMII/TBI/RTB/RGMII + 1000bps ) */ +#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask + covering all + relevant + bits */ + +/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non + back-to-back + inter frame + gap part 1. + << shift */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non + back-to-back + inter frame + gap part 2. + << shift */ +#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Mimimum IFG + Enforcement + << shift */ +#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back + inter frame + gap << shift + */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back + inter frame gap part + 1. max val */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back + inter frame gap part + 2. 
max val */ +#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Mimimum IFG + Enforcement max val */ +#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter + frame gap max val */ +#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000 +#define IPGIFG_NBTB_IPG_MASK 0x007F0000 +#define IPGIFG_MIN_IFG_MASK 0x0000FF00 +#define IPGIFG_BTB_IPG_MASK 0x0000007F + +/* UCC GETH HAFDUP (Half Duplex Register) */ +#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate + Binary + Exponential + Backoff + Truncation + << shift */ +#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary + Exponential Backoff + Truncation max val */ +#define HALFDUP_ALT_BEB 0x00080000 /* Alternate + Binary + Exponential + Backoff */ +#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back + pressure no + backoff */ +#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */ +#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive + Defer */ +#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum + Retransmission + << shift */ +#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum + Retransmission max + val */ +#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision + Window << + shift */ +#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max + val */ +#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000 +#define HALFDUP_RETRANS_MASK 0x0000F000 +#define HALFDUP_COL_WINDOW_MASK 0x0000003F + +/* UCC GETH UCCS (Ethernet Status Register) */ +#define UCCS_BPR 0x02 /* Back pressure (in + half duplex mode) */ +#define UCCS_PAU 0x02 /* Pause state (in full + duplex mode) */ +#define UCCS_MPD 0x01 /* Magic Packet + Detected */ + +/* UCC GETH MIIMCFG (MII Management Configuration Register) */ +#define MIIMCFG_RESET_MANAGEMENT 0x80000000 /* Reset + management */ +#define MIIMCFG_NO_PREAMBLE 0x00000010 /* Preamble + suppress */ +#define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31) /* clock divide + << shift */ +#define MIIMCFG_CLOCK_DIVIDE_MAX 0xf /* clock divide max val + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_2 0x00000000 /* divide by 2 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001 /* divide by 4 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002 /* divide by 6 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003 /* divide by 8 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004 /* divide by 10 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005 /* divide by 14 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_16 0x00000008 /* divide by 16 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006 /* divide by 20 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007 /* divide by 28 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_32 0x00000009 /* divide by 32 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_48 0x0000000a /* divide by 48 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_64 0x0000000b /* divide by 64 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_80 0x0000000c /* divide by 80 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112 0x0000000d /* divide by + 112 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_160 0x0000000e /* divide by + 160 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_224 0x0000000f /* divide by + 224 */ + +/* UCC GETH MIIMCOM (MII Management Command Register) */ +#define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */ +#define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */ + +/* UCC GETH MIIMADD (MII Management Address Register) */ +#define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23) /* PHY Address + << shift */ +#define 
MIIMADD_PHY_REGISTER_SHIFT (31 - 31) /* PHY Register + << shift */ + +/* UCC GETH MIIMCON (MII Management Control Register) */ +#define MIIMCON_PHY_CONTROL_SHIFT (31 - 31) /* PHY Control + << shift */ +#define MIIMCON_PHY_STATUS_SHIFT (31 - 31) /* PHY Status + << shift */ + +/* UCC GETH MIIMIND (MII Management Indicator Register) */ +#define MIIMIND_NOT_VALID 0x00000004 /* Not valid */ +#define MIIMIND_SCAN 0x00000002 /* Scan in + progress */ +#define MIIMIND_BUSY 0x00000001 + +/* UCC GETH IFSTAT (Interface Status Register) */ +#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive + transmission + defer */ + +/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */ +#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station + address 6th + octet << + shift */ +#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station + address 5th + octet << + shift */ +#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station + address 4th + octet << + shift */ +#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station + address 3rd + octet << + shift */ + +/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */ +#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station + address 2nd + octet << + shift */ +#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station + address 1st + octet << + shift */ + +/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */ +#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time + value << + shift */ +#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended + pause time + value << + shift */ + +/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */ +#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address + << shift */ +#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address + mask */ + +/* UCC GETH UESCR (Ethernet Statistics Control Register) */ +#define UESCR_AUTOZ 0x8000 /* Automatically zero + addressed + statistical counter + values */ +#define UESCR_CLRCNT 0x4000 /* Clear all statistics + counters */ +#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max + Coalescing + Value << + shift */ +#define UESCR_SCOV_SHIFT (15 - 15) /* Status + Coalescing + Value << + shift */ + +/* UCC GETH UDSR (Data Synchronization Register) */ +#define UDSR_MAGIC 0x067E + +typedef struct ucc_geth_thread_data_tx { + u8 res0[104]; +} __attribute__ ((packed)) ucc_geth_thread_data_tx_t; + +typedef struct ucc_geth_thread_data_rx { + u8 res0[40]; +} __attribute__ ((packed)) ucc_geth_thread_data_rx_t; + +/* Send Queue Queue-Descriptor */ +typedef struct ucc_geth_send_queue_qd { + u32 bd_ring_base; /* pointer to BD ring base address */ + u8 res0[0x8]; + u32 last_bd_completed_address;/* initialize to last entry in BD ring */ + u8 res1[0x30]; +} __attribute__ ((packed)) ucc_geth_send_queue_qd_t; + +typedef struct ucc_geth_send_queue_mem_region { + ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES]; +} __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t; + +typedef struct ucc_geth_thread_tx_pram { + u8 res0[64]; +} __attribute__ ((packed)) ucc_geth_thread_tx_pram_t; + +typedef struct ucc_geth_thread_rx_pram { + u8 res0[128]; +} __attribute__ ((packed)) ucc_geth_thread_rx_pram_t; + +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96 + +typedef struct ucc_geth_scheduler { + u16 cpucount0; /* CPU packet counter */ + u16 cpucount1; /* CPU packet counter */ + u16 cecount0; /* QE packet counter */ + u16 cecount1; /* QE packet counter */ + u16 cpucount2; /* CPU 
packet counter */ + u16 cpucount3; /* CPU packet counter */ + u16 cecount2; /* QE packet counter */ + u16 cecount3; /* QE packet counter */ + u16 cpucount4; /* CPU packet counter */ + u16 cpucount5; /* CPU packet counter */ + u16 cecount4; /* QE packet counter */ + u16 cecount5; /* QE packet counter */ + u16 cpucount6; /* CPU packet counter */ + u16 cpucount7; /* CPU packet counter */ + u16 cecount6; /* QE packet counter */ + u16 cecount7; /* QE packet counter */ + u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */ + u32 rtsrshadow; /* temporary variable handled by QE */ + u32 time; /* temporary variable handled by QE */ + u32 ttl; /* temporary variable handled by QE */ + u32 mblinterval; /* max burst length interval */ + u16 nortsrbytetime; /* normalized value of byte time in tsr units */ + u8 fracsiz; /* radix 2 log value of denom. of + NorTSRByteTime */ + u8 res0[1]; + u8 strictpriorityq; /* Strict Priority Mask register */ + u8 txasap; /* Transmit ASAP register */ + u8 extrabw; /* Extra BandWidth register */ + u8 oldwfqmask; /* temporary variable handled by QE */ + u8 weightfactor[NUM_TX_QUEUES]; + /**< weight factor for queues */ + u32 minw; /* temporary variable handled by QE */ + u8 res1[0x70 - 0x64]; +} __attribute__ ((packed)) ucc_geth_scheduler_t; + +typedef struct ucc_geth_tx_firmware_statistics_pram { + u32 sicoltx; /* single collision */ + u32 mulcoltx; /* multiple collision */ + u32 latecoltxfr; /* late collision */ + u32 frabortduecol; /* frames aborted due to transmit collision */ + u32 frlostinmactxer; /* frames lost due to internal MAC error + transmission that are not counted on any + other counter */ + u32 carriersenseertx; /* carrier sense error */ + u32 frtxok; /* frames transmitted OK */ + u32 txfrexcessivedefer; /* frames with defferal time greater than + specified threshold */ + u32 txpkts256; /* total packets (including bad) between 256 + and 511 octets */ + u32 txpkts512; /* total packets (including bad) between 512 + and 1023 octets */ + u32 txpkts1024; /* total packets (including bad) between 1024 + and 1518 octets */ + u32 txpktsjumbo; /* total packets (including bad) between 1024 + and MAXLength octets */ +} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t; + +typedef struct ucc_geth_rx_firmware_statistics_pram { + u32 frrxfcser; /* frames with crc error */ + u32 fraligner; /* frames with alignment error */ + u32 inrangelenrxer; /* in range length error */ + u32 outrangelenrxer; /* out of range length error */ + u32 frtoolong; /* frame too long */ + u32 runt; /* runt */ + u32 verylongevent; /* very long event */ + u32 symbolerror; /* symbol error */ + u32 dropbsy; /* drop because of BD not ready */ + u8 res0[0x8]; + u32 mismatchdrop; /* drop because of MAC filtering (e.g. 
address + or type mismatch) */ + u32 underpkts; /* total frames less than 64 octets */ + u32 pkts256; /* total frames (including bad) between 256 and + 511 octets */ + u32 pkts512; /* total frames (including bad) between 512 and + 1023 octets */ + u32 pkts1024; /* total frames (including bad) between 1024 + and 1518 octets */ + u32 pktsjumbo; /* total frames (including bad) between 1024 + and MAXLength octets */ + u32 frlossinmacer; /* frames lost because of internal MAC error + that is not counted in any other counter */ + u32 pausefr; /* pause frames */ + u8 res1[0x4]; + u32 removevlan; /* total frames that had their VLAN tag removed + */ + u32 replacevlan; /* total frames that had their VLAN tag + replaced */ + u32 insertvlan; /* total frames that had their VLAN tag + inserted */ +} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t; + +typedef struct ucc_geth_rx_interrupt_coalescing_entry { + u32 interruptcoalescingmaxvalue; /* interrupt coalescing max + value */ + u32 interruptcoalescingcounter; /* interrupt coalescing counter, + initialize to + interruptcoalescingmaxvalue */ +} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t; + +typedef struct ucc_geth_rx_interrupt_coalescing_table { + ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES]; + /**< interrupt coalescing entry */ +} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t; + +typedef struct ucc_geth_rx_prefetched_bds { + qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ +} __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t; + +typedef struct ucc_geth_rx_bd_queues_entry { + u32 bdbaseptr; /* BD base pointer */ + u32 bdptr; /* BD pointer */ + u32 externalbdbaseptr; /* external BD base pointer */ + u32 externalbdptr; /* external BD pointer */ +} __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t; + +typedef struct ucc_geth_tx_global_pram { + u16 temoder; + u8 res0[0x38 - 0x02]; + u32 sqptr; /* a base pointer to send queue memory region */ + u32 schedulerbasepointer; /* a base pointer to scheduler memory + region */ + u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */ + u32 tstate; /* tx internal state. High byte contains + function code */ + u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; + u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */ + u32 tqptr; /* a base pointer to the Tx Queues Memory + Region */ + u8 res2[0x80 - 0x74]; +} __attribute__ ((packed)) ucc_geth_tx_global_pram_t; + +/* structure representing Extended Filtering Global Parameters in PRAM */ +typedef struct ucc_geth_exf_global_pram { + u32 l2pcdptr; /* individual address filter, high */ + u8 res0[0x10 - 0x04]; +} __attribute__ ((packed)) ucc_geth_exf_global_pram_t; + +typedef struct ucc_geth_rx_global_pram { + u32 remoder; /* ethernet mode reg. */ + u32 rqptr; /* base pointer to the Rx Queues Memory Region*/ + u32 res0[0x1]; + u8 res1[0x20 - 0xC]; + u16 typeorlen; /* cutoff point less than which, type/len field + is considered length */ + u8 res2[0x1]; + u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/ + u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */ + u8 res3[0x30 - 0x28]; + u32 intcoalescingptr; /* Interrupt coalescing table pointer */ + u8 res4[0x36 - 0x34]; + u8 rstate; /* rx internal state. High byte contains + function code */ + u8 res5[0x46 - 0x37]; + u16 mrblr; /* max receive buffer length reg. */ + u32 rbdqptr; /* base pointer to RxBD parameter table + description */ + u16 mflr; /* max frame length reg. 
*/ + u16 minflr; /* min frame length reg. */ + u16 maxd1; /* max dma1 length reg. */ + u16 maxd2; /* max dma2 length reg. */ + u32 ecamptr; /* external CAM address */ + u32 l2qt; /* VLAN priority mapping table. */ + u32 l3qt[0x8]; /* IP priority mapping table. */ + u16 vlantype; /* vlan type */ + u16 vlantci; /* default vlan tci */ + u8 addressfiltering[64]; /* address filtering data structure */ + u32 exfGlobalParam; /* base address for extended filtering global + parameters */ + u8 res6[0x100 - 0xC4]; /* Initialize to zero */ +} __attribute__ ((packed)) ucc_geth_rx_global_pram_t; + +#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 + +/* structure representing InitEnet command */ +typedef struct ucc_geth_init_pram { + u8 resinit1; + u8 resinit2; + u8 resinit3; + u8 resinit4; + u16 resinit5; + u8 res1[0x1]; + u8 largestexternallookupkeysize; + u32 rgftgfrxglobal; + u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */ + u8 res2[0x38 - 0x30]; + u32 txglobal; /* tx global */ + u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ + u8 res3[0x1]; +} __attribute__ ((packed)) ucc_geth_init_pram_t; + +#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) +#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) + +#define ENET_INIT_PARAM_RISC_MASK 0x0000003f +#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0 +#define ENET_INIT_PARAM_SNUM_MASK 0xff000000 +#define ENET_INIT_PARAM_SNUM_SHIFT 24 + +#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06 +#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30 +#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff +#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00 +#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400 + +/* structure representing 82xx Address Filtering Enet Address in PRAM */ +typedef struct ucc_geth_82xx_enet_address { + u8 res1[0x2]; + u16 h; /* address (MSB) */ + u16 m; /* address */ + u16 l; /* address (LSB) */ +} __attribute__ ((packed)) ucc_geth_82xx_enet_address_t; + +/* structure representing 82xx Address Filtering PRAM */ +typedef struct ucc_geth_82xx_address_filtering_pram { + u32 iaddr_h; /* individual address filter, high */ + u32 iaddr_l; /* individual address filter, low */ + u32 gaddr_h; /* group address filter, high */ + u32 gaddr_l; /* group address filter, low */ + ucc_geth_82xx_enet_address_t taddr; + ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS]; + u8 res0[0x40 - 0x38]; +} __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t; + +/* GETH Tx firmware statistics structure, used when calling + UCC_GETH_GetStatistics. */ +typedef struct ucc_geth_tx_firmware_statistics { + u32 sicoltx; /* single collision */ + u32 mulcoltx; /* multiple collision */ + u32 latecoltxfr; /* late collision */ + u32 frabortduecol; /* frames aborted due to transmit collision */ + u32 frlostinmactxer; /* frames lost due to internal MAC error + transmission that are not counted on any + other counter */ + u32 carriersenseertx; /* carrier sense error */ + u32 frtxok; /* frames transmitted OK */ + u32 txfrexcessivedefer; /* frames with defferal time greater than + specified threshold */ + u32 txpkts256; /* total packets (including bad) between 256 + and 511 octets */ + u32 txpkts512; /* total packets (including bad) between 512 + and 1023 octets */ + u32 txpkts1024; /* total packets (including bad) between 1024 + and 1518 octets */ + u32 txpktsjumbo; /* total packets (including bad) between 1024 + and MAXLength octets */ +} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t; + +/* GETH Rx firmware statistics structure, used when calling + UCC_GETH_GetStatistics. 
*/ +typedef struct ucc_geth_rx_firmware_statistics { + u32 frrxfcser; /* frames with crc error */ + u32 fraligner; /* frames with alignment error */ + u32 inrangelenrxer; /* in range length error */ + u32 outrangelenrxer; /* out of range length error */ + u32 frtoolong; /* frame too long */ + u32 runt; /* runt */ + u32 verylongevent; /* very long event */ + u32 symbolerror; /* symbol error */ + u32 dropbsy; /* drop because of BD not ready */ + u8 res0[0x8]; + u32 mismatchdrop; /* drop because of MAC filtering (e.g. address + or type mismatch) */ + u32 underpkts; /* total frames less than 64 octets */ + u32 pkts256; /* total frames (including bad) between 256 and + 511 octets */ + u32 pkts512; /* total frames (including bad) between 512 and + 1023 octets */ + u32 pkts1024; /* total frames (including bad) between 1024 + and 1518 octets */ + u32 pktsjumbo; /* total frames (including bad) between 1024 + and MAXLength octets */ + u32 frlossinmacer; /* frames lost because of internal MAC error + that is not counted in any other counter */ + u32 pausefr; /* pause frames */ + u8 res1[0x4]; + u32 removevlan; /* total frames that had their VLAN tag removed + */ + u32 replacevlan; /* total frames that had their VLAN tag + replaced */ + u32 insertvlan; /* total frames that had their VLAN tag + inserted */ +} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t; + +/* GETH hardware statistics structure, used when calling + UCC_GETH_GetStatistics. */ +typedef struct ucc_geth_hardware_statistics { + u32 tx64; /* Total number of frames (including bad + frames) transmitted that were exactly of the + minimal length (64 for un tagged, 68 for + tagged, or with length exactly equal to the + parameter MINLength */ + u32 tx127; /* Total number of frames (including bad + frames) transmitted that were between + MINLength (Including FCS length==4) and 127 + octets */ + u32 tx255; /* Total number of frames (including bad + frames) transmitted that were between 128 + (Including FCS length==4) and 255 octets */ + u32 rx64; /* Total number of frames received including + bad frames that were exactly of the minimal + length (64 bytes) */ + u32 rx127; /* Total number of frames (including bad + frames) received that were between MINLength + (Including FCS length==4) and 127 octets */ + u32 rx255; /* Total number of frames (including bad + frames) received that were between 128 + (Including FCS length==4) and 255 octets */ + u32 txok; /* Total number of octets residing in frames + that were involved in successful + transmission */ + u16 txcf; /* Total number of PAUSE control frames + transmitted by this MAC */ + u32 tmca; /* Total number of frames that were transmitted + successfully with the group address bit set + that are not broadcast frames */ + u32 tbca; /* Total number of frames transmitted + successfully that had destination address + field equal to the broadcast address */ + u32 rxfok; /* Total number of frames received OK */ + u32 rxbok; /* Total number of octets received OK */ + u32 rbyt; /* Total number of octets received including + octets in bad frames. 
Must be implemented in + HW because it includes octets in frames that + never even reach the UCC */ + u32 rmca; /* Total number of frames that were received + successfully with the group address bit set + that are not broadcast frames */ + u32 rbca; /* Total number of frames received successfully + that had destination address equal to the + broadcast address */ +} __attribute__ ((packed)) ucc_geth_hardware_statistics_t; + +/* UCC GETH Tx errors returned via TxConf callback */ +#define TX_ERRORS_DEF 0x0200 +#define TX_ERRORS_EXDEF 0x0100 +#define TX_ERRORS_LC 0x0080 +#define TX_ERRORS_RL 0x0040 +#define TX_ERRORS_RC_MASK 0x003C +#define TX_ERRORS_RC_SHIFT 2 +#define TX_ERRORS_UN 0x0002 +#define TX_ERRORS_CSL 0x0001 + +/* UCC GETH Rx errors returned via RxStore callback */ +#define RX_ERRORS_CMR 0x0200 +#define RX_ERRORS_M 0x0100 +#define RX_ERRORS_BC 0x0080 +#define RX_ERRORS_MC 0x0040 + +/* Transmit BD. These are in addition to values defined in uccf. */ +#define T_VID 0x003c0000 /* insert VLAN id index mask. */ +#define T_DEF (((u32) TX_ERRORS_DEF ) << 16) +#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16) +#define T_LC (((u32) TX_ERRORS_LC ) << 16) +#define T_RL (((u32) TX_ERRORS_RL ) << 16) +#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16) +#define T_UN (((u32) TX_ERRORS_UN ) << 16) +#define T_CSL (((u32) TX_ERRORS_CSL ) << 16) +#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \ + | T_UN | T_CSL) /* transmit errors to report */ + +/* Receive BD. These are in addition to values defined in uccf. */ +#define R_LG 0x00200000 /* Frame length violation. */ +#define R_NO 0x00100000 /* Non-octet aligned frame. */ +#define R_SH 0x00080000 /* Short frame. */ +#define R_CR 0x00040000 /* CRC error. */ +#define R_OV 0x00020000 /* Overrun. */ +#define R_IPCH 0x00010000 /* IP checksum check failed. 
*/ +#define R_CMR (((u32) RX_ERRORS_CMR ) << 16) +#define R_M (((u32) RX_ERRORS_M ) << 16) +#define R_BC (((u32) RX_ERRORS_BC ) << 16) +#define R_MC (((u32) RX_ERRORS_MC ) << 16) +#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to + report */ +#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \ + R_OV | R_IPCH) /* receive errors to discard */ + +/* Alignments */ +#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256 +#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128 +#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128 +#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64 +#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values + based on num of + threads, but always + using the maximum is + easier */ +#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32 +#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 4 /* This is a + guess */ +#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */ +#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */ +#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This + is a + guess + */ +#define UCC_GETH_RX_BD_RING_ALIGNMENT 32 +#define UCC_GETH_TX_BD_RING_ALIGNMENT 32 +#define UCC_GETH_MRBLR_ALIGNMENT 128 +#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4 +#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32 +#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64 + +#define UCC_GETH_TAD_EF 0x80 +#define UCC_GETH_TAD_V 0x40 +#define UCC_GETH_TAD_REJ 0x20 +#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2 +#define UCC_GETH_TAD_VTAG_OP_SHIFT 6 +#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20 +#define UCC_GETH_TAD_RQOS_SHIFT 0 +#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5 +#define UCC_GETH_TAD_CFI 0x10 + +#define UCC_GETH_VLAN_PRIORITY_MAX 8 +#define UCC_GETH_IP_PRIORITY_MAX 64 +#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8 +#define UCC_GETH_RX_BD_RING_SIZE_MIN 8 +#define UCC_GETH_TX_BD_RING_SIZE_MIN 2 + +#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD + +/* Driver definitions */ +#define TX_BD_RING_LEN 0x10 +#define RX_BD_RING_LEN 0x10 +#define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN + +#define TX_RING_MOD_MASK(size) (size-1) +#define RX_RING_MOD_MASK(size) (size-1) + +#define ENET_NUM_OCTETS_PER_ADDRESS 6 +#define ENET_GROUP_ADDR 0x01 /* Group address mask + for ethernet + addresses */ + +#define TX_TIMEOUT (1*HZ) +#define SKB_ALLOC_TIMEOUT 100000 +#define PHY_INIT_TIMEOUT 100000 +#define PHY_CHANGE_TIME 2 + +/* Fast Ethernet (10/100 Mbps) */ +#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size + */ +#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */ +#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */ +#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size + */ +#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ +#define UCC_GETH_UTFTT_INIT 128 +/* Gigabit Ethernet (1000 Mbps) */ +#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual + FIFO size */ +#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */ +#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */ +#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual + FIFO size */ +#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */ +#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */ + +#define UCC_GETH_REMODER_INIT 0 /* bits that must be + set */ +#define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must */ +#define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value + for this + register */ +#define 
UCC_GETH_MACCFG1_INIT 0 +#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) +#define UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT \ + (MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112) + +/* Ethernet speed */ +typedef enum enet_speed { + ENET_SPEED_10BT, /* 10 Base T */ + ENET_SPEED_100BT, /* 100 Base T */ + ENET_SPEED_1000BT /* 1000 Base T */ +} enet_speed_e; + +/* Ethernet Address Type. */ +typedef enum enet_addr_type { + ENET_ADDR_TYPE_INDIVIDUAL, + ENET_ADDR_TYPE_GROUP, + ENET_ADDR_TYPE_BROADCAST +} enet_addr_type_e; + +/* TBI / MII Set Register */ +typedef enum enet_tbi_mii_reg { + ENET_TBI_MII_CR = 0x00, /* Control (CR ) */ + ENET_TBI_MII_SR = 0x01, /* Status (SR ) */ + ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */ + ENET_TBI_MII_ANLPBPA = 0x05, /* AN link partner base page ability + (ANLPBPA) */ + ENET_TBI_MII_ANEX = 0x06, /* AN expansion (ANEX ) */ + ENET_TBI_MII_ANNPT = 0x07, /* AN next page transmit (ANNPT ) */ + ENET_TBI_MII_ANLPANP = 0x08, /* AN link partner ability next page + (ANLPANP) */ + ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */ + ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */ + ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */ +} enet_tbi_mii_reg_e; + +/* UCC GETH 82xx Ethernet Address Recognition Location */ +typedef enum ucc_geth_enet_address_recognition_location { + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station + address */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional + station + address + paddr1 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional + station + address + paddr2 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional + station + address + paddr3 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional + station + address + paddr4 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual + hash */ +} ucc_geth_enet_address_recognition_location_e; + +/* UCC GETH vlan operation tagged */ +typedef enum ucc_geth_vlan_operation_tagged { + UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */ + UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG + = 0x1, /* Tagged - replace vid portion of q tag */ + UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE + = 0x2, /* Tagged - if vid0 replace vid with default value */ + UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME + = 0x3 /* Tagged - extract q tag from frame */ +} ucc_geth_vlan_operation_tagged_e; + +/* UCC GETH vlan operation non-tagged */ +typedef enum ucc_geth_vlan_operation_non_tagged { + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */ + UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged - + q tag insert + */ +} ucc_geth_vlan_operation_non_tagged_e; + +/* UCC GETH Rx Quality of Service Mode */ +typedef enum ucc_geth_qos_mode { + UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */ + UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue + determined + by L2 + criteria */ + UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue + determined + by L3 + criteria */ +} ucc_geth_qos_mode_e; + +/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together + for combined functionality */ +typedef enum ucc_geth_statistics_gathering_mode { + UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No + statistics + gathering */ + UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable + hardware + 
statistics + gathering + */ + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable + firmware + tx + statistics + gathering + */ + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable + firmware + rx + statistics + gathering + */ +} ucc_geth_statistics_gathering_mode_e; + +/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */ +typedef enum ucc_geth_maccfg2_pad_and_crc_mode { + UCC_GETH_PAD_AND_CRC_MODE_NONE + = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding + short frames + nor CRC */ + UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY + = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append + CRC only */ + UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC = + MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC +} ucc_geth_maccfg2_pad_and_crc_mode_e; + +/* UCC GETH upsmr Flow Control Mode */ +typedef enum ucc_geth_flow_control_mode { + UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic + flow control + */ + UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY + = 0x00004000 /* Send pause frame when RxFIFO reaches its + emergency threshold */ +} ucc_geth_flow_control_mode_e; + +/* UCC GETH number of threads */ +typedef enum ucc_geth_num_of_threads { + UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */ + UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */ + UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */ + UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */ + UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */ +} ucc_geth_num_of_threads_e; + +/* UCC GETH number of station addresses */ +typedef enum ucc_geth_num_of_station_addresses { + UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */ + UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */ +} ucc_geth_num_of_station_addresses_e; + +typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS]; + +/* UCC GETH 82xx Ethernet Address Container */ +typedef struct enet_addr_container { + enet_addr_t address; /* ethernet address */ + ucc_geth_enet_address_recognition_location_e location; /* location in + 82xx address + recognition + hardware */ + struct list_head node; +} enet_addr_container_t; + +#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node) + +/* UCC GETH Termination Action Descriptor (TAD) structure. 
*/ +typedef struct ucc_geth_tad_params { + int rx_non_dynamic_extended_features_mode; + int reject_frame; + ucc_geth_vlan_operation_tagged_e vtag_op; + ucc_geth_vlan_operation_non_tagged_e vnontag_op; + ucc_geth_qos_mode_e rqos; + u8 vpri; + u16 vid; +} ucc_geth_tad_params_t; + +/* GETH protocol initialization structure */ +typedef struct ucc_geth_info { + ucc_fast_info_t uf_info; + u8 numQueuesTx; + u8 numQueuesRx; + int ipCheckSumCheck; + int ipCheckSumGenerate; + int rxExtendedFiltering; + u32 extendedFilteringChainPointer; + u16 typeorlen; + int dynamicMaxFrameLength; + int dynamicMinFrameLength; + u8 nonBackToBackIfgPart1; + u8 nonBackToBackIfgPart2; + u8 miminumInterFrameGapEnforcement; + u8 backToBackInterFrameGap; + int ipAddressAlignment; + int lengthCheckRx; + u32 mblinterval; + u16 nortsrbytetime; + u8 fracsiz; + u8 strictpriorityq; + u8 txasap; + u8 extrabw; + int miiPreambleSupress; + u8 altBebTruncation; + int altBeb; + int backPressureNoBackoff; + int noBackoff; + int excessDefer; + u8 maxRetransmission; + u8 collisionWindow; + int pro; + int cap; + int rsh; + int rlpb; + int cam; + int bro; + int ecm; + int receiveFlowControl; + u8 maxGroupAddrInHash; + u8 maxIndAddrInHash; + u8 prel; + u16 maxFrameLength; + u16 minFrameLength; + u16 maxD1Length; + u16 maxD2Length; + u16 vlantype; + u16 vlantci; + u32 ecamptr; + u32 eventRegMask; + u16 pausePeriod; + u16 extensionField; + u8 phy_address; + u32 board_flags; + u32 phy_interrupt; + u8 weightfactor[NUM_TX_QUEUES]; + u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; + u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; + u8 l3qt[UCC_GETH_IP_PRIORITY_MAX]; + u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX]; + u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; + u16 bdRingLenTx[NUM_TX_QUEUES]; + u16 bdRingLenRx[NUM_RX_QUEUES]; + enet_interface_e enet_interface; + ucc_geth_num_of_station_addresses_e numStationAddresses; + qe_fltr_largest_external_tbl_lookup_key_size_e + largestexternallookupkeysize; + ucc_geth_statistics_gathering_mode_e statisticsMode; + ucc_geth_vlan_operation_tagged_e vlanOperationTagged; + ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged; + ucc_geth_qos_mode_e rxQoSMode; + ucc_geth_flow_control_mode_e aufc; + ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc; + ucc_geth_num_of_threads_e numThreadsTx; + ucc_geth_num_of_threads_e numThreadsRx; + qe_risc_allocation_e riscTx; + qe_risc_allocation_e riscRx; +} ucc_geth_info_t; + +/* structure representing UCC GETH */ +typedef struct ucc_geth_private { + ucc_geth_info_t *ug_info; + ucc_fast_private_t *uccf; + struct net_device *dev; + struct net_device_stats stats; /* linux network statistics */ + ucc_geth_t *ug_regs; + ucc_geth_init_pram_t *p_init_enet_param_shadow; + ucc_geth_exf_global_pram_t *p_exf_glbl_param; + u32 exf_glbl_param_offset; + ucc_geth_rx_global_pram_t *p_rx_glbl_pram; + u32 rx_glbl_pram_offset; + ucc_geth_tx_global_pram_t *p_tx_glbl_pram; + u32 tx_glbl_pram_offset; + ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg; + u32 send_q_mem_reg_offset; + ucc_geth_thread_data_tx_t *p_thread_data_tx; + u32 thread_dat_tx_offset; + ucc_geth_thread_data_rx_t *p_thread_data_rx; + u32 thread_dat_rx_offset; + ucc_geth_scheduler_t *p_scheduler; + u32 scheduler_offset; + ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram; + u32 tx_fw_statistics_pram_offset; + ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram; + u32 rx_fw_statistics_pram_offset; + ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl; + u32 rx_irq_coalescing_tbl_offset; + 
ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl; + u32 rx_bd_qs_tbl_offset; + u8 *p_tx_bd_ring[NUM_TX_QUEUES]; + u32 tx_bd_ring_offset[NUM_TX_QUEUES]; + u8 *p_rx_bd_ring[NUM_RX_QUEUES]; + u32 rx_bd_ring_offset[NUM_RX_QUEUES]; + u8 *confBd[NUM_TX_QUEUES]; + u8 *txBd[NUM_TX_QUEUES]; + u8 *rxBd[NUM_RX_QUEUES]; + int badFrame[NUM_RX_QUEUES]; + u16 cpucount[NUM_TX_QUEUES]; + volatile u16 *p_cpucount[NUM_TX_QUEUES]; + int indAddrRegUsed[NUM_OF_PADDRS]; + enet_addr_t paddr[NUM_OF_PADDRS]; + u8 numGroupAddrInHash; + u8 numIndAddrInHash; + u8 numIndAddrInReg; + int rx_extended_features; + int rx_non_dynamic_extended_features; + struct list_head conf_skbs; + struct list_head group_hash_q; + struct list_head ind_hash_q; + u32 saved_uccm; + spinlock_t lock; + /* pointers to arrays of skbuffs for tx and rx */ + struct sk_buff **tx_skbuff[NUM_TX_QUEUES]; + struct sk_buff **rx_skbuff[NUM_RX_QUEUES]; + /* indices pointing to the next free sbk in skb arrays */ + u16 skb_curtx[NUM_TX_QUEUES]; + u16 skb_currx[NUM_RX_QUEUES]; + /* index of the first skb which hasn't been transmitted yet. */ + u16 skb_dirtytx[NUM_TX_QUEUES]; + + struct work_struct tq; + struct timer_list phy_info_timer; + struct ugeth_mii_info *mii_info; + int oldspeed; + int oldduplex; + int oldlink; +} ucc_geth_private_t; + +#endif /* __UCC_GETH_H__ */ diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c new file mode 100644 index 000000000000..f91028c5386d --- /dev/null +++ b/drivers/net/ucc_geth_phy.c @@ -0,0 +1,801 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish <gridish@freescale.com> + * + * Description: + * UCC GETH Driver -- PHY handling + * + * Changelog: + * Jun 28, 2006 Li Yang <LeoLi@freescale.com> + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/ethtool.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/uaccess.h> + +#include "ucc_geth.h" +#include "ucc_geth_phy.h" +#include <platforms/83xx/mpc8360e_pb.h> + +#define ugphy_printk(level, format, arg...) \ + printk(level format "\n", ## arg) + +#define ugphy_dbg(format, arg...) \ + ugphy_printk(KERN_DEBUG, format , ## arg) +#define ugphy_err(format, arg...) \ + ugphy_printk(KERN_ERR, format , ## arg) +#define ugphy_info(format, arg...) \ + ugphy_printk(KERN_INFO, format , ## arg) +#define ugphy_warn(format, arg...) \ + ugphy_printk(KERN_WARNING, format , ## arg) + +#ifdef UGETH_VERBOSE_DEBUG +#define ugphy_vdbg ugphy_dbg +#else +#define ugphy_vdbg(fmt, args...) 
do { } while (0) +#endif /* UGETH_VERBOSE_DEBUG */ + +static void config_genmii_advert(struct ugeth_mii_info *mii_info); +static void genmii_setup_forced(struct ugeth_mii_info *mii_info); +static void genmii_restart_aneg(struct ugeth_mii_info *mii_info); +static int gbit_config_aneg(struct ugeth_mii_info *mii_info); +static int genmii_config_aneg(struct ugeth_mii_info *mii_info); +static int genmii_update_link(struct ugeth_mii_info *mii_info); +static int genmii_read_status(struct ugeth_mii_info *mii_info); +u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum); +void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val); + +static u8 *bcsr_regs = NULL; + +/* Write value to the PHY for this device to the register at regnum, */ +/* waiting until the write is done before it returns. All PHY */ +/* configuration has to be done through the TSEC1 MIIM regs */ +void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_mii_mng_t *mii_regs; + enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum; + u32 tmp_reg; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irq(&ugeth->lock); + + mii_regs = ugeth->mii_info->mii_regs; + + /* Set this UCC to be the master of the MII management */ + ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num); + + /* Stop the MII management read cycle */ + out_be32(&mii_regs->miimcom, 0); + /* Setting up the MII Management Address Register */ + tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg; + out_be32(&mii_regs->miimadd, tmp_reg); + + /* Setting up the MII Management Control Register with the value */ + out_be32(&mii_regs->miimcon, (u32) value); + + /* Wait till MII management write is complete */ + while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY) + cpu_relax(); + + spin_unlock_irq(&ugeth->lock); + + udelay(10000); +} + +/* Reads from register regnum in the PHY for device dev, */ +/* returning the value. Clears miimcom first. 
All PHY */ +/* configuration has to be done through the TSEC1 MIIM regs */ +int read_phy_reg(struct net_device *dev, int mii_id, int regnum) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_mii_mng_t *mii_regs; + enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum; + u32 tmp_reg; + u16 value; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irq(&ugeth->lock); + + mii_regs = ugeth->mii_info->mii_regs; + + /* Setting up the MII Mangement Address Register */ + tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg; + out_be32(&mii_regs->miimadd, tmp_reg); + + /* Perform an MII management read cycle */ + out_be32(&mii_regs->miimcom, MIIMCOM_READ_CYCLE); + + /* Wait till MII management write is complete */ + while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY) + cpu_relax(); + + udelay(10000); + + /* Read MII management status */ + value = (u16) in_be32(&mii_regs->miimstat); + out_be32(&mii_regs->miimcom, 0); + if (value == 0xffff) + ugphy_warn("read wrong value : mii_id %d,mii_reg %d, base %08x", + mii_id, mii_reg, (u32) & (mii_regs->miimcfg)); + + spin_unlock_irq(&ugeth->lock); + + return (value); +} + +void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->phyinfo->ack_interrupt) + mii_info->phyinfo->ack_interrupt(mii_info); +} + +void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info, + u32 interrupts) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + mii_info->interrupts = interrupts; + if (mii_info->phyinfo->config_intr) + mii_info->phyinfo->config_intr(mii_info); +} + +/* Writes MII_ADVERTISE with the appropriate values, after + * sanitizing advertise to make sure only supported features + * are advertised + */ +static void config_genmii_advert(struct ugeth_mii_info *mii_info) +{ + u32 advertise; + u16 adv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Only allow advertising what this PHY supports */ + mii_info->advertising &= mii_info->phyinfo->features; + advertise = mii_info->advertising; + + /* Setup standard advertisement */ + adv = phy_read(mii_info, MII_ADVERTISE); + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (advertise & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + phy_write(mii_info, MII_ADVERTISE, adv); +} + +static void genmii_setup_forced(struct ugeth_mii_info *mii_info) +{ + u16 ctrl; + u32 features = mii_info->phyinfo->features; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + ctrl = phy_read(mii_info, MII_BMCR); + + ctrl &= + ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + ctrl |= BMCR_RESET; + + switch (mii_info->speed) { + case SPEED_1000: + if (features & (SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full)) { + ctrl |= BMCR_SPEED1000; + break; + } + mii_info->speed = SPEED_100; + case SPEED_100: + if (features & (SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full)) { + ctrl |= BMCR_SPEED100; + break; + } + mii_info->speed = SPEED_10; + case SPEED_10: + if (features & (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full)) + break; + default: /* Unsupported speed! 
*/ + ugphy_err("%s: Bad speed!", mii_info->dev->name); + break; + } + + phy_write(mii_info, MII_BMCR, ctrl); +} + +/* Enable and Restart Autonegotiation */ +static void genmii_restart_aneg(struct ugeth_mii_info *mii_info) +{ + u16 ctl; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + ctl = phy_read(mii_info, MII_BMCR); + ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); + phy_write(mii_info, MII_BMCR, ctl); +} + +static int gbit_config_aneg(struct ugeth_mii_info *mii_info) +{ + u16 adv; + u32 advertise; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->autoneg) { + /* Configure the ADVERTISE register */ + config_genmii_advert(mii_info); + advertise = mii_info->advertising; + + adv = phy_read(mii_info, MII_1000BASETCONTROL); + adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | + MII_1000BASETCONTROL_HALFDUPLEXCAP); + if (advertise & SUPPORTED_1000baseT_Half) + adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; + if (advertise & SUPPORTED_1000baseT_Full) + adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; + phy_write(mii_info, MII_1000BASETCONTROL, adv); + + /* Start/Restart aneg */ + genmii_restart_aneg(mii_info); + } else + genmii_setup_forced(mii_info); + + return 0; +} + +static int genmii_config_aneg(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->autoneg) { + config_genmii_advert(mii_info); + genmii_restart_aneg(mii_info); + } else + genmii_setup_forced(mii_info); + + return 0; +} + +static int genmii_update_link(struct ugeth_mii_info *mii_info) +{ + u16 status; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Do a fake read */ + phy_read(mii_info, MII_BMSR); + + /* Read link and autonegotiation status */ + status = phy_read(mii_info, MII_BMSR); + if ((status & BMSR_LSTATUS) == 0) + mii_info->link = 0; + else + mii_info->link = 1; + + /* If we are autonegotiating, and not done, + * return an error */ + if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE)) + return -EAGAIN; + + return 0; +} + +static int genmii_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + if (mii_info->autoneg) { + status = phy_read(mii_info, MII_LPA); + + if (status & (LPA_10FULL | LPA_100FULL)) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + if (status & (LPA_100FULL | LPA_100HALF)) + mii_info->speed = SPEED_100; + else + mii_info->speed = SPEED_10; + mii_info->pause = 0; + } + /* On non-aneg, we assume what we put in BMCR is the speed, + * though magic-aneg shouldn't prevent this case from occurring + */ + + return 0; +} + +static int marvell_init(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + phy_write(mii_info, 0x14, 0x0cd2); + phy_write(mii_info, MII_BMCR, + phy_read(mii_info, MII_BMCR) | BMCR_RESET); + msleep(4000); + + return 0; +} + +static int marvell_config_aneg(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* The Marvell PHY has an errata which requires + * that certain registers get written in order + * to restart autonegotiation */ + phy_write(mii_info, MII_BMCR, BMCR_RESET); + + phy_write(mii_info, 0x1d, 0x1f); + phy_write(mii_info, 0x1e, 0x200c); + phy_write(mii_info, 0x1d, 0x5); + phy_write(mii_info, 0x1e, 0); + phy_write(mii_info, 0x1e, 0x100); + + gbit_config_aneg(mii_info); + + return 0; +} + +static int marvell_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + 
ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + /* If the link is up, read the speed and duplex */ + /* If we aren't autonegotiating, assume speeds + * are as set */ + if (mii_info->autoneg && mii_info->link) { + int speed; + status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS); + + /* Get the duplexity */ + if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + + /* Get the speed */ + speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK; + switch (speed) { + case MII_M1011_PHY_SPEC_STATUS_1000: + mii_info->speed = SPEED_1000; + break; + case MII_M1011_PHY_SPEC_STATUS_100: + mii_info->speed = SPEED_100; + break; + default: + mii_info->speed = SPEED_10; + break; + } + mii_info->pause = 0; + } + + return 0; +} + +static int marvell_ack_interrupt(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Clear the interrupts by reading the reg */ + phy_read(mii_info, MII_M1011_IEVENT); + + return 0; +} + +static int marvell_config_intr(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->interrupts == MII_INTERRUPT_ENABLED) + phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT); + else + phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR); + + return 0; +} + +static int cis820x_init(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + phy_write(mii_info, MII_CIS8201_AUX_CONSTAT, + MII_CIS8201_AUXCONSTAT_INIT); + phy_write(mii_info, MII_CIS8201_EXT_CON1, MII_CIS8201_EXTCON1_INIT); + + return 0; +} + +static int cis820x_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + /* If the link is up, read the speed and duplex */ + /* If we aren't autonegotiating, assume speeds + * are as set */ + if (mii_info->autoneg && mii_info->link) { + int speed; + + status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT); + if (status & MII_CIS8201_AUXCONSTAT_DUPLEX) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + + speed = status & MII_CIS8201_AUXCONSTAT_SPEED; + + switch (speed) { + case MII_CIS8201_AUXCONSTAT_GBIT: + mii_info->speed = SPEED_1000; + break; + case MII_CIS8201_AUXCONSTAT_100: + mii_info->speed = SPEED_100; + break; + default: + mii_info->speed = SPEED_10; + break; + } + } + + return 0; +} + +static int cis820x_ack_interrupt(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + phy_read(mii_info, MII_CIS8201_ISTAT); + + return 0; +} + +static int cis820x_config_intr(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->interrupts == MII_INTERRUPT_ENABLED) + phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK); + else + phy_write(mii_info, MII_CIS8201_IMASK, 0); + + return 0; +} + +#define DM9161_DELAY 10 + +static int dm9161_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + /* If the link is up, read the speed and duplex */ + /* If we aren't autonegotiating, assume speeds + * are as set */ + if (mii_info->autoneg && mii_info->link) { + 
status = phy_read(mii_info, MII_DM9161_SCSR); + if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H)) + mii_info->speed = SPEED_100; + else + mii_info->speed = SPEED_10; + + if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F)) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + } + + return 0; +} + +static int dm9161_config_aneg(struct ugeth_mii_info *mii_info) +{ + struct dm9161_private *priv = mii_info->priv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (0 == priv->resetdone) + return -EAGAIN; + + return 0; +} + +static void dm9161_timer(unsigned long data) +{ + struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data; + struct dm9161_private *priv = mii_info->priv; + u16 status = phy_read(mii_info, MII_BMSR); + + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (status & BMSR_ANEGCOMPLETE) { + priv->resetdone = 1; + } else + mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ); +} + +static int dm9161_init(struct ugeth_mii_info *mii_info) +{ + struct dm9161_private *priv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Allocate the private data structure */ + priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL); + + if (NULL == priv) + return -ENOMEM; + + mii_info->priv = priv; + + /* Reset is not done yet */ + priv->resetdone = 0; + + phy_write(mii_info, MII_BMCR, + phy_read(mii_info, MII_BMCR) | BMCR_RESET); + + phy_write(mii_info, MII_BMCR, + phy_read(mii_info, MII_BMCR) & ~BMCR_ISOLATE); + + config_genmii_advert(mii_info); + /* Start/Restart aneg */ + genmii_config_aneg(mii_info); + + /* Start a timer for DM9161_DELAY seconds to wait + * for the PHY to be ready */ + init_timer(&priv->timer); + priv->timer.function = &dm9161_timer; + priv->timer.data = (unsigned long)mii_info; + mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ); + + return 0; +} + +static void dm9161_close(struct ugeth_mii_info *mii_info) +{ + struct dm9161_private *priv = mii_info->priv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + del_timer_sync(&priv->timer); + kfree(priv); +} + +static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info) +{ +/* FIXME: These lines are for BUG fixing in the mpc8325. +Remove this from here when it's fixed */ + if (bcsr_regs == NULL) + bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE); + bcsr_regs[14] |= 0x40; + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Clear the interrupts by reading the reg */ + phy_read(mii_info, MII_DM9161_INTR); + + + return 0; +} + +static int dm9161_config_intr(struct ugeth_mii_info *mii_info) +{ +/* FIXME: These lines are for BUG fixing in the mpc8325. 
+Remove this from here when it's fixed */ + if (bcsr_regs == NULL) { + bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE); + bcsr_regs[14] &= ~0x40; + } + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->interrupts == MII_INTERRUPT_ENABLED) + phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT); + else + phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP); + + return 0; +} + +/* Cicada 820x */ +static struct phy_info phy_info_cis820x = { + .phy_id = 0x000fc440, + .name = "Cicada Cis8204", + .phy_id_mask = 0x000fffc0, + .features = MII_GBIT_FEATURES, + .init = &cis820x_init, + .config_aneg = &gbit_config_aneg, + .read_status = &cis820x_read_status, + .ack_interrupt = &cis820x_ack_interrupt, + .config_intr = &cis820x_config_intr, +}; + +static struct phy_info phy_info_dm9161 = { + .phy_id = 0x0181b880, + .phy_id_mask = 0x0ffffff0, + .name = "Davicom DM9161E", + .init = dm9161_init, + .config_aneg = dm9161_config_aneg, + .read_status = dm9161_read_status, + .close = dm9161_close, +}; + +static struct phy_info phy_info_dm9161a = { + .phy_id = 0x0181b8a0, + .phy_id_mask = 0x0ffffff0, + .name = "Davicom DM9161A", + .features = MII_BASIC_FEATURES, + .init = dm9161_init, + .config_aneg = dm9161_config_aneg, + .read_status = dm9161_read_status, + .ack_interrupt = dm9161_ack_interrupt, + .config_intr = dm9161_config_intr, + .close = dm9161_close, +}; + +static struct phy_info phy_info_marvell = { + .phy_id = 0x01410c00, + .phy_id_mask = 0xffffff00, + .name = "Marvell 88E11x1", + .features = MII_GBIT_FEATURES, + .init = &marvell_init, + .config_aneg = &marvell_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, +}; + +static struct phy_info phy_info_genmii = { + .phy_id = 0x00000000, + .phy_id_mask = 0x00000000, + .name = "Generic MII", + .features = MII_BASIC_FEATURES, + .config_aneg = genmii_config_aneg, + .read_status = genmii_read_status, +}; + +static struct phy_info *phy_info[] = { + &phy_info_cis820x, + &phy_info_marvell, + &phy_info_dm9161, + &phy_info_dm9161a, + &phy_info_genmii, + NULL +}; + +u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum) +{ + u16 retval; + unsigned long flags; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irqsave(&mii_info->mdio_lock, flags); + retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum); + spin_unlock_irqrestore(&mii_info->mdio_lock, flags); + + return retval; +} + +void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val) +{ + unsigned long flags; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irqsave(&mii_info->mdio_lock, flags); + mii_info->mdio_write(mii_info->dev, mii_info->mii_id, regnum, val); + spin_unlock_irqrestore(&mii_info->mdio_lock, flags); +} + +/* Use the PHY ID registers to determine what type of PHY is attached + * to device dev. 
return a struct phy_info structure describing that PHY + */ +struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info) +{ + u16 phy_reg; + u32 phy_ID; + int i; + struct phy_info *theInfo = NULL; + struct net_device *dev = mii_info->dev; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Grab the bits from PHYIR1, and put them in the upper half */ + phy_reg = phy_read(mii_info, MII_PHYSID1); + phy_ID = (phy_reg & 0xffff) << 16; + + /* Grab the bits from PHYIR2, and put them in the lower half */ + phy_reg = phy_read(mii_info, MII_PHYSID2); + phy_ID |= (phy_reg & 0xffff); + + /* loop through all the known PHY types, and find one that */ + /* matches the ID we read from the PHY. */ + for (i = 0; phy_info[i]; i++) + if (phy_info[i]->phy_id == (phy_ID & phy_info[i]->phy_id_mask)){ + theInfo = phy_info[i]; + break; + } + + /* This shouldn't happen, as we have generic PHY support */ + if (theInfo == NULL) { + ugphy_info("%s: PHY id %x is not supported!", dev->name, + phy_ID); + return NULL; + } else { + ugphy_info("%s: PHY is %s (%x)", dev->name, theInfo->name, + phy_ID); + } + + return theInfo; +} diff --git a/drivers/net/ucc_geth_phy.h b/drivers/net/ucc_geth_phy.h new file mode 100644 index 000000000000..2f98b8f1bb0a --- /dev/null +++ b/drivers/net/ucc_geth_phy.h @@ -0,0 +1,217 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish <gridish@freescale.com> + * + * Description: + * UCC GETH Driver -- PHY handling + * + * Changelog: + * Jun 28, 2006 Li Yang <LeoLi@freescale.com> + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __UCC_GETH_PHY_H__ +#define __UCC_GETH_PHY_H__ + +#define MII_end ((u32)-2) +#define MII_read ((u32)-1) + +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_NOTVALID 0x00000004 + +#define UGETH_AN_TIMEOUT 2000 + +/* 1000BT control (Marvell & BCM54xx at least) */ +#define MII_1000BASETCONTROL 0x09 +#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200 +#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100 + +/* Cicada Extended Control Register 1 */ +#define MII_CIS8201_EXT_CON1 0x17 +#define MII_CIS8201_EXTCON1_INIT 0x0000 + +/* Cicada Interrupt Mask Register */ +#define MII_CIS8201_IMASK 0x19 +#define MII_CIS8201_IMASK_IEN 0x8000 +#define MII_CIS8201_IMASK_SPEED 0x4000 +#define MII_CIS8201_IMASK_LINK 0x2000 +#define MII_CIS8201_IMASK_DUPLEX 0x1000 +#define MII_CIS8201_IMASK_MASK 0xf000 + +/* Cicada Interrupt Status Register */ +#define MII_CIS8201_ISTAT 0x1a +#define MII_CIS8201_ISTAT_STATUS 0x8000 +#define MII_CIS8201_ISTAT_SPEED 0x4000 +#define MII_CIS8201_ISTAT_LINK 0x2000 +#define MII_CIS8201_ISTAT_DUPLEX 0x1000 + +/* Cicada Auxiliary Control/Status Register */ +#define MII_CIS8201_AUX_CONSTAT 0x1c +#define MII_CIS8201_AUXCONSTAT_INIT 0x0004 +#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020 +#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018 +#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010 +#define MII_CIS8201_AUXCONSTAT_100 0x0008 + +/* 88E1011 PHY Status Register */ +#define MII_M1011_PHY_SPEC_STATUS 0x11 +#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000 +#define MII_M1011_PHY_SPEC_STATUS_100 0x4000 +#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 +#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 +#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 +#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400 + +#define MII_M1011_IEVENT 0x13 +#define MII_M1011_IEVENT_CLEAR 0x0000 + +#define MII_M1011_IMASK 0x12 +#define MII_M1011_IMASK_INIT 0x6400 +#define MII_M1011_IMASK_CLEAR 0x0000 + +#define MII_DM9161_SCR 0x10 +#define MII_DM9161_SCR_INIT 0x0610 + +/* DM9161 Specified Configuration and Status Register */ +#define MII_DM9161_SCSR 0x11 +#define MII_DM9161_SCSR_100F 0x8000 +#define MII_DM9161_SCSR_100H 0x4000 +#define MII_DM9161_SCSR_10F 0x2000 +#define MII_DM9161_SCSR_10H 0x1000 + +/* DM9161 Interrupt Register */ +#define MII_DM9161_INTR 0x15 +#define MII_DM9161_INTR_PEND 0x8000 +#define MII_DM9161_INTR_DPLX_MASK 0x0800 +#define MII_DM9161_INTR_SPD_MASK 0x0400 +#define MII_DM9161_INTR_LINK_MASK 0x0200 +#define MII_DM9161_INTR_MASK 0x0100 +#define MII_DM9161_INTR_DPLX_CHANGE 0x0010 +#define MII_DM9161_INTR_SPD_CHANGE 0x0008 +#define MII_DM9161_INTR_LINK_CHANGE 0x0004 +#define MII_DM9161_INTR_INIT 0x0000 +#define MII_DM9161_INTR_STOP \ +(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \ + | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK) + +/* DM9161 10BT Configuration/Status */ +#define MII_DM9161_10BTCSR 0x12 +#define MII_DM9161_10BTCSR_INIT 0x7800 + +#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \ + SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | \ + SUPPORTED_100baseT_Full | \ + SUPPORTED_Autoneg | \ + SUPPORTED_TP | \ + SUPPORTED_MII) + +#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \ + SUPPORTED_1000baseT_Half | \ + SUPPORTED_1000baseT_Full) + +#define MII_READ_COMMAND 0x00000001 + +#define MII_INTERRUPT_DISABLED 0x0 +#define MII_INTERRUPT_ENABLED 0x1 +/* Taken from mii_if_info and sungem_phy.h */ +struct ugeth_mii_info { + /* Information about the PHY type */ + /* And management functions */ + struct phy_info *phyinfo; + + ucc_mii_mng_t *mii_regs; + + /* forced speed & 
duplex (no autoneg) + * partner speed & duplex & pause (autoneg) + */ + int speed; + int duplex; + int pause; + + /* The most recently read link state */ + int link; + + /* Enabled Interrupts */ + u32 interrupts; + + u32 advertising; + int autoneg; + int mii_id; + + /* private data pointer */ + /* For use by PHYs to maintain extra state */ + void *priv; + + /* Provided by host chip */ + struct net_device *dev; + + /* A lock to ensure that only one thing can read/write + * the MDIO bus at a time */ + spinlock_t mdio_lock; + + /* Provided by ethernet driver */ + int (*mdio_read) (struct net_device * dev, int mii_id, int reg); + void (*mdio_write) (struct net_device * dev, int mii_id, int reg, + int val); +}; + +/* struct phy_info: a structure which defines attributes for a PHY + * + * id will contain a number which represents the PHY. During + * startup, the driver will poll the PHY to find out what its + * UID--as defined by registers 2 and 3--is. The 32-bit result + * gotten from the PHY will be ANDed with phy_id_mask to + * discard any bits which may change based on revision numbers + * unimportant to functionality + * + * There are 6 commands which take a ugeth_mii_info structure. + * Each PHY must declare config_aneg, and read_status. + */ +struct phy_info { + u32 phy_id; + char *name; + unsigned int phy_id_mask; + u32 features; + + /* Called to initialize the PHY */ + int (*init) (struct ugeth_mii_info * mii_info); + + /* Called to suspend the PHY for power */ + int (*suspend) (struct ugeth_mii_info * mii_info); + + /* Reconfigures autonegotiation (or disables it) */ + int (*config_aneg) (struct ugeth_mii_info * mii_info); + + /* Determines the negotiated speed and duplex */ + int (*read_status) (struct ugeth_mii_info * mii_info); + + /* Clears any pending interrupts */ + int (*ack_interrupt) (struct ugeth_mii_info * mii_info); + + /* Enables or disables interrupts */ + int (*config_intr) (struct ugeth_mii_info * mii_info); + + /* Clears up any memory if needed */ + void (*close) (struct ugeth_mii_info * mii_info); +}; + +struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info); +void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value); +int read_phy_reg(struct net_device *dev, int mii_id, int regnum); +void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info); +void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info, + u32 interrupts); + +struct dm9161_private { + struct timer_list timer; + int resetdone; +}; + +#endif /* __UCC_GETH_PHY_H__ */ diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index d3d0ec970318..ae971080e2e4 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -30,8 +30,8 @@ */ #define DRV_NAME "via-rhine" -#define DRV_VERSION "1.4.0" -#define DRV_RELDATE "June-27-2006" +#define DRV_VERSION "1.4.1" +#define DRV_RELDATE "July-24-2006" /* A few user-configurable values. @@ -44,6 +44,10 @@ static int max_interrupt_work = 20; Setting to > 1518 effectively disables this feature. */ static int rx_copybreak; +/* Work-around for broken BIOSes: they are unable to get the chip back out of + power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ +static int avoid_D3; + /* * In case you are looking for 'options[]' or 'full_duplex[]', they * are gone. Use ethtool(8) instead. @@ -63,7 +67,11 @@ static const int multicast_filter_limit = 32; There are no ill effects from too-large receive rings. */ #define TX_RING_SIZE 16 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. 
*/ +#ifdef CONFIG_VIA_RHINE_NAPI +#define RX_RING_SIZE 64 +#else #define RX_RING_SIZE 16 +#endif /* Operational parameters that usually are not changed. */ @@ -116,9 +124,11 @@ MODULE_LICENSE("GPL"); module_param(max_interrupt_work, int, 0); module_param(debug, int, 0); module_param(rx_copybreak, int, 0); +module_param(avoid_D3, bool, 0); MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); /* Theory of Operation @@ -396,7 +406,7 @@ static void rhine_tx_timeout(struct net_device *dev); static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); static void rhine_tx(struct net_device *dev); -static void rhine_rx(struct net_device *dev); +static int rhine_rx(struct net_device *dev, int limit); static void rhine_error(struct net_device *dev, int intr_status); static void rhine_set_rx_mode(struct net_device *dev); static struct net_device_stats *rhine_get_stats(struct net_device *dev); @@ -564,6 +574,32 @@ static void rhine_poll(struct net_device *dev) } #endif +#ifdef CONFIG_VIA_RHINE_NAPI +static int rhine_napipoll(struct net_device *dev, int *budget) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + int done, limit = min(dev->quota, *budget); + + done = rhine_rx(dev, limit); + *budget -= done; + dev->quota -= done; + + if (done < limit) { + netif_rx_complete(dev); + + iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | + IntrRxDropped | IntrRxNoBuf | IntrTxAborted | + IntrTxDone | IntrTxError | IntrTxUnderrun | + IntrPCIErr | IntrStatsMax | IntrLinkChange, + ioaddr + IntrEnable); + return 0; + } + else + return 1; +} +#endif + static void rhine_hw_init(struct net_device *dev, long pioaddr) { struct rhine_private *rp = netdev_priv(dev); @@ -744,6 +780,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = rhine_poll; #endif +#ifdef CONFIG_VIA_RHINE_NAPI + dev->poll = rhine_napipoll; + dev->weight = 64; +#endif if (rp->quirks & rqRhineI) dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; @@ -789,6 +829,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, } } rp->mii_if.phy_id = phy_id; + if (debug > 1 && avoid_D3) + printk(KERN_INFO "%s: No D3 power state at shutdown.\n", + dev->name); return 0; @@ -1014,6 +1057,8 @@ static void init_registers(struct net_device *dev) rhine_set_rx_mode(dev); + netif_poll_enable(dev); + /* Enable interrupts by setting the interrupt mask. 
*/ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | @@ -1268,8 +1313,18 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs * dev->name, intr_status); if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | - IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) - rhine_rx(dev); + IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) { +#ifdef CONFIG_VIA_RHINE_NAPI + iowrite16(IntrTxAborted | + IntrTxDone | IntrTxError | IntrTxUnderrun | + IntrPCIErr | IntrStatsMax | IntrLinkChange, + ioaddr + IntrEnable); + + netif_rx_schedule(dev); +#else + rhine_rx(dev, RX_RING_SIZE); +#endif + } if (intr_status & (IntrTxErrSummary | IntrTxDone)) { if (intr_status & IntrTxErrSummary) { @@ -1367,13 +1422,12 @@ static void rhine_tx(struct net_device *dev) spin_unlock(&rp->lock); } -/* This routine is logically part of the interrupt handler, but isolated - for clarity and better register allocation. */ -static void rhine_rx(struct net_device *dev) +/* Process up to limit frames from receive ring */ +static int rhine_rx(struct net_device *dev, int limit) { struct rhine_private *rp = netdev_priv(dev); + int count; int entry = rp->cur_rx % RX_RING_SIZE; - int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx; if (debug > 4) { printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n", @@ -1382,16 +1436,18 @@ static void rhine_rx(struct net_device *dev) } /* If EOP is set on the next entry, it's a new packet. Send it up. */ - while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) { + for (count = 0; count < limit; ++count) { struct rx_desc *desc = rp->rx_head_desc; u32 desc_status = le32_to_cpu(desc->rx_status); int data_size = desc_status >> 16; + if (desc_status & DescOwn) + break; + if (debug > 4) printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n", desc_status); - if (--boguscnt < 0) - break; + if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { if ((desc_status & RxWholePkt) != RxWholePkt) { printk(KERN_WARNING "%s: Oversized Ethernet " @@ -1460,7 +1516,11 @@ static void rhine_rx(struct net_device *dev) PCI_DMA_FROMDEVICE); } skb->protocol = eth_type_trans(skb, dev); +#ifdef CONFIG_VIA_RHINE_NAPI + netif_receive_skb(skb); +#else netif_rx(skb); +#endif dev->last_rx = jiffies; rp->stats.rx_bytes += pkt_len; rp->stats.rx_packets++; @@ -1487,6 +1547,8 @@ static void rhine_rx(struct net_device *dev) } rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); } + + return count; } /* @@ -1776,6 +1838,7 @@ static int rhine_close(struct net_device *dev) spin_lock_irq(&rp->lock); netif_stop_queue(dev); + netif_poll_disable(dev); if (debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, " @@ -1857,7 +1920,8 @@ static void rhine_shutdown (struct pci_dev *pdev) } /* Hit power state D3 (sleep) */ - iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); + if (!avoid_D3) + iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); /* TODO: Check use of pci_enable_wake() */ diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index f5b0078eb4ad..aa9cd92f46b2 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -2742,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs) if (PHYSR0 & PHYSR0_SPDG) status |= VELOCITY_SPEED_1000; - if (PHYSR0 & PHYSR0_SPD10) + else if (PHYSR0 & PHYSR0_SPD10) status |= VELOCITY_SPEED_10; else status |= VELOCITY_SPEED_100; @@ -2851,8 +2851,17 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd u32 
status; status = check_connection_type(vptr->mac_regs); - cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; - if (status & VELOCITY_SPEED_100) + cmd->supported = SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + if (status & VELOCITY_SPEED_1000) + cmd->speed = SPEED_1000; + else if (status & VELOCITY_SPEED_100) cmd->speed = SPEED_100; else cmd->speed = SPEED_10; @@ -2896,7 +2905,7 @@ static u32 velocity_get_link(struct net_device *dev) { struct velocity_info *vptr = netdev_priv(dev); struct mac_regs __iomem * regs = vptr->mac_regs; - return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 0 : 1; + return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 1 : 0; } static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index 435e91ec4620..6b63b350cd52 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -118,7 +118,7 @@ static inline void openwin(card_t *card, u8 page) static inline void set_carrier(port_t *port) { - if (!sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD) + if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD)) netif_carrier_on(port_to_dev(port)); else netif_carrier_off(port_to_dev(port)); @@ -127,10 +127,10 @@ static inline void set_carrier(port_t *port) static void sca_msci_intr(port_t *port) { - u8 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI ST1 status */ + u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */ - /* Reset MSCI TX underrun status bit */ - sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, port); + /* Reset MSCI TX underrun and CDCD (ignored) status bit */ + sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port); if (stat & ST1_UDRN) { struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); @@ -138,6 +138,7 @@ static void sca_msci_intr(port_t *port) stats->tx_fifo_errors++; } + stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */ /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); diff --git a/drivers/net/wd.c b/drivers/net/wd.c index 7caa8dc88a58..b1ba1872f315 100644 --- a/drivers/net/wd.c +++ b/drivers/net/wd.c @@ -500,8 +500,8 @@ MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. */ -int -init_module(void) + +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index fa9d2c4edc93..2e8ac995d56f 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -447,6 +447,7 @@ config AIRO_CS tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) select CRYPTO + select CRYPTO_AES ---help--- This is the standard Linux driver to support Cisco/Aironet PCMCIA 802.11 wireless cards. 
This driver is the same as the Aironet diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index 3889f79e7128..df317c1e12a8 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c @@ -3701,7 +3701,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev, } if (sec->flags & SEC_AUTH_MODE) { secinfo->auth_mode = sec->auth_mode; - dprintk(", .auth_mode = %d\n", sec->auth_mode); + dprintk(", .auth_mode = %d", sec->auth_mode); } dprintk("\n"); if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index dafaa5ff5aa6..d500012fdc7a 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -1042,6 +1042,9 @@ static int prism2_reset_port(struct net_device *dev) dev->name, local->fragm_threshold); } + /* Some firmwares lose antenna selection settings on reset */ + (void) hostap_set_antsel(local); + return res; } diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index d6ed5781b93a..317ace7f9aae 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c @@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, if (orinoco_lock(priv, &flags) != 0) return -EBUSY; - if (erq->pointer) { + if (erq->length > 0) { if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) index = priv->tx_key; @@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, if (erq->flags & IW_ENCODE_RESTRICTED) restricted = 1; - if (erq->pointer) { + if (erq->pointer && erq->length > 0) { priv->keys[index].len = cpu_to_le16(xlen); memset(priv->keys[index].data, 0, sizeof(priv->keys[index].data)); diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c index 7f78b7801fb3..bcc7038130f6 100644 --- a/drivers/net/wireless/spectrum_cs.c +++ b/drivers/net/wireless/spectrum_cs.c @@ -242,7 +242,7 @@ spectrum_reset(struct pcmcia_device *link, int idle) u_int save_cor; /* Doing it if hardware is gone is guaranteed crash */ - if (pcmcia_dev_present(link)) + if (!pcmcia_dev_present(link)) return -ENODEV; /* Save original COR value */ diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 662ecc8a33ff..c52e9bcf8d02 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c @@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface, zd->dev->name); usb_set_intfdata(interface, zd); + zd1201_enable(zd); /* zd1201 likes to startup enabled, */ + zd1201_disable(zd); /* interfering with all the wifis in range */ return 0; err_net: diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index efc9c4bd826f..da9d06bdb818 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c @@ -797,7 +797,7 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip) { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, { CR_ZD1211_RETRY_MAX, 0x2 }, { CR_SNIFFER_ON, 0 }, - { CR_RX_FILTER, AP_RX_FILTER }, + { CR_RX_FILTER, STA_RX_FILTER }, { CR_GROUP_HASH_P1, 0x00 }, { CR_GROUP_HASH_P2, 0x80000000 }, { CR_REG1, 0xa4 }, @@ -844,7 +844,7 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip) { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, { CR_ZD1211B_TXOP, 0x01800824 }, { CR_SNIFFER_ON, 0 }, - { CR_RX_FILTER, AP_RX_FILTER }, + { CR_RX_FILTER, STA_RX_FILTER }, { CR_GROUP_HASH_P1, 0x00 
}, { CR_GROUP_HASH_P2, 0x80000000 }, { CR_REG1, 0xa4 }, diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h index 805121093ab5..069d2b467339 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.h +++ b/drivers/net/wireless/zd1211rw/zd_chip.h @@ -461,10 +461,15 @@ #define CR_RX_FILTER CTL_REG(0x068c) #define RX_FILTER_ASSOC_RESPONSE 0x0002 +#define RX_FILTER_REASSOC_RESPONSE 0x0008 #define RX_FILTER_PROBE_RESPONSE 0x0020 #define RX_FILTER_BEACON 0x0100 +#define RX_FILTER_DISASSOC 0x0400 #define RX_FILTER_AUTH 0x0800 -/* Sniff modus sets filter to 0xfffff */ +#define AP_RX_FILTER 0x0400feff +#define STA_RX_FILTER 0x0000ffff + +/* Monitor mode sets filter to 0xfffff */ #define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) #define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) @@ -546,9 +551,6 @@ #define CR_ZD1211B_TXOP CTL_REG(0x0b20) #define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) -#define AP_RX_FILTER 0x0400feff -#define STA_RX_FILTER 0x0000ffff - #define CWIN_SIZE 0x007f043f diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 3bdc54d128d0..d6f3e02a0b54 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -108,7 +108,9 @@ int zd_mac_init_hw(struct zd_mac *mac, u8 device_type) if (r) goto disable_int; - r = zd_set_encryption_type(chip, NO_WEP); + /* We must inform the device that we are doing encryption/decryption in + * software at the moment. */ + r = zd_set_encryption_type(chip, ENC_SNIFFER); if (r) goto disable_int; @@ -136,10 +138,8 @@ static int reset_mode(struct zd_mac *mac) { struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); struct zd_ioreq32 ioreqs[3] = { - { CR_RX_FILTER, RX_FILTER_BEACON|RX_FILTER_PROBE_RESPONSE| - RX_FILTER_AUTH|RX_FILTER_ASSOC_RESPONSE }, + { CR_RX_FILTER, STA_RX_FILTER }, { CR_SNIFFER_ON, 0U }, - { CR_ENCRYPTION_TYPE, NO_WEP }, }; if (ieee->iw_mode == IW_MODE_MONITOR) { @@ -713,10 +713,10 @@ static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri) struct zd_rt_hdr { struct ieee80211_radiotap_header rt_hdr; u8 rt_flags; + u8 rt_rate; u16 rt_channel; u16 rt_chbitmask; - u16 rt_rate; -}; +} __attribute__((packed)); static void fill_rt_header(void *buffer, struct zd_mac *mac, const struct ieee80211_rx_stats *stats, @@ -735,14 +735,14 @@ static void fill_rt_header(void *buffer, struct zd_mac *mac, if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256)) hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP; + hdr->rt_rate = stats->rate / 5; + /* FIXME: 802.11a */ hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz( _zd_chip_get_channel(&mac->chip))); hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ | ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) == ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK)); - - hdr->rt_rate = stats->rate / 5; } /* Returns 1 if the data packet is for us and 0 otherwise. */ diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 72f90525bf68..6320984126c7 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -323,7 +323,6 @@ static void disable_read_regs_int(struct zd_usb *usb) { struct zd_usb_interrupt *intr = &usb->intr; - ZD_ASSERT(in_interrupt()); spin_lock(&intr->lock); intr->read_regs_enabled = 0; spin_unlock(&intr->lock); @@ -545,11 +544,11 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, * be padded. 
Unaligned access might also happen if the length_info * structure is not present. */ - if (get_unaligned(&length_info->tag) == RX_LENGTH_INFO_TAG) { + if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG)) + { unsigned int l, k, n; for (i = 0, l = 0;; i++) { - k = le16_to_cpu(get_unaligned( - &length_info->length[i])); + k = le16_to_cpu(get_unaligned(&length_info->length[i])); n = l+k; if (n > length) return; |