From fc32b0e28df6655a15b488aaddfc1339f82dc13a Mon Sep 17 00:00:00 2001
From: Lennert Buytenhek
Date: Mon, 2 Jun 2008 00:28:40 +0200
Subject: mv643xx_eth: general cleanup

General cleanup of the mv643xx_eth driver.  Mainly fixes coding style /
indentation issues, gets rid of some useless 'volatile's, and kills some
more superfluous comments.

Signed-off-by: Lennert Buytenhek
Acked-by: Dale Farnsworth
---
 include/linux/mv643xx_eth.h | 59 +++++++++++++++++++++++++++++++--------------
 1 file changed, 41 insertions(+), 18 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index a15cdd4a8e58..646177660495 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -17,30 +17,53 @@
 struct mv643xx_eth_shared_platform_data {
 	struct mbus_dram_target_info	*dram;
-	unsigned int	t_clk;
+	unsigned int		t_clk;
 };
 
 struct mv643xx_eth_platform_data {
+	/*
+	 * Pointer back to our parent instance, and our port number.
+	 */
 	struct platform_device	*shared;
-	int		port_number;
+	int			port_number;
 
+	/*
+	 * Whether a PHY is present, and if yes, at which address.
+	 */
 	struct platform_device	*shared_smi;
+	int			force_phy_addr;
+	int			phy_addr;
 
-	u16		force_phy_addr;	/* force override if phy_addr == 0 */
-	u16		phy_addr;
-
-	/* If speed is 0, then speed and duplex are autonegotiated. */
-	int		speed;		/* 0, SPEED_10, SPEED_100, SPEED_1000 */
-	int		duplex;		/* DUPLEX_HALF or DUPLEX_FULL */
-
-	/* non-zero values of the following fields override defaults */
-	u32		tx_queue_size;
-	u32		rx_queue_size;
-	u32		tx_sram_addr;
-	u32		tx_sram_size;
-	u32		rx_sram_addr;
-	u32		rx_sram_size;
-	u8		mac_addr[6];	/* mac address if non-zero*/
+	/*
+	 * Use this MAC address if it is valid, overriding the
+	 * address that is already in the hardware.
+	 */
+	u8			mac_addr[6];
+
+	/*
+	 * If speed is 0, autonegotiation is enabled.
+	 * Valid values for speed: 0, SPEED_10, SPEED_100, SPEED_1000.
+	 * Valid values for duplex: DUPLEX_HALF, DUPLEX_FULL.
+	 */
+	int			speed;
+	int			duplex;
+
+	/*
+	 * Override default RX/TX queue sizes if nonzero.
+	 */
+	int			rx_queue_size;
+	int			tx_queue_size;
+
+	/*
+	 * Use on-chip SRAM for RX/TX descriptors if size is nonzero
+	 * and sufficient to contain all descriptors for the requested
+	 * ring sizes.
+	 */
+	unsigned long		rx_sram_addr;
+	int			rx_sram_size;
+	unsigned long		tx_sram_addr;
+	int			tx_sram_size;
 };
 
-#endif /* __LINUX_MV643XX_ETH_H */
+
+#endif
-- cgit v1.2.3
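For orientation, board-support code would fill in the restructured platform
data above roughly as in the following sketch.  It is illustrative rather
than taken from any real board file: the port number, PHY address and ring
sizes are invented, and the .shared/.shared_smi pointers (which would
normally reference the MV643XX_ETH_SHARED_NAME platform device) are left
out for brevity.

#include <linux/platform_device.h>
#include <linux/mv643xx_eth.h>

static struct mv643xx_eth_platform_data example_eth_data = {
	.port_number	= 0,
	.force_phy_addr	= 1,		/* trust .phy_addr even if it is 0 */
	.phy_addr	= 8,
	.speed		= 0,		/* 0 selects autonegotiation */
	.rx_queue_size	= 400,		/* nonzero values override defaults */
	.tx_queue_size	= 800,
};

static struct platform_device example_eth_device = {
	.name	= MV643XX_ETH_NAME,
	.id	= 0,
	.dev	= {
		.platform_data = &example_eth_data,
	},
};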
From 64da80a29c7455321a7df7b47e27d639e3944c1a Mon Sep 17 00:00:00 2001
From: Lennert Buytenhek
Date: Mon, 2 Jun 2008 01:01:26 +0200
Subject: mv643xx_eth: allow multiple RX queues

Allow the platform code to specify that we are running on hardware
that is capable of supporting multiple RX queues.  If this option is
used, initialise all of the given RX queues instead of just RX queue
zero.

Signed-off-by: Lennert Buytenhek
Acked-by: Dale Farnsworth
---
 drivers/net/mv643xx_eth.c   | 99 +++++++++++++++++++++++++++++++++------------
 include/linux/mv643xx_eth.h |  5 +++
 2 files changed, 79 insertions(+), 25 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 9ce7be09e295..3c8591853999 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -96,7 +96,7 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
 #define TX_BW_BURST(p)			(0x045c + ((p) << 10))
 #define INT_CAUSE(p)			(0x0460 + ((p) << 10))
-#define INT_RX				0x00000804
+#define INT_RX				0x0007fbfc
 #define INT_EXT				0x00000002
 #define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
 #define INT_EXT_LINK			0x00100000
@@ -107,7 +107,7 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define INT_MASK(p)			(0x0468 + ((p) << 10))
 #define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
 #define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
-#define RXQ_CURRENT_DESC_PTR(p)		(0x060c + ((p) << 10))
+#define RXQ_CURRENT_DESC_PTR(p, q)	(0x060c + ((p) << 10) + ((q) << 4))
 #define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
 #define TXQ_CURRENT_DESC_PTR(p)		(0x06c0 + ((p) << 10))
 #define TXQ_BW_TOKENS(p)		(0x0700 + ((p) << 10))
@@ -286,6 +286,8 @@ struct mib_counters {
 };
 
 struct rx_queue {
+	int index;
+
 	int rx_ring_size;
 
 	int rx_desc_count;
@@ -334,8 +336,10 @@ struct mv643xx_eth_private {
 	int default_rx_ring_size;
 	unsigned long rx_desc_sram_addr;
 	int rx_desc_sram_size;
+	u8 rxq_mask;
+	int rxq_primary;
 	struct napi_struct napi;
-	struct rx_queue rxq[1];
+	struct rx_queue rxq[8];
 
 	/*
 	 * TX state.
@@ -365,7 +369,7 @@ static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
 /* rxq/txq helper functions *************************************************/
 static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 {
-	return container_of(rxq, struct mv643xx_eth_private, rxq[0]);
+	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
 }
 
 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
@@ -376,13 +380,13 @@ static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
 static void rxq_enable(struct rx_queue *rxq)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
-	wrl(mp, RXQ_COMMAND(mp->port_num), 1);
+	wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index);
 }
 
 static void rxq_disable(struct rx_queue *rxq)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
-	u8 mask = 1;
+	u8 mask = 1 << rxq->index;
 
 	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
 	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
@@ -583,6 +587,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 {
 	struct mv643xx_eth_private *mp;
 	int rx;
+	int i;
 
 	mp = container_of(napi, struct mv643xx_eth_private, napi);
 
@@ -593,7 +598,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 	}
 #endif
 
-	rx = rxq_process(mp->rxq, budget);
+	rx = 0;
+	for (i = 7; rx < budget && i >= 0; i--)
+		if (mp->rxq_mask & (1 << i))
+			rx += rxq_process(mp->rxq + i, budget - rx);
 
 	if (rx < budget) {
 		netif_rx_complete(mp->dev, napi);
@@ -1306,13 +1314,15 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 
 
 /* rx/tx queue initialisation ***********************************************/
-static int rxq_init(struct mv643xx_eth_private *mp)
+static int rxq_init(struct mv643xx_eth_private *mp, int index)
 {
-	struct rx_queue *rxq = mp->rxq;
+	struct rx_queue *rxq = mp->rxq + index;
 	struct rx_desc *rx_desc;
 	int size;
 	int i;
 
+	rxq->index = index;
+
 	rxq->rx_ring_size = mp->default_rx_ring_size;
 
 	rxq->rx_desc_count = 0;
@@ -1321,7 +1331,7 @@
 
 	size = rxq->rx_ring_size * sizeof(struct rx_desc);
 
-	if (size <= mp->rx_desc_sram_size) {
+	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) {
 		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
 					    mp->rx_desc_sram_size);
 		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
@@ -1362,7 +1372,7 @@
 
 out_free:
-	if (size <= mp->rx_desc_sram_size)
+	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
 		dma_free_coherent(NULL, size,
@@ -1395,7 +1405,8 @@ static void rxq_deinit(struct rx_queue *rxq)
 			       rxq->rx_desc_count);
 	}
 
-	if (rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
+	if (rxq->index == mp->rxq_primary &&
+	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
 		dma_free_coherent(NULL, rxq->rx_desc_area_size,
@@ -1612,6 +1623,9 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 		}
 	}
 
+	/*
+	 * RxBuffer or RxError set for any of the 8 queues?
+	 */
 #ifdef MV643XX_ETH_NAPI
 	if (int_cause & INT_RX) {
 		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
@@ -1620,8 +1634,13 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 		netif_rx_schedule(dev, &mp->napi);
 	}
 #else
-	if (int_cause & INT_RX)
-		rxq_process(mp->rxq, INT_MAX);
+	if (int_cause & INT_RX) {
+		int i;
+
+		for (i = 7; i >= 0; i--)
+			if (mp->rxq_mask & (1 << i))
+				rxq_process(mp->rxq + i, INT_MAX);
+	}
 #endif
 
 	if (int_cause_ext & INT_EXT_TX) {
@@ -1707,13 +1726,16 @@ static void port_start(struct mv643xx_eth_private *mp)
 	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);
 
 	/*
-	 * Enable the receive queue.
+	 * Enable the receive queues.
 	 */
-	for (i = 0; i < 1; i++) {
-		struct rx_queue *rxq = mp->rxq;
-		int off = RXQ_CURRENT_DESC_PTR(mp->port_num);
+	for (i = 0; i < 8; i++) {
+		struct rx_queue *rxq = mp->rxq + i;
+		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
 		u32 addr;
 
+		if ((mp->rxq_mask & (1 << i)) == 0)
+			continue;
+
 		addr = (u32)rxq->rx_desc_dma;
 		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
 		wrl(mp, off, addr);
@@ -1748,6 +1770,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int err;
+	int i;
 
 	wrl(mp, INT_CAUSE(mp->port_num), 0);
 	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
@@ -1763,10 +1786,20 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	init_mac_tables(mp);
 
-	err = rxq_init(mp);
-	if (err)
-		goto out;
-	rxq_refill(mp->rxq);
+	for (i = 0; i < 8; i++) {
+		if ((mp->rxq_mask & (1 << i)) == 0)
+			continue;
+
+		err = rxq_init(mp, i);
+		if (err) {
+			while (--i >= 0)
+				if (mp->rxq_mask & (1 << i))
+					rxq_deinit(mp->rxq + i);
+			goto out;
+		}
+
+		rxq_refill(mp->rxq + i);
+	}
 
 	err = txq_init(mp);
 	if (err)
@@ -1790,7 +1823,9 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 out_free:
-	rxq_deinit(mp->rxq);
+	for (i = 0; i < 8; i++)
+		if (mp->rxq_mask & (1 << i))
+			rxq_deinit(mp->rxq + i);
 out:
 	free_irq(dev->irq, dev);
 
@@ -1800,9 +1835,13 @@ out:
 static void port_reset(struct mv643xx_eth_private *mp)
 {
 	unsigned int data;
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		if (mp->rxq_mask & (1 << i))
+			rxq_disable(mp->rxq + i);
+	}
 
 	txq_disable(mp->txq);
-	rxq_disable(mp->rxq);
 	while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
 		udelay(10);
@@ -1817,6 +1856,7 @@ static void port_reset(struct mv643xx_eth_private *mp)
 static int mv643xx_eth_stop(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int i;
 
 	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
 	rdl(mp, INT_MASK(mp->port_num));
@@ -1832,8 +1872,11 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	port_reset(mp);
 	mib_counters_update(mp);
 
+	for (i = 0; i < 8; i++) {
+		if (mp->rxq_mask & (1 << i))
+			rxq_deinit(mp->rxq + i);
+	}
 	txq_deinit(mp->txq);
-	rxq_deinit(mp->rxq);
 
 	return 0;
 }
@@ -2085,6 +2128,12 @@ static void set_params(struct mv643xx_eth_private *mp,
 	mp->rx_desc_sram_addr = pd->rx_sram_addr;
 	mp->rx_desc_sram_size = pd->rx_sram_size;
 
+	if (pd->rx_queue_mask)
+		mp->rxq_mask = pd->rx_queue_mask;
+	else
+		mp->rxq_mask = 0x01;
+	mp->rxq_primary = fls(mp->rxq_mask) - 1;
+
 	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
 	if (pd->tx_queue_size)
 		mp->default_tx_ring_size = pd->tx_queue_size;
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 646177660495..1afd7ba6d303 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -48,6 +48,11 @@ struct mv643xx_eth_platform_data {
 	int			speed;
 	int			duplex;
 
+	/*
+	 * Which RX queues to use.
+	 */
+	int			rx_queue_mask;
+
 	/*
 	 * Override default RX/TX queue sizes if nonzero.
 	 */
-- cgit v1.2.3
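The mask handling added to set_params() above is easy to read past: the
highest bit set in rx_queue_mask becomes the "primary" queue (the only one
eligible for descriptor SRAM), and the processing loops walk the mask from
queue 7 downwards.  A standalone sketch of that idiom, with an invented
mask value:

#include <linux/kernel.h>
#include <linux/bitops.h>

/* Illustrative only: walk an RX queue mask the way the driver does. */
static void example_walk_rxq_mask(u8 rxq_mask)
{
	int primary = fls(rxq_mask) - 1;	/* mask 0x05 -> queue 2 */
	int i;

	for (i = 7; i >= 0; i--)
		if (rxq_mask & (1 << i))
			printk(KERN_INFO "rx queue %d%s\n", i,
			       i == primary ? " (primary)" : "");
}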
From 3d6b35bc5090cf8d8b7e62eca1f9c21ca56fc6c7 Mon Sep 17 00:00:00 2001
From: Lennert Buytenhek
Date: Mon, 2 Jun 2008 01:28:22 +0200
Subject: mv643xx_eth: allow multiple TX queues

As with the multiple RX queue support, allow the platform code to
specify that the hardware we are running on supports multiple TX
queues.  For now, this patch only uses the highest-numbered enabled
queue to send packets; this can be extended later to enable QoS and
such.

Signed-off-by: Lennert Buytenhek
Acked-by: Dale Farnsworth
---
 drivers/net/mv643xx_eth.c   | 146 +++++++++++++++++++++++++++++++-------------
 include/linux/mv643xx_eth.h |   3 +-
 2 files changed, 105 insertions(+), 44 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 3c8591853999..287155ea9ce1 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -103,16 +103,16 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define INT_EXT_PHY			0x00010000
 #define INT_EXT_TX_ERROR_0		0x00000100
 #define INT_EXT_TX_0			0x00000001
-#define INT_EXT_TX			0x00000101
+#define INT_EXT_TX			0x0000ffff
 #define INT_MASK(p)			(0x0468 + ((p) << 10))
 #define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
 #define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
 #define RXQ_CURRENT_DESC_PTR(p, q)	(0x060c + ((p) << 10) + ((q) << 4))
 #define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
-#define TXQ_CURRENT_DESC_PTR(p)		(0x06c0 + ((p) << 10))
-#define TXQ_BW_TOKENS(p)		(0x0700 + ((p) << 10))
-#define TXQ_BW_CONF(p)			(0x0704 + ((p) << 10))
-#define TXQ_BW_WRR_CONF(p)		(0x0708 + ((p) << 10))
+#define TXQ_CURRENT_DESC_PTR(p, q)	(0x06c0 + ((p) << 10) + ((q) << 2))
+#define TXQ_BW_TOKENS(p, q)		(0x0700 + ((p) << 10) + ((q) << 4))
+#define TXQ_BW_CONF(p, q)		(0x0704 + ((p) << 10) + ((q) << 4))
+#define TXQ_BW_WRR_CONF(p, q)		(0x0708 + ((p) << 10) + ((q) << 4))
 #define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
 #define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
 #define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
@@ -303,6 +303,8 @@ struct rx_queue {
 };
 
 struct tx_queue {
+	int index;
+
 	int tx_ring_size;
 
 	int tx_desc_count;
@@ -347,7 +349,9 @@ struct mv643xx_eth_private {
 	int default_tx_ring_size;
 	unsigned long tx_desc_sram_addr;
 	int tx_desc_sram_size;
-	struct tx_queue txq[1];
+	u8 txq_mask;
+	int txq_primary;
+	struct tx_queue txq[8];
 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	int tx_clean_threshold;
 #endif
@@ -374,7 +378,7 @@ static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 
 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
 {
-	return container_of(txq, struct mv643xx_eth_private, txq[0]);
+	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
 }
 
 static void rxq_enable(struct rx_queue *rxq)
@@ -396,13 +400,13 @@ static void rxq_disable(struct rx_queue *rxq)
 static void txq_enable(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-	wrl(mp, TXQ_COMMAND(mp->port_num), 1);
+	wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index);
 }
 
 static void txq_disable(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-	u8 mask = 1;
+	u8 mask = 1 << txq->index;
 
 	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
 	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
@@ -413,6 +417,12 @@ static void __txq_maybe_wake(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 
+	/*
+	 * netif_{stop,wake}_queue() flow control only applies to
+	 * the primary queue.
+	 */
+	BUG_ON(txq->index != mp->txq_primary);
+
 	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
 		netif_wake_queue(mp->dev);
 }
@@ -593,8 +603,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 
 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	if (++mp->tx_clean_threshold > 5) {
-		txq_reclaim(mp->txq, 0);
 		mp->tx_clean_threshold = 0;
+		for (i = 0; i < 8; i++)
+			if (mp->txq_mask & (1 << i))
+				txq_reclaim(mp->txq + i, 0);
 	}
 #endif
 
@@ -754,8 +766,6 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tx_queue *txq;
 	unsigned long flags;
 
-	BUG_ON(netif_queue_stopped(dev));
-
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
 		stats->tx_dropped++;
 		dev_printk(KERN_DEBUG, &dev->dev,
@@ -766,13 +776,15 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&mp->lock, flags);
 
-	txq = mp->txq;
+	txq = mp->txq + mp->txq_primary;
 
 	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
-		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
-		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&mp->lock, flags);
-		return NETDEV_TX_BUSY;
+		if (txq->index == mp->txq_primary && net_ratelimit())
+			dev_printk(KERN_ERR, &dev->dev,
+				   "primary tx queue full?!\n");
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
 	}
 
 	txq_submit_skb(txq, skb);
@@ -780,8 +792,13 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
 
-	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB)
-		netif_stop_queue(dev);
+	if (txq->index == mp->txq_primary) {
+		int entries_left;
+
+		entries_left = txq->tx_ring_size - txq->tx_desc_count;
+		if (entries_left < MAX_DESCS_PER_SKB)
+			netif_stop_queue(dev);
+	}
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 
@@ -831,8 +848,8 @@ static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
 	if (bucket_size > 65535)
 		bucket_size = 65535;
 
-	wrl(mp, TXQ_BW_TOKENS(mp->port_num), token_rate << 14);
-	wrl(mp, TXQ_BW_CONF(mp->port_num),
+	wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), token_rate << 14);
+	wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index),
 	    (bucket_size << 10) | token_rate);
 }
 
@@ -848,7 +865,7 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
 	off = TXQ_FIX_PRIO_CONF(mp->port_num);
 
 	val = rdl(mp, off);
-	val |= 1;
+	val |= 1 << txq->index;
 	wrl(mp, off, val);
 }
 
@@ -864,13 +881,13 @@ static void txq_set_wrr(struct tx_queue *txq, int weight)
 	off = TXQ_FIX_PRIO_CONF(mp->port_num);
 
 	val = rdl(mp, off);
-	val &= ~1;
+	val &= ~(1 << txq->index);
 	wrl(mp, off, val);
 
 	/*
 	 * Configure WRR weight for this queue.
 	 */
-	off = TXQ_BW_WRR_CONF(mp->port_num);
+	off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
 
 	val = rdl(mp, off);
 	val = (val & ~0xff) | (weight & 0xff);
@@ -1415,13 +1432,15 @@ static void rxq_deinit(struct rx_queue *rxq)
 	kfree(rxq->rx_skb);
 }
 
-static int txq_init(struct mv643xx_eth_private *mp)
+static int txq_init(struct mv643xx_eth_private *mp, int index)
 {
-	struct tx_queue *txq = mp->txq;
+	struct tx_queue *txq = mp->txq + index;
 	struct tx_desc *tx_desc;
 	int size;
 	int i;
 
+	txq->index = index;
+
 	txq->tx_ring_size = mp->default_tx_ring_size;
 
 	txq->tx_desc_count = 0;
@@ -1430,7 +1449,7 @@
 	size = txq->tx_ring_size * sizeof(struct tx_desc);
 
-	if (size <= mp->tx_desc_sram_size) {
+	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) {
 		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
 					    mp->tx_desc_sram_size);
 		txq->tx_desc_dma = mp->tx_desc_sram_addr;
@@ -1467,7 +1486,7 @@
 
 out_free:
-	if (size <= mp->tx_desc_sram_size)
+	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
 	else
 		dma_free_coherent(NULL, size,
@@ -1539,7 +1558,8 @@ static void txq_deinit(struct tx_queue *txq)
 
 	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
 
-	if (txq->tx_desc_area_size <= mp->tx_desc_sram_size)
+	if (txq->index == mp->txq_primary &&
+	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
 	else
 		dma_free_coherent(NULL, txq->tx_desc_area_size,
@@ -1578,12 +1598,20 @@ static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
 	if ((pscr_o & SERIAL_PORT_ENABLE) == 0)
 		wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
 	else {
-		txq_disable(mp->txq);
+		int i;
+
+		for (i = 0; i < 8; i++)
+			if (mp->txq_mask & (1 << i))
+				txq_disable(mp->txq + i);
+
 		pscr_o &= ~SERIAL_PORT_ENABLE;
 		wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o);
 		wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
 		wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
-		txq_enable(mp->txq);
+
+		for (i = 0; i < 8; i++)
+			if (mp->txq_mask & (1 << i))
+				txq_enable(mp->txq + i);
 	}
 }
 
@@ -1609,13 +1637,17 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
 		if (mii_link_ok(&mp->mii)) {
 			struct ethtool_cmd cmd;
+			int i;
 
 			mii_ethtool_gset(&mp->mii, &cmd);
 			update_pscr(mp, cmd.speed, cmd.duplex);
-			txq_enable(mp->txq);
+			for (i = 0; i < 8; i++)
+				if (mp->txq_mask & (1 << i))
+					txq_enable(mp->txq + i);
+
 			if (!netif_carrier_ok(dev)) {
 				netif_carrier_on(dev);
-				__txq_maybe_wake(mp->txq);
+				__txq_maybe_wake(mp->txq + mp->txq_primary);
 			}
 		} else if (netif_carrier_ok(dev)) {
 			netif_stop_queue(dev);
@@ -1643,9 +1675,17 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 	}
 #endif
 
+	/*
+	 * TxBuffer or TxError set for any of the 8 queues?
+	 */
 	if (int_cause_ext & INT_EXT_TX) {
-		txq_reclaim(mp->txq, 0);
-		__txq_maybe_wake(mp->txq);
+		int i;
+
+		for (i = 0; i < 8; i++)
+			if (mp->txq_mask & (1 << i))
+				txq_reclaim(mp->txq + i, 0);
+
+		__txq_maybe_wake(mp->txq + mp->txq_primary);
 	}
 
 	return IRQ_HANDLED;
@@ -1696,11 +1736,14 @@ static void port_start(struct mv643xx_eth_private *mp)
 	 * Configure TX path and queues.
 	 */
 	tx_set_rate(mp, 1000000000, 16777216);
-	for (i = 0; i < 1; i++) {
-		struct tx_queue *txq = mp->txq;
-		int off = TXQ_CURRENT_DESC_PTR(mp->port_num);
+	for (i = 0; i < 8; i++) {
+		struct tx_queue *txq = mp->txq + i;
+		int off = TXQ_CURRENT_DESC_PTR(mp->port_num, i);
 		u32 addr;
 
+		if ((mp->txq_mask & (1 << i)) == 0)
+			continue;
+
 		addr = (u32)txq->tx_desc_dma;
 		addr += txq->tx_curr_desc * sizeof(struct tx_desc);
 		wrl(mp, off, addr);
@@ -1801,9 +1844,18 @@ static int mv643xx_eth_open(struct net_device *dev)
 		rxq_refill(mp->rxq + i);
 	}
 
-	err = txq_init(mp);
-	if (err)
-		goto out_free;
+	for (i = 0; i < 8; i++) {
+		if ((mp->txq_mask & (1 << i)) == 0)
+			continue;
+
+		err = txq_init(mp, i);
+		if (err) {
+			while (--i >= 0)
+				if (mp->txq_mask & (1 << i))
+					txq_deinit(mp->txq + i);
+			goto out_free;
+		}
+	}
 
 #ifdef MV643XX_ETH_NAPI
 	napi_enable(&mp->napi);
@@ -1840,8 +1892,9 @@ static void port_reset(struct mv643xx_eth_private *mp)
 	for (i = 0; i < 8; i++) {
 		if (mp->rxq_mask & (1 << i))
 			rxq_disable(mp->rxq + i);
+		if (mp->txq_mask & (1 << i))
+			txq_disable(mp->txq + i);
 	}
-	txq_disable(mp->txq);
 	while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
 		udelay(10);
@@ -1875,8 +1928,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	for (i = 0; i < 8; i++) {
 		if (mp->rxq_mask & (1 << i))
 			rxq_deinit(mp->rxq + i);
+		if (mp->txq_mask & (1 << i))
+			txq_deinit(mp->txq + i);
 	}
-	txq_deinit(mp->txq);
 
 	return 0;
 }
@@ -1928,7 +1982,7 @@ static void tx_timeout_task(struct work_struct *ugly)
 
 		port_reset(mp);
 		port_start(mp);
-		__txq_maybe_wake(mp->txq);
+		__txq_maybe_wake(mp->txq + mp->txq_primary);
 	}
 }
 
@@ -2139,6 +2193,12 @@ static void set_params(struct mv643xx_eth_private *mp,
 		mp->default_tx_ring_size = pd->tx_queue_size;
 	mp->tx_desc_sram_addr = pd->tx_sram_addr;
 	mp->tx_desc_sram_size = pd->tx_sram_size;
+
+	if (pd->tx_queue_mask)
+		mp->txq_mask = pd->tx_queue_mask;
+	else
+		mp->txq_mask = 0x01;
+	mp->txq_primary = fls(mp->txq_mask) - 1;
 }
 
 static int phy_detect(struct mv643xx_eth_private *mp)
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 1afd7ba6d303..12078577aef6 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -49,9 +49,10 @@ struct mv643xx_eth_platform_data {
 	int			duplex;
 
 	/*
-	 * Which RX queues to use.
+	 * Which RX/TX queues to use.
 	 */
 	int			rx_queue_mask;
+	int			tx_queue_mask;
 
 	/*
 	 * Override default RX/TX queue sizes if nonzero.
-- cgit v1.2.3
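Taken together with the RX patch, a platform that wants several queues in
each direction now only has to pass masks in its platform data; the values
below are invented for illustration.  Note that the driver still transmits
on just the highest enabled TX queue, and that netif flow control follows
that primary queue only.

/* Illustrative only: enable RX queues 0-7 and TX queues 0-1. */
static struct mv643xx_eth_platform_data example_multiqueue_data = {
	.port_number	= 0,
	.rx_queue_mask	= 0xff,		/* rxq_primary becomes 7 */
	.tx_queue_mask	= 0x03,		/* txq_primary becomes 1 */
};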
From c1da4ac752b8b0411791d26c678fcf23d2eed242 Mon Sep 17 00:00:00 2001
From: Or Gerlitz
Date: Fri, 13 Jun 2008 18:12:00 -0700
Subject: net/core: add NETDEV_BONDING_FAILOVER event

Add a NETDEV_BONDING_FAILOVER event, to be used in a subsequent patch
by bonding to announce fail-over for the active-backup mode through
the netdev events notifier chain mechanism.

Such an event can be of use for the RDMA CM (communication manager) to
let native RDMA ULPs (e.g. NFS-RDMA, iSER) always be aligned with the
IP stack, in the sense that they use the same ports/links as the stack
does.  It can also be used to make monitoring tools based on netlink
events aware of bonding fail-over.

Signed-off-by: Or Gerlitz
Signed-off-by: Jay Vosburgh
Signed-off-by: Jeff Garzik
---
 include/linux/netdevice.h | 1 +
 include/linux/notifier.h  | 1 +
 net/core/dev.c            | 6 ++++++
 3 files changed, 8 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f27fd2009334..e92fc839ab1d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1479,6 +1479,7 @@ extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct
 extern void		dev_set_promiscuity(struct net_device *dev, int inc);
 extern void		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
+extern void		netdev_bonding_change(struct net_device *dev);
 extern void		netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
 extern void		dev_load(struct net *net, const char *name);
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 0ff6224d172a..bd3d72ddf333 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -197,6 +197,7 @@ static inline int notifier_to_errno(int ret)
 #define NETDEV_GOING_DOWN	0x0009
 #define NETDEV_CHANGENAME	0x000A
 #define NETDEV_FEAT_CHANGE	0x000B
+#define NETDEV_BONDING_FAILOVER	0x000C
 
 #define SYS_DOWN	0x0001	/* Notify of system down */
 #define SYS_RESTART	SYS_DOWN
diff --git a/net/core/dev.c b/net/core/dev.c
index 68d8df0992ab..0e45742e7158 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -961,6 +961,12 @@ void netdev_state_change(struct net_device *dev)
 	}
 }
 
+void netdev_bonding_change(struct net_device *dev)
+{
+	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
+}
+EXPORT_SYMBOL(netdev_bonding_change);
+
 /**
  *	dev_load 	- load a network module
  *	@net: the applicable net namespace
-- cgit v1.2.3
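Any subsystem can consume the new event through the ordinary netdev
notifier chain.  A minimal sketch of such a consumer follows; the handler
and notifier names are invented, and note that with this era of the API
the notifier data pointer is the struct net_device itself.

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_bonding_event(struct notifier_block *nb,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_BONDING_FAILOVER)
		printk(KERN_INFO "%s: bonding fail-over\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_bonding_notifier = {
	.notifier_call = example_bonding_event,
};

/* In module init/exit code:
 *	register_netdevice_notifier(&example_bonding_notifier);
 *	unregister_netdevice_notifier(&example_bonding_notifier);
 */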
From b8a9787eddb0e4665f31dd1d64584732b2b5d051 Mon Sep 17 00:00:00 2001
From: Jay Vosburgh
Date: Fri, 13 Jun 2008 18:12:04 -0700
Subject: bonding: Allow setting max_bonds to zero

Permit bonding to function rationally if max_bonds is set to zero.
This will load the module, but create no master devices (which can be
created via sysfs).

This requires some change to bond_create_sysfs; currently, the netdev
sysfs directory is determined from the first bonding device created,
but this is no longer possible.  Instead, an interface is added to
net/core to create and destroy files in net_class.

Based on a patch submitted by Phil Oester.  Modified by Jay Vosburgh
to fix the sysfs issue mentioned above and to update the
documentation.

Signed-off-by: Phil Oester
Signed-off-by: Jay Vosburgh
Signed-off-by: Jeff Garzik
---
 Documentation/networking/bonding.txt |  3 ++-
 drivers/net/bonding/bond_main.c      |  6 +++---
 drivers/net/bonding/bond_sysfs.c     | 22 +++------------------
 include/linux/netdevice.h            |  3 +++
 net/core/net-sysfs.c                 | 13 +++++++++++++
 5 files changed, 24 insertions(+), 23 deletions(-)

(limited to 'include/linux')

diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 370b7da73ab4..7fa7fe71d7a8 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -376,7 +376,8 @@ max_bonds
 	Specifies the number of bonding devices to create for this
 	instance of the bonding driver.  E.g., if max_bonds is 3, and
 	the bonding driver is not already loaded, then bond0, bond1
-	and bond2 will be created.  The default value is 1.
+	and bond2 will be created.  The default value is 1.  Specifying
+	a value of 0 will load bonding, but will not create any devices.
 
 miimon
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3b6d66a8ab98..d57b65dc2c72 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4750,11 +4750,11 @@ static int bond_check_params(struct bond_params *params)
 		}
 	}
 
-	if (max_bonds < 1 || max_bonds > INT_MAX) {
+	if (max_bonds < 0 || max_bonds > INT_MAX) {
 		printk(KERN_WARNING DRV_NAME
 		       ": Warning: max_bonds (%d) not in range %d-%d, so it "
 		       "was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
-		       max_bonds, 1, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+		       max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
 		max_bonds = BOND_DEFAULT_MAX_BONDS;
 	}
 
@@ -4953,7 +4953,7 @@ static int bond_check_params(struct bond_params *params)
 
 		printk("\n");
 
-	} else {
+	} else if (max_bonds) {
 		/* miimon and arp_interval not set, we need one so things
 		 * work as expected, see bonding.txt for details
 		 */
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index dd265c69b0df..6caac0ffb2f2 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -53,7 +53,6 @@ extern struct bond_parm_tbl arp_validate_tbl[];
 extern struct bond_parm_tbl fail_over_mac_tbl[];
 
 static int expected_refcount = -1;
-static struct class *netdev_class;
 /*--------------------------- Data Structures -----------------------------*/
 
 /* Bonding sysfs lock.  Why can't we just use the subsystem lock?
@@ -1447,19 +1446,9 @@ static struct attribute_group bonding_group = {
  */
 int bond_create_sysfs(void)
 {
-	int ret = 0;
-	struct bonding *firstbond;
-
-	/* get the netdev class pointer */
-	firstbond = container_of(bond_dev_list.next, struct bonding, bond_list);
-	if (!firstbond)
-		return -ENODEV;
-
-	netdev_class = firstbond->dev->dev.class;
-	if (!netdev_class)
-		return -ENODEV;
+	int ret;
 
-	ret = class_create_file(netdev_class, &class_attr_bonding_masters);
+	ret = netdev_class_create_file(&class_attr_bonding_masters);
 	/*
 	 * Permit multiple loads of the module by ignoring failures to
 	 * create the bonding_masters sysfs file.  Bonding devices
@@ -1478,10 +1467,6 @@ int bond_create_sysfs(void)
 			printk(KERN_ERR
 			       "network device named %s already exists in sysfs",
 			       class_attr_bonding_masters.attr.name);
-		else {
-			netdev_class = NULL;
-			return 0;
-		}
 	}
 
 	return ret;
@@ -1493,8 +1478,7 @@ int bond_create_sysfs(void)
  */
 void bond_destroy_sysfs(void)
 {
-	if (netdev_class)
-		class_remove_file(netdev_class, &class_attr_bonding_masters);
+	netdev_class_remove_file(&class_attr_bonding_masters);
 }
 
 /*
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e92fc839ab1d..9ccbfac3fd95 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1506,6 +1506,9 @@ extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
 extern void dev_seq_stop(struct seq_file *seq, void *v);
 #endif
 
+extern int netdev_class_create_file(struct class_attribute *class_attr);
+extern void netdev_class_remove_file(struct class_attribute *class_attr);
+
 extern void linkwatch_run_queue(void);
 
 extern int netdev_compute_features(unsigned long all, unsigned long one);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index dccd737ea2e3..3f7941319217 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -468,6 +468,19 @@ int netdev_register_kobject(struct net_device *net)
 	return device_add(dev);
 }
 
+int netdev_class_create_file(struct class_attribute *class_attr)
+{
+	return class_create_file(&net_class, class_attr);
+}
+
+void netdev_class_remove_file(struct class_attribute *class_attr)
+{
+	class_remove_file(&net_class, class_attr);
+}
+
+EXPORT_SYMBOL(netdev_class_create_file);
+EXPORT_SYMBOL(netdev_class_remove_file);
+
 void netdev_initialize_kobject(struct net_device *net)
 {
 	struct device *device = &(net->dev);
-- cgit v1.2.3
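Nothing about the two helpers is bonding-specific: any module can now
publish an attribute under /sys/class/net/ without reaching for net_class
directly, the way bond_sysfs.c used to.  A hypothetical module against the
2.6.26-era class_attribute API (the "example" attribute and its contents
are invented):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/netdevice.h>

static ssize_t example_show(struct class *cls, char *buf)
{
	return sprintf(buf, "hello\n");
}

static CLASS_ATTR(example, 0444, example_show, NULL);

static int __init example_init(void)
{
	/* Creates /sys/class/net/example. */
	return netdev_class_create_file(&class_attr_example);
}

static void __exit example_exit(void)
{
	netdev_class_remove_file(&class_attr_example);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");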