Diffstat (limited to 'drivers/net')
 -rw-r--r--  drivers/net/bnx2.c        | 49
 -rw-r--r--  drivers/net/bnx2.h        | 12
 -rw-r--r--  drivers/net/ppp_generic.c | 30
 -rw-r--r--  drivers/net/tg3.c         | 51
 -rw-r--r--  drivers/net/tg3.h         |  8
 5 files changed, 80 insertions(+), 70 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index db73de0d2511..652eb05a6c2d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.4.43"
-#define DRV_MODULE_RELDATE "June 28, 2006"
+#define DRV_MODULE_VERSION "1.4.44"
+#define DRV_MODULE_RELDATE "August 10, 2006"
#define RUN_AT(x) (jiffies + (x))
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
- u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+ u32 diff;
+ smp_mb();
+ diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
if (diff > MAX_TX_DESC_CNT)
diff = (diff & MAX_TX_DESC_CNT) - 1;
return (bp->tx_ring_size - diff);
@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
unsigned long align;
- skb = dev_alloc_skb(bp->rx_buf_size);
+ skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
if (skb == NULL) {
return -ENOMEM;
}
@@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
skb_reserve(skb, 8 - align);
}
- skb->dev = bp->dev;
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
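[The dev_alloc_skb() to netdev_alloc_skb() conversion above is what lets the explicit skb->dev assignment go away: netdev_alloc_skb() takes the owning net_device and records it in the skb itself. A minimal before/after sketch of the pattern, condensed from the two hunks above:

	/* Before: allocate anonymously, then attach the device by hand. */
	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL)
		return -ENOMEM;
	skb->dev = bp->dev;

	/* After: the allocator records bp->dev in the skb, so the manual
	 * assignment is redundant and is dropped throughout the driver. */
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL)
		return -ENOMEM;
]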
@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
}
bp->tx_cons = sw_cons;
+ /* Need to make the tx_cons update visible to bnx2_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that bnx2_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
- if (unlikely(netif_queue_stopped(bp->dev))) {
- spin_lock(&bp->tx_lock);
+ if (unlikely(netif_queue_stopped(bp->dev)) &&
+ (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+ netif_tx_lock(bp->dev);
if ((netif_queue_stopped(bp->dev)) &&
- (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+ (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
netif_wake_queue(bp->dev);
- }
- spin_unlock(&bp->tx_lock);
+ netif_tx_unlock(bp->dev);
}
}
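[The smp_mb() pairing introduced here is the core of the locking change: one barrier sits at the top of bnx2_tx_avail(), the other follows the tx_cons update in bnx2_tx_int(). A condensed sketch of how the two sides cooperate once the patch is applied — a simplification of the hunks in this file, with descriptor bookkeeping omitted, not the full functions:

	/* Consumer: tx completion, runs without netif_tx_lock. */
	static void tx_completion(struct bnx2 *bp)
	{
		u16 sw_cons = bp->tx_cons;

		/* ... reclaim completed descriptors, advancing sw_cons ... */

		bp->tx_cons = sw_cons;
		smp_mb();	/* publish tx_cons before testing queue state */

		if (unlikely(netif_queue_stopped(bp->dev)) &&
		    bnx2_tx_avail(bp) > bp->tx_wake_thresh) {
			netif_tx_lock(bp->dev);
			if (netif_queue_stopped(bp->dev) &&
			    bnx2_tx_avail(bp) > bp->tx_wake_thresh)
				netif_wake_queue(bp->dev);
			netif_tx_unlock(bp->dev);
		}
	}

	/* Producer: hard_start_xmit, already serialized by netif_tx_lock. */
	static int xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct bnx2 *bp = netdev_priv(dev);

		/* ... fill descriptors, advance bp->tx_prod ... */

		if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
			netif_stop_queue(dev);
			/* bnx2_tx_avail() starts with smp_mb(), so a tx_cons
			 * update published just before the queue was stopped is
			 * seen here and the queue is woken instead of stalling. */
			if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
				netif_wake_queue(dev);
		}
		return NETDEV_TX_OK;
	}

Waking only above bp->tx_wake_thresh (set to half the ring in bnx2_init_tx_ring() below) rather than at MAX_SKB_FRAGS adds hysteresis, so a nearly full ring does not oscillate between stopped and woken on every reclaimed packet.]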
@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
struct sk_buff *new_skb;
- new_skb = dev_alloc_skb(len + 2);
+ new_skb = netdev_alloc_skb(bp->dev, len + 2);
if (new_skb == NULL)
goto reuse_rx;
@@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
skb_reserve(new_skb, 2);
skb_put(new_skb, len);
- new_skb->dev = bp->dev;
bnx2_reuse_rx_skb(bp, skb,
sw_ring_cons, sw_ring_prod);
@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
struct tx_bd *txbd;
u32 val;
+ bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
return -EINVAL;
pkt_size = 1514;
- skb = dev_alloc_skb(pkt_size);
+ skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
packet = skb_put(skb, pkt_size);
@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
#endif
/* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full. This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
*/
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
- spin_lock(&bp->tx_lock);
netif_stop_queue(dev);
-
- if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+ if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
netif_wake_queue(dev);
- spin_unlock(&bp->tx_lock);
}
return NETDEV_TX_OK;
@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->pdev = pdev;
spin_lock_init(&bp->phy_lock);
- spin_lock_init(&bp->tx_lock);
INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->mac_addr[5] = (u8) reg;
bp->tx_ring_size = MAX_TX_DESC_CNT;
- bnx2_set_rx_ring_size(bp, 100);
+ bnx2_set_rx_ring_size(bp, 255);
bp->rx_csum = 1;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 658c5ee95c73..fe804763c607 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3890,10 +3890,6 @@ struct bnx2 {
u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
u16 tx_prod;
- struct tx_bd *tx_desc_ring;
- struct sw_bd *tx_buf_ring;
- int tx_ring_size;
-
u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
u16 hw_tx_cons;
@@ -3916,9 +3912,11 @@ struct bnx2 {
struct sw_bd *rx_buf_ring;
struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
- /* Only used to synchronize netif_stop_queue/wake_queue when tx */
- /* ring is full */
- spinlock_t tx_lock;
+ /* TX constants */
+ struct tx_bd *tx_desc_ring;
+ struct sw_bd *tx_buf_ring;
+ int tx_ring_size;
+ u32 tx_wake_thresh;
/* End of fields used in the performance code paths. */
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0ec6e9d57b94..c872f7c6cce3 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -192,7 +192,7 @@ struct cardmap {
void *ptr[CARDMAP_WIDTH];
};
static void *cardmap_get(struct cardmap *map, unsigned int nr);
-static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
+static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
static unsigned int cardmap_find_first_free(struct cardmap *map);
static void cardmap_destroy(struct cardmap **map);
@@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan)
{
struct channel *pch;
- pch = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (pch == 0)
return -ENOMEM;
- memset(pch, 0, sizeof(struct channel));
pch->ppp = NULL;
pch->chan = chan;
chan->ppp = pch;
@@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp)
int ret = -ENOMEM;
int i;
- ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+ ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
if (!ppp)
goto out;
dev = alloc_netdev(0, "", ppp_setup);
if (!dev)
goto out1;
- memset(ppp, 0, sizeof(struct ppp));
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
@@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp)
}
atomic_inc(&ppp_unit_count);
- cardmap_set(&all_ppp_units, unit, ppp);
+ ret = cardmap_set(&all_ppp_units, unit, ppp);
+ if (ret != 0)
+ goto out3;
+
mutex_unlock(&all_ppp_mutex);
*retp = 0;
return ppp;
+out3:
+ atomic_dec(&ppp_unit_count);
out2:
mutex_unlock(&all_ppp_mutex);
free_netdev(dev);
@@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr)
return NULL;
}
-static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
+static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
{
struct cardmap *p;
int i;
@@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
do {
/* need a new top level */
- struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
- memset(np, 0, sizeof(*np));
+ struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ goto enomem;
np->ptr[0] = p;
if (p != NULL) {
np->shift = p->shift + CARDMAP_ORDER;
@@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
while (p->shift > 0) {
i = (nr >> p->shift) & CARDMAP_MASK;
if (p->ptr[i] == NULL) {
- struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
- memset(np, 0, sizeof(*np));
+ struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ goto enomem;
np->shift = p->shift - CARDMAP_ORDER;
np->parent = p;
p->ptr[i] = np;
@@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
set_bit(i, &p->inuse);
else
clear_bit(i, &p->inuse);
+ return 0;
+ enomem:
+ return -ENOMEM;
}
static unsigned int cardmap_find_first_free(struct cardmap *map)
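[Two things happen in this file: the kmalloc()+memset() pairs become kzalloc(), and cardmap_set() learns to report allocation failure instead of dereferencing a NULL pointer. The caller in ppp_create_interface() now unwinds on that failure; condensed, the tail of the function reads roughly as follows (the out2 label and its cleanup are pre-existing, only sketched here):

	atomic_inc(&ppp_unit_count);
	ret = cardmap_set(&all_ppp_units, unit, ppp);
	if (ret != 0)
		goto out3;		/* a level of the map could not be allocated */

	mutex_unlock(&all_ppp_mutex);
	*retp = 0;
	return ppp;

out3:
	atomic_dec(&ppp_unit_count);	/* undo the count taken just above */
out2:
	mutex_unlock(&all_ppp_mutex);
	free_netdev(dev);
	/* ... the existing labels below release ppp and return NULL ... */
]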
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6f97962dd06b..eafabb253f08 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.64"
-#define DRV_MODULE_RELDATE "July 31, 2006"
+#define DRV_MODULE_VERSION "3.65"
+#define DRV_MODULE_RELDATE "August 07, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -123,9 +123,6 @@
TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
TG3_TX_RING_SIZE)
-#define TX_BUFFS_AVAIL(TP) \
- ((TP)->tx_pending - \
- (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp)
spin_unlock(&tp->lock);
}
+static inline u32 tg3_tx_avail(struct tg3 *tp)
+{
+ smp_mb();
+ return (tp->tx_pending -
+ ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+}
+
/* Tigon3 never reports partial packet sends. So we do not
* need special logic to handle SKBs that have not had all
* of their frags sent yet, like SunGEM does.
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp)
tp->tx_cons = sw_idx;
- if (unlikely(netif_queue_stopped(tp->dev))) {
- spin_lock(&tp->tx_lock);
+ /* Need to make the tx_cons update visible to tg3_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that tg3_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_queue_stopped(tp->dev) &&
+ (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+ netif_tx_lock(tp->dev);
if (netif_queue_stopped(tp->dev) &&
- (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+ (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
netif_wake_queue(tp->dev);
- spin_unlock(&tp->tx_lock);
+ netif_tx_unlock(tp->dev);
}
}
@@ -3101,7 +3113,6 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
if (skb == NULL)
return -ENOMEM;
- skb->dev = tp->dev;
skb_reserve(skb, tp->rx_offset);
mapping = pci_map_single(tp->pdev, skb->data,
@@ -3274,7 +3285,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
if (copy_skb == NULL)
goto drop_it_no_recycle;
- copy_skb->dev = tp->dev;
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
@@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
@@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry;
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
- spin_lock(&tp->tx_lock);
+ if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
- if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+ if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
netif_wake_queue(tp->dev);
- spin_unlock(&tp->tx_lock);
}
out_unlock:
@@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
struct sk_buff *segs, *nskb;
/* Estimate the number of fragments in the worst case */
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+ if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
netif_stop_queue(tp->dev);
return NETDEV_TX_BUSY;
}
@@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
@@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry;
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
- spin_lock(&tp->tx_lock);
+ if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
- if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+ if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
netif_wake_queue(tp->dev);
- spin_unlock(&tp->tx_lock);
}
out_unlock:
@@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
spin_lock_init(&tp->lock);
- spin_lock_init(&tp->tx_lock);
spin_lock_init(&tp->indirect_lock);
INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ba2c98711c88..3ecf356cfb08 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2079,9 +2079,9 @@ struct tg3 {
* lock: Held during reset, PHY access, timer, and when
* updating tg3_flags and tg3_flags2.
*
- * tx_lock: Held during tg3_start_xmit and tg3_tx only
- * when calling netif_[start|stop]_queue.
- * tg3_start_xmit is protected by netif_tx_lock.
+ * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
+ * netif_tx_lock when it needs to call
+ * netif_wake_queue.
*
* Both of these locks are to be held with BH safety.
*
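[The comment now names netif_tx_lock as the transmit-path lock; for tp->lock the rule is unchanged. A hedged illustration of what "held with BH safety" means for a process-context caller:

	/* Process-context paths (reset, PHY access, flag updates) take the
	 * lock with bottom halves disabled, so the softirq side cannot run
	 * against them on the same CPU. */
	spin_lock_bh(&tp->lock);
	/* ... update tg3_flags / tg3_flags2, touch the PHY ... */
	spin_unlock_bh(&tp->lock);
]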
@@ -2118,8 +2118,6 @@ struct tg3 {
u32 tx_cons;
u32 tx_pending;
- spinlock_t tx_lock;
-
struct tg3_tx_buffer_desc *tx_ring;
struct tx_ring_info *tx_buffers;
dma_addr_t tx_desc_mapping;