Diffstat (limited to 'drivers/net/ethernet/realtek/8139cp.c')
-rw-r--r--	drivers/net/ethernet/realtek/8139cp.c	86
1 file changed, 52 insertions(+), 34 deletions(-)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 1c818254b7be..cb6fc5a743ca 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -648,6 +648,7 @@ static void cp_tx (struct cp_private *cp)
{
unsigned tx_head = cp->tx_head;
unsigned tx_tail = cp->tx_tail;
+ unsigned bytes_compl = 0, pkts_compl = 0;
while (tx_tail != tx_head) {
struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -666,6 +667,9 @@ static void cp_tx (struct cp_private *cp)
le32_to_cpu(txd->opts1) & 0xffff,
PCI_DMA_TODEVICE);
+ bytes_compl += skb->len;
+ pkts_compl++;
+
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
netif_dbg(cp, tx_err, cp->dev,
@@ -697,6 +701,7 @@ static void cp_tx (struct cp_private *cp)
cp->tx_tail = tx_tail;
+ netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
netif_wake_queue(cp->dev);
}
@@ -843,6 +848,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
wmb();
}
cp->tx_head = entry;
+
+ netdev_sent_queue(dev, skb->len);
netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
entry, skb->len);
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -937,6 +944,8 @@ static void cp_stop_hw (struct cp_private *cp)
cp->rx_tail = 0;
cp->tx_head = cp->tx_tail = 0;
+
+ netdev_reset_queue(cp->dev);
}
static void cp_reset_hw (struct cp_private *cp)
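
Taken together, the hunks above wire the driver up to byte queue limits (BQL): bytes are reported as they are queued in cp_start_xmit(), credited back as descriptors complete in cp_tx(), and the accounting is zeroed whenever the ring is emptied. A minimal sketch of that pattern, assuming nothing about the driver beyond the real netdev_*_queue() helpers (all other names here are invented for illustration):

#include <linux/netdevice.h>

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... place skb on the hardware Tx ring ... */
	netdev_sent_queue(dev, skb->len);	/* bytes handed to hardware */
	return NETDEV_TX_OK;
}

static void demo_tx_complete(struct net_device *dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* ... walk finished descriptors, accumulating pkts_compl/bytes_compl ... */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void demo_ring_reset(struct net_device *dev)
{
	/* ring torn down or re-initialised: restart the accounting */
	netdev_reset_queue(dev);
}

If completions ever stop being reported while sends continue, BQL stalls the queue; that is why every path that empties the ring, including the reset paths patched above, must also call netdev_reset_queue().
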
@@ -957,8 +966,38 @@ static void cp_reset_hw (struct cp_private *cp)
static inline void cp_start_hw (struct cp_private *cp)
{
+ dma_addr_t ring_dma;
+
cpw16(CpCmd, cp->cpcmd);
+
+ /*
+ * These (at least TxRingAddr) need to be configured after the
+ * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
+ * (C+ Command Register) recommends that these and more be configured
+ * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
+ * it's been observed that the TxRingAddr is actually reset to garbage
+ * when C+ mode Tx is enabled in CpCmd.
+ */
+ cpw32_f(HiTxRingAddr, 0);
+ cpw32_f(HiTxRingAddr + 4, 0);
+
+ ring_dma = cp->ring_dma;
+ cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+ cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ /*
+ * Strictly speaking, the datasheet says this should be enabled
+ * *before* setting the descriptor addresses. But what, then, would
+ * prevent it from doing DMA to random unconfigured addresses?
+ * This variant appears to work fine.
+ */
cpw8(Cmd, RxOn | TxOn);
+
+ netdev_reset_queue(cp->dev);
}
static void cp_enable_irq(struct cp_private *cp)
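
One detail worth noting in the relocated block is the (ring_dma >> 16) >> 16 idiom: dma_addr_t may be only 32 bits wide, and shifting a 32-bit value by 32 is undefined behaviour in C, so the high dword is extracted in two well-defined 16-bit steps. A standalone userspace illustration (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t wide = 0x0000000180001000ULL;	/* 64-bit dma_addr_t case */
	uint32_t narrow = 0x80001000;		/* 32-bit dma_addr_t case */

	/* narrow >> 32 would be undefined; two 16-bit shifts are
	 * well defined for both widths. */
	printf("high dword (64-bit): 0x%08x\n",
	       (uint32_t)((wide >> 16) >> 16));		/* 0x00000001 */
	printf("high dword (32-bit): 0x%08x\n",
	       (uint32_t)((narrow >> 16) >> 16));	/* 0x00000000 */
	return 0;
}
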
@@ -969,7 +1008,6 @@ static void cp_enable_irq(struct cp_private *cp)
static void cp_init_hw (struct cp_private *cp)
{
struct net_device *dev = cp->dev;
- dma_addr_t ring_dma;
cp_reset_hw(cp);
@@ -979,17 +1017,6 @@ static void cp_init_hw (struct cp_private *cp)
cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
- cpw32_f(HiTxRingAddr, 0);
- cpw32_f(HiTxRingAddr + 4, 0);
-
- ring_dma = cp->ring_dma;
- cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
- ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
- cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
cp_start_hw(cp);
cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
@@ -1060,17 +1087,22 @@ static int cp_init_rings (struct cp_private *cp)
static int cp_alloc_rings (struct cp_private *cp)
{
+ struct device *d = &cp->pdev->dev;
void *mem;
+ int rc;
- mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
- &cp->ring_dma, GFP_KERNEL);
+ mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
cp->rx_ring = mem;
cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
- return cp_init_rings(cp);
+ rc = cp_init_rings(cp);
+ if (rc < 0)
+ dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+
+ return rc;
}
static void cp_clean_rings (struct cp_private *cp)
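
The cp_alloc_rings() change also closes a leak: a cp_init_rings() failure used to propagate its error code while the coherent DMA block stayed allocated. The same unwind written in the kernel's more common goto style, purely as a sketch (the commit itself keeps the flat form above):

static int demo_alloc_rings(struct cp_private *cp)
{
	struct device *d = &cp->pdev->dev;
	int rc;

	cp->rx_ring = dma_alloc_coherent(d, CP_RING_BYTES,
					 &cp->ring_dma, GFP_KERNEL);
	if (!cp->rx_ring)
		return -ENOMEM;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	rc = cp_init_rings(cp);
	if (rc < 0)
		goto err_free_ring;	/* undo the allocation on failure */
	return 0;

err_free_ring:
	dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
	return rc;
}
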
@@ -1192,18 +1224,16 @@ static void cp_tx_timeout(struct net_device *dev)
cp_clean_rings(cp);
rc = cp_init_rings(cp);
cp_start_hw(cp);
+ cp_enable_irq(cp);
netif_wake_queue(dev);
spin_unlock_irqrestore(&cp->lock, flags);
}
-#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
struct cp_private *cp = netdev_priv(dev);
- int rc;
- unsigned long flags;
/* check for invalid MTU, according to hardware limits */
if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
@@ -1216,22 +1246,12 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
- spin_lock_irqsave(&cp->lock, flags);
-
- cp_stop_hw(cp); /* stop h/w and free rings */
- cp_clean_rings(cp);
-
+ /* network IS up, close it, reset MTU, and come up again. */
+ cp_close(dev);
dev->mtu = new_mtu;
- cp_set_rxbufsize(cp); /* set new rx buf size */
-
- rc = cp_init_rings(cp); /* realloc and restart h/w */
- cp_start_hw(cp);
-
- spin_unlock_irqrestore(&cp->lock, flags);
-
- return rc;
+ cp_set_rxbufsize(cp);
+ return cp_open(dev);
}
-#endif /* BROKEN */
static const char mii_2_8139_map[8] = {
BasicModeCtrl,
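
The rewritten cp_change_mtu() above drops the in-place ring rework that the old BROKEN guard admitted never worked, and instead rides the driver's existing teardown and bring-up paths. A sketch of that control flow, using the driver's real entry points but with the function name invented (the !netif_running() early-out lives in the unchanged context above):

static int demo_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = netdev_priv(dev);

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;	/* interface down: just record it */
		cp_set_rxbufsize(cp);
		return 0;
	}

	cp_close(dev);			/* stop hw, free rings */
	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);		/* recompute rx buffer size */
	return cp_open(dev);		/* realloc rings, restart hw */
}

Reusing cp_close()/cp_open() is also why the explicit spin_lock_irqsave() dance around the old rework disappears: those paths already serialise against the rest of the driver.
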
@@ -1807,9 +1827,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_start_xmit = cp_start_xmit,
.ndo_tx_timeout = cp_tx_timeout,
.ndo_set_features = cp_set_features,
-#ifdef BROKEN
.ndo_change_mtu = cp_change_mtu,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cp_poll_controller,