author	Ayaz Abdulla <aabdulla@nvidia.com>	2009-03-05 11:02:10 +0300
committer	David S. Miller <davem@davemloft.net>	2009-03-10 15:29:47 +0300
commit	33912e72d00c3627dbbb7c59463df9535176059f (patch)
tree	3cea30a214bddc35c730618a14d5cb07959ccc35 /drivers/net/forcedeth.c
parent	2daac3e8f831beba2012fdefda17770456be9b7e (diff)
download	linux-33912e72d00c3627dbbb7c59463df9535176059f.tar.xz
forcedeth: add/modify tx done with limit
There are two tx_done routines that handle tx completion processing. Both of these functions now take a limit value and return the number of tx completions performed. This will be used by a future patch to determine the total amount of work done.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
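The hunks below are kernel driver code, but the pattern is easy to see in isolation. The following stand-alone C sketch is illustrative only (not forcedeth code; struct ring, ring_done() and poll_once() are invented names): a completion routine that stops after `limit` entries and returns how many it reclaimed, so a caller can total up the work it has done.

/*
 * Illustrative sketch only -- not forcedeth code.  struct ring, ring_done()
 * and poll_once() are invented names for this example.  It mimics the
 * pattern the patch introduces: reclaim at most `limit` completed entries
 * and report how many were reclaimed, so the caller can total the work done.
 */
#include <stdio.h>

#define RING_SIZE 8

struct ring {
	int put;	/* producer index: next entry the hardware fills */
	int get;	/* consumer index: next entry to reclaim */
};

/* Reclaim up to `limit` completed entries; return the number reclaimed. */
static int ring_done(struct ring *r, int limit)
{
	int work = 0;

	while (r->get != r->put && work < limit) {
		/* the real driver frees the skb and unmaps DMA here */
		r->get = (r->get + 1) % RING_SIZE;
		work++;
	}
	return work;
}

/* A poll-style caller can now charge tx completions against its budget. */
static int poll_once(struct ring *r, int budget)
{
	int tx_work = ring_done(r, budget);

	/* rx processing would add its own count here */
	return tx_work;
}

int main(void)
{
	struct ring r = { .put = 5, .get = 0 };

	printf("reclaimed %d of 5 pending entries\n", poll_once(&r, 4));
	return 0;
}

Returning a count, rather than decrementing the passed-in limit in place as the old nv_tx_done_optimized() did with (limit-- > 0), leaves the caller free to add tx and rx work into one total.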
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--	drivers/net/forcedeth.c	20
1 file changed, 14 insertions, 6 deletions
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 74511f7e13e9..78c2fe185281 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2397,14 +2397,16 @@ static inline void nv_tx_flip_ownership(struct net_device *dev)
  *
  * Caller must own np->lock.
  */
-static void nv_tx_done(struct net_device *dev)
+static int nv_tx_done(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
+	int tx_work = 0;
 	struct ring_desc* orig_get_tx = np->get_tx.orig;
 
 	while ((np->get_tx.orig != np->put_tx.orig) &&
-	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
+	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
+	       (tx_work < limit)) {
 
 		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
 			dev->name, flags);
@@ -2430,6 +2432,7 @@ static void nv_tx_done(struct net_device *dev)
 				}
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
+				tx_work++;
 			}
 		} else {
 			if (flags & NV_TX2_LASTPACKET) {
@@ -2447,6 +2450,7 @@ static void nv_tx_done(struct net_device *dev)
 				}
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
+				tx_work++;
 			}
 		}
 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
@@ -2458,17 +2462,19 @@ static void nv_tx_done(struct net_device *dev)
 		np->tx_stop = 0;
 		netif_wake_queue(dev);
 	}
+	return tx_work;
 }
 
-static void nv_tx_done_optimized(struct net_device *dev, int limit)
+static int nv_tx_done_optimized(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
+	int tx_work = 0;
 	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
 
 	while ((np->get_tx.ex != np->put_tx.ex) &&
 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
-	       (limit-- > 0)) {
+	       (tx_work < limit)) {
 
 		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
 			dev->name, flags);
@@ -2492,6 +2498,7 @@ static void nv_tx_done_optimized(struct net_device *dev, int limit)
 
 			dev_kfree_skb_any(np->get_tx_ctx->skb);
 			np->get_tx_ctx->skb = NULL;
+			tx_work++;
 
 			if (np->tx_limit) {
 				nv_tx_flip_ownership(dev);
@@ -2506,6 +2513,7 @@ static void nv_tx_done_optimized(struct net_device *dev, int limit)
 		np->tx_stop = 0;
 		netif_wake_queue(dev);
 	}
+	return tx_work;
 }
 
 /*
@@ -2578,7 +2586,7 @@ static void nv_tx_timeout(struct net_device *dev)
 
 	/* 2) check that the packets were not sent already: */
 	if (!nv_optimized(np))
-		nv_tx_done(dev);
+		nv_tx_done(dev, np->tx_ring_size);
 	else
 		nv_tx_done_optimized(dev, np->tx_ring_size);
@@ -3433,7 +3441,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 		nv_msi_workaround(np);
 
 		spin_lock(&np->lock);
-		nv_tx_done(dev);
+		nv_tx_done(dev, np->tx_ring_size);
 		spin_unlock(&np->lock);
 
 #ifdef CONFIG_FORCEDETH_NAPI