summaryrefslogtreecommitdiff
path: root/net
diff options
context:
space:
mode:
authorJarek Poplawski <jarkao2@gmail.com>2009-11-15 10:20:12 +0300
committerDavid S. Miller <davem@davemloft.net>2009-11-16 09:08:33 +0300
commit9a1654ba0b50402a6bd03c7b0fe9b0200a5ea7b1 (patch)
tree3defd37672da2069e6c0ffd86b0b99c694324985 /net
parentcb43e23435a66d5ed90f804af9efe9096503979f (diff)
downloadlinux-9a1654ba0b50402a6bd03c7b0fe9b0200a5ea7b1.tar.xz
net: Optimize hard_start_xmit() return checking
Recent changes in the TX error propagation require additional checking and masking of values returned from hard_start_xmit(), mainly to separate cases where the skb was consumed. This can be simplified by changing the order of the NETDEV_TX and NET_XMIT codes, because the latter are treated similarly to negative (ERRNO) values. After this change, the much simpler dev_xmit_complete() is also used in sch_direct_xmit(), so it is moved to netdevice.h. Additionally, the NET_RX definitions in netdevice.h are moved up from between the TX codes to avoid confusion while reading the TX comment. Signed-off-by: Jarek Poplawski <jarkao2@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/core/dev.c17
-rw-r--r--net/sched/sch_generic.c23
2 files changed, 5 insertions, 35 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 32045df1da5c..4b24d79414e3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1924,23 +1924,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return rc;
}
-static inline bool dev_xmit_complete(int rc)
-{
- /* successful transmission */
- if (rc == NETDEV_TX_OK)
- return true;
-
- /* error while transmitting, driver consumed skb */
- if (rc < 0)
- return true;
-
- /* error while queueing to a different device, driver consumed skb */
- if (rc & NET_XMIT_MASK)
- return true;
-
- return false;
-}
-
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b13821ad2fb6..5173c1e1b19c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -119,39 +119,26 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
spin_unlock(root_lock);
HARD_TX_LOCK(dev, txq, smp_processor_id());
- if (!netif_tx_queue_stopped(txq) &&
- !netif_tx_queue_frozen(txq)) {
+ if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
ret = dev_hard_start_xmit(skb, dev, txq);
- /* an error implies that the skb was consumed */
- if (ret < 0)
- ret = NETDEV_TX_OK;
- /* all NET_XMIT codes map to NETDEV_TX_OK */
- ret &= ~NET_XMIT_MASK;
- }
HARD_TX_UNLOCK(dev, txq);
spin_lock(root_lock);
- switch (ret) {
- case NETDEV_TX_OK:
- /* Driver sent out skb successfully */
+ if (dev_xmit_complete(ret)) {
+ /* Driver sent out skb successfully or skb was consumed */
ret = qdisc_qlen(q);
- break;
-
- case NETDEV_TX_LOCKED:
+ } else if (ret == NETDEV_TX_LOCKED) {
/* Driver try lock failed */
ret = handle_dev_cpu_collision(skb, txq, q);
- break;
-
- default:
+ } else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
printk(KERN_WARNING "BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q);
- break;
}
if (ret && (netif_tx_queue_stopped(txq) ||