author | David S. Miller <davem@davemloft.net> | 2008-07-09 10:12:38 +0400
committer | David S. Miller <davem@davemloft.net> | 2008-07-09 10:12:38 +0400
commit | eb6aafe3f843cb0e939546c03540a3b4911b6964 (patch)
tree | 550cfba4baadcb64f98ce6e77fe6f9b44b5bb142 /net/sched
parent | 86d804e10a37cd86f16bf72386c37e843a98a74b (diff)
download | linux-eb6aafe3f843cb0e939546c03540a3b4911b6964.tar.xz
pkt_sched: Make qdisc_run take a netdev_queue.
This allows us to use this calling convention all the way down into
qdisc_restart().
Signed-off-by: David S. Miller <davem@davemloft.net>
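
[Editorial note] The "calling convention" here means passing the struct netdev_queue down the transmit path instead of the struct net_device that owns it. The matching caller-side change lives in include/net/pkt_sched.h, which the 'net/sched'-filtered diffstat below does not show; the following is a rough sketch of how qdisc_run() would read after this patch, reconstructed from the pre-patch helper of this era, not the verbatim hunk:

static inline void qdisc_run(struct net_device *dev)
{
	struct netdev_queue *txq = &dev->tx_queue;

	/* Still only one qdisc runner per device at a time;
	 * only the argument handed to __qdisc_run() changes. */
	if (!netif_queue_stopped(dev) &&
	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		__qdisc_run(txq);
}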
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_generic.c | 26
1 file changed, 14 insertions, 12 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 407dfdb142a4..fcc7533f0bcc 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -75,9 +75,8 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 	return 0;
 }
 
-static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
-					      struct netdev_queue *dev_queue,
-					      struct Qdisc *q)
+static inline struct sk_buff *dequeue_skb(struct netdev_queue *dev_queue,
+					  struct Qdisc *q)
 {
 	struct sk_buff *skb;
 
@@ -90,10 +89,10 @@ static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
 }
 
 static inline int handle_dev_cpu_collision(struct sk_buff *skb,
-					   struct net_device *dev,
 					   struct netdev_queue *dev_queue,
 					   struct Qdisc *q)
 {
+	struct net_device *dev = dev_queue->dev;
 	int ret;
 
 	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
@@ -139,21 +138,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  *				>0 - queue is not empty.
  *
  */
-static inline int qdisc_restart(struct net_device *dev)
+static inline int qdisc_restart(struct netdev_queue *txq)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
 	struct Qdisc *q = txq->qdisc;
-	struct sk_buff *skb;
 	int ret = NETDEV_TX_BUSY;
+	struct net_device *dev;
+	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if (unlikely((skb = dev_dequeue_skb(dev, txq, q)) == NULL))
+	if (unlikely((skb = dequeue_skb(txq, q)) == NULL))
 		return 0;
 
 	/* And release queue */
 	spin_unlock(&txq->lock);
 
+	dev = txq->dev;
+
 	HARD_TX_LOCK(dev, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
 	HARD_TX_UNLOCK(dev);
@@ -170,7 +171,7 @@ static inline int qdisc_restart(struct net_device *dev)
 
 	case NETDEV_TX_LOCKED:
 		/* Driver try lock failed */
-		ret = handle_dev_cpu_collision(skb, dev, txq, q);
+		ret = handle_dev_cpu_collision(skb, txq, q);
 		break;
 
 	default:
@@ -186,11 +187,12 @@ static inline int qdisc_restart(struct net_device *dev)
 	return ret;
 }
 
-void __qdisc_run(struct net_device *dev)
+void __qdisc_run(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
 	unsigned long start_time = jiffies;
 
-	while (qdisc_restart(dev)) {
+	while (qdisc_restart(txq)) {
 		if (netif_queue_stopped(dev))
 			break;
 
@@ -200,7 +202,7 @@ void __qdisc_run(struct net_device *dev)
 		 * 2. we've been doing it for too long.
 		 */
 		if (need_resched() || jiffies != start_time) {
-			netif_schedule_queue(&dev->tx_queue);
+			netif_schedule_queue(txq);
 			break;
 		}
 	}
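
[Editorial note] Design choice worth noting: qdisc_restart() now reads the device back out of the queue (dev = txq->dev) only after dropping txq->lock, which is safe because the back-pointer never changes over the queue's lifetime; handle_dev_cpu_collision() recovers dev the same way. Threading one object, the TX queue, all the way down is groundwork for devices with more than one TX queue. Below is a minimal, self-contained userspace mock of that convention; the types and helpers mirror kernel names but are illustrative only, not kernel code:

#include <stdio.h>

struct net_device;

/* Queue carries a back-pointer to its device, as in the kernel. */
struct netdev_queue {
	struct net_device *dev;
	int qlen;
};

struct net_device {
	const char *name;
	struct netdev_queue tx_queue;
};

/* Post-patch shape: the queue is the argument; the device is
 * recovered through the back-pointer only when actually needed. */
static int qdisc_restart(struct netdev_queue *txq)
{
	struct net_device *dev = txq->dev;

	if (txq->qlen == 0)
		return 0;	/* queue is empty */
	txq->qlen--;
	printf("%s: sent one packet, %d still queued\n", dev->name, txq->qlen);
	return txq->qlen;	/* >0 means keep going */
}

static void __qdisc_run(struct netdev_queue *txq)
{
	while (qdisc_restart(txq))
		;
}

int main(void)
{
	struct net_device dev = { .name = "eth0" };

	dev.tx_queue.dev = &dev;	/* wire up the back-pointer */
	dev.tx_queue.qlen = 3;
	__qdisc_run(&dev.tx_queue);
	return 0;
}

Built with any C compiler, this prints one line per "packet" drained; the point is only that callers hold a queue pointer and never need the device handed to them separately.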