author     David S. Miller <davem@davemloft.net>  2008-07-09 09:49:00 +0400
committer  David S. Miller <davem@davemloft.net>  2008-07-09 09:49:00 +0400
commit     816f3258e70db38d6d92c8d871377179fd69160f (patch)
tree       7ab28132592c82e2ac40317733ea1dd7d6f4e5b5
parent     b0e1e6462df3c5944010b3328a546d8fe5d932cd (diff)
download   linux-816f3258e70db38d6d92c8d871377179fd69160f.tar.xz
netdev: Kill qdisc_ingress, use netdev->rx_queue.qdisc instead.
Now that our qdisc management is bi-directional, per-queue, and fully
orthogonal, there is no reason to have a special ingress qdisc pointer
in struct net_device.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/netdevice.h  |  3
-rw-r--r--  net/core/dev.c             |  4
-rw-r--r--  net/sched/sch_api.c        | 11
3 files changed, 8 insertions(+), 10 deletions(-)
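
For orientation, here is a minimal standalone C sketch (not kernel code) of the data-structure change: the ingress qdisc is now reached through dev->rx_queue.qdisc rather than a dedicated net_device::qdisc_ingress pointer. The struct fields mirror the kernel's names but are stripped down, and graft_ingress() is a hypothetical simplification of the ingress branch of dev_graft_qdisc() with locking and atomic refcounting omitted.

/*
 * Standalone sketch, not kernel code: simplified stand-ins for the kernel
 * structures, showing where the ingress qdisc lives after this patch.
 */
#include <stdio.h>
#include <stddef.h>

struct Qdisc {
	const char *id;		/* stand-in for the real qdisc ops/state */
	int refcnt;		/* plain int here; the kernel uses atomic_t */
};

struct netdev_queue {
	struct Qdisc *qdisc;	/* per-queue qdisc pointer */
};

struct net_device {
	struct netdev_queue rx_queue;	/* ingress qdisc now hangs off here */
	struct netdev_queue tx_queue;	/* egress side, untouched by this patch */
};

/* What ing_filter()/handle_ing() now dereference instead of qdisc_ingress. */
static struct Qdisc *ingress_qdisc(struct net_device *dev)
{
	return dev->rx_queue.qdisc;
}

/* Hypothetical mirror of the ingress branch of dev_graft_qdisc(). */
static struct Qdisc *graft_ingress(struct net_device *dev, struct Qdisc *qdisc)
{
	struct netdev_queue *dev_queue = &dev->rx_queue;
	struct Qdisc *oqdisc = dev_queue->qdisc;

	if (oqdisc && oqdisc->refcnt <= 1)
		dev_queue->qdisc = NULL;	/* prune the old scheduler */
	else
		dev_queue->qdisc = qdisc;	/* install the new one */

	return oqdisc;				/* caller disposes of it */
}

int main(void)
{
	struct Qdisc ing = { .id = "ingress", .refcnt = 1 };
	struct net_device dev = { .rx_queue = { .qdisc = NULL } };

	graft_ingress(&dev, &ing);
	printf("ingress qdisc: %s\n",
	       ingress_qdisc(&dev) ? ingress_qdisc(&dev)->id : "(none)");
	return 0;
}

The actual patch below makes exactly this substitution at every ingress access site and drops the qdisc_ingress member from struct net_device.
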
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index df702a7b3db5..e7c49246fd88 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -634,9 +634,6 @@ struct net_device
struct netdev_queue rx_queue;
struct netdev_queue tx_queue ____cacheline_aligned_in_smp;
-
- struct Qdisc *qdisc_ingress;
-
unsigned long tx_queue_len; /* Max frames per queue allowed */
/* Partially transmitted GSO packet. */
diff --git a/net/core/dev.c b/net/core/dev.c
index ce79c28d739d..ab760a954d99 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2033,7 +2033,7 @@ static int ing_filter(struct sk_buff *skb)
rxq = &dev->rx_queue;
spin_lock(&rxq->lock);
- if ((q = dev->qdisc_ingress) != NULL)
+ if ((q = rxq->qdisc) != NULL)
result = q->enqueue(skb, q);
spin_unlock(&rxq->lock);
@@ -2044,7 +2044,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
- if (!skb->dev->qdisc_ingress)
+ if (!skb->dev->rx_queue.qdisc)
goto out;
if (*pt_prev) {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2313fa7c97be..4003c280b69f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -450,14 +450,15 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
qdisc_lock_tree(dev);
if (qdisc && qdisc->flags&TCQ_F_INGRESS) {
- oqdisc = dev->qdisc_ingress;
+ dev_queue = &dev->rx_queue;
+ oqdisc = dev_queue->qdisc;
/* Prune old scheduler */
if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
/* delete */
qdisc_reset(oqdisc);
- dev->qdisc_ingress = NULL;
+ dev_queue->qdisc = NULL;
} else { /* new */
- dev->qdisc_ingress = qdisc;
+ dev_queue->qdisc = qdisc;
}
} else {
@@ -739,7 +740,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /* ingress */
- q = dev->qdisc_ingress;
+ q = dev->rx_queue.qdisc;
}
} else {
struct netdev_queue *dev_queue = &dev->tx_queue;
@@ -814,7 +815,7 @@ replay:
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /*ingress */
- q = dev->qdisc_ingress;
+ q = dev->rx_queue.qdisc;
}
} else {
struct netdev_queue *dev_queue = &dev->tx_queue;