author    Stephen Hemminger <shemminger@vyatta.com>  2008-11-21 07:14:53 +0300
committer David S. Miller <davem@davemloft.net>      2008-11-21 07:14:53 +0300
commit    008298231abbeb91bc7be9e8b078607b816d1a4a (patch)
tree      8cb0c17720086ef97c614b96241f06aa63ce8511 /net/core
parent    6ab33d51713d6d60c7677c0d020910a8cb37e513 (diff)
netdev: add more functions to netdevice ops
This patch moves neigh_setup and hard_start_xmit into the network device ops
structure. For bisection, fix all the previously converted drivers as well.
The bonding driver took the biggest hit from this. Added a prefetch of the
hard_start_xmit in the fast path to try and reduce any impact this would have.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
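For reference, the conversion pattern this change expects from a driver looks
roughly like the sketch below. It uses a hypothetical "foo" driver; the
identifiers foo_start_xmit, foo_neigh_setup, foo_netdev_ops, and foo_setup are
invented for illustration and do not come from this patch (note that in this
kernel generation ndo_start_xmit still returns a plain int):

/* Sketch: old-style netdev callbacks collected into one net_device_ops. */
#include <linux/netdevice.h>

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* A real driver queues skb to hardware; this stub just frees it. */
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int foo_neigh_setup(struct net_device *dev, struct neigh_parms *p)
{
        /* Adjust neighbour parameters for this device if needed. */
        return 0;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_start_xmit  = foo_start_xmit,
        .ndo_neigh_setup = foo_neigh_setup,
};

static void foo_setup(struct net_device *dev)
{
        /* Previously: dev->hard_start_xmit = foo_start_xmit;
         *             dev->neigh_setup     = foo_neigh_setup;
         * Now a single const ops table serves the whole driver.
         */
        dev->netdev_ops = &foo_netdev_ops;
}

Core code then dispatches through dev->netdev_ops instead of per-field
pointers on struct net_device, which is exactly what the hunks below switch
over.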
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c        | 12
-rw-r--r--   net/core/neighbour.c  |  6
-rw-r--r--   net/core/netpoll.c    |  6
-rw-r--r--   net/core/pktgen.c     |  8
4 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 8843f4e3f5e1..4615e9a443aa 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1660,6 +1660,9 @@ static int dev_gso_segment(struct sk_buff *skb)
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                         struct netdev_queue *txq)
 {
+        const struct net_device_ops *ops = dev->netdev_ops;
+
+        prefetch(&dev->netdev_ops->ndo_start_xmit);
         if (likely(!skb->next)) {
                 if (!list_empty(&ptype_all))
                         dev_queue_xmit_nit(skb, dev);
@@ -1671,7 +1674,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                         goto gso;
                 }
-                return dev->hard_start_xmit(skb, dev);
+                return ops->ndo_start_xmit(skb, dev);
         }
 gso:
@@ -1681,7 +1684,7 @@ gso:
                 skb->next = nskb->next;
                 nskb->next = NULL;
-                rc = dev->hard_start_xmit(nskb, dev);
+                rc = ops->ndo_start_xmit(nskb, dev);
                 if (unlikely(rc)) {
                         nskb->next = skb->next;
                         skb->next = nskb;
@@ -1755,10 +1758,11 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                         struct sk_buff *skb)
 {
+        const struct net_device_ops *ops = dev->netdev_ops;
         u16 queue_index = 0;
-        if (dev->select_queue)
-                queue_index = dev->select_queue(dev, skb);
+        if (ops->ndo_select_queue)
+                queue_index = ops->ndo_select_queue(dev, skb);
         else if (dev->real_num_tx_queues > 1)
                 queue_index = simple_tx_hash(dev, skb);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index cca6a55909eb..9c3717a23cf7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1327,9 +1327,9 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                                       struct neigh_table *tbl)
 {
         struct neigh_parms *p, *ref;
-        struct net *net;
+        struct net *net = dev_net(dev);
+        const struct net_device_ops *ops = dev->netdev_ops;
-        net = dev_net(dev);
         ref = lookup_neigh_params(tbl, net, 0);
         if (!ref)
                 return NULL;
@@ -1341,7 +1341,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                 p->reachable_time =
                                 neigh_rand_reach_time(p->base_reachable_time);
-                if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
+                if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
                         kfree(p);
                         return NULL;
                 }
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 630df6034444..96fb0519eb7a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -58,6 +58,7 @@ static void queue_process(struct work_struct *work)
         while ((skb = skb_dequeue(&npinfo->txq))) {
                 struct net_device *dev = skb->dev;
+                const struct net_device_ops *ops = dev->netdev_ops;
                 struct netdev_queue *txq;
                 if (!netif_device_present(dev) || !netif_running(dev)) {
@@ -71,7 +72,7 @@ static void queue_process(struct work_struct *work)
                 __netif_tx_lock(txq, smp_processor_id());
                 if (netif_tx_queue_stopped(txq) ||
                     netif_tx_queue_frozen(txq) ||
-                    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                         skb_queue_head(&npinfo->txq, skb);
                         __netif_tx_unlock(txq);
                         local_irq_restore(flags);
@@ -273,6 +274,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
         int status = NETDEV_TX_BUSY;
         unsigned long tries;
         struct net_device *dev = np->dev;
+        const struct net_device_ops *ops = dev->netdev_ops;
         struct netpoll_info *npinfo = np->dev->npinfo;
         if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -293,7 +295,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                      tries > 0; --tries) {
                         if (__netif_tx_trylock(txq)) {
                                 if (!netif_tx_queue_stopped(txq))
-                                        status = dev->hard_start_xmit(skb, dev);
+                                        status = ops->ndo_start_xmit(skb, dev);
                                 __netif_tx_unlock(txq);
                                 if (status == NETDEV_TX_OK)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4e77914c4d42..15e0c2c7aacf 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3352,14 +3352,14 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
-        struct net_device *odev = NULL;
+        struct net_device *odev = pkt_dev->odev;
+        int (*xmit)(struct sk_buff *, struct net_device *)
+                = odev->netdev_ops->ndo_start_xmit;
         struct netdev_queue *txq;
         __u64 idle_start = 0;
         u16 queue_map;
         int ret;
-        odev = pkt_dev->odev;
-
         if (pkt_dev->delay_us || pkt_dev->delay_ns) {
                 u64 now;
@@ -3440,7 +3440,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
         atomic_inc(&(pkt_dev->skb->users));
 retry_now:
-        ret = odev->hard_start_xmit(pkt_dev->skb, odev);
+        ret = (*xmit)(pkt_dev->skb, odev);
         if (likely(ret == NETDEV_TX_OK)) {
                 pkt_dev->last_ok = 1;
                 pkt_dev->sofar++;