author | Rusty Russell <rusty@rustcorp.com.au> | 2008-05-03 06:50:46 +0400
---|---|---
committer | Rusty Russell <rusty@rustcorp.com.au> | 2008-05-02 15:50:46 +0400
commit | 99ffc696d10b28580fe93441d627cf290ac4484c (patch) |
tree | 58b041b2e2b3126bfc5dd8190c3627bba0b7afe6 /drivers/net/virtio_net.c |
parent | 2e895e4c23b7f73dba7238db5c5c2dcffb2a4d9d (diff) |
download | linux-99ffc696d10b28580fe93441d627cf290ac4484c.tar.xz |
virtio: wean net driver off NETDEV_TX_BUSY
Herbert tells me that returning NETDEV_TX_BUSY from hard_start_xmit is
seen as a poor thing to do; we should cache the packet and stop the queue.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
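
For context, the convention Herbert is referring to is that a driver's hard_start_xmit should either accept the packet or stop its own transmit queue, rather than bounce the packet back to the stack with NETDEV_TX_BUSY. Below is a minimal sketch of that pattern as an illustration only: `struct my_priv`, `my_queue_packet()` and the `pending_skb` field are hypothetical placeholders, not code from this patch.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-device state; pending_skb plays the role of
 * last_xmit_skb in the patch below. */
struct my_priv {
        struct sk_buff *pending_skb;
};

/* Placeholder for handing a packet to the hardware; returns non-zero
 * when the device has no room, like add_buf() in virtio_net. */
static int my_queue_packet(struct my_priv *priv, struct sk_buff *skb);

static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *priv = netdev_priv(dev);

        if (my_queue_packet(priv, skb) != 0) {
                /* No room: cache the packet, stop the queue, and still
                 * return success rather than NETDEV_TX_BUSY.  The TX
                 * completion path later resubmits pending_skb and calls
                 * netif_wake_queue() once space frees up. */
                priv->pending_skb = skb;
                netif_stop_queue(dev);
        }
        return NETDEV_TX_OK;
}
```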
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r-- | drivers/net/virtio_net.c | 63 |
1 files changed, 43 insertions, 20 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fc7eeaa1f1b6..e44116452b7b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -41,6 +41,9 @@ struct virtnet_info
         struct net_device *dev;
         struct napi_struct napi;

+        /* The skb we couldn't send because buffers were full. */
+        struct sk_buff *last_xmit_skb;
+
         /* Number of input buffers, and max we've ever had. */
         unsigned int num, max;

@@ -227,17 +230,16 @@ static void free_old_xmit_skbs(struct virtnet_info *vi)
         }
 }

-static int start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 {
-        struct virtnet_info *vi = netdev_priv(dev);
-        int num, err;
+        int num;
         struct scatterlist sg[2+MAX_SKB_FRAGS];
         struct virtio_net_hdr *hdr;
         const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

         sg_init_table(sg, 2+MAX_SKB_FRAGS);

-        pr_debug("%s: xmit %p " MAC_FMT "\n", dev->name, skb,
+        pr_debug("%s: xmit %p " MAC_FMT "\n", vi->dev->name, skb,
                  dest[0], dest[1], dest[2],
                  dest[3], dest[4], dest[5]);

@@ -272,30 +274,51 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)

         vnet_hdr_to_sg(sg, skb);
         num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-        __skb_queue_head(&vi->send, skb);
+
+        return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
+}
+
+static int start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+        struct virtnet_info *vi = netdev_priv(dev);

 again:
         /* Free up any pending old buffers before queueing new ones. */
         free_old_xmit_skbs(vi);
-        err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
-        if (err) {
-                pr_debug("%s: virtio not prepared to send\n", dev->name);
-                netif_stop_queue(dev);
-
-                /* Activate callback for using skbs: if this returns false it
-                 * means some were used in the meantime. */
-                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-                        vi->svq->vq_ops->disable_cb(vi->svq);
-                        netif_start_queue(dev);
-                        goto again;
+
+        /* If we has a buffer left over from last time, send it now. */
+        if (vi->last_xmit_skb) {
+                if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
+                        /* Drop this skb: we only queue one. */
+                        vi->dev->stats.tx_dropped++;
+                        kfree_skb(skb);
+                        goto stop_queue;
                 }
-                __skb_unlink(skb, &vi->send);
+                vi->last_xmit_skb = NULL;
+        }

-                return NETDEV_TX_BUSY;
+        /* Put new one in send queue and do transmit */
+        __skb_queue_head(&vi->send, skb);
+        if (xmit_skb(vi, skb) != 0) {
+                vi->last_xmit_skb = skb;
+                goto stop_queue;
         }
+done:
         vi->svq->vq_ops->kick(vi->svq);
-
-        return 0;
+        return NETDEV_TX_OK;
+
+stop_queue:
+        pr_debug("%s: virtio not prepared to send\n", dev->name);
+        netif_stop_queue(dev);
+
+        /* Activate callback for using skbs: if this returns false it
+         * means some were used in the meantime. */
+        if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+                vi->svq->vq_ops->disable_cb(vi->svq);
+                netif_start_queue(dev);
+                goto again;
+        }
+        goto done;
 }

 #ifdef CONFIG_NET_POLL_CONTROLLER
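
One detail worth noting in the stop_queue path above: after netif_stop_queue(), the patch re-arms the send-completion callback with enable_cb(), and if that reports that buffers were already used in the meantime it disables the callback, restarts the queue and jumps back to "again", so a completion that raced with stopping the queue cannot leave the device stalled. A stripped-down sketch of that check follows; `enable_done_callback()` and `disable_done_callback()` are hypothetical stand-ins for vi->svq->vq_ops->enable_cb() and ->disable_cb(), not the driver's real API.

```c
#include <linux/netdevice.h>

/* Hypothetical stand-ins for the virtqueue enable_cb()/disable_cb() ops. */
static bool enable_done_callback(struct net_device *dev);
static void disable_done_callback(struct net_device *dev);

static bool stop_queue_or_retry(struct net_device *dev)
{
        netif_stop_queue(dev);

        /* Re-arm the "buffers consumed" notification.  A false return
         * means completions already happened between the failed submit
         * and netif_stop_queue(), so no interrupt is coming: undo the
         * stop and have the caller retry the transmit (the "goto again"
         * in the patch above). */
        if (!enable_done_callback(dev)) {
                disable_done_callback(dev);
                netif_start_queue(dev);
                return true;    /* caller retries xmit */
        }
        return false;           /* stay stopped until the callback fires */
}
```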