Diffstat (limited to 'drivers/net/xen-netback/interface.c')
 drivers/net/xen-netback/interface.c | 47 ++++++++++++++++++++++++-----------------------
 1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fff8cddfed81..b9de31ea7fc4 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -47,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)
return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
- return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
@@ -105,8 +100,8 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- if (xenvif_rx_schedulable(vif))
- netif_wake_queue(vif->dev);
+ vif->rx_event = true;
+ xenvif_kick_thread(vif);
return IRQ_HANDLED;
}
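
Note on the RX interrupt change above: instead of waking the netdev queue from hard-irq context, the handler now just records that an event arrived and kicks the per-vif kthread, which does the receive work in process context. A minimal sketch of what the kick helper is assumed to look like (the real helper lives elsewhere in the driver; this is an assumption, not the committed code):

/* Sketch only: assumes the helper simply wakes the per-vif wait
 * queue that the receive kthread sleeps on.
 */
void xenvif_kick_thread(struct xenvif *vif)
{
	wake_up(&vif->wq);
}
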
@@ -122,24 +117,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
+ int min_slots_needed;
BUG_ON(skb->dev != dev);
/* Drop the packet if vif is not ready */
- if (vif->task == NULL)
+ if (vif->task == NULL || !xenvif_schedulable(vif))
goto drop;
- /* Drop the packet if the target domain has no receive buffers. */
- if (!xenvif_rx_schedulable(vif))
- goto drop;
+ /* At best we'll need one slot for the header and one for each
+ * frag.
+ */
+ min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
- /* Reserve ring slots for the worst-case number of fragments. */
- vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+ /* If the skb is GSO then we'll also need an extra slot for the
+ * metadata.
+ */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ min_slots_needed++;
- if (vif->can_queue && xenvif_must_stop_queue(vif))
- netif_stop_queue(dev);
+ /* If the skb can't possibly fit in the remaining slots
+ * then turn off the queue to give the ring a chance to
+ * drain.
+ */
+ if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+ xenvif_stop_queue(vif);
- xenvif_queue_tx_skb(vif, skb);
+ skb_queue_tail(&vif->rx_queue, skb);
+ xenvif_kick_thread(vif);
return NETDEV_TX_OK;
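
Note on the slot check above: xenvif_rx_ring_slots_available() is expected to answer "has the frontend posted at least min_slots_needed unconsumed RX requests?". A hedged sketch of such a check, assuming it compares the shared-ring request producer against the backend's private consumer index (not necessarily the committed implementation):

/* Sketch: returns true when at least 'needed' RX requests are
 * outstanding.  On failure it re-arms req_event so the frontend
 * notifies us when more requests arrive, then re-checks to close
 * the race with requests posted in the meantime.
 */
static bool rx_slots_available_sketch(struct xenvif *vif, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = vif->rx.sring->req_prod;
		cons = vif->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		vif->rx.sring->req_event = prod + 1;

		mb(); /* order the event write against the re-read of prod */
	} while (vif->rx.sring->req_prod != prod);

	return false;
}
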
@@ -149,12 +155,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
- if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
- netif_wake_queue(vif->dev);
-}
-
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
@@ -388,6 +388,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
if (err < 0)
goto err;
+ init_waitqueue_head(&vif->wq);
+
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
@@ -420,7 +422,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
disable_irq(vif->rx_irq);
}
- init_waitqueue_head(&vif->wq);
task = kthread_create(xenvif_kthread,
(void *)vif, "%s", vif->dev->name);
if (IS_ERR(task)) {
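
Two observations on the connect-path hunks above and on the queue wake-up that xenvif_notify_tx_completion() used to provide. init_waitqueue_head() moves before the event-channel binding because the interrupt handlers can now kick the thread the moment the IRQs are live, so vif->wq must already be initialised. And with the notify helper gone, restarting a stopped queue becomes the kthread's job after it drains vif->rx_queue. A rough sketch of the kthread loop this diff assumes (simplified; the wake condition is an assumption based on the rx_event and rx_queue fields used above):

/* Sketch of the per-vif receive kthread: sleeps on vif->wq until an
 * interrupt or xenvif_start_xmit() kicks it, then pushes queued skbs
 * into the shared ring.
 */
int xenvif_kthread(void *data)
{
	struct xenvif *vif = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->wq,
					 !skb_queue_empty(&vif->rx_queue) ||
					 vif->rx_event ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		if (!skb_queue_empty(&vif->rx_queue))
			xenvif_rx_action(vif);	/* drain into the ring */

		vif->rx_event = false;
	}

	return 0;
}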