Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/common.h     |  18
-rw-r--r--  drivers/net/xen-netback/interface.c  |  16
-rw-r--r--  drivers/net/xen-netback/netback.c    | 167
-rw-r--r--  drivers/net/xen-netback/xenbus.c     |  13
4 files changed, 179 insertions(+), 35 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a495b318b6f..6dc76c1e807b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -210,12 +210,22 @@ enum state_bit_shift {
VIF_STATUS_CONNECTED,
};
+struct xenvif_mcast_addr {
+ struct list_head entry;
+ struct rcu_head rcu;
+ u8 addr[6];
+};
+
+#define XEN_NETBK_MCAST_MAX 64
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
unsigned int handle;
u8 fe_dev_addr[6];
+ struct list_head fe_mcast_addr;
+ unsigned int fe_mcast_count;
/* Frontend feature information. */
int gso_mask;
@@ -224,6 +234,7 @@ struct xenvif {
u8 can_sg:1;
u8 ip_csum:1;
u8 ipv6_csum:1;
+ u8 multicast_control:1;
/* Is this interface disabled? True when backend discovers
* frontend is rogue.
@@ -325,9 +336,6 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
queue->pending_prod + queue->pending_cons;
}
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
-
irqreturn_t xenvif_interrupt(int irq, void *dev_id);
extern bool separate_tx_rx_irq;
@@ -344,4 +352,8 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
+/* Multicast control */
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
+void xenvif_mcast_addr_list_free(struct xenvif *vif);
+
#endif /* __XEN_NETBACK__COMMON_H__ */
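Taken together, the new common.h state works like this: multicast_control records whether the frontend negotiated the feature, while fe_mcast_addr and fe_mcast_count hold the (at most XEN_NETBK_MCAST_MAX) addresses it has subscribed to. A minimal sketch of the check the backend applies to guest-bound multicast frames, mirroring the interface.c hunk below; the helper name is made up for illustration:

#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/skbuff.h>
#include "common.h"	/* struct xenvif, xenvif_mcast_match() */

/* Hypothetical helper: decide whether a guest-bound frame should reach the
 * frontend once multicast control has been negotiated.
 */
static bool xenvif_mcast_wanted(struct xenvif *vif, const struct sk_buff *skb)
{
	const struct ethhdr *eth = (const struct ethhdr *)skb->data;

	/* Feature off, or not a multicast frame: deliver exactly as before. */
	if (!vif->multicast_control || skb->pkt_type != PACKET_MULTICAST)
		return true;

	/* Otherwise only deliver destinations the frontend asked for. */
	return xenvif_mcast_match(vif, eth->h_dest);
}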
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e190fc15..e7bd63eb2876 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
atomic_dec(&queue->inflight_packets);
+
+ /* Wake the dealloc thread _after_ decrementing inflight_packets so
+ * that if kthread_stop() has already been called, the dealloc thread
+ * does not wait forever with nothing to wake it.
+ */
+ wake_up(&queue->dealloc_wq);
}
int xenvif_schedulable(struct xenvif *vif)
@@ -165,6 +171,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
!xenvif_schedulable(vif))
goto drop;
+ if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+ if (!xenvif_mcast_match(vif, eth->h_dest))
+ goto drop;
+ }
+
cb = XENVIF_RX_CB(skb);
cb->expires = jiffies + vif->drain_timeout;
@@ -421,6 +434,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->num_queues = 0;
spin_lock_init(&vif->lock);
+ INIT_LIST_HEAD(&vif->fe_mcast_addr);
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG |
@@ -655,6 +669,8 @@ void xenvif_disconnect(struct xenvif *vif)
xenvif_unmap_frontend_rings(queue);
}
+
+ xenvif_mcast_addr_list_free(vif);
}
/* Reverse the relevant parts of xenvif_init_queue().
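The wake_up() added to xenvif_skb_zerocopy_complete() above pairs with the dealloc kthread's wait condition: the thread may only exit once it has been told to stop *and* no zerocopy packets remain in flight, so the final completion has to be the event that wakes it. A rough sketch of that waiter, with names that mirror netback.c only loosely:

static int dealloc_kthread_sketch(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 kthread_should_stop());

		/* Exit only when stop was requested and every zerocopy
		 * packet has completed; without the wake_up() above, the
		 * thread could sleep here forever after the last completion.
		 */
		if (kthread_should_stop() &&
		    !atomic_read(&queue->inflight_packets))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	return 0;
}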
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 880d0d63e872..42569b994ea8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
- struct gnttab_map_grant_ref *gop)
+ struct gnttab_map_grant_ref *gop,
+ unsigned int frag_overflow,
+ struct sk_buff *nskb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
int start;
pending_ring_idx_t index;
- unsigned int nr_slots, frag_overflow = 0;
+ unsigned int nr_slots;
- /* At this point shinfo->nr_frags is in fact the number of
- * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
- */
- if (shinfo->nr_frags > MAX_SKB_FRAGS) {
- frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
- BUG_ON(frag_overflow > MAX_SKB_FRAGS);
- shinfo->nr_frags = MAX_SKB_FRAGS;
- }
nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
}
if (frag_overflow) {
- struct sk_buff *nskb = xenvif_alloc_skb(0);
- if (unlikely(nskb == NULL)) {
- if (net_ratelimit())
- netdev_err(queue->vif->dev,
- "Can't allocate the frag_list skb.\n");
- return NULL;
- }
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
@@ -1170,14 +1157,89 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
return false;
}
+/* No locking is required in xenvif_mcast_add/del() as they are
+ * only ever invoked from NAPI poll. An RCU list is used because
+ * xenvif_mcast_match() is called asynchronously, during start_xmit.
+ */
+
+static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
+{
+ struct xenvif_mcast_addr *mcast;
+
+ if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Too many multicast addresses\n");
+ return -ENOSPC;
+ }
+
+ mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+ if (!mcast)
+ return -ENOMEM;
+
+ ether_addr_copy(mcast->addr, addr);
+ list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
+ vif->fe_mcast_count++;
+
+ return 0;
+}
+
+static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
+{
+ struct xenvif_mcast_addr *mcast;
+
+ list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+ if (ether_addr_equal(addr, mcast->addr)) {
+ --vif->fe_mcast_count;
+ list_del_rcu(&mcast->entry);
+ kfree_rcu(mcast, rcu);
+ break;
+ }
+ }
+}
+
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
+{
+ struct xenvif_mcast_addr *mcast;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+ if (ether_addr_equal(addr, mcast->addr)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
+void xenvif_mcast_addr_list_free(struct xenvif *vif)
+{
+ /* No need for locking or RCU here. NAPI poll and TX queue
+ * are stopped.
+ */
+ while (!list_empty(&vif->fe_mcast_addr)) {
+ struct xenvif_mcast_addr *mcast;
+
+ mcast = list_first_entry(&vif->fe_mcast_addr,
+ struct xenvif_mcast_addr,
+ entry);
+ --vif->fe_mcast_count;
+ list_del(&mcast->entry);
+ kfree(mcast);
+ }
+}
+
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
int budget,
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
- struct sk_buff *skb;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+ struct sk_buff *skb, *nskb;
int ret;
+ unsigned int frag_overflow;
while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
@@ -1227,6 +1289,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
+ if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
+ struct xen_netif_extra_info *extra;
+
+ extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
+ ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
+
+ make_tx_response(queue, &txreq,
+ (ret == 0) ?
+ XEN_NETIF_RSP_OKAY :
+ XEN_NETIF_RSP_ERROR);
+ push_tx_responses(queue);
+ continue;
+ }
+
+ if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
+ struct xen_netif_extra_info *extra;
+
+ extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
+ xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
+
+ make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+ push_tx_responses(queue);
+ continue;
+ }
+
ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
break;
@@ -1265,6 +1352,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
+ skb_shinfo(skb)->nr_frags = ret;
+ if (data_len < txreq.size)
+ skb_shinfo(skb)->nr_frags++;
+ /* At this point shinfo->nr_frags is in fact the number of
+ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+ */
+ frag_overflow = 0;
+ nskb = NULL;
+ if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+ frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+ BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+ skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+ nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
+ kfree_skb(skb);
+ xenvif_tx_err(queue, &txreq, idx);
+ if (net_ratelimit())
+ netdev_err(queue->vif->dev,
+ "Can't allocate the frag_list skb.\n");
+ break;
+ }
+ }
+
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1382,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
+ kfree_skb(nskb);
break;
}
}
@@ -1294,9 +1405,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
(*copy_ops)++;
- skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
- skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1419,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue->pending_cons++;
- request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
- if (request_gop == NULL) {
- kfree_skb(skb);
- xenvif_tx_err(queue, &txreq, idx);
- break;
- }
- gop = request_gop;
+ gop = xenvif_get_requests(queue, skb, txfrags, gop,
+ frag_overflow, nskb);
__skb_queue_tail(&queue->tx_queue, skb);
@@ -1536,7 +1640,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
smp_wmb();
queue->dealloc_prod++;
} while (ubuf);
- wake_up(&queue->dealloc_wq);
spin_unlock_irqrestore(&queue->callback_lock, flags);
if (likely(zerocopy_success))
@@ -1566,13 +1669,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
smp_rmb();
while (dc != dp) {
- BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
+ BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
pending_idx =
queue->dealloc_ring[pending_index(dc++)];
- pending_idx_release[gop-queue->tx_unmap_ops] =
+ pending_idx_release[gop - queue->tx_unmap_ops] =
pending_idx;
- queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+ queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
queue->mmap_pages[pending_idx];
gnttab_set_unmap_op(gop,
idx_to_kaddr(queue, pending_idx),
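The MCAST_ADD/MCAST_DEL handling added to xenvif_tx_build_gops() above consumes extra-info segments from the tx ring. For context, a hedged sketch of what the frontend side would place there; only the extra->u.mcast.addr layout and the XEN_NETIF_EXTRA_TYPE_MCAST_* constants come from the shared netif interface header, the helper itself is hypothetical:

#include <linux/etherdevice.h>
#include <linux/string.h>
#include <xen/interface/io/netif.h>

/* Fill the extra-info slot that follows a tx request carrying
 * XEN_NETTXF_extra_info; the backend above turns this into a
 * xenvif_mcast_add() or xenvif_mcast_del() call, followed by a tx response.
 */
static void fill_mcast_extra(struct xen_netif_extra_info *extra,
			     const u8 *addr, bool add)
{
	extra->type = add ? XEN_NETIF_EXTRA_TYPE_MCAST_ADD
			  : XEN_NETIF_EXTRA_TYPE_MCAST_DEL;
	extra->flags = 0;
	memcpy(extra->u.mcast.addr, addr, ETH_ALEN);
}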
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index ec383b0f5443..929a6e7e5ecf 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -327,6 +327,14 @@ static int netback_probe(struct xenbus_device *dev,
goto abort_transaction;
}
+ /* We support multicast-control. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-multicast-control", "%d", 1);
+ if (err) {
+ message = "writing feature-multicast-control";
+ goto abort_transaction;
+ }
+
err = xenbus_transaction_end(xbt, 0);
} while (err == -EAGAIN);
@@ -1016,6 +1024,11 @@ static int read_xenbus_vif_flags(struct backend_info *be)
val = 0;
vif->ipv6_csum = !!val;
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
+ "%d", &val) < 0)
+ val = 0;
+ vif->multicast_control = !!val;
+
return 0;
}
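To complete the picture, the two xenbus changes above implement the backend half of a simple negotiation: netback advertises feature-multicast-control, and read_xenbus_vif_flags() later picks up the frontend's request-multicast-control answer. A sketch of the frontend half, which is not part of this patch; the function is hypothetical but uses only the standard xenbus helpers:

#include <xen/xenbus.h>

/* Hypothetical frontend-side step, run while (re)connecting: only ask for
 * multicast control if the backend advertised it, otherwise keep relying on
 * the backend flooding all multicast traffic to the guest.
 */
static int request_multicast_control(struct xenbus_device *dev,
				     struct xenbus_transaction xbt)
{
	int feature;

	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 "feature-multicast-control", "%d", &feature) < 0)
		feature = 0;

	if (!feature)
		return 0;	/* older backend: nothing to request */

	return xenbus_printf(xbt, dev->nodename,
			     "request-multicast-control", "%d", 1);
}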