Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/common.h    |  11
-rw-r--r--  drivers/net/xen-netback/interface.c |   4
-rw-r--r--  drivers/net/xen-netback/netback.c   | 138
3 files changed, 59 insertions(+), 94 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 161f207786a4..94b79c3338c4 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -58,10 +58,6 @@ struct xenvif {
u8 fe_dev_addr[6];
/* Physical parameters of the comms window. */
- grant_handle_t tx_shmem_handle;
- grant_ref_t tx_shmem_ref;
- grant_handle_t rx_shmem_handle;
- grant_ref_t rx_shmem_ref;
unsigned int irq;
/* List of frontends to notify after a batch of frames sent. */
@@ -70,8 +66,6 @@ struct xenvif {
/* The shared rings and indexes. */
struct xen_netif_tx_back_ring tx;
struct xen_netif_rx_back_ring rx;
- struct vm_struct *tx_comms_area;
- struct vm_struct *rx_comms_area;
/* Frontend feature information. */
u8 can_sg:1;
@@ -106,6 +100,11 @@ struct xenvif {
wait_queue_head_t waiting_to_free;
};
+static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
+{
+ return to_xenbus_device(vif->dev->dev.parent);
+}
+
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
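
Illustrative sketch (not part of the patch): the new inline above hands netback the vif's parent xenbus_device, which is what the generic xenbus ring-mapping helper used later in this series expects. The wrapper name example_map_tx_ring() is hypothetical; it assumes <xen/xenbus.h> and the common.h declarations above.

static int example_map_tx_ring(struct xenvif *vif, grant_ref_t tx_ring_ref,
			       void **addr)
{
	/* vif->dev is the net_device; its parent device is the xenbus
	 * device, recovered here via xenvif_to_xenbus_device().
	 */
	return xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				      tx_ring_ref, addr);
}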
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 0ca86f9ec4ed..182562952c79 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
xenvif_get(vif);
rtnl_lock();
- if (netif_running(vif->dev))
- xenvif_up(vif);
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
netdev_update_features(vif->dev);
netif_carrier_on(vif->dev);
+ if (netif_running(vif->dev))
+ xenvif_up(vif);
rtnl_unlock();
return 0;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fd00f25d9850..0cb594c86090 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -60,6 +60,9 @@ struct netbk_rx_meta {
#define MAX_PENDING_REQS 256
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
#define MAX_BUFFER_OFFSET PAGE_SIZE
/* extra field used in struct page */
@@ -155,13 +158,13 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
u16 flags);
static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
- unsigned int idx)
+ u16 idx)
{
return page_to_pfn(netbk->mmap_pages[idx]);
}
static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
- unsigned int idx)
+ u16 idx)
{
return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}
@@ -215,6 +218,16 @@ static int get_page_ext(struct page *pg,
sizeof(struct iphdr) + MAX_IPOPTLEN + \
sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+static u16 frag_get_pending_idx(skb_frag_t *frag)
+{
+ return (u16)frag->page_offset;
+}
+
+static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
+{
+ frag->page_offset = pending_idx;
+}
+
static inline pending_ring_idx_t pending_index(unsigned i)
{
return i & (MAX_PENDING_REQS-1);
@@ -321,7 +334,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
count++;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long size = skb_shinfo(skb)->frags[i].size;
+ unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
unsigned long bytes;
while (size > 0) {
BUG_ON(copy_off > MAX_BUFFER_OFFSET);
@@ -512,8 +525,8 @@ static int netbk_gop_skb(struct sk_buff *skb,
for (i = 0; i < nr_frags; i++) {
netbk_gop_frag_copy(vif, skb, npo,
- skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_page(&skb_shinfo(skb)->frags[i]),
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head);
}
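
Illustrative sketch (not part of the patch): the hunk above stops poking skb_frag_t fields directly and goes through the skb_frag_page()/skb_frag_size() accessors instead. A generic walk over an skb's paged fragments then looks like this; handle_chunk() is a hypothetical consumer.

static void example_walk_frags(struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* Page pointer and length go through the accessors; the
		 * offset field is still read directly, as in the hunk above.
		 */
		handle_chunk(skb_frag_page(frag), frag->page_offset,
			     skb_frag_size(frag));
	}
}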
@@ -890,7 +903,7 @@ static int netbk_count_requests(struct xenvif *vif,
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
struct sk_buff *skb,
- unsigned long pending_idx)
+ u16 pending_idx)
{
struct page *page;
page = alloc_page(GFP_KERNEL|__GFP_COLD);
@@ -909,11 +922,11 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
- unsigned long pending_idx = *((u16 *)skb->data);
+ u16 pending_idx = *((u16 *)skb->data);
int i, start;
/* Skip first skb fragment if it is on same page as header fragment. */
- start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < shinfo->nr_frags; i++, txp++) {
struct page *page;
@@ -945,7 +958,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
xenvif_get(vif);
pending_tx_info[pending_idx].vif = vif;
- frags[i].page = (void *)pending_idx;
+ frag_set_pending_idx(&frags[i], pending_idx);
}
return gop;
@@ -956,7 +969,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
struct gnttab_copy **gopp)
{
struct gnttab_copy *gop = *gopp;
- int pending_idx = *((u16 *)skb->data);
+ u16 pending_idx = *((u16 *)skb->data);
struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
struct xenvif *vif = pending_tx_info[pending_idx].vif;
struct xen_netif_tx_request *txp;
@@ -976,13 +989,13 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
}
/* Skip first skb fragment if it is on same page as header fragment. */
- start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < nr_frags; i++) {
int j, newerr;
pending_ring_idx_t index;
- pending_idx = (unsigned long)shinfo->frags[i].page;
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
/* Check error status: if okay then remember grant handle. */
newerr = (++gop)->status;
@@ -1008,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
for (j = start; j < i; j++) {
- pending_idx = (unsigned long)shinfo->frags[i].page;
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
xen_netbk_idx_release(netbk, pending_idx);
}
@@ -1029,15 +1042,14 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = shinfo->frags + i;
struct xen_netif_tx_request *txp;
- unsigned long pending_idx;
+ struct page *page;
+ u16 pending_idx;
- pending_idx = (unsigned long)frag->page;
+ pending_idx = frag_get_pending_idx(frag);
txp = &netbk->pending_tx_info[pending_idx].req;
- frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
- frag->size = txp->size;
- frag->page_offset = txp->offset;
-
+ page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+ __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;
@@ -1349,11 +1361,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
- skb_shinfo(skb)->frags[0].page =
- (void *)(unsigned long)pending_idx;
+ frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+ pending_idx);
} else {
- /* Discriminate from any valid pending_idx value. */
- skb_shinfo(skb)->frags[0].page = (void *)~0UL;
+ frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+ INVALID_PENDING_IDX);
}
__skb_queue_tail(&netbk->tx_queue, skb);
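
Illustrative sketch (not part of the patch): with the helpers introduced earlier, frags[0] either carries a live pending_idx or the INVALID_PENDING_IDX sentinel until xen_netbk_fill_frags() installs the real page. A hypothetical predicate built on top of them:

static bool example_frag_is_pending(skb_frag_t *frag)
{
	/* True while the frag still refers to a pending_tx_info slot
	 * rather than a real page.
	 */
	return frag_get_pending_idx(frag) != INVALID_PENDING_IDX;
}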
@@ -1577,88 +1589,42 @@ static int xen_netbk_kthread(void *data)
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
- struct gnttab_unmap_grant_ref op;
-
- if (vif->tx.sring) {
- gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
- GNTMAP_host_map, vif->tx_shmem_handle);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
- BUG();
- }
-
- if (vif->rx.sring) {
- gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
- GNTMAP_host_map, vif->rx_shmem_handle);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
- BUG();
- }
- if (vif->rx_comms_area)
- free_vm_area(vif->rx_comms_area);
- if (vif->tx_comms_area)
- free_vm_area(vif->tx_comms_area);
+ if (vif->tx.sring)
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+ vif->tx.sring);
+ if (vif->rx.sring)
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+ vif->rx.sring);
}
int xen_netbk_map_frontend_rings(struct xenvif *vif,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref)
{
- struct gnttab_map_grant_ref op;
+ void *addr;
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
int err = -ENOMEM;
- vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
- if (vif->tx_comms_area == NULL)
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ tx_ring_ref, &addr);
+ if (err)
goto err;
- vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
- if (vif->rx_comms_area == NULL)
- goto err;
-
- gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
- GNTMAP_host_map, tx_ring_ref, vif->domid);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
-
- if (op.status) {
- netdev_warn(vif->dev,
- "failed to map tx ring. err=%d status=%d\n",
- err, op.status);
- err = op.status;
- goto err;
- }
-
- vif->tx_shmem_ref = tx_ring_ref;
- vif->tx_shmem_handle = op.handle;
-
- txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
+ txs = (struct xen_netif_tx_sring *)addr;
BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
- gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
- GNTMAP_host_map, rx_ring_ref, vif->domid);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
-
- if (op.status) {
- netdev_warn(vif->dev,
- "failed to map rx ring. err=%d status=%d\n",
- err, op.status);
- err = op.status;
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ rx_ring_ref, &addr);
+ if (err)
goto err;
- }
- vif->rx_shmem_ref = rx_ring_ref;
- vif->rx_shmem_handle = op.handle;
- vif->rx_req_cons_peek = 0;
-
- rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
+ rxs = (struct xen_netif_rx_sring *)addr;
BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
+ vif->rx_req_cons_peek = 0;
+
return 0;
err: