Diffstat (limited to 'drivers/xen')
 -rw-r--r--  drivers/xen/biomerge.c           |   5
 -rw-r--r--  drivers/xen/events/events_base.c |   1
 -rw-r--r--  drivers/xen/evtchn.c             |   2
 -rw-r--r--  drivers/xen/gntdev.c             |  19
 -rw-r--r--  drivers/xen/privcmd-buf.c        |   8
 -rw-r--r--  drivers/xen/swiotlb-xen.c        | 196
 6 files changed, 73 insertions, 158 deletions
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index f3fbb700f569..05a286d24f14 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -4,12 +4,13 @@
#include <xen/xen.h>
#include <xen/page.h>
+/* check if @page can be merged with 'vec1' */
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2)
+ const struct page *page)
{
#if XEN_PAGE_SIZE == PAGE_SIZE
unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
- unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
+ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(page));
return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
#else
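
The new signature lets a caller test a bare page against the tail bvec of a bio without building a temporary bio_vec. A minimal sketch of such a caller, assuming a hypothetical can_append_page() helper and that the xen_biovec_phys_mergeable() prototype is in scope (biomerge.c itself pulls in <xen/xen.h> and <xen/page.h>):

#include <linux/bio.h>
#include <xen/xen.h>
#include <xen/page.h>

/*
 * Hypothetical helper: may @page be merged into the last bio_vec of @bio?
 * On non-Xen setups physical contiguity is enough; under Xen the bus frame
 * numbers must also be contiguous, which is what xen_biovec_phys_mergeable()
 * now checks against a bare page.
 */
static bool can_append_page(struct bio *bio, struct page *page)
{
        struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

        if (!xen_domain())
                return true;

        return xen_biovec_phys_mergeable(last, page);
}
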
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 117e76b2f939..084e45882c73 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1687,7 +1687,6 @@ void __init xen_init_IRQ(void)
#ifdef CONFIG_X86
if (xen_pv_domain()) {
- irq_ctx_init(smp_processor_id());
if (xen_initial_domain())
pci_xen_initial_domain();
}
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 6d1a5e58968f..f341b016672f 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -664,7 +664,7 @@ static int evtchn_open(struct inode *inode, struct file *filp)
filp->private_data = u;
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int evtchn_release(struct inode *inode, struct file *filp)
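
stream_open() marks the file as stream-like: f_pos is never used, so reads and writes no longer serialize on the f_pos lock that nonseekable_open() still left in place. A minimal sketch of the pattern for a character-device open handler (example_open is a hypothetical name):

#include <linux/fs.h>

/* Stream-like chardev open: no seeking, no f_pos, parallel read/write OK. */
static int example_open(struct inode *inode, struct file *filp)
{
        filp->private_data = NULL;      /* per-open state would be set up here */
        return stream_open(inode, filp);
}
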
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 7cf9c51318aa..469dfbd6cf90 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -526,20 +526,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
struct gntdev_grant_map *map;
int ret = 0;
- if (range->blockable)
+ if (mmu_notifier_range_blockable(range))
mutex_lock(&priv->lock);
else if (!mutex_trylock(&priv->lock))
return -EAGAIN;
list_for_each_entry(map, &priv->maps, next) {
ret = unmap_if_in_range(map, range->start, range->end,
- range->blockable);
+ mmu_notifier_range_blockable(range));
if (ret)
goto out_unlock;
}
list_for_each_entry(map, &priv->freeable_maps, next) {
ret = unmap_if_in_range(map, range->start, range->end,
- range->blockable);
+ mmu_notifier_range_blockable(range));
if (ret)
goto out_unlock;
}
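
mmu_notifier_range_blockable() replaces the open-coded range->blockable field. A minimal sketch of the resulting locking pattern in an invalidate_range_start callback, with hypothetical example_priv/example_invl_range_start names:

#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct example_priv {
        struct mmu_notifier mn;
        struct mutex lock;
};

static int example_invl_range_start(struct mmu_notifier *mn,
                                    const struct mmu_notifier_range *range)
{
        struct example_priv *priv = container_of(mn, struct example_priv, mn);

        /* Block on the lock only when the caller allows sleeping. */
        if (mmu_notifier_range_blockable(range))
                mutex_lock(&priv->lock);
        else if (!mutex_trylock(&priv->lock))
                return -EAGAIN;

        /* ... tear down mappings overlapping [range->start, range->end) ... */

        mutex_unlock(&priv->lock);
        return 0;
}
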
@@ -852,7 +852,7 @@ static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
unsigned long xen_pfn;
int ret;
- ret = get_user_pages_fast(addr, 1, writeable, &page);
+ ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
if (ret < 0)
return ret;
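
get_user_pages_fast() now takes gup_flags rather than a write boolean, so the boolean is translated to FOLL_WRITE at the call site. A minimal sketch of pinning one user page with the new calling convention (example_pin_user_page is a hypothetical name):

#include <linux/mm.h>

/* Pin a single user page, optionally for writing; caller must put_page() it. */
static int example_pin_user_page(unsigned long addr, bool writeable,
                                 struct page **page)
{
        int ret;

        ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, page);
        if (ret < 0)
                return ret;
        if (ret == 0)
                return -EFAULT; /* nothing pinned */

        return 0;
}
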
@@ -1084,7 +1084,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
int index = vma->vm_pgoff;
int count = vma_pages(vma);
struct gntdev_grant_map *map;
- int i, err = -EINVAL;
+ int err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
return -EINVAL;
@@ -1145,12 +1145,9 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto out_put_map;
if (!use_ptemod) {
- for (i = 0; i < count; i++) {
- err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
- map->pages[i]);
- if (err)
- goto out_put_map;
- }
+ err = vm_map_pages(vma, map->pages, map->count);
+ if (err)
+ goto out_put_map;
} else {
#ifdef CONFIG_X86
/*
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index a1c61e351d3f..dd5bbb6e1b6b 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -165,12 +165,8 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (vma_priv->n_pages != count)
ret = -ENOMEM;
else
- for (i = 0; i < vma_priv->n_pages; i++) {
- ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
- vma_priv->pages[i]);
- if (ret)
- break;
- }
+ ret = vm_map_pages_zero(vma, vma_priv->pages,
+ vma_priv->n_pages);
if (ret)
privcmd_buf_vmapriv_free(vma_priv);
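
Both the gntdev and privcmd-buf conversions use the vm_map_pages() helpers, which insert a whole page array into a VMA in one call instead of looping over vm_insert_page(). A minimal sketch of an mmap handler built on them (example names are hypothetical):

#include <linux/mm.h>

struct example_buf {
        struct page **pages;
        unsigned long n_pages;
};

static int example_mmap(struct example_buf *buf, struct vm_area_struct *vma)
{
        /*
         * vm_map_pages() checks the VMA size and offset against n_pages and
         * inserts each page, replacing the old vm_insert_page() loop.
         * vm_map_pages_zero(), as used in privcmd-buf above, does the same
         * but ignores vma->vm_pgoff.
         */
        return vm_map_pages(vma, buf->pages, buf->n_pages);
}
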
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 877baf2a94f4..5dcb06fe9667 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -391,13 +391,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (dma_capable(dev, dev_addr, size) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
- (swiotlb_force != SWIOTLB_FORCE)) {
- /* we are not interested in the dma_addr returned by
- * xen_dma_map_page, only in the potential cache flushes executed
- * by the function. */
- xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
- return dev_addr;
- }
+ swiotlb_force != SWIOTLB_FORCE)
+ goto done;
/*
* Oh well, have to allocate and map a bounce buffer.
@@ -410,19 +405,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
return DMA_MAPPING_ERROR;
dev_addr = xen_phys_to_bus(map);
- xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
- dev_addr, map & ~PAGE_MASK, size, dir, attrs);
/*
* Ensure that the address returned is DMA'ble
*/
- if (dma_capable(dev, dev_addr, size))
- return dev_addr;
-
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+ if (unlikely(!dma_capable(dev, dev_addr, size))) {
+ swiotlb_tbl_unmap_single(dev, map, size, dir,
+ attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ return DMA_MAPPING_ERROR;
+ }
- return DMA_MAPPING_ERROR;
+ page = pfn_to_page(map >> PAGE_SHIFT);
+ offset = map & ~PAGE_MASK;
+done:
+ /*
+ * we are not interested in the dma_addr returned by xen_dma_map_page,
+ * only in the potential cache flushes executed by the function.
+ */
+ xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+ return dev_addr;
}
/*
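
Drivers reach the reworked xen_swiotlb_map_page() through the generic DMA API; the restructured error path still reports failure as DMA_MAPPING_ERROR, which callers test with dma_mapping_error(). A minimal sketch of that driver-side pattern (example_map_buffer is a hypothetical name):

#include <linux/dma-mapping.h>

static int example_map_buffer(struct device *dev, struct page *page,
                              size_t len, dma_addr_t *dma_handle)
{
        dma_addr_t addr;

        addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM; /* e.g. bounce buffer exhausted */

        *dma_handle = addr;
        return 0;
}
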
@@ -455,48 +456,28 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
-/*
- * Make physical memory consistent for a single streaming mode DMA translation
- * after a transfer.
- *
- * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the dma mapping, you must
- * call this function before doing so. At the next point you give the dma
- * address back to the card, you must first perform a
- * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
- */
static void
-xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- enum dma_sync_target target)
+xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
{
- phys_addr_t paddr = xen_bus_to_phys(dev_addr);
-
- BUG_ON(dir == DMA_NONE);
-
- if (target == SYNC_FOR_CPU)
- xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
+ phys_addr_t paddr = xen_bus_to_phys(dma_addr);
- /* NOTE: We use dev_addr here, not paddr! */
- if (is_xen_swiotlb_buffer(dev_addr))
- swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
+ xen_dma_sync_single_for_cpu(dev, dma_addr, size, dir);
- if (target == SYNC_FOR_DEVICE)
- xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
+ if (is_xen_swiotlb_buffer(dma_addr))
+ swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
-void
-xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
+static void
+xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
{
- xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
-}
+ phys_addr_t paddr = xen_bus_to_phys(dma_addr);
-void
-xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
-{
- xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+ if (is_xen_swiotlb_buffer(dma_addr))
+ swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+
+ xen_dma_sync_single_for_device(dev, dma_addr, size, dir);
}
/*
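
The deleted comment described the buffer-ownership rule; from a driver's point of view it still applies through the generic sync helpers, which dispatch to the xen_swiotlb_sync_single_for_{cpu,device} ops above. A minimal sketch, with a hypothetical example_inspect_dma_buffer():

#include <linux/dma-mapping.h>

/*
 * Look at a still-mapped DMA_FROM_DEVICE buffer with the CPU, then hand
 * ownership back to the device before it is allowed to write again.
 */
static void example_inspect_dma_buffer(struct device *dev, dma_addr_t addr,
                                       size_t len)
{
        dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

        /* ... CPU reads of the buffer contents go here ... */

        dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}
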
@@ -504,9 +485,8 @@ xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
* concerning calls here are the same as for swiotlb_unmap_page() above.
*/
static void
-xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
+xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -518,26 +498,9 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
}
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above xen_swiotlb_map_page
- * interface. Here the scatter gather list elements are each tagged with the
- * appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
- * same here.
- */
static int
-xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
+xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -545,85 +508,44 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
- phys_addr_t paddr = sg_phys(sg);
- dma_addr_t dev_addr = xen_phys_to_bus(paddr);
-
- if (swiotlb_force == SWIOTLB_FORCE ||
- xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
- !dma_capable(hwdev, dev_addr, sg->length) ||
- range_straddles_page_boundary(paddr, sg->length)) {
- phys_addr_t map = swiotlb_tbl_map_single(hwdev,
- start_dma_addr,
- sg_phys(sg),
- sg->length,
- dir, attrs);
- if (map == DMA_MAPPING_ERROR) {
- dev_warn(hwdev, "swiotlb buffer is full\n");
- /* Don't panic here, we expect map_sg users
- to do proper error handling. */
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
- attrs);
- sg_dma_len(sgl) = 0;
- return 0;
- }
- dev_addr = xen_phys_to_bus(map);
- xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
- dev_addr,
- map & ~PAGE_MASK,
- sg->length,
- dir,
- attrs);
- sg->dma_address = dev_addr;
- } else {
- /* we are not interested in the dma_addr returned by
- * xen_dma_map_page, only in the potential cache flushes executed
- * by the function. */
- xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
- dev_addr,
- paddr & ~PAGE_MASK,
- sg->length,
- dir,
- attrs);
- sg->dma_address = dev_addr;
- }
+ sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
+ sg->offset, sg->length, dir, attrs);
+ if (sg->dma_address == DMA_MAPPING_ERROR)
+ goto out_unmap;
sg_dma_len(sg) = sg->length;
}
+
return nelems;
+out_unmap:
+ xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ sg_dma_len(sgl) = 0;
+ return 0;
}
-/*
- * Make physical memory consistent for a set of streaming mode DMA translations
- * after a transfer.
- *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
- * and usage.
- */
static void
-xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- enum dma_sync_target target)
+xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
- for_each_sg(sgl, sg, nelems, i)
- xen_swiotlb_sync_single(hwdev, sg->dma_address,
- sg_dma_len(sg), dir, target);
-}
-
-static void
-xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+ for_each_sg(sgl, sg, nelems, i) {
+ xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
+ sg->length, dir);
+ }
}
static void
-xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir)
{
- xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nelems, i) {
+ xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
+ sg->length, dir);
+ }
}
/*
@@ -690,8 +612,8 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.sync_single_for_device = xen_swiotlb_sync_single_for_device,
.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
- .map_sg = xen_swiotlb_map_sg_attrs,
- .unmap_sg = xen_swiotlb_unmap_sg_attrs,
+ .map_sg = xen_swiotlb_map_sg,
+ .unmap_sg = xen_swiotlb_unmap_sg,
.map_page = xen_swiotlb_map_page,
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
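
The rewritten xen_swiotlb_map_sg() returns 0 on failure and unmaps whatever it had already mapped, matching the DMA API contract that map_sg callers do their own error handling. A minimal driver-side sketch (example_do_sg_dma is a hypothetical name):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_do_sg_dma(struct device *dev, struct scatterlist *sgl,
                             int nents)
{
        int mapped;

        mapped = dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
        if (!mapped)
                return -ENOMEM; /* nothing mapped, nothing to unmap */

        /* ... program the device using sg_dma_address()/sg_dma_len() ... */

        /* unmap with the original nents, not the returned count */
        dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
        return 0;
}
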