author		Linus Torvalds <torvalds@linux-foundation.org>	2019-09-19 23:27:23 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-19 23:27:23 +0300
commit		671df189537883f36cf9c7d4f9495bfac0f86627 (patch)
tree		22e5f598ed1f5d9b2218d85d4426140f804d61e6 /drivers/xen/swiotlb-xen.c
parent		c9fe5630dae1df2328d82042602e2c4d1add8d57 (diff)
parent		c7d9eccb3c1e802c5cbb2a764eb0eb9807d9f12e (diff)
download	linux-671df189537883f36cf9c7d4f9495bfac0f86627.tar.xz
Merge tag 'dma-mapping-5.4' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- add dma-mapping and block layer helpers to take care of IOMMU merging
  for mmc plus subsequent fixups (Yoshihiro Shimoda)
- rework handling of the pgprot bits for remapping (me)
- take care of the dma direct infrastructure for swiotlb-xen (me); a
  sketch of the resulting coherence-gated sync flow follows the commit
  list below
- improve the dma noncoherent remapping infrastructure (me)
- better defaults for ->mmap, ->get_sgtable and ->get_required_mask (me)
- cleanup mmapping of coherent DMA allocations (me)
- various misc cleanups (Andy Shevchenko, me)
* tag 'dma-mapping-5.4' of git://git.infradead.org/users/hch/dma-mapping: (41 commits)
mmc: renesas_sdhi_internal_dmac: Add MMC_CAP2_MERGE_CAPABLE
mmc: queue: Fix bigger segments usage
arm64: use asm-generic/dma-mapping.h
swiotlb-xen: merge xen_unmap_single into xen_swiotlb_unmap_page
swiotlb-xen: simplify cache maintainance
swiotlb-xen: use the same foreign page check everywhere
swiotlb-xen: remove xen_swiotlb_dma_mmap and xen_swiotlb_dma_get_sgtable
xen: remove the exports for xen_{create,destroy}_contiguous_region
xen/arm: remove xen_dma_ops
xen/arm: simplify dma_cache_maint
xen/arm: use dev_is_dma_coherent
xen/arm: consolidate page-coherent.h
xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintainance
arm: remove wrappers for the generic dma remap helpers
dma-mapping: introduce a dma_common_find_pages helper
dma-mapping: always use VM_DMA_COHERENT for generic DMA remap
vmalloc: lift the arm flag for coherent mappings to common code
dma-mapping: provide a better default ->get_required_mask
dma-mapping: remove the dma_declare_coherent_memory export
remoteproc: don't allow modular build
...
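For readers following the swiotlb-xen commits above: the series drops the per-arch xen_dma_map_page()/xen_dma_unmap_page() wrappers and performs cache maintenance directly, gated on dev_is_dma_coherent() and the DMA_ATTR_SKIP_CPU_SYNC attribute. What follows is a minimal, self-contained userspace sketch of just that gating logic; all types, constants, and helpers here are simplified stand-ins for the kernel's (the real code calls xen_dma_sync_for_device()/xen_dma_sync_for_cpu() from the dma-noncoherent infrastructure), so only the control flow is meant to carry over.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for kernel types; the attribute value is illustrative. */
typedef unsigned long dma_addr_t;
typedef unsigned long phys_addr_t;
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)

struct device { bool dma_coherent; };

static bool dev_is_dma_coherent(const struct device *dev)
{
	return dev->dma_coherent;
}

/* Stand-ins for the arch cache-maintenance helpers the series calls into. */
static void sync_for_device(phys_addr_t paddr, size_t size)
{
	printf("writeback caches for %#lx (%zu bytes) before device access\n",
	       paddr, size);
}

static void sync_for_cpu(phys_addr_t paddr, size_t size)
{
	printf("invalidate caches for %#lx (%zu bytes) after device access\n",
	       paddr, size);
}

/* Map path: non-coherent devices need a flush before DMA, unless the
 * caller promised to do its own maintenance via DMA_ATTR_SKIP_CPU_SYNC. */
static dma_addr_t map_page(struct device *dev, phys_addr_t phys, size_t size,
			   unsigned long attrs)
{
	dma_addr_t dev_addr = phys;	/* xen_phys_to_bus() in the real code */

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		sync_for_device(phys, size);
	return dev_addr;
}

/* Unmap path: the mirror image, making device writes visible to the CPU. */
static void unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
		       unsigned long attrs)
{
	phys_addr_t paddr = dev_addr;	/* xen_bus_to_phys() in the real code */

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		sync_for_cpu(paddr, size);
}

int main(void)
{
	struct device noncoherent = { .dma_coherent = false };
	struct device coherent = { .dma_coherent = true };

	/* Flushes happen only for the non-coherent device. */
	unmap_page(&noncoherent, map_page(&noncoherent, 0x1000, 4096, 0), 4096, 0);
	unmap_page(&coherent, map_page(&coherent, 0x2000, 4096, 0), 4096, 0);
	return 0;
}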
Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r--	drivers/xen/swiotlb-xen.c	84
1 file changed, 16 insertions, 68 deletions
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index adcabd9473eb..58c9365fa217 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -28,6 +28,7 @@

 #include <linux/memblock.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/export.h>
 #include <xen/swiotlb-xen.h>
 #include <xen/page.h>
@@ -391,6 +392,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;

+	phys = map;
 	dev_addr = xen_phys_to_bus(map);

 	/*
@@ -402,14 +404,9 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 	}

-	page = pfn_to_page(map >> PAGE_SHIFT);
-	offset = map & ~PAGE_MASK;
 done:
-	/*
-	 * we are not interested in the dma_addr returned by xen_dma_map_page,
-	 * only in the potential cache flushes executed by the function.
-	 */
-	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
 	return dev_addr;
 }

@@ -421,35 +418,29 @@ done:
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			     size_t size, enum dma_data_direction dir,
-			     unsigned long attrs)
+static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

 	BUG_ON(dir == DMA_NONE);

-	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
+	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);

 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr))
 		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
 }

-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
-				   size_t size, enum dma_data_direction dir,
-				   unsigned long attrs)
-{
-	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
-}
-
 static void
 xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 		size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t paddr = xen_bus_to_phys(dma_addr);

-	xen_dma_sync_single_for_cpu(dev, dma_addr, size, dir);
+	if (!dev_is_dma_coherent(dev))
+		xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);

 	if (is_xen_swiotlb_buffer(dma_addr))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -464,7 +455,8 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 	if (is_xen_swiotlb_buffer(dma_addr))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

-	xen_dma_sync_single_for_device(dev, dma_addr, size, dir);
+	if (!dev_is_dma_coherent(dev))
+		xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
 }

 /*
@@ -481,7 +473,8 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	BUG_ON(dir == DMA_NONE);

 	for_each_sg(sgl, sg, nelems, i)
-		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
+		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+				dir, attrs);

 }

@@ -547,51 +540,6 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }

-/*
- * Create userspace mapping for the DMA-coherent memory.
- * This function should be called with the pages from the current domain only,
- * passing pages mapped from other domains would lead to memory corruption.
- */
-static int
-xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		     unsigned long attrs)
-{
-#ifdef CONFIG_ARM
-	if (xen_get_dma_ops(dev)->mmap)
-		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
-						  dma_addr, size, attrs);
-#endif
-	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
-/*
- * This function should be called with the pages from the current domain only,
- * passing pages mapped from other domains would lead to memory corruption.
- */
-static int
-xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
-			void *cpu_addr, dma_addr_t handle, size_t size,
-			unsigned long attrs)
-{
-#ifdef CONFIG_ARM
-	if (xen_get_dma_ops(dev)->get_sgtable) {
-#if 0
-		/*
-		 * This check verifies that the page belongs to the current domain and
-		 * is not one mapped from another domain.
-		 * This check is for debug only, and should not go to production build
-		 */
-		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
-		BUG_ON (!page_is_ram(bfn));
-#endif
-		return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
-							 handle, size, attrs);
-	}
-#endif
-	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
-}
-
 const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
@@ -604,6 +552,6 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.map_page = xen_swiotlb_map_page,
 	.unmap_page = xen_swiotlb_unmap_page,
 	.dma_supported = xen_swiotlb_dma_supported,
-	.mmap = xen_swiotlb_dma_mmap,
-	.get_sgtable = xen_swiotlb_get_sgtable,
+	.mmap = dma_common_mmap,
+	.get_sgtable = dma_common_get_sgtable,
 };
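The final hunks reflect the "better defaults for ->mmap, ->get_sgtable" item from the summary: once the generic dma_common_mmap() and dma_common_get_sgtable() helpers cover the coherent-allocation case (with remappings tracked via VM_DMA_COHERENT in common code, per the commits above), the ARM-specific wrappers become dead weight and the ops table can point at the helpers directly. A hypothetical, trimmed ops table showing the same pattern for some other driver:

#include <linux/dma-mapping.h>

/* Illustrative only: a driver with no special mmap/get_sgtable needs can
 * reference the common helpers directly, as xen_swiotlb_dma_ops now does. */
static const struct dma_map_ops example_dma_ops = {
	.mmap		= dma_common_mmap,
	.get_sgtable	= dma_common_get_sgtable,
	/* .map_page, .unmap_page, etc. filled in per driver */
};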