-rw-r--r--  arch/powerpc/Kconfig              |  1
-rw-r--r--  arch/powerpc/include/asm/device.h |  5
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c   | 90
3 files changed, 10 insertions, 86 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e9b091d35872..be868bfbe76e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -152,6 +152,7 @@ config PPC
 	select CLONE_BACKWARDS
 	select DCACHE_WORD_ACCESS		if PPC64 && CPU_LITTLE_ENDIAN
 	select DMA_OPS				if PPC64
+	select DMA_OPS_BYPASS			if PPC64
 	select DYNAMIC_FTRACE			if FUNCTION_TRACER
 	select EDAC_ATOMIC_SCRUB
 	select EDAC_SUPPORT
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 266542769e4b..452402215e12 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -19,11 +19,6 @@ struct iommu_table;
  */
 struct dev_archdata {
 	/*
-	 * Set to %true if the dma_iommu_ops are requested to use a direct
-	 * window instead of dynamically mapping memory.
-	 */
-	bool iommu_bypass : 1;
-	/*
 	 * These two used to be a union. However, with the hybrid ops we need
 	 * both so here we store both a DMA offset for direct mappings and
 	 * an iommu_table for remapped DMA.
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index e486d1d78de2..569fecd7b5b2 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -14,23 +14,6 @@
  * Generic iommu implementation
  */
 
-/*
- * The coherent mask may be smaller than the real mask, check if we can
- * really use a direct window.
- */
-static inline bool dma_iommu_alloc_bypass(struct device *dev)
-{
-	return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
-		dma_direct_supported(dev, dev->coherent_dma_mask);
-}
-
-static inline bool dma_iommu_map_bypass(struct device *dev,
-		unsigned long attrs)
-{
-	return dev->archdata.iommu_bypass &&
-		(!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
-}
-
 /* Allocates a contiguous real buffer and creates mappings over it.
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
@@ -39,8 +22,6 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 				      dma_addr_t *dma_handle, gfp_t flag,
 				      unsigned long attrs)
 {
-	if (dma_iommu_alloc_bypass(dev))
-		return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
 	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
 				    dma_handle, dev->coherent_dma_mask, flag,
 				    dev_to_node(dev));
@@ -50,11 +31,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
 				    void *vaddr, dma_addr_t dma_handle,
 				    unsigned long attrs)
 {
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-	else
-		iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
-				dma_handle);
+	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
 
 /* Creates TCEs for a user provided buffer.  The user buffer must be
@@ -67,9 +44,6 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
 				     enum dma_data_direction direction,
 				     unsigned long attrs)
 {
-	if (dma_iommu_map_bypass(dev, attrs))
-		return dma_direct_map_page(dev, page, offset, size, direction,
-				attrs);
 	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
 			      size, dma_get_mask(dev), direction, attrs);
 }
@@ -79,11 +53,8 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				 size_t size, enum dma_data_direction direction,
 				 unsigned long attrs)
 {
-	if (!dma_iommu_map_bypass(dev, attrs))
-		iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
-				direction, attrs);
-	else
-		dma_direct_unmap_page(dev, dma_handle, size, direction, attrs);
+	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
+			 attrs);
 }
 
 
@@ -91,8 +62,6 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 			    int nelems, enum dma_data_direction direction,
 			    unsigned long attrs)
 {
-	if (dma_iommu_map_bypass(dev, attrs))
-		return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
 	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
 				dma_get_mask(dev), direction, attrs);
 }
@@ -101,11 +70,8 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			       int nelems, enum dma_data_direction direction,
 			       unsigned long attrs)
 {
-	if (!dma_iommu_map_bypass(dev, attrs))
-		ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
+	ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
 			   direction, attrs);
-	else
-		dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs);
 }
 
 static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
@@ -113,8 +79,9 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct pci_controller *phb = pci_bus_to_host(pdev->bus);
 
-	return phb->controller_ops.iommu_bypass_supported &&
-		phb->controller_ops.iommu_bypass_supported(pdev, mask);
+	if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+		return false;
+	return phb->controller_ops.iommu_bypass_supported(pdev, mask);
 }
 
 /* We support DMA to/from any memory page via the iommu */
@@ -123,7 +90,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 
 	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
-		dev->archdata.iommu_bypass = true;
+		dev->dma_ops_bypass = true;
 		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
 		return 1;
 	}
@@ -141,7 +108,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 	}
 
 	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
-	dev->archdata.iommu_bypass = false;
+	dev->dma_ops_bypass = false;
 	return 1;
 }
 
@@ -153,47 +120,12 @@ u64 dma_iommu_get_required_mask(struct device *dev)
 	if (!tbl)
 		return 0;
 
-	if (dev_is_pci(dev)) {
-		u64 bypass_mask = dma_direct_get_required_mask(dev);
-
-		if (dma_iommu_bypass_supported(dev, bypass_mask))
-			return bypass_mask;
-	}
-
 	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
 	mask += mask - 1;
 
 	return mask;
 }
 
-static void dma_iommu_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_iommu_sync_for_device(struct device *dev, dma_addr_t addr,
-		size_t sz, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_single_for_device(dev, addr, sz, dir);
-}
-
-extern void dma_iommu_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-
-extern void dma_iommu_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
-}
-
 const struct dma_map_ops dma_iommu_ops = {
 	.alloc			= dma_iommu_alloc_coherent,
 	.free			= dma_iommu_free_coherent,
@@ -203,10 +135,6 @@ const struct dma_map_ops dma_iommu_ops = {
 	.map_page		= dma_iommu_map_page,
 	.unmap_page		= dma_iommu_unmap_page,
 	.get_required_mask	= dma_iommu_get_required_mask,
-	.sync_single_for_cpu	= dma_iommu_sync_for_cpu,
-	.sync_single_for_device	= dma_iommu_sync_for_device,
-	.sync_sg_for_cpu	= dma_iommu_sync_sg_for_cpu,
-	.sync_sg_for_device	= dma_iommu_sync_sg_for_device,
 	.mmap			= dma_common_mmap,
 	.get_sgtable		= dma_common_get_sgtable,
 };