author    Christoph Hellwig <hch@lst.de>  2020-09-01 15:00:56 +0300
committer Christoph Hellwig <hch@lst.de>  2020-09-25 07:20:48 +0300
commit    e8d39a903cc680c8c1ab5e528428f182f42b8a35 (patch)
tree      78379c41e71f5789f2d0edea8d6c080dd9037e35 /drivers
parent    de7cf917768f438aae6d2f4e9bced3739f15f5b6 (diff)
download  linux-e8d39a903cc680c8c1ab5e528428f182f42b8a35.tar.xz
dma-iommu: implement ->alloc_noncoherent
Implement the alloc_noncoherent method to provide memory that is neither
coherent nor contiguous.

Signed-off-by: Christoph Hellwig <hch@lst.de>
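For reference, drivers do not call the new method directly; they go through
the dma_alloc_noncoherent()/dma_free_noncoherent() wrappers of the DMA API,
which dispatch to ->alloc_noncoherent when the device uses the IOMMU DMA ops.
The snippet below is only an illustrative sketch of such a caller, not part
of this patch; example_alloc(), example_free() and the DMA_BIDIRECTIONAL
direction are placeholder assumptions.

/*
 * Illustrative sketch only: allocate a non-coherent, possibly
 * non-contiguous buffer through the DMA API. "dev" and "size" are
 * assumed to come from the calling driver.
 */
#include <linux/dma-mapping.h>

static void *example_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle)
{
	void *vaddr;

	vaddr = dma_alloc_noncoherent(dev, size, dma_handle,
				      DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!vaddr)
		return NULL;

	/* Memory is not coherent: make CPU writes visible to the device. */
	dma_sync_single_for_device(dev, *dma_handle, size,
				   DMA_BIDIRECTIONAL);
	return vaddr;
}

static void example_free(struct device *dev, size_t size, void *vaddr,
			 dma_addr_t dma_handle)
{
	dma_free_noncoherent(dev, size, vaddr, dma_handle,
			     DMA_BIDIRECTIONAL);
}

Because the allocation is non-coherent, cache maintenance is owned by the
caller, which has to bracket CPU accesses with dma_sync_single_for_cpu()/
dma_sync_single_for_device() as shown above.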
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/iommu/dma-iommu.c  41
1 file changed, 37 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 00a5b49248e3..c12c1dc43d31 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -572,6 +572,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
* @size: Size of buffer in bytes
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
+ * @prot: pgprot_t to use for the remapped mapping
* @attrs: DMA attributes for this allocation
*
* If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
@@ -580,14 +581,14 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
* Return: Mapped virtual address, or NULL on failure.
*/
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+ unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
- pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
@@ -1030,8 +1031,10 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
gfp |= __GFP_ZERO;
if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
- !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
- return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+ !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
+ return iommu_dma_alloc_remap(dev, size, handle, gfp,
+ dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
+ }
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
@@ -1052,6 +1055,34 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
return cpu_addr;
}
+#ifdef CONFIG_DMA_REMAP
+static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
+{
+ if (!gfpflags_allow_blocking(gfp)) {
+ struct page *page;
+
+ page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
+ if (!page)
+ return NULL;
+ return page_address(page);
+ }
+
+ return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
+ PAGE_KERNEL, 0);
+}
+
+static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
+{
+ __iommu_dma_unmap(dev, handle, size);
+ __iommu_dma_free(dev, size, cpu_addr);
+}
+#else
+#define iommu_dma_alloc_noncoherent NULL
+#define iommu_dma_free_noncoherent NULL
+#endif /* CONFIG_DMA_REMAP */
+
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
@@ -1122,6 +1153,8 @@ static const struct dma_map_ops iommu_dma_ops = {
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
+ .alloc_noncoherent = iommu_dma_alloc_noncoherent,
+ .free_noncoherent = iommu_dma_free_noncoherent,
.mmap = iommu_dma_mmap,
.get_sgtable = iommu_dma_get_sgtable,
.map_page = iommu_dma_map_page,