author    Linus Torvalds <torvalds@linux-foundation.org>    2021-09-02 20:32:06 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-09-02 20:32:06 +0300
commit    4a3bb4200a5958d76cc26ebe4db4257efa56812b (patch)
tree      183c76b93f48d1edb9f9df654ebcd05a08eb6ee7 /include
parent    eceae1e7acaefc0a71e4dd4b8cd49270172b4731 (diff)
parent    c1dec343d7abdf8e71aab2a289ab45ce8b1afb7e (diff)
download  linux-4a3bb4200a5958d76cc26ebe4db4257efa56812b.tar.xz
Merge tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - fix debugfs initialization order (Anthony Iliopoulos)

 - use memory_intersects() directly (Kefeng Wang)

 - allow to return specific errors from ->map_sg (Logan Gunthorpe, Martin Oliveira)

 - turn the dma_map_sg return value into an unsigned int (me)

 - provide a common global coherent pool implementation (me)

* tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping: (31 commits)
  hexagon: use the generic global coherent pool
  dma-mapping: make the global coherent pool conditional
  dma-mapping: add a dma_init_global_coherent helper
  dma-mapping: simplify dma_init_coherent_memory
  dma-mapping: allow using the global coherent pool for !ARM
  ARM/nommu: use the generic dma-direct code for non-coherent devices
  dma-direct: add support for dma_coherent_default_memory
  dma-mapping: return an unsigned int from dma_map_sg{,_attrs}
  dma-mapping: disallow .map_sg operations from returning zero on error
  dma-mapping: return error code from dma_dummy_map_sg()
  x86/amd_gart: don't set failed sg dma_address to DMA_MAPPING_ERROR
  x86/amd_gart: return error code from gart_map_sg()
  xen: swiotlb: return error code from xen_swiotlb_map_sg()
  parisc: return error code from .map_sg() ops
  sparc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
  sparc/iommu: return error codes from .map_sg() ops
  s390/pci: don't set failed sg dma_address to DMA_MAPPING_ERROR
  s390/pci: return error code from s390_dma_map_sg()
  powerpc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
  powerpc/iommu: return error code from .map_sg() ops
  ...
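The most visible driver-facing change is the error reporting: dma_map_sgtable() now returns the specific negative errno produced by the ->map_sg implementation, while dma_map_sg()/dma_map_sg_attrs() keep returning 0 on failure. A minimal caller-side sketch of the new convention (the function and its surrounding driver are hypothetical, not part of this merge):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map an already-populated sg_table and propagate the specific error
 * code (e.g. -ENOMEM or -EINVAL) now returned by dma_map_sgtable(). */
static int example_map_buffer(struct device *dev, struct sg_table *sgt)
{
        int ret;

        ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        if (ret)
                return ret;     /* a real errno, no longer a blanket -EINVAL */

        /* ... program the hardware from sgt->sgl / sgt->nents ... */

        dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        return 0;
}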
Diffstat (limited to 'include')
-rw-r--r--  include/linux/dma-map-ops.h   23
-rw-r--r--  include/linux/dma-mapping.h   44
-rw-r--r--  include/linux/iommu.h         22
3 files changed, 36 insertions, 53 deletions
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 0d53a96a3d64..0d5b06b3a4a6 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -41,8 +41,9 @@ struct dma_map_ops {
size_t size, enum dma_data_direction dir,
unsigned long attrs);
/*
- * map_sg returns 0 on error and a value > 0 on success.
- * It should never return a value < 0.
+ * map_sg should return a negative error code on error. See
+ * dma_map_sgtable() for a list of appropriate error codes
+ * and their meanings.
*/
int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
@@ -170,13 +171,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
-
-void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle);
-int dma_release_from_global_coherent(int order, void *vaddr);
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
- size_t size, int *ret);
-
#else
static inline int dma_declare_coherent_memory(struct device *dev,
phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
@@ -186,7 +180,16 @@ static inline int dma_declare_coherent_memory(struct device *dev,
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+#endif /* CONFIG_DMA_DECLARE_COHERENT */
+#ifdef CONFIG_DMA_GLOBAL_POOL
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+ size_t size, int *ret);
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
+#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
ssize_t size, dma_addr_t *dma_handle)
{
@@ -201,7 +204,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
{
return 0;
}
-#endif /* CONFIG_DMA_DECLARE_COHERENT */
+#endif /* CONFIG_DMA_GLOBAL_POOL */
/*
* This is the actual return value from the ->alloc_noncontiguous method.
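With the pool made conditional on CONFIG_DMA_GLOBAL_POOL, an architecture seeds it once at boot through the new dma_init_global_coherent() helper declared above. A hypothetical early-init sketch (the 1 MiB size and the memblock allocation are assumptions for illustration, not taken from this diff):

#include <linux/dma-map-ops.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

/* Hypothetical arch hook: reserve memory early and hand it to the
 * generic global coherent pool (requires CONFIG_DMA_GLOBAL_POOL=y). */
static int __init example_dma_pool_init(void)
{
        phys_addr_t base = memblock_phys_alloc(SZ_1M, PAGE_SIZE);

        if (!base)
                return -ENOMEM;
        return dma_init_global_coherent(base, SZ_1M);
}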
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 183e7103a66d..dca2b1355bb1 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -105,11 +105,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
-int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs);
+unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs);
+int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
@@ -164,8 +166,9 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+static inline unsigned int dma_map_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
{
return 0;
}
@@ -174,6 +177,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
unsigned long attrs)
{
}
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return -EOPNOTSUPP;
+}
static inline dma_addr_t dma_map_resource(struct device *dev,
phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -344,34 +352,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
}
/**
- * dma_map_sgtable - Map the given buffer for DMA
- * @dev: The device for which to perform the DMA operation
- * @sgt: The sg_table object describing the buffer
- * @dir: DMA direction
- * @attrs: Optional DMA attributes for the map operation
- *
- * Maps a buffer described by a scatterlist stored in the given sg_table
- * object for the @dir DMA operation by the @dev device. After success the
- * ownership for the buffer is transferred to the DMA domain. One has to
- * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
- * ownership of the buffer back to the CPU domain before touching the
- * buffer by the CPU.
- *
- * Returns 0 on success or -EINVAL on error during mapping the buffer.
- */
-static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
- enum dma_data_direction dir, unsigned long attrs)
-{
- int nents;
-
- nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
- if (nents <= 0)
- return -EINVAL;
- sgt->nents = nents;
- return 0;
-}
-
-/**
* dma_unmap_sgtable - Unmap the given buffer for DMA
* @dev: The device for which to perform the DMA operation
* @sgt: The sg_table object describing the buffer
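For the plain scatterlist interface the calling convention is unchanged apart from the type: dma_map_sg() now yields an unsigned int, and 0 still signals failure without a specific errno. A small usage sketch (the function name and the debug logging are made up for illustration):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/printk.h>

/* 0 means the mapping failed; a positive value is the number of DMA
 * segments to program into the device. */
static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        unsigned int mapped;
        int i;

        mapped = dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
        if (!mapped)
                return -ENOMEM; /* this interface still reports no specific errno */

        for_each_sg(sgl, sg, mapped, i)
                pr_debug("seg %d: %pad + %u\n", i,
                         &sg_dma_address(sg), sg_dma_len(sg));

        dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
        return 0;
}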
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 32d448050bf7..9369458ba1bd 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -414,11 +414,11 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather);
-extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg,unsigned int nents, int prot);
-extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot);
+extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot);
+extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
@@ -679,18 +679,18 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
return 0;
}
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
+static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
{
- return 0;
+ return -ENODEV;
}
-static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
- return 0;
+ return -ENODEV;
}
static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
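On the IOMMU side, iommu_map_sg() and iommu_map_sg_atomic() now return a signed byte count, so callers must check for negative errnos (including -ENODEV from the !CONFIG_IOMMU_API stubs) instead of comparing against 0. A hedged sketch of the new calling convention (the domain setup and the IOVA choice are placeholders):

#include <linux/iommu.h>
#include <linux/scatterlist.h>

/* Map a scatterlist into an IOMMU domain: a negative return is an
 * errno, a positive one the number of bytes mapped at @iova. */
static int example_iommu_map(struct iommu_domain *domain, unsigned long iova,
                             struct scatterlist *sg, unsigned int nents)
{
        ssize_t mapped = iommu_map_sg(domain, iova, sg, nents,
                                      IOMMU_READ | IOMMU_WRITE);

        if (mapped < 0)
                return mapped;  /* propagate the specific error */

        /* ... use the [iova, iova + mapped) range ... */
        iommu_unmap(domain, iova, mapped);
        return 0;
}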