Diffstat (limited to 'include/linux/dma-mapping.h')
-rw-r--r--  include/linux/dma-mapping.h  60
1 file changed, 28 insertions(+), 32 deletions(-)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 6309a721394b..4a1c4fca475a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -131,6 +131,7 @@ struct dma_map_ops {
int (*dma_supported)(struct device *dev, u64 mask);
u64 (*get_required_mask)(struct device *dev);
size_t (*max_mapping_size)(struct device *dev);
+ unsigned long (*get_merge_boundary)(struct device *dev);
};
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
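For context on the new ->get_merge_boundary callback above: a backend that supports merging could report the granule of its address allocator as a boundary mask. The sketch below is illustrative only; the foo_* names and the 64 KiB granule are assumptions, not part of this patch.

/* Hypothetical backend sketch: report an inclusive boundary mask so that
 * segments which do not cross a 64 KiB granule may be merged.  Returning
 * 0 would mean this mapping implementation cannot merge segments at all.
 * (SZ_64K comes from <linux/sizes.h>.)
 */
static unsigned long foo_dma_get_merge_boundary(struct device *dev)
{
	return SZ_64K - 1;
}

static const struct dma_map_ops foo_dma_ops = {
	/* ...other callbacks elided... */
	.get_merge_boundary	= foo_dma_get_merge_boundary,
};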
@@ -149,11 +150,6 @@ static inline int valid_dma_direction(int dma_direction)
(dma_direction == DMA_FROM_DEVICE));
}
-static inline int is_device_dma_capable(struct device *dev)
-{
- return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
-}
-
#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
* These three functions are only for dma allocator.
@@ -462,11 +458,13 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
+bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
+unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
struct page *page, size_t offset, size_t size,
@@ -552,6 +550,10 @@ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
{
return -ENXIO;
}
+static inline bool dma_can_mmap(struct device *dev)
+{
+ return false;
+}
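As a hedged illustration of the new dma_can_mmap() predicate (the foo_mmap helper below is hypothetical, not part of this patch): a driver's mmap handler could use it to bail out before attempting dma_mmap_attrs() on configurations where user-space mapping of the coherent buffer is impossible.

/* Hypothetical mmap-handler fragment: probe whether user-space mapping is
 * supported at all for this device before trying to establish it. */
static int foo_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	if (!dma_can_mmap(dev))
		return -ENXIO;	/* caller falls back to a copy-based path */

	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, 0);
}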
static inline int dma_supported(struct device *dev, u64 mask)
{
return 0;
@@ -572,6 +574,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
}
+static inline unsigned long dma_get_merge_boundary(struct device *dev)
+{
+ return 0;
+}
#endif /* CONFIG_HAS_DMA */
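On the consumer side, a minimal sketch of how dma_get_merge_boundary() might be applied when configuring a request queue; the fragment assumes 'dev' and 'q' are in scope and is not taken from this patch.

/* Hypothetical queue setup: a non-zero boundary mask tells the block
 * layer which adjacent segments it is allowed to merge. */
unsigned long boundary = dma_get_merge_boundary(dev);

if (boundary)
	blk_queue_virt_boundary(q, boundary);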
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
@@ -615,16 +621,14 @@ extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
+struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
- unsigned long vm_flags,
pgprot_t prot, const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size,
- unsigned long vm_flags, pgprot_t prot,
- const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+ pgprot_t prot, const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
-int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);
@@ -679,6 +683,20 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the device's DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+static inline bool dma_addressing_limited(struct device *dev)
+{
+ return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
+ dma_get_required_mask(dev);
+}
+
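A short usage sketch of dma_addressing_limited() (hypothetical probe-time code, not part of this patch): a driver could use it to decide whether to set up its own bounce buffering.

/* Hypothetical probe-time decision: the device cannot reach all of
 * system memory directly, so enable a driver-private bounce path. */
if (dma_addressing_limited(dev))
	foo->use_bounce_buffers = true;	/* 'foo' is a hypothetical driver struct */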
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent);
@@ -729,13 +747,6 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
-#ifndef dma_max_pfn
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
- return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
-}
-#endif
-
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
@@ -747,7 +758,6 @@ static inline int dma_get_cache_alignment(void)
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size);
-void dma_release_declared_memory(struct device *dev);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
@@ -755,11 +765,6 @@ dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
{
return -ENOSYS;
}
-
-static inline void
-dma_release_declared_memory(struct device *dev)
-{
-}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
@@ -779,9 +784,6 @@ static inline void *dma_alloc_wc(struct device *dev, size_t size,
return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
-#ifndef dma_alloc_writecombine
-#define dma_alloc_writecombine dma_alloc_wc
-#endif
static inline void dma_free_wc(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr)
@@ -789,9 +791,6 @@ static inline void dma_free_wc(struct device *dev, size_t size,
return dma_free_attrs(dev, size, cpu_addr, dma_addr,
DMA_ATTR_WRITE_COMBINE);
}
-#ifndef dma_free_writecombine
-#define dma_free_writecombine dma_free_wc
-#endif
static inline int dma_mmap_wc(struct device *dev,
struct vm_area_struct *vma,
@@ -801,9 +800,6 @@ static inline int dma_mmap_wc(struct device *dev,
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
DMA_ATTR_WRITE_COMBINE);
}
-#ifndef dma_mmap_writecombine
-#define dma_mmap_writecombine dma_mmap_wc
-#endif
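With the legacy dma_{alloc,free,mmap}_writecombine aliases removed, callers spell out the remaining *_wc helpers directly; a minimal hedged sketch (hypothetical allocation, not from this patch):

/* Hypothetical write-combined buffer round trip using the kept helpers. */
void *cpu_addr = dma_alloc_wc(dev, size, &dma_handle, GFP_KERNEL);

if (cpu_addr)
	dma_free_wc(dev, size, cpu_addr, dma_handle);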
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME