Diffstat (limited to 'include/linux/iommu.h'):
 include/linux/iommu.h | 66 +++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 62 insertions(+), 4 deletions(-)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e6a7c9ff72f2..0546b8710ce3 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,13 +21,15 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/types.h>
+#include <linux/scatterlist.h>
#include <trace/events/iommu.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
-#define IOMMU_EXEC (1 << 3)
+#define IOMMU_NOEXEC (1 << 3)
struct iommu_ops;
struct iommu_group;
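A note on the renamed flag: IOMMU_NOEXEC inverts the old IOMMU_EXEC meaning, so callers now opt out of execute permission rather than opting in. A minimal, hedged sketch of a caller combining the protection flags; the function name and parameters below are illustrative, not part of this patch:

static int example_map_rw_noexec(struct iommu_domain *domain,
                                 unsigned long iova, phys_addr_t paddr)
{
        /* Readable and writable, but explicitly not executable. */
        int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC;

        return iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
}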
@@ -49,9 +51,33 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */
};
+/* Domain feature flags */
+#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
+#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
+ implementation */
+#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
+
+/*
+ * These are the possible domain types
+ *
+ * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
+ * devices
+ * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
+ * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
+ * for VMs
+ * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
+ * This flag allows IOMMU drivers to implement
+ * certain optimizations for these domains
+ */
+#define IOMMU_DOMAIN_BLOCKED (0U)
+#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
+#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
+#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
+ __IOMMU_DOMAIN_DMA_API)
+
struct iommu_domain {
+ unsigned type;
const struct iommu_ops *ops;
- void *priv;
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
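Splitting the types into feature bits plus composite values lets core code test a capability instead of enumerating every concrete domain type. A hedged sketch of the intended usage, with an invented helper name:

/* Illustrative only: both IOMMU_DOMAIN_UNMANAGED and IOMMU_DOMAIN_DMA set this bit. */
static bool example_domain_supports_map(struct iommu_domain *domain)
{
        return domain->type & __IOMMU_DOMAIN_PAGING;
}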
@@ -61,6 +87,7 @@ enum iommu_cap {
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
transactions */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
+ IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
};
/*
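Since IOMMU_NOEXEC is only useful where the hardware can enforce it, a caller would presumably gate it on the new capability. A small sketch, assuming the iommu_capable() helper declared elsewhere in this header; the function name below is illustrative:

static int example_default_prot(struct device *dev)
{
        int prot = IOMMU_READ | IOMMU_WRITE;

        /* Only request no-exec mappings when the IOMMU can enforce them. */
        if (iommu_capable(dev->bus, IOMMU_CAP_NOEXEC))
                prot |= IOMMU_NOEXEC;

        return prot;
}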
@@ -97,23 +124,32 @@ enum iommu_attr {
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @map_sg: map a scatter-gather list of physically contiguous memory chunks
+ * to an iommu domain
* @iova_to_phys: translate iova to physical address
* @add_device: add device to iommu grouping
* @remove_device: remove device from iommu grouping
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
+ * @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of supported page sizes
+ * @priv: per-instance data private to the iommu driver
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
- int (*domain_init)(struct iommu_domain *domain);
- void (*domain_destroy)(struct iommu_domain *domain);
+
+ /* Domain allocation and freeing by the iommu driver */
+ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
+ void (*domain_free)(struct iommu_domain *);
+
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size);
+ size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
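With domain_init/domain_destroy replaced by domain_alloc/domain_free, a driver is expected to embed struct iommu_domain in its own per-domain structure and hand back a pointer to the embedded member. A rough sketch of that pattern, with every driver-specific name invented for illustration:

struct example_domain {
        spinlock_t              lock;           /* protects driver page tables */
        struct iommu_domain     domain;         /* generic part returned to the core */
};

static struct iommu_domain *example_domain_alloc(unsigned type)
{
        struct example_domain *dom;

        /* This hypothetical driver only handles API-managed domains. */
        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        spin_lock_init(&dom->lock);
        return &dom->domain;
}

static void example_domain_free(struct iommu_domain *domain)
{
        kfree(container_of(domain, struct example_domain, domain));
}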
@@ -132,7 +168,12 @@ struct iommu_ops {
/* Get the number of windows per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);
+#ifdef CONFIG_OF_IOMMU
+ int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+#endif
+
unsigned long pgsize_bitmap;
+ void *priv;
};
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
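The new of_xlate callback gives a driver a hook to record the master IDs that a device tree specifier associates with a device. A hedged sketch, where the one-cell binding and the example_add_streamid() helper are assumptions made purely for illustration:

#ifdef CONFIG_OF_IOMMU
static int example_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        /* Assume a single-cell specifier carrying the stream/master ID. */
        if (args->args_count != 1)
                return -EINVAL;

        return example_add_streamid(dev, args->args[0]); /* hypothetical helper */
}
#endif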
@@ -156,6 +197,9 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
+extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
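A driver without a hardware-assisted scatterlist path can point the new map_sg callback at default_iommu_map_sg, which falls back to mapping each chunk through ->map. A sketch of how an ops table might be filled in, with every example_* callback hypothetical and unrelated entries elided:

static struct iommu_ops example_iommu_ops = {
        .capable        = example_capable,
        .domain_alloc   = example_domain_alloc,
        .domain_free    = example_domain_free,
        .map            = example_map,
        .unmap          = example_unmap,
        .map_sg         = default_iommu_map_sg, /* generic map()-based fallback */
        .iova_to_phys   = example_iova_to_phys,
        .pgsize_bitmap  = SZ_4K | SZ_2M,
};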
@@ -241,6 +285,13 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
return ret;
}
+static inline size_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return domain->ops->map_sg(domain, iova, sg, nents, prot);
+}
+
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
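On the caller side, iommu_map_sg() takes an already built scatterlist and returns the number of bytes actually mapped, with zero meaning nothing could be mapped. A short usage sketch; the wrapper name and error policy are chosen only for illustration:

static int example_map_buffer(struct iommu_domain *domain, unsigned long iova,
                              struct scatterlist *sg, unsigned int nents)
{
        size_t mapped;

        mapped = iommu_map_sg(domain, iova, sg, nents,
                              IOMMU_READ | IOMMU_WRITE);
        if (!mapped)
                return -ENOMEM;

        return 0;
}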
@@ -293,6 +344,13 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}
+static inline size_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return -ENODEV;
+}
+
static inline int iommu_domain_window_enable(struct iommu_domain *domain,
u32 wnd_nr, phys_addr_t paddr,
u64 size, int prot)