Diffstat (limited to 'drivers/iommu/dma-iommu.c')
 drivers/iommu/dma-iommu.c | 124 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 105 insertions(+), 19 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f90251572a5d..17dd683b2fce 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,6 +21,7 @@
 #include <linux/iova.h>
 #include <linux/irq.h>
 #include <linux/list_sort.h>
+#include <linux/memremap.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
@@ -64,6 +65,7 @@ struct iommu_dma_cookie {
 
 	/* Domain for flush queue callback; NULL if flush queue not in use */
 	struct iommu_domain		*fq_domain;
+	struct mutex			mutex;
 };
 
 static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
@@ -310,6 +312,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
 	if (!domain->iova_cookie)
 		return -ENOMEM;
 
+	mutex_init(&domain->iova_cookie->mutex);
 	return 0;
 }
 
@@ -385,7 +388,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
 {
 	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
-		iort_iommu_msi_get_resv_regions(dev, list);
+		iort_iommu_get_resv_regions(dev, list);
 
 }
 EXPORT_SYMBOL(iommu_dma_get_resv_regions);
 
@@ -560,26 +563,33 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	}
 
 	/* start_pfn is always nonzero for an already-initialised domain */
+	mutex_lock(&cookie->mutex);
 	if (iovad->start_pfn) {
 		if (1UL << order != iovad->granule ||
 		    base_pfn != iovad->start_pfn) {
 			pr_warn("Incompatible range for DMA domain\n");
-			return -EFAULT;
+			ret = -EFAULT;
+			goto done_unlock;
 		}
 
-		return 0;
+		ret = 0;
+		goto done_unlock;
 	}
 
 	init_iova_domain(iovad, 1UL << order, base_pfn);
 	ret = iova_domain_init_rcaches(iovad);
 	if (ret)
-		return ret;
+		goto done_unlock;
 
 	/* If the FQ fails we can simply fall back to strict mode */
 	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
 		domain->type = IOMMU_DOMAIN_DMA;
 
-	return iova_reserve_iommu_regions(dev, domain);
+	ret = iova_reserve_iommu_regions(dev, domain);
+
+done_unlock:
+	mutex_unlock(&cookie->mutex);
+	return ret;
 }
 
 /**
@@ -1053,15 +1063,30 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 	for_each_sg(sg, s, nents, i) {
 		/* Restore this segment's original unaligned fields first */
+		dma_addr_t s_dma_addr = sg_dma_address(s);
 		unsigned int s_iova_off = sg_dma_address(s);
 		unsigned int s_length = sg_dma_len(s);
 		unsigned int s_iova_len = s->length;
 
-		s->offset += s_iova_off;
-		s->length = s_length;
 		sg_dma_address(s) = DMA_MAPPING_ERROR;
 		sg_dma_len(s) = 0;
 
+		if (sg_is_dma_bus_address(s)) {
+			if (i > 0)
+				cur = sg_next(cur);
+
+			sg_dma_unmark_bus_address(s);
+			sg_dma_address(cur) = s_dma_addr;
+			sg_dma_len(cur) = s_length;
+			sg_dma_mark_bus_address(cur);
+			count++;
+			cur_len = 0;
+			continue;
+		}
+
+		s->offset += s_iova_off;
+		s->length = s_length;
+
 		/*
 		 * Now fill in the real DMA data. If...
 		 * - there is a valid output segment to append to
@@ -1102,10 +1127,14 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
-			s->offset += sg_dma_address(s);
-		if (sg_dma_len(s))
-			s->length = sg_dma_len(s);
+		if (sg_is_dma_bus_address(s)) {
+			sg_dma_unmark_bus_address(s);
+		} else {
+			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
+				s->offset += sg_dma_address(s);
+			if (sg_dma_len(s))
+				s->length = sg_dma_len(s);
+		}
 		sg_dma_address(s) = DMA_MAPPING_ERROR;
 		sg_dma_len(s) = 0;
 	}
@@ -1158,6 +1187,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct iova_domain *iovad = &cookie->iovad;
 	struct scatterlist *s, *prev = NULL;
 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+	struct pci_p2pdma_map_state p2pdma_state = {};
+	enum pci_p2pdma_map_type map;
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
@@ -1187,6 +1218,30 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		size_t s_length = s->length;
 		size_t pad_len = (mask - iova_len + 1) & mask;
 
+		if (is_pci_p2pdma_page(sg_page(s))) {
+			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
+			switch (map) {
+			case PCI_P2PDMA_MAP_BUS_ADDR:
+				/*
+				 * iommu_map_sg() will skip this segment as
+				 * it is marked as a bus address,
+				 * __finalise_sg() will copy the dma address
+				 * into the output segment.
+				 */
+				continue;
+			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+				/*
+				 * Mapping through host bridge should be
+				 * mapped with regular IOVAs, thus we
+				 * do nothing here and continue below.
+				 */
+				break;
+			default:
+				ret = -EREMOTEIO;
+				goto out_restore_sg;
+			}
+		}
+
 		sg_dma_address(s) = s_iova_off;
 		sg_dma_len(s) = s_length;
 		s->offset -= s_iova_off;
@@ -1215,6 +1270,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}
 
+	if (!iova_len)
+		return __finalise_sg(dev, sg, nents, 0);
+
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
 	if (!iova) {
 		ret = -ENOMEM;
@@ -1236,7 +1294,7 @@ out_free_iova:
 out_restore_sg:
 	__invalidate_sg(sg, nents);
 out:
-	if (ret != -ENOMEM)
+	if (ret != -ENOMEM && ret != -EREMOTEIO)
 		return -EINVAL;
 	return ret;
 }
@@ -1244,7 +1302,7 @@
 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	dma_addr_t start, end;
+	dma_addr_t end = 0, start;
 	struct scatterlist *tmp;
 	int i;
 
@@ -1258,16 +1316,37 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 
 	/*
 	 * The scatterlist segments are mapped into a single
-	 * contiguous IOVA allocation, so this is incredibly easy.
+	 * contiguous IOVA allocation, the start and end points
+	 * just have to be determined.
 	 */
-	start = sg_dma_address(sg);
-	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+	for_each_sg(sg, tmp, nents, i) {
+		if (sg_is_dma_bus_address(tmp)) {
+			sg_dma_unmark_bus_address(tmp);
+			continue;
+		}
+
+		if (sg_dma_len(tmp) == 0)
+			break;
+
+		start = sg_dma_address(tmp);
+		break;
+	}
+
+	nents -= i;
+	for_each_sg(tmp, tmp, nents, i) {
+		if (sg_is_dma_bus_address(tmp)) {
+			sg_dma_unmark_bus_address(tmp);
+			continue;
+		}
+
 		if (sg_dma_len(tmp) == 0)
 			break;
-		sg = tmp;
+
+		end = sg_dma_address(tmp) + sg_dma_len(tmp);
 	}
-	end = sg_dma_address(sg) + sg_dma_len(sg);
-	__iommu_dma_unmap(dev, start, end - start);
+
+	if (end)
+		__iommu_dma_unmap(dev, start, end - start);
 }
 
 static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1459,7 +1538,13 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
 	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
 }
 
+static size_t iommu_dma_opt_mapping_size(void)
+{
+	return iova_rcache_range();
+}
+
 static const struct dma_map_ops iommu_dma_ops = {
+	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
 	.alloc			= iommu_dma_alloc,
 	.free			= iommu_dma_free,
 	.alloc_pages		= dma_common_alloc_pages,
@@ -1479,6 +1564,7 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.map_resource		= iommu_dma_map_resource,
 	.unmap_resource		= iommu_dma_unmap_resource,
 	.get_merge_boundary	= iommu_dma_get_merge_boundary,
+	.opt_mapping_size	= iommu_dma_opt_mapping_size,
 };
 
 /*
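
For context on how the map_sg changes surface to drivers: with DMA_F_PCI_P2PDMA_SUPPORTED set in iommu_dma_ops, an unroutable peer-to-peer transfer is now reported as -EREMOTEIO rather than a generic mapping failure. The following is a minimal, hypothetical caller-side sketch (not part of this patch) of a driver mapping an sg_table that may contain P2PDMA pages; example_map() is an illustrative name.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical caller: maps an SGL that may contain P2PDMA pages. */
static int example_map(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/*
	 * Bus-address segments are passed straight through by the IOMMU
	 * path; a P2P transfer the host bridge cannot route comes back
	 * as -EREMOTEIO instead of a bare mapping error.
	 */
	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;

	/* ... program the hardware using sgt->sgl ... */

	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}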
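The new .opt_mapping_size callback is consumed by dma_opt_mapping_size(), added in the same series, which also clamps the result against dma_max_mapping_size(). Below is a hedged sketch of how a driver might use it to keep per-request mappings within the IOVA rcache range, where allocation stays fast; example_max_request_size() and hw_max_bytes are illustrative names, not from this patch.

#include <linux/dma-mapping.h>
#include <linux/minmax.h>

/*
 * Hypothetical helper: cap a queue's maximum request size to the
 * optimal DMA mapping size so IOVA allocations stay rcache-backed.
 */
static size_t example_max_request_size(struct device *dev,
				       size_t hw_max_bytes)
{
	/* Already clamped internally against dma_max_mapping_size(dev). */
	size_t opt = dma_opt_mapping_size(dev);

	return min(hw_max_bytes, opt);
}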