author		Sujoy Ray <sujoy.ray@intel.com>	2022-05-04 21:01:19 +0300
committer	Sujoy Ray <sujoy.ray@intel.com>	2022-05-12 17:46:40 +0300
commit		efe6d9649b1d6b85b50cef64745df2e6749a8a45 (patch)
tree		e9aca55fa1fa29fea638ee52832fa9691fdd6f02 /drivers/iommu/dma-iommu.c
parent		ab95859fee776e58934d2b0cc1f4e93810e66508 (diff)
parent		49caedb668e476c100d727f2174724e0610a2b92 (diff)
download	linux-efe6d9649b1d6b85b50cef64745df2e6749a8a45.tar.xz
Merge commit '49caedb668e476c100d727f2174724e0610a2b92' of https://github.com/openbmc/linux into openbmc/dev-5.15-intel-bump_v5.15.36
Signed-off-by: Sujoy Ray <sujoy.ray@intel.com>
Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--	drivers/iommu/dma-iommu.c	155
1 file changed, 71 insertions(+), 84 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2d6021644000..276b6fcc80ad 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -317,6 +317,11 @@ static bool dev_is_untrusted(struct device *dev)
 	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
 }
 
+static bool dev_use_swiotlb(struct device *dev)
+{
+	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
+}
+
 /* sysfs updates are serialised by the mutex of the group owning @domain */
 int iommu_dma_init_fq(struct iommu_domain *domain)
 {
@@ -510,23 +515,6 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
 }
 
-static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	struct iommu_domain *domain = iommu_get_dma_domain(dev);
-	phys_addr_t phys;
-
-	phys = iommu_iova_to_phys(domain, dma_addr);
-	if (WARN_ON(!phys))
-		return;
-
-	__iommu_dma_unmap(dev, dma_addr, size);
-
-	if (unlikely(is_swiotlb_buffer(dev, phys)))
-		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
-}
-
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 		size_t size, int prot, u64 dma_mask)
 {
@@ -553,55 +541,6 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	return iova + iova_off;
 }
 
-static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
-		size_t org_size, dma_addr_t dma_mask, bool coherent,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	int prot = dma_info_to_prot(dir, coherent, attrs);
-	struct iommu_domain *domain = iommu_get_dma_domain(dev);
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	struct iova_domain *iovad = &cookie->iovad;
-	size_t aligned_size = org_size;
-	void *padding_start;
-	size_t padding_size;
-	dma_addr_t iova;
-
-	/*
-	 * If both the physical buffer start address and size are
-	 * page aligned, we don't need to use a bounce page.
-	 */
-	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
-	    iova_offset(iovad, phys | org_size)) {
-		aligned_size = iova_align(iovad, org_size);
-		phys = swiotlb_tbl_map_single(dev, phys, org_size,
-					      aligned_size, dir, attrs);
-
-		if (phys == DMA_MAPPING_ERROR)
-			return DMA_MAPPING_ERROR;
-
-		/* Cleanup the padding area. */
-		padding_start = phys_to_virt(phys);
-		padding_size = aligned_size;
-
-		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-		    (dir == DMA_TO_DEVICE ||
-		     dir == DMA_BIDIRECTIONAL)) {
-			padding_start += org_size;
-			padding_size -= org_size;
-		}
-
-		memset(padding_start, 0, padding_size);
-	}
-
-	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(phys, org_size, dir);
-
-	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
-	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
-		swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
-	return iova;
-}
-
 static void __iommu_dma_free_pages(struct page **pages, int count)
 {
 	while (count--)
@@ -797,7 +736,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@ -813,7 +752,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@ -831,7 +770,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_untrusted(dev))
+	if (dev_use_swiotlb(dev))
 		for_each_sg(sgl, sg, nelems, i)
 			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
 						      sg->length, dir);
@@ -847,7 +786,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_untrusted(dev))
+	if (dev_use_swiotlb(dev))
 		for_each_sg(sgl, sg, nelems, i)
 			iommu_dma_sync_single_for_device(dev,
 							 sg_dma_address(sg),
@@ -863,17 +802,66 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
+	int prot = dma_info_to_prot(dir, coherent, attrs);
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	dma_addr_t iova, dma_mask = dma_get_mask(dev);
+
+	/*
+	 * If both the physical buffer start address and size are
+	 * page aligned, we don't need to use a bounce page.
+	 */
+	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
+		void *padding_start;
+		size_t padding_size, aligned_size;
+
+		aligned_size = iova_align(iovad, size);
+		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+					      iova_mask(iovad), dir, attrs);
+
+		if (phys == DMA_MAPPING_ERROR)
+			return DMA_MAPPING_ERROR;
+
+		/* Cleanup the padding area. */
+		padding_start = phys_to_virt(phys);
+		padding_size = aligned_size;
+
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
+			padding_start += size;
+			padding_size -= size;
+		}
+
+		memset(padding_start, 0, padding_size);
+	}
 
-	return __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
-			coherent, dir, attrs);
+	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(phys, size, dir);
+
+	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
+	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
+		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+	return iova;
 }
 
 static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	phys_addr_t phys;
+
+	phys = iommu_iova_to_phys(domain, dma_handle);
+	if (WARN_ON(!phys))
+		return;
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+		arch_sync_dma_for_cpu(phys, size, dir);
+
+	__iommu_dma_unmap(dev, dma_handle, size);
+
+	if (unlikely(is_swiotlb_buffer(dev, phys)))
+		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
 
 /*
@@ -958,7 +946,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *
 	struct scatterlist *s;
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		__iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
+		iommu_dma_unmap_page(dev, sg_dma_address(s),
 				sg_dma_len(s), dir, attrs);
 }
 
@@ -969,9 +957,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
-				s->length, dma_get_mask(dev),
-				dev_is_dma_coherent(dev), dir, attrs);
+		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
+				s->offset, s->length, dir, attrs);
 		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
 			goto out_unmap;
 		sg_dma_len(s) = s->length;
@@ -1011,7 +998,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		goto out;
 	}
 
-	if (dev_is_untrusted(dev))
+	if (dev_use_swiotlb(dev))
 		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1089,14 +1076,14 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *tmp;
 	int i;
 
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
-
-	if (dev_is_untrusted(dev)) {
+	if (dev_use_swiotlb(dev)) {
 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
 		return;
 	}
 
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
 	/*
 	 * The scatterlist segments are mapped into a single
 	 * contiguous IOVA allocation, so this is incredibly easy.
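The pattern this diff centralises in iommu_dma_map_page() is the bounce-buffer decision for untrusted devices: a swiotlb bounce page is used only when the buffer's start address or its size is not aligned to the IOVA granule, and the unused padding of the bounce slot is zeroed before the device can see it. Below is a minimal standalone C sketch of that alignment test and padding arithmetic; GRANULE and the granule_*() helpers are hypothetical stand-ins for the kernel's iova_offset()/iova_align() on an iova_domain, not kernel API.

/*
 * Standalone userspace sketch, not kernel code: GRANULE and the
 * granule_*() helpers approximate iova_offset()/iova_align().
 */
#include <stdio.h>

#define GRANULE 4096UL	/* assumed IOVA granule, typically PAGE_SIZE */

/* Offset of x within a granule, like iova_offset(iovad, x). */
static unsigned long granule_offset(unsigned long x)
{
	return x & (GRANULE - 1);
}

/* Round size up to whole granules, like iova_align(iovad, size). */
static unsigned long granule_align(unsigned long size)
{
	return (size + GRANULE - 1) & ~(GRANULE - 1);
}

int main(void)
{
	unsigned long phys = 0x1000a000UL;	/* granule-aligned start */
	unsigned long size = 0x600UL;		/* sub-granule length */

	/*
	 * OR-ing the start address and the size tests both for
	 * alignment at once: "phys | size" has a sub-granule bit set
	 * iff at least one of the two operands does.
	 */
	if (granule_offset(phys | size)) {
		unsigned long aligned_size = granule_align(size);

		/*
		 * Mirrors the DMA_TO_DEVICE case in the diff: the first
		 * "size" bytes of the bounce slot hold real data, so
		 * only the tail padding needs zeroing.
		 */
		printf("bounce %#lx bytes, zero %#lx bytes of tail padding\n",
		       aligned_size, aligned_size - size);
	} else {
		printf("start and size granule-aligned: no bounce needed\n");
	}
	return 0;
}

The single test on phys | size works because OR cannot clear bits: the result is misaligned exactly when at least one operand is. Zeroing the padding matters for the same reason the bounce buffer exists at all: a misbehaving untrusted device must not be able to read stale data left in the slot by an earlier mapping.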