author		Christoph Hellwig <hch@lst.de>	2018-11-21 21:32:03 +0300
committer	Christoph Hellwig <hch@lst.de>	2018-12-06 17:56:47 +0300
commit		964f2311a6862f1fbcc044d0828ad90030928b7f (patch)
tree		bea40579beba26f1405c548ca048158e2a097640 /drivers/iommu/intel-iommu.c
parent		b3aa14f022543ed86823c97c145495b747102fa9 (diff)
iommu/intel: small map_page cleanup
Pass the page + offset to the low-level __intel_map_single helper
(which gets renamed to __intel_map_page to match the new calling
convention), as both callers already have the page at hand.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--	drivers/iommu/intel-iommu.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 41a4b8808802..66b4444398ae 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3597,9 +3597,11 @@ static int iommu_no_mapping(struct device *dev)
 	return 0;
 }
 
-static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
-				     size_t size, int dir, u64 dma_mask)
+static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
+				   unsigned long offset, size_t size, int dir,
+				   u64 dma_mask)
 {
+	phys_addr_t paddr = page_to_phys(page) + offset;
 	struct dmar_domain *domain;
 	phys_addr_t start_paddr;
 	unsigned long iova_pfn;
@@ -3661,8 +3663,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
 				 enum dma_data_direction dir,
 				 unsigned long attrs)
 {
-	return __intel_map_single(dev, page_to_phys(page) + offset, size,
-				  dir, *dev->dma_mask);
+	return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3753,9 +3754,8 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	memset(page_address(page), 0, size);
 
-	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
-					 DMA_BIDIRECTIONAL,
-					 dev->coherent_dma_mask);
+	*dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
+				       dev->coherent_dma_mask);
 	if (*dma_handle)
 		return page_address(page);
 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
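
The cleanup rests on the identity the renamed helper now applies internally: a (page, offset) pair names the physical address paddr = page_to_phys(page) + offset, so callers that already hold the page no longer convert it themselves. The stand-alone C sketch below illustrates that calling convention only; struct page, page_to_phys() and map_page() here are simplified stand-ins invented for illustration, not the kernel definitions.

/* Minimal user-space sketch of the page + offset calling convention.
 * struct page and page_to_phys() are hypothetical stand-ins, not the
 * real kernel implementations.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

#define PAGE_SHIFT 12

struct page {
	phys_addr_t pfn;	/* page frame number backing this page */
};

static phys_addr_t page_to_phys(struct page *page)
{
	return page->pfn << PAGE_SHIFT;
}

/* New-style helper: derives the physical address itself, mirroring what
 * __intel_map_page() does in the patch before setting up the mapping.
 */
static phys_addr_t map_page(struct page *page, unsigned long offset)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	return paddr;	/* the real helper would program the IOMMU here */
}

int main(void)
{
	struct page pg = { .pfn = 0x1234 };

	/* intel_map_page()-style caller: has a page and a sub-page offset. */
	printf("mapped paddr: %#llx\n",
	       (unsigned long long)map_page(&pg, 0x80));

	/* intel_alloc_coherent()-style caller: whole page, offset 0. */
	printf("mapped paddr: %#llx\n",
	       (unsigned long long)map_page(&pg, 0));
	return 0;
}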