| author | Lu Baolu <baolu.lu@linux.intel.com> | 2021-07-20 05:06:15 +0300 |
|---|---|---|
| committer | Joerg Roedel <jroedel@suse.de> | 2021-07-26 14:56:25 +0300 |
| commit | 75cc1018a9e1e57d4ae43a101fc08a070894d439 (patch) | |
| tree | dde8f9d8f04f7b2a8d019bdfaff94bd851f5acad /drivers/iommu | |
| parent | 3f34f125977685e591def32984c77e23a86075b0 (diff) | |
| download | linux-75cc1018a9e1e57d4ae43a101fc08a070894d439.tar.xz | |
iommu/vt-d: Move clflush'es from iotlb_sync_map() to map_pages()
Now that the Intel VT-d driver has switched to the iommu_ops.map_pages()
callback, multiple pages of the same size are mapped in a single call, so
there is no need to keep the clflush'es in the iotlb_sync_map() callback.
Move them back into __domain_mapping() to simplify the code.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20210720020615.4144323-4-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
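
For context, the pattern the patch adopts is the usual one for non-coherent IOMMU page-table walkers: write a contiguous run of PTEs first, then flush the CPU cache lines covering that run once. Below is a minimal, self-contained sketch of that idea; the names (pte_t, flush_pte_range(), map_pages_batch()) and the inline clflush are illustrative stand-ins, not the driver's actual helpers (the kernel goes through domain_flush_cache(), which wraps clflush_cache_range()).

```c
#include <stdint.h>
#include <stddef.h>

typedef uint64_t pte_t;
#define CACHELINE	64
#define PAGE_SIZE_4K	4096

/* Illustrative stand-in for clflush_cache_range(): flush every cache
 * line overlapping [start, start + size). x86-only, sketch quality. */
static void flush_pte_range(void *start, size_t size)
{
	char *p = (char *)((uintptr_t)start & ~(uintptr_t)(CACHELINE - 1));
	char *end = (char *)start + size;

	for (; p < end; p += CACHELINE)
		asm volatile("clflush %0" : "+m" (*p));
}

/* Write a contiguous run of PTEs, then flush the whole run with one
 * call, rather than re-walking the page table later to find it. */
static void map_pages_batch(pte_t *pte, uint64_t phys, unsigned long nr)
{
	pte_t *first_pte = pte;

	while (nr--) {
		*pte++ = phys | 0x3;	/* illustrative R/W permission bits */
		phys += PAGE_SIZE_4K;
	}
	flush_pte_range(first_pte, (char *)pte - (char *)first_pte);
}

int main(void)
{
	static pte_t table[512];	/* toy page-table page */

	map_pages_batch(&table[0], 0x100000, 16);	/* "map" 16 pages */
	return 0;
}
```

The deleted clflush_sync_map() had to re-walk the page table in iotlb_sync_map() to rediscover exactly these PTE runs; flushing in __domain_mapping(), where first_pte and pte are already in hand, removes that duplicate walk.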
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/intel/iommu.c | 48
1 file changed, 7 insertions(+), 41 deletions(-)
```diff
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index e4c74296b34a..c12cc955389a 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2333,9 +2333,9 @@ static int
 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                  unsigned long phys_pfn, unsigned long nr_pages, int prot)
 {
+       struct dma_pte *first_pte = NULL, *pte = NULL;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;
-       struct dma_pte *pte = NULL;
        phys_addr_t pteval;
        u64 attr;
 
@@ -2368,6 +2368,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                        pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
                        if (!pte)
                                return -ENOMEM;
+                       first_pte = pte;
+
                        /* It is large page*/
                        if (largepage_lvl > 1) {
                                unsigned long end_pfn;
@@ -2415,14 +2417,14 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                 * recalculate 'pte' and switch back to smaller pages for the
                 * end of the mapping, if the trailing size is not enough to
                 * use another superpage (i.e. nr_pages < lvl_pages).
-                *
-                * We leave clflush for the leaf pte changes to iotlb_sync_map()
-                * callback.
                 */
                pte++;
                if (!nr_pages || first_pte_in_page(pte) ||
-                   (largepage_lvl > 1 && nr_pages < lvl_pages))
+                   (largepage_lvl > 1 && nr_pages < lvl_pages)) {
+                       domain_flush_cache(domain, first_pte,
+                                          (void *)pte - (void *)first_pte);
                        pte = NULL;
+               }
        }
 
        return 0;
@@ -5563,39 +5565,6 @@ static bool risky_device(struct pci_dev *pdev)
        return false;
 }
 
-static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
-                            unsigned long clf_pages)
-{
-       struct dma_pte *first_pte = NULL, *pte = NULL;
-       unsigned long lvl_pages = 0;
-       int level = 0;
-
-       while (clf_pages > 0) {
-               if (!pte) {
-                       level = 0;
-                       pte = pfn_to_dma_pte(domain, clf_pfn, &level);
-                       if (WARN_ON(!pte))
-                               return;
-                       first_pte = pte;
-                       lvl_pages = lvl_to_nr_pages(level);
-               }
-
-               if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
-                       return;
-
-               clf_pages -= lvl_pages;
-               clf_pfn += lvl_pages;
-               pte++;
-
-               if (!clf_pages || first_pte_in_page(pte) ||
-                   (level > 1 && clf_pages < lvl_pages)) {
-                       domain_flush_cache(domain, first_pte,
-                                          (void *)pte - (void *)first_pte);
-                       pte = NULL;
-               }
-       }
-}
-
 static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
                                        unsigned long iova, size_t size)
 {
@@ -5605,9 +5574,6 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
        struct intel_iommu *iommu;
        int iommu_id;
 
-       if (!dmar_domain->iommu_coherency)
-               clflush_sync_map(dmar_domain, pfn, pages);
-
        for_each_domain_iommu(iommu_id, dmar_domain) {
                iommu = g_iommus[iommu_id];
                __mapping_notify_one(iommu, dmar_domain, pfn, pages);
```
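
The flush points in the new __domain_mapping() hunk are worth a closer look: a batch is flushed when the mapping is complete, when pte steps onto a new page of the page table (first_pte_in_page()), or when the tail of the region falls back from superpages to smaller pages. The standalone toy below reproduces just the first two conditions; PTES_PER_PAGE and the printf are stand-ins for the real page-table layout and flush.

```c
#include <stdio.h>

#define PTES_PER_PAGE 512	/* 4 KiB page-table page / 8-byte PTE */

/* Toy model of the flush batching in __domain_mapping(): PTE slots are
 * plain indices, and "flushing" just prints the half-open batch range. */
int main(void)
{
	unsigned long nr_pages = 1300;	/* PTEs still to write */
	unsigned long idx = 5;		/* current PTE slot */
	unsigned long batch_start = idx;

	while (nr_pages--) {
		idx++;			/* "write" one PTE, advance */

		/* Flush when the mapping is finished or the next PTE
		 * lives on a different page-table page, mirroring
		 * !nr_pages || first_pte_in_page(pte) above. */
		if (!nr_pages || (idx % PTES_PER_PAGE) == 0) {
			printf("flush PTEs [%lu, %lu)\n", batch_start, idx);
			batch_start = idx;
		}
	}
	return 0;
}
```

Run as written, this prints three ranges, showing that a 1300-PTE mapping costs only a handful of flush calls rather than one per PTE, which is the same economy the patch preserves while dropping the second page-table walk.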