author | Lu Baolu <baolu.lu@linux.intel.com> | 2022-07-12 03:08:58 +0300
---|---|---
committer | Joerg Roedel <jroedel@suse.de> | 2022-07-15 11:21:37 +0300
commit | 8430fd3f3287013cb3f04b465c0686c41c86a8c8 (patch) |
tree | d007cc8d73ad3b142d6249d7b6ef0843d9977655 | /drivers/iommu
parent | 2c3262f9e881ae403b6d92a7e4824531cdb63829 (diff) |
download | linux-8430fd3f3287013cb3f04b465c0686c41c86a8c8.tar.xz |
iommu/vt-d: Acquiring lock in pasid manipulation helpers
The iommu->lock is used to protect the per-IOMMU pasid directory table
and pasid table. Move the spinlock acquisition/release into the helpers
to make the code self-contained.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20220706025524.2904370-8-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
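The locking pattern the commit message describes can be illustrated with a small, self-contained sketch. Everything below is hypothetical userspace C, not kernel code: a pthread mutex stands in for the iommu->lock spinlock and a plain array stands in for the per-IOMMU pasid table. It only mirrors the shape of the change, where the manipulation helper acquires and releases the lock itself instead of relying on its callers.

```c
#include <pthread.h>
#include <stdio.h>

struct fake_iommu {
        pthread_mutex_t lock;   /* stands in for iommu->lock */
        int table[16];          /* stands in for the per-IOMMU pasid table */
};

/*
 * Self-contained helper: it takes and drops the lock around every access
 * to the table, so the call site needs no lock/unlock of its own.
 */
static int setup_entry(struct fake_iommu *iommu, int idx, int val)
{
        pthread_mutex_lock(&iommu->lock);
        if (iommu->table[idx]) {                /* entry already present */
                pthread_mutex_unlock(&iommu->lock);
                return -1;                      /* mirrors returning -EBUSY */
        }
        iommu->table[idx] = val;
        pthread_mutex_unlock(&iommu->lock);
        return 0;
}

int main(void)
{
        struct fake_iommu iommu = { .lock = PTHREAD_MUTEX_INITIALIZER };

        /* No spin_lock()/spin_unlock() at the call sites any more. */
        printf("first setup: %d\n", setup_entry(&iommu, 2, 42));
        printf("second setup (busy): %d\n", setup_entry(&iommu, 2, 7));
        return 0;
}
```

With this shape, call sites such as the RID2PASID setup in domain_add_dev_info() and the SVM bind path simply call the helper, which is exactly what the hunks below reflect by dropping the surrounding spin_lock()/spin_unlock() pairs.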
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/intel/iommu.c | 2
-rw-r--r-- | drivers/iommu/intel/pasid.c | 103
-rw-r--r-- | drivers/iommu/intel/svm.c | 3
3 files changed, 55 insertions, 53 deletions
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 7f03576e72d7..3d53de8c7634 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2489,7 +2489,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
         }
 
         /* Setup the PASID entry for requests without PASID: */
-        spin_lock(&iommu->lock);
         if (hw_pass_through && domain_type_is_si(domain))
                 ret = intel_pasid_setup_pass_through(iommu, domain,
                                 dev, PASID_RID2PASID);
@@ -2499,7 +2498,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
         else
                 ret = intel_pasid_setup_second_level(iommu, domain,
                                 dev, PASID_RID2PASID);
-        spin_unlock(&iommu->lock);
         if (ret) {
                 dev_err(dev, "Setup RID2PASID failed\n");
                 dmar_remove_one_dev_info(dev);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 43f090381ec7..7792a1b2ebc4 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -450,17 +450,17 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
         struct pasid_entry *pte;
         u16 did, pgtt;
 
+        spin_lock(&iommu->lock);
         pte = intel_pasid_get_entry(dev, pasid);
-        if (WARN_ON(!pte))
-                return;
-
-        if (!pasid_pte_is_present(pte))
+        if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
+                spin_unlock(&iommu->lock);
                 return;
+        }
 
         did = pasid_get_domain_id(pte);
         pgtt = pasid_pte_get_pgtt(pte);
-
         intel_pasid_clear_entry(dev, pasid, fault_ignore);
+        spin_unlock(&iommu->lock);
 
         if (!ecap_coherent(iommu->ecap))
                 clflush_cache_range(pte, sizeof(*pte));
@@ -496,22 +496,6 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
         }
 }
 
-static inline int pasid_enable_wpe(struct pasid_entry *pte)
-{
-#ifdef CONFIG_X86
-        unsigned long cr0 = read_cr0();
-
-        /* CR0.WP is normally set but just to be sure */
-        if (unlikely(!(cr0 & X86_CR0_WP))) {
-                pr_err_ratelimited("No CPU write protect!\n");
-                return -EINVAL;
-        }
-#endif
-        pasid_set_wpe(pte);
-
-        return 0;
-};
-
 /*
  * Set up the scalable mode pasid table entry for first only
  * translation type.
@@ -528,39 +512,52 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
                 return -EINVAL;
         }
 
-        pte = intel_pasid_get_entry(dev, pasid);
-        if (WARN_ON(!pte))
+        if (flags & PASID_FLAG_SUPERVISOR_MODE) {
+#ifdef CONFIG_X86
+                unsigned long cr0 = read_cr0();
+
+                /* CR0.WP is normally set but just to be sure */
+                if (unlikely(!(cr0 & X86_CR0_WP))) {
+                        pr_err("No CPU write protect!\n");
+                        return -EINVAL;
+                }
+#endif
+                if (!ecap_srs(iommu->ecap)) {
+                        pr_err("No supervisor request support on %s\n",
+                               iommu->name);
+                        return -EINVAL;
+                }
+        }
+
+        if ((flags & PASID_FLAG_FL5LP) && !cap_5lp_support(iommu->cap)) {
+                pr_err("No 5-level paging support for first-level on %s\n",
+                       iommu->name);
                 return -EINVAL;
+        }
 
-        /* Caller must ensure PASID entry is not in use. */
-        if (pasid_pte_is_present(pte))
+        spin_lock(&iommu->lock);
+        pte = intel_pasid_get_entry(dev, pasid);
+        if (!pte) {
+                spin_unlock(&iommu->lock);
+                return -ENODEV;
+        }
+
+        if (pasid_pte_is_present(pte)) {
+                spin_unlock(&iommu->lock);
                 return -EBUSY;
+        }
 
         pasid_clear_entry(pte);
 
         /* Setup the first level page table pointer: */
         pasid_set_flptr(pte, (u64)__pa(pgd));
         if (flags & PASID_FLAG_SUPERVISOR_MODE) {
-                if (!ecap_srs(iommu->ecap)) {
-                        pr_err("No supervisor request support on %s\n",
-                               iommu->name);
-                        return -EINVAL;
-                }
                 pasid_set_sre(pte);
-                if (pasid_enable_wpe(pte))
-                        return -EINVAL;
-
+                pasid_set_wpe(pte);
         }
 
-        if (flags & PASID_FLAG_FL5LP) {
-                if (cap_5lp_support(iommu->cap)) {
-                        pasid_set_flpm(pte, 1);
-                } else {
-                        pr_err("No 5-level paging support for first-level\n");
-                        pasid_clear_entry(pte);
-                        return -EINVAL;
-                }
-        }
+        if (flags & PASID_FLAG_FL5LP)
+                pasid_set_flpm(pte, 1);
 
         if (flags & PASID_FLAG_PAGE_SNOOP)
                 pasid_set_pgsnp(pte);
@@ -572,6 +569,8 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
         /* Setup Present and PASID Granular Transfer Type: */
         pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
         pasid_set_present(pte);
+        spin_unlock(&iommu->lock);
+
         pasid_flush_caches(iommu, pte, pasid, did);
 
         return 0;
@@ -629,15 +628,17 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
         pgd_val = virt_to_phys(pgd);
         did = domain->iommu_did[iommu->seq_id];
 
+        spin_lock(&iommu->lock);
         pte = intel_pasid_get_entry(dev, pasid);
         if (!pte) {
-                dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
+                spin_unlock(&iommu->lock);
                 return -ENODEV;
         }
 
-        /* Caller must ensure PASID entry is not in use. */
-        if (pasid_pte_is_present(pte))
+        if (pasid_pte_is_present(pte)) {
+                spin_unlock(&iommu->lock);
                 return -EBUSY;
+        }
 
         pasid_clear_entry(pte);
         pasid_set_domain_id(pte, did);
@@ -654,6 +655,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
         if (pasid != PASID_RID2PASID)
                 pasid_set_sre(pte);
         pasid_set_present(pte);
+        spin_unlock(&iommu->lock);
+
         pasid_flush_caches(iommu, pte, pasid, did);
 
         return 0;
@@ -669,15 +672,17 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
         u16 did = FLPT_DEFAULT_DID;
         struct pasid_entry *pte;
 
+        spin_lock(&iommu->lock);
         pte = intel_pasid_get_entry(dev, pasid);
         if (!pte) {
-                dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
+                spin_unlock(&iommu->lock);
                 return -ENODEV;
         }
 
-        /* Caller must ensure PASID entry is not in use. */
-        if (pasid_pte_is_present(pte))
+        if (pasid_pte_is_present(pte)) {
+                spin_unlock(&iommu->lock);
                 return -EBUSY;
+        }
 
         pasid_clear_entry(pte);
         pasid_set_domain_id(pte, did);
@@ -692,6 +697,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
          */
         pasid_set_sre(pte);
         pasid_set_present(pte);
+        spin_unlock(&iommu->lock);
+
         pasid_flush_caches(iommu, pte, pasid, did);
 
         return 0;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 82288a50660d..64072e628bbd 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -394,11 +394,8 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
         sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
                         PASID_FLAG_SUPERVISOR_MODE : 0;
         sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
-        spin_lock(&iommu->lock);
         ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
                                             FLPT_DEFAULT_DID, sflags);
-        spin_unlock(&iommu->lock);
-
         if (ret)
                 goto free_sdev;