author      Linus Torvalds <torvalds@linux-foundation.org>  2022-05-04 21:04:52 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>  2022-05-04 21:04:52 +0300
commit      a7391ad3572431a354c927cf8896e86e50d7d0bf (patch)
tree        2959c9f34e8b95458ddcf670a8200d4e05f93cfe
parent      3118d7ab3f01e309d38b4e719370bdd5361aec6d (diff)
parent      392bf51946c2463436a1ba237c1ec5865b234825 (diff)
download    linux-a7391ad3572431a354c927cf8896e86e50d7d0bf.tar.xz
Merge tag 'iomm-fixes-v5.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu fixes from Joerg Roedel:
 "IOMMU core:
   - Fix for a regression which could cause NULL-ptr dereferences

  Arm SMMU:
   - Fix off-by-one in SMMUv3 SVA TLB invalidation
   - Disable large mappings to work around an NVIDIA erratum

  Intel VT-d:
   - Handle PCI stop marker messages in the IOMMU driver to meet the
     requirement of the I/O page fault handling framework
   - Calculate a feasible mask for non-aligned page-selective IOTLB
     invalidation

  Apple DART IOMMU:
   - Fix potential NULL-ptr dereference
   - Set module owner"
* tag 'iomm-fixes-v5.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
iommu: Make sysfs robust for non-API groups
iommu/dart: Add missing module owner to ops structure
iommu/dart: check return value after calling platform_get_resource()
iommu/vt-d: Drop stop marker messages
iommu/vt-d: Calculate mask for non-aligned flushes
iommu: arm-smmu: disable large page mappings for Nvidia arm-smmu
iommu/arm-smmu-v3: Fix size calculation in arm_smmu_mm_invalidate_range()
-rw-r--r--   drivers/iommu/apple-dart.c                       | 10
-rw-r--r--   drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c  |  9
-rw-r--r--   drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c     | 30
-rw-r--r--   drivers/iommu/intel/iommu.c                      | 27
-rw-r--r--   drivers/iommu/intel/svm.c                        |  4
-rw-r--r--   drivers/iommu/iommu.c                            |  9
6 files changed, 79 insertions, 10 deletions
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index decafb07ad08..8af0242a90d9 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -773,6 +773,7 @@ static const struct iommu_ops apple_dart_iommu_ops = {
         .get_resv_regions = apple_dart_get_resv_regions,
         .put_resv_regions = generic_iommu_put_resv_regions,
         .pgsize_bitmap = -1UL, /* Restricted during dart probe */
+        .owner = THIS_MODULE,
         .default_domain_ops = &(const struct iommu_domain_ops) {
                 .attach_dev = apple_dart_attach_dev,
                 .detach_dev = apple_dart_detach_dev,
@@ -859,16 +860,15 @@ static int apple_dart_probe(struct platform_device *pdev)
         dart->dev = dev;
         spin_lock_init(&dart->lock);
 
-        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+        dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+        if (IS_ERR(dart->regs))
+                return PTR_ERR(dart->regs);
+
         if (resource_size(res) < 0x4000) {
                 dev_err(dev, "MMIO region too small (%pr)\n", res);
                 return -EINVAL;
         }
 
-        dart->regs = devm_ioremap_resource(dev, res);
-        if (IS_ERR(dart->regs))
-                return PTR_ERR(dart->regs);
-
         dart->irq = platform_get_irq(pdev, 0);
         if (dart->irq < 0)
                 return -ENODEV;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 22ddd05bbdcd..c623dae1e115 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -183,7 +183,14 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
 {
         struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
         struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
-        size_t size = end - start + 1;
+        size_t size;
+
+        /*
+         * The mm_types defines vm_end as the first byte after the end address,
+         * different from IOMMU subsystem using the last address of an address
+         * range. So do a simple translation here by calculating size correctly.
+         */
+        size = end - start;
 
         if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
                 arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 01e9b50b10a1..87bf522b9d2e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -258,6 +258,34 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct devi
                         dev_name(dev), err);
 }
 
+static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+                                    struct io_pgtable_cfg *pgtbl_cfg,
+                                    struct device *dev)
+{
+        struct arm_smmu_device *smmu = smmu_domain->smmu;
+        const struct device_node *np = smmu->dev->of_node;
+
+        /*
+         * Tegra194 and Tegra234 SoCs have the erratum that causes walk cache
+         * entries to not be invalidated correctly. The problem is that the walk
+         * cache index generated for IOVA is not same across translation and
+         * invalidation requests. This is leading to page faults when PMD entry
+         * is released during unmap and populated with new PTE table during
+         * subsequent map request. Disabling large page mappings avoids the
+         * release of PMD entry and avoid translations seeing stale PMD entry in
+         * walk cache.
+         * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
+         * Tegra234.
+         */
+        if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+            of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
+                smmu->pgsize_bitmap = PAGE_SIZE;
+                pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
+        }
+
+        return 0;
+}
+
 static const struct arm_smmu_impl nvidia_smmu_impl = {
         .read_reg = nvidia_smmu_read_reg,
         .write_reg = nvidia_smmu_write_reg,
@@ -268,10 +296,12 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
         .global_fault = nvidia_smmu_global_fault,
         .context_fault = nvidia_smmu_context_fault,
         .probe_finalize = nvidia_smmu_probe_finalize,
+        .init_context = nvidia_smmu_init_context,
 };
 
 static const struct arm_smmu_impl nvidia_smmu_single_impl = {
         .probe_finalize = nvidia_smmu_probe_finalize,
+        .init_context = nvidia_smmu_init_context,
 };
 
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index df5c62ecf942..0ea47e17b379 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1588,7 +1588,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
                                   unsigned long pfn, unsigned int pages,
                                   int ih, int map)
 {
-        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
+        unsigned int aligned_pages = __roundup_pow_of_two(pages);
+        unsigned int mask = ilog2(aligned_pages);
         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
         u16 did = domain->iommu_did[iommu->seq_id];
 
@@ -1600,10 +1601,30 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
         if (domain_use_first_level(domain)) {
                 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
         } else {
+                unsigned long bitmask = aligned_pages - 1;
+
+                /*
+                 * PSI masks the low order bits of the base address. If the
+                 * address isn't aligned to the mask, then compute a mask value
+                 * needed to ensure the target range is flushed.
+                 */
+                if (unlikely(bitmask & pfn)) {
+                        unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+                        /*
+                         * Since end_pfn <= pfn + bitmask, the only way bits
+                         * higher than bitmask can differ in pfn and end_pfn is
+                         * by carrying. This means after masking out bitmask,
+                         * high bits starting with the first set bit in
+                         * shared_bits are all equal in both pfn and end_pfn.
+                         */
+                        shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+                        mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+                }
+
                 /*
                  * Fallback to domain selective flush if no PSI support or
-                 * the size is too big. PSI requires page size to be 2 ^ x,
-                 * and the base address is naturally aligned to the size.
+                 * the size is too big.
                  */
                 if (!cap_pgsel_inv(iommu->cap) ||
                     mask > cap_max_amask_val(iommu->cap))
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 23a38763c1d1..7ee37d996e15 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -757,6 +757,10 @@ bad_req:
                         goto bad_req;
                 }
 
+                /* Drop Stop Marker message. No need for a response. */
+                if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
+                        goto prq_advance;
+
                 if (!svm || svm->pasid != req->pasid) {
                         /*
                          * It can't go away, because the driver is not permitted
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f2c45b85b9fc..857d4c2fd1a2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -506,6 +506,13 @@ int iommu_get_group_resv_regions(struct iommu_group *group,
         list_for_each_entry(device, &group->devices, list) {
                 struct list_head dev_resv_regions;
 
+                /*
+                 * Non-API groups still expose reserved_regions in sysfs,
+                 * so filter out calls that get here that way.
+                 */
+                if (!device->dev->iommu)
+                        break;
+
                 INIT_LIST_HEAD(&dev_resv_regions);
                 iommu_get_resv_regions(device->dev, &dev_resv_regions);
                 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
@@ -3019,7 +3026,7 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                 return -EACCES;
 
-        if (WARN_ON(!group))
+        if (WARN_ON(!group) || !group->default_domain)
                 return -EINVAL;
 
         if (sysfs_streq(buf, "identity"))
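Editor's note on the apple-dart hunks above: the old probe code called resource_size(res) before checking that platform_get_resource() actually returned a resource, which is the potential NULL-ptr dereference mentioned in the pull message; the consolidated helper puts the error check first. A minimal sketch of the resulting probe pattern — my_probe() is a hypothetical driver function, not part of apple-dart:

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

/* Hypothetical probe illustrating the get-and-ioremap pattern. */
static int my_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *regs;

        /* Looks up the MEM resource and ioremaps it in one step; fails
         * cleanly (no NULL dereference) if the resource is missing. */
        regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        /* 'res' is only touched here, after the error check. */
        if (resource_size(res) < 0x4000)
                return -EINVAL;

        return 0;
}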
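The SMMUv3 off-by-one fix hinges on two range conventions: mm_struct-style ranges are half-open (end points one past the last byte), while the IOMMU code had assumed an inclusive end. A user-space sketch with made-up addresses, just to show the arithmetic:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        /* mmu_notifier ranges follow mm_struct semantics: 'end' is the
         * first byte *after* the range. One 4 KiB page at 0x1000: */
        unsigned long start = 0x1000, end = 0x2000;

        size_t old_size = end - start + 1;  /* 4097: off by one */
        size_t new_size = end - start;      /* 4096: exactly one page */

        printf("old = %zu, new = %zu\n", old_size, new_size);
        return 0;
}

The stray extra byte made the invalidation size no longer a multiple of the page size, which is what the patch corrects.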
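The shared_bits trick in the VT-d hunk is easiest to see with concrete numbers. Below is a self-contained user-space sketch: roundup_pow_of_two() and lsb_index() re-implement, under the semantics assumed here, the kernel helpers __roundup_pow_of_two() and __ffs() (index of the least significant set bit); the pfn/pages values are invented for illustration.

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

/* Index of the least significant set bit; caller must pass x != 0. */
static unsigned int lsb_index(unsigned long x)
{
        unsigned int i = 0;

        while (!(x & 1UL)) {
                x >>= 1;
                i++;
        }
        return i;
}

static unsigned int psi_mask(unsigned long pfn, unsigned int pages)
{
        unsigned long aligned_pages = roundup_pow_of_two(pages);
        unsigned long bitmask = aligned_pages - 1;
        /* ilog2() of a power of two is the index of its only set bit. */
        unsigned int mask = lsb_index(aligned_pages);

        if (bitmask & pfn) {
                unsigned long end_pfn = pfn + pages - 1;
                /* Bits above bitmask that agree in pfn and end_pfn. */
                unsigned long shared_bits = ~(pfn ^ end_pfn) & ~bitmask;

                /* Widen the flush to the smallest naturally aligned
                 * power-of-two region covering [pfn, end_pfn]. */
                mask = shared_bits ? lsb_index(shared_bits) : 64;
        }
        return mask;
}

int main(void)
{
        /* pfn 0xfff, 2 pages: the range 0xfff..0x1000 straddles a 2-page
         * boundary, so mask 1 at a hardware-aligned base would miss 0x1000. */
        printf("mask = %u\n", psi_mask(0xfff, 2));  /* 13: covers pfns 0x0-0x1fff */
        printf("mask = %u\n", psi_mask(0x1000, 2)); /* 1: aligned, 2 pages */
        return 0;
}

For the straddling case the computed mask over-flushes (8192 pages instead of 2) but is correct; the old code would have flushed 2 pages at the aligned-down base 0xffe and left pfn 0x1000 stale in the IOTLB.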