author		Joerg Roedel <jroedel@suse.de>	2022-05-13 16:17:33 +0300
committer	Joerg Roedel <jroedel@suse.de>	2022-05-13 16:17:33 +0300
commit		e3b9bf145cb6a0c0ffa106d5a78029c7c3e94ef7 (patch)
tree		f04f697c42bfe1b9a253cb89ca9d24ed7358e40c /drivers/iommu
parent		af2d861d4cd2a4da5137f795ee3509e6f944a25b (diff)
parent		628bf55b620497a105f4963ee8fb84769f7e6bb4 (diff)
download	linux-e3b9bf145cb6a0c0ffa106d5a78029c7c3e94ef7.tar.xz
Merge tag 'arm-smmu-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu
Arm SMMU updates for 5.19
- Add new Qualcomm device-tree compatible strings
- Add new Nvidia device-tree compatible string for Tegra234
- Fix UAF in SMMUv3 shared virtual addressing code
- Force identity-mapped domains for users of ye olde SMMU legacy binding
- Minor cleanups
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c	| 22
-rw-r--r--	drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c		|  2
-rw-r--r--	drivers/iommu/arm/arm-smmu/arm-smmu-impl.c		|  3
-rw-r--r--	drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c		| 30
-rw-r--r--	drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c		|  1
-rw-r--r--	drivers/iommu/arm/arm-smmu/arm-smmu.c			|  8
6 files changed, 59 insertions, 7 deletions
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 22ddd05bbdcd..1ef7bbb4acf3 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -6,6 +6,7 @@
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
 #include <linux/mmu_notifier.h>
+#include <linux/sched/mm.h>
 #include <linux/slab.h>
 
 #include "arm-smmu-v3.h"
@@ -96,9 +97,14 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
 	struct arm_smmu_ctx_desc *cd;
 	struct arm_smmu_ctx_desc *ret = NULL;
 
+	/* Don't free the mm until we release the ASID */
+	mmgrab(mm);
+
 	asid = arm64_mm_context_get(mm);
-	if (!asid)
-		return ERR_PTR(-ESRCH);
+	if (!asid) {
+		err = -ESRCH;
+		goto out_drop_mm;
+	}
 
 	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
 	if (!cd) {
@@ -165,6 +171,8 @@ out_free_cd:
 	kfree(cd);
 out_put_context:
 	arm64_mm_context_put(mm);
+out_drop_mm:
+	mmdrop(mm);
 	return err < 0 ? ERR_PTR(err) : ret;
 }
 
@@ -173,6 +181,7 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
 	if (arm_smmu_free_asid(cd)) {
 		/* Unpin ASID */
 		arm64_mm_context_put(cd->mm);
+		mmdrop(cd->mm);
 		kfree(cd);
 	}
 }
@@ -183,7 +192,14 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
 {
 	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
 	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
-	size_t size = end - start + 1;
+	size_t size;
+
+	/*
+	 * The mm_types defines vm_end as the first byte after the end address,
+	 * different from IOMMU subsystem using the last address of an address
+	 * range. So do a simple translation here by calculating size correctly.
+	 */
+	size = end - start;
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
 		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 627a3ed5ee8f..88817a3376ef 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3770,6 +3770,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 
 	/* Base address */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
 	if (resource_size(res) < arm_smmu_resource_size(smmu)) {
 		dev_err(dev, "MMIO region too small (%pr)\n", res);
 		return -EINVAL;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 2c25cce38060..658f3cc83278 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -211,7 +211,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
 	if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
 		smmu->impl = &calxeda_impl;
 
-	if (of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
+	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+	    of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
 	    of_device_is_compatible(np, "nvidia,tegra186-smmu"))
 		return nvidia_smmu_impl_init(smmu);
 
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 01e9b50b10a1..87bf522b9d2e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -258,6 +258,34 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct devi
 			dev_name(dev), err);
 }
 
+static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+				    struct io_pgtable_cfg *pgtbl_cfg,
+				    struct device *dev)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	const struct device_node *np = smmu->dev->of_node;
+
+	/*
+	 * Tegra194 and Tegra234 SoCs have the erratum that causes walk cache
+	 * entries to not be invalidated correctly. The problem is that the walk
+	 * cache index generated for IOVA is not same across translation and
+	 * invalidation requests. This is leading to page faults when PMD entry
+	 * is released during unmap and populated with new PTE table during
+	 * subsequent map request. Disabling large page mappings avoids the
+	 * release of PMD entry and avoid translations seeing stale PMD entry in
+	 * walk cache.
+	 * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
+	 * Tegra234.
+	 */
+	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+	    of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
+		smmu->pgsize_bitmap = PAGE_SIZE;
+		pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
+	}
+
+	return 0;
+}
+
 static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.read_reg = nvidia_smmu_read_reg,
 	.write_reg = nvidia_smmu_write_reg,
@@ -268,10 +296,12 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.global_fault = nvidia_smmu_global_fault,
 	.context_fault = nvidia_smmu_context_fault,
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 static const struct arm_smmu_impl nvidia_smmu_single_impl = {
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index ba6298c7140e..7820711c4560 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -408,6 +408,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
 	{ .compatible = "qcom,sc7180-smmu-500" },
 	{ .compatible = "qcom,sc7280-smmu-500" },
 	{ .compatible = "qcom,sc8180x-smmu-500" },
+	{ .compatible = "qcom,sc8280xp-smmu-500" },
 	{ .compatible = "qcom,sdm630-smmu-v2" },
 	{ .compatible = "qcom,sdm845-smmu-500" },
 	{ .compatible = "qcom,sm6125-smmu-500" },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 568cce590ccc..2ed3594f384e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1574,6 +1574,9 @@ static int arm_smmu_def_domain_type(struct device *dev)
 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
 	const struct arm_smmu_impl *impl = cfg->smmu->impl;
 
+	if (using_legacy_binding)
+		return IOMMU_DOMAIN_IDENTITY;
+
 	if (impl && impl->def_domain_type)
 		return impl->def_domain_type(dev);
 
@@ -2092,11 +2095,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	if (err)
 		return err;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ioaddr = res->start;
-	smmu->base = devm_ioremap_resource(dev, res);
+	smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
 	if (IS_ERR(smmu->base))
 		return PTR_ERR(smmu->base);
+	ioaddr = res->start;
 	/*
 	 * The resource size should effectively match the value of SMMU_TOP;
 	 * stash that temporarily until we know PAGESIZE to validate it with.
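Note on the arm_smmu_mm_invalidate_range() change: mmu_notifier ranges use an exclusive end (the vm_end convention), so the length of the range is simply end - start, whereas the old code treated end as the last byte of the range and added one. A trivial, standalone illustration of the two calculations (the addresses are arbitrary example values, not taken from the patch):

/*
 * Tiny demo of the size calculation fixed in arm_smmu_mm_invalidate_range():
 * with an exclusive end the size is end - start; the old end - start + 1
 * treated end as inclusive and over-counted by one byte, which becomes a
 * whole extra page once the range is rounded to page granularity.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0x1000, end = 0x3000;	/* two 4 KiB pages */

	printf("exclusive end: size = %#llx\n",
	       (unsigned long long)(end - start));	/* 0x2000 */
	printf("old (buggy):   size = %#llx\n",
	       (unsigned long long)(end - start + 1));	/* 0x2001 */
	return 0;
}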
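Note on the Tegra194/Tegra234 workaround: pgsize_bitmap is the set of page sizes the IOMMU core may use when splitting a mapping request, so restricting it to PAGE_SIZE forces every mapping down to small pages and keeps PMD-level block entries out of the walk cache. The sketch below is a simplified, userspace-runnable illustration of that selection logic; it is not the kernel's iommu_pgsize() helper, and the pick_pgsize() name is made up for this example.

/*
 * Pick the largest page size allowed by a pgsize_bitmap that fits the
 * alignment of the IOVA and the remaining length. With the bitmap limited
 * to 4 KiB, a 2 MiB request can only be satisfied with 512 small mappings.
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_4K	0x1000UL
#define SZ_2M	0x200000UL
#define SZ_1G	0x40000000UL

static unsigned long pick_pgsize(unsigned long pgsize_bitmap,
				 uint64_t iova, uint64_t len)
{
	unsigned long pgsize = 0, candidate;

	for (candidate = pgsize_bitmap; candidate; candidate &= candidate - 1) {
		unsigned long size = candidate & -candidate; /* lowest set bit */

		if (size <= len && !(iova & (size - 1)) && size > pgsize)
			pgsize = size;
	}
	return pgsize;
}

int main(void)
{
	uint64_t iova = 0x80000000, len = SZ_2M;

	/* Default bitmap (4K | 2M | 1G): one 2 MiB block entry. */
	printf("default:    %#lx\n", pick_pgsize(SZ_4K | SZ_2M | SZ_1G, iova, len));

	/* Workaround bitmap (4K only): 512 individual 4 KiB mappings. */
	printf("workaround: %#lx\n", pick_pgsize(SZ_4K, iova, len));
	return 0;
}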
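Note on the last hunk in arm-smmu.c: besides the cleanup, it removes an ordering hazard, since res->start was previously read before the resource had been validated, so a missing MMIO resource could dereference a NULL pointer. devm_platform_get_and_ioremap_resource() folds the lookup and ioremap into one call whose error is checked before res is touched. Below is a minimal, hypothetical probe fragment showing that idiom; foo_probe() and the log message are made up, only the call sequence mirrors the patch.

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* Look up MEM resource 0, ioremap it, and hand back the resource. */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* res is only dereferenced once the lookup and mapping succeeded. */
	dev_info(&pdev->dev, "MMIO region %pR\n", res);

	return 0;
}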