author    Ashish Mhetre <amhetre@nvidia.com>  2022-04-21 11:15:04 +0300
committer Will Deacon <will@kernel.org>       2022-04-22 13:21:30 +0300
commit    4a25f2ea0e030b2fc852c4059a50181bfc5b2f57 (patch)
tree      a23a2ff0707c3f9d94b2c1da2bb9bf6511b02831 /drivers/iommu/arm/arm-smmu
parent    95d4782c34a60800ccf91d9f0703137d4367a2fc (diff)
iommu: arm-smmu: disable large page mappings for Nvidia arm-smmu
Tegra194 and Tegra234 SoCs have an erratum that causes walk cache entries to not be invalidated correctly: the walk cache index generated for an IOVA is not the same across translation and invalidation requests. This leads to page faults when a PMD entry is released during unmap and then repopulated with a new PTE table on a subsequent map request. Disabling large page mappings avoids the release of the PMD entry, so translations never see a stale PMD entry in the walk cache.

Fix this by limiting page mappings to PAGE_SIZE for Tegra194 and Tegra234 devices. This is the fix recommended by the Tegra hardware design team.

Acked-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Co-developed-by: Pritesh Raithatha <praithatha@nvidia.com>
Signed-off-by: Pritesh Raithatha <praithatha@nvidia.com>
Signed-off-by: Ashish Mhetre <amhetre@nvidia.com>
Link: https://lore.kernel.org/r/20220421081504.24678-1-amhetre@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'drivers/iommu/arm/arm-smmu')
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 30
1 file changed, 30 insertions, 0 deletions
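For context (not part of this patch): init_context is one of the arm-smmu implementation hooks, and in this era of the driver it is invoked from arm_smmu_init_domain_context() in arm-smmu.c after the generic io_pgtable_cfg has been populated but before the page-table ops are allocated, so a vendor hook can override fields such as pgsize_bitmap. A paraphrased sketch of the caller side:

	/* Paraphrased from arm-smmu.c: give the implementation a chance
	 * to adjust the domain/page-table configuration before the
	 * io-pgtable is allocated. */
	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
		if (ret)
			goto out_unlock;
	}

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);

Because the hook runs before alloc_io_pgtable_ops(), shrinking pgtbl_cfg->pgsize_bitmap here is enough to keep the io-pgtable code from ever creating block mappings.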
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 01e9b50b10a1..87bf522b9d2e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -258,6 +258,34 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct devi
dev_name(dev), err);
}
+static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg,
+ struct device *dev)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ const struct device_node *np = smmu->dev->of_node;
+
+ /*
+ * Tegra194 and Tegra234 SoCs have an erratum that causes walk cache
+ * entries to not be invalidated correctly: the walk cache index
+ * generated for an IOVA is not the same across translation and
+ * invalidation requests. This leads to page faults when a PMD entry
+ * is released during unmap and repopulated with a new PTE table on a
+ * subsequent map request. Disabling large page mappings avoids the
+ * release of the PMD entry, so translations never see a stale PMD
+ * entry in the walk cache.
+ * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
+ * Tegra234.
+ */
+ if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+ of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
+ smmu->pgsize_bitmap = PAGE_SIZE;
+ pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
+ }
+
+ return 0;
+}
+
static const struct arm_smmu_impl nvidia_smmu_impl = {
.read_reg = nvidia_smmu_read_reg,
.write_reg = nvidia_smmu_write_reg,
@@ -268,10 +296,12 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
.global_fault = nvidia_smmu_global_fault,
.context_fault = nvidia_smmu_context_fault,
.probe_finalize = nvidia_smmu_probe_finalize,
+ .init_context = nvidia_smmu_init_context,
};
static const struct arm_smmu_impl nvidia_smmu_single_impl = {
.probe_finalize = nvidia_smmu_probe_finalize,
+ .init_context = nvidia_smmu_init_context,
};
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
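
Why restricting pgsize_bitmap is sufficient: the core IOMMU map path splits every mapping request into the largest page size that the hardware bitmap, the remaining length, and the address alignment all allow. With only PAGE_SIZE set, a 2 MiB (PMD-level) block mapping can never be selected, so PMD entries are never created and later released. Below is a self-contained sketch of that selection logic; pick_pgsize() is a hypothetical stand-in modeled loosely on the core iommu_pgsize() helper, not driver code:

	#include <stdint.h>
	#include <stdio.h>

	/* pick_pgsize(): hypothetical stand-in for the core IOMMU
	 * page-size selection. Returns the largest page size in 'bitmap'
	 * that fits the remaining 'size' and the alignment of
	 * iova | paddr. */
	static uint64_t pick_pgsize(uint64_t bitmap, uint64_t iova,
				    uint64_t paddr, uint64_t size)
	{
		uint64_t addr_merge = iova | paddr;
		/* Largest power of two that still fits into 'size'. */
		unsigned int top = 63 - __builtin_clzll(size);

		/* The address alignment caps the page size further. */
		if (addr_merge) {
			unsigned int align = __builtin_ctzll(addr_merge);
			if (align < top)
				top = align;
		}

		/* Keep only supported sizes <= 2^top, pick the biggest. */
		uint64_t mask = (top >= 63) ? ~0ULL : ((1ULL << (top + 1)) - 1);
		uint64_t usable = bitmap & mask;
		return usable ? 1ULL << (63 - __builtin_clzll(usable)) : 0;
	}

	int main(void)
	{
		/* LPAE-style bitmap (4 KiB | 2 MiB | 1 GiB) vs. 4 KiB only. */
		uint64_t full = (1ULL << 12) | (1ULL << 21) | (1ULL << 30);
		uint64_t only_4k = 1ULL << 12;

		/* A 2 MiB-aligned, 2 MiB request: a PMD block mapping with
		 * the full bitmap, forced into 4 KiB PTEs once limited. */
		printf("full bitmap -> %#llx\n", (unsigned long long)
		       pick_pgsize(full, 0x40200000, 0x80200000, 2 << 20));
		printf("4K only     -> %#llx\n", (unsigned long long)
		       pick_pgsize(only_4k, 0x40200000, 0x80200000, 2 << 20));
		return 0;
	}

With the full LPAE bitmap, the 2 MiB request above maps as a single block (0x200000); with the 4 KiB-only bitmap it is carved into 512 PTE-level mappings (0x1000 each). The trade-off accepted by this erratum fix is more TLB and page-table pressure in exchange for never tearing down a PMD entry.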