author		Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>	2022-06-15 13:10:42 +0300
committer	Joerg Roedel <jroedel@suse.de>	2022-07-06 13:51:11 +0300
commit		6c998abb1ea54b3d83420f68ce382354a30f2238 (patch)
tree		1670d6ea3e5faab3ab7f2fca0b9281e1c128652c /drivers/iommu/arm
parent		04e2afd1a71c98e8ad84e5e6bf8352c3de6e6eaf (diff)
iommu/arm-smmu-v3: Refactor arm_smmu_init_bypass_stes() to force bypass
By default, the disable_bypass flag is set and any device without an iommu
domain installs an STE with CFG_ABORT during arm_smmu_init_bypass_stes().
Introduce a "force" flag and move the STE update logic into
arm_smmu_init_bypass_stes() so that we can force it to install CFG_BYPASS
STEs for specific SIDs. This will be useful in a follow-up patch to install
bypass STEs for IORT RMR SIDs.

Tested-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/20220615101044.1972-8-shameerali.kolothum.thodi@huawei.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
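As a rough illustration of that follow-up usage (not part of this patch), a
caller could look up the STE for a single RMR stream ID and pass force == true.
The wrapper below is hypothetical; arm_smmu_get_step_for_sid() and
arm_smmu_init_bypass_stes() are the existing driver functions it leans on:

	/*
	 * Hypothetical sketch only: force one SID into bypass. The real
	 * follow-up patch adds its own helper with error handling around
	 * the stream-table lookup.
	 */
	static void arm_smmu_force_bypass_ste(struct arm_smmu_device *smmu,
					      u32 sid)
	{
		/* Find this SID's STE in the (possibly 2-level) stream table. */
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		/* force == true overrides disable_bypass for this one STE. */
		arm_smmu_init_bypass_stes(step, 1, true);
	}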
Diffstat (limited to 'drivers/iommu/arm')
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 17d4f3432df2..09723861a08a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1380,12 +1380,21 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 	arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
 }
 
-static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent)
+static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
 {
 	unsigned int i;
+	u64 val = STRTAB_STE_0_V;
+
+	if (disable_bypass && !force)
+		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
+	else
+		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
 
 	for (i = 0; i < nent; ++i) {
-		arm_smmu_write_strtab_ent(NULL, -1, strtab);
+		strtab[0] = cpu_to_le64(val);
+		strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+						   STRTAB_STE_1_SHCFG_INCOMING));
+		strtab[2] = 0;
 		strtab += STRTAB_STE_DWORDS;
 	}
 }
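To make the two encodings above concrete, here is a small standalone sketch
(userspace C, not kernel code) that mirrors the relevant STRTAB_STE_* bit
positions from arm-smmu-v3.h as local constants; the field values are copied
for illustration and are assumptions of this sketch, not definitions added by
the patch:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Local mirrors of fields from arm-smmu-v3.h (assumed values). */
	#define STE_0_V              (1ULL << 0)   /* STE valid bit */
	#define STE_0_CFG_SHIFT      1              /* config field, bits [3:1] */
	#define STE_0_CFG_ABORT      0ULL           /* terminate all transactions */
	#define STE_0_CFG_BYPASS     4ULL           /* bypass translation */
	#define STE_1_SHCFG_SHIFT    44             /* shareability, bits [45:44] */
	#define STE_1_SHCFG_INCOMING 1ULL           /* keep incoming attributes */

	int main(void)
	{
		/* STE dword 0: valid bit plus either the ABORT or BYPASS config. */
		uint64_t abort0  = STE_0_V | (STE_0_CFG_ABORT  << STE_0_CFG_SHIFT);
		uint64_t bypass0 = STE_0_V | (STE_0_CFG_BYPASS << STE_0_CFG_SHIFT);
		/* STE dword 1: use the incoming transaction's shareability. */
		uint64_t dword1  = STE_1_SHCFG_INCOMING << STE_1_SHCFG_SHIFT;

		printf("abort  STE[0] = 0x%016" PRIx64 "\n", abort0);  /* ...0001 */
		printf("bypass STE[0] = 0x%016" PRIx64 "\n", bypass0); /* ...0009 */
		printf("       STE[1] = 0x%016" PRIx64 "\n", dword1);
		return 0;
	}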
@@ -1413,7 +1422,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 		return -ENOMEM;
 	}
 
-	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
+	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
 	arm_smmu_write_strtab_l1_desc(strtab, desc);
 	return 0;
 }
@@ -3051,7 +3060,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
 	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
 	cfg->strtab_base_cfg = reg;
 
-	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
+	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
 	return 0;
 }