From 6d1bcb957be2850e0776f24c289e1f87c256baeb Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:43:07 +0100 Subject: iommu: Remove empty iommu_tlb_range_add() callback from iommu_ops Commit add02cfdc9bc ("iommu: Introduce Interface for IOMMU TLB Flushing") added three new TLB flushing operations to the IOMMU API so that the underlying driver operations can be batched when unmapping large regions of IO virtual address space. However, the ->iotlb_range_add() callback has not been implemented by any IOMMU drivers (amd_iommu.c implements it as an empty function, which incurs the overhead of an indirect branch). Instead, drivers either flush the entire IOTLB in the ->iotlb_sync() callback or perform the necessary invalidation during ->unmap(). Attempting to implement ->iotlb_range_add() for arm-smmu-v3.c revealed two major issues: 1. The page size used to map the region in the page-table is not known, and so it is not generally possible to issue TLB flushes in the most efficient manner. 2. The only mutable state passed to the callback is a pointer to the iommu_domain, which can be accessed concurrently and therefore requires expensive synchronisation to keep track of the outstanding flushes. Remove the callback entirely in preparation for extending ->unmap() and ->iotlb_sync() to update a token on the caller's stack. Signed-off-by: Will Deacon --- drivers/iommu/amd_iommu.c | 6 ------ drivers/iommu/iommu.c | 3 --- drivers/vfio/vfio_iommu_type1.c | 1 - 3 files changed, 10 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index b607a92791d3..f93b148cf55e 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3196,11 +3196,6 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain) domain_flush_complete(dom); } -static void amd_iommu_iotlb_range_add(struct iommu_domain *domain, - unsigned long iova, size_t size) -{ -} - const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, @@ -3219,7 +3214,6 @@ const struct iommu_ops amd_iommu_ops = { .is_attach_deferred = amd_iommu_is_attach_deferred, .pgsize_bitmap = AMD_IOMMU_PGSIZES, .flush_iotlb_all = amd_iommu_flush_iotlb_all, - .iotlb_range_add = amd_iommu_iotlb_range_add, .iotlb_sync = amd_iommu_flush_iotlb_all, }; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 0c674d80c37f..6d7b25fe2474 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1903,9 +1903,6 @@ static size_t __iommu_unmap(struct iommu_domain *domain, if (!unmapped_page) break; - if (sync && ops->iotlb_range_add) - ops->iotlb_range_add(domain, iova, pgsize); - pr_debug("unmapped: iova 0x%lx size 0x%zx\n", iova, unmapped_page); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 054391f30fa8..fad7fd8c167c 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -696,7 +696,6 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain, if (!unmapped) { kfree(entry); } else { - iommu_tlb_range_add(domain->domain, *iova, unmapped); entry->iova = *iova; entry->phys = phys; entry->len = unmapped; -- cgit v1.2.3 From f71da46719460acd5afa411e52dc8cdf1cb9b0ce Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:43:24 +0100 Subject: iommu/io-pgtable-arm: Remove redundant call to io_pgtable_tlb_sync() Commit b6b65ca20bc9 ("iommu/io-pgtable-arm: Add support for non-strict mode") added an unconditional call to io_pgtable_tlb_sync() immediately 
after the case where we replace a block entry with a table entry during an unmap() call. This is redundant, since the IOMMU API will call iommu_tlb_sync() on this path and the patch in question mentions this: | To save having to reason about it too much, make sure the invalidation | in arm_lpae_split_blk_unmap() just performs its own unconditional sync | to minimise the window in which we're technically violating the break- | before-make requirement on a live mapping. This might work out redundant | with an outer-level sync for strict unmaps, but we'll never be splitting | blocks on a DMA fastpath anyway. However, this sync gets in the way of deferred TLB invalidation for leaf entries and is at best a questionable, unproven hack. Remove it. Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 1 - drivers/iommu/io-pgtable-arm.c | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 0fc8dfab2abf..a62733c6a632 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -587,7 +587,6 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, } io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); - io_pgtable_tlb_sync(&data->iop); return size; } diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 161a7d56264d..0d6633921c1e 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -583,7 +583,6 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, tablep = iopte_deref(pte, data); } else if (unmap_idx >= 0) { io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); - io_pgtable_tlb_sync(&data->iop); return size; } -- cgit v1.2.3 From 298f78895b081911e0b3605f07d79ebd3d4cf7b0 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:43:34 +0100 Subject: iommu/io-pgtable: Rename iommu_gather_ops to iommu_flush_ops In preparation for TLB flush gathering in the IOMMU API, rename the iommu_gather_ops structure in io-pgtable to iommu_flush_ops, which better describes its purpose and avoids the potential for confusion between different levels of the API. 
$ find linux/ -type f -name '*.[ch]' | xargs sed -i 's/gather_ops/flush_ops/g' Signed-off-by: Will Deacon --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +- drivers/iommu/arm-smmu-v3.c | 4 ++-- drivers/iommu/arm-smmu.c | 8 ++++---- drivers/iommu/io-pgtable-arm-v7s.c | 2 +- drivers/iommu/io-pgtable-arm.c | 2 +- drivers/iommu/ipmmu-vmsa.c | 4 ++-- drivers/iommu/msm_iommu.c | 4 ++-- drivers/iommu/mtk_iommu.c | 4 ++-- drivers/iommu/qcom_iommu.c | 4 ++-- include/linux/io-pgtable.h | 6 +++--- 10 files changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 92ac995dd9c6..17bceb11e708 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -257,7 +257,7 @@ static void mmu_tlb_sync_context(void *cookie) // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X } -static const struct iommu_gather_ops mmu_tlb_ops = { +static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_all = mmu_tlb_inv_context_s1, .tlb_add_flush = mmu_tlb_inv_range_nosync, .tlb_sync = mmu_tlb_sync_context, diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index a9a9fabd3968..7e137e1e28f1 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1603,7 +1603,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, } while (size -= granule); } -static const struct iommu_gather_ops arm_smmu_gather_ops = { +static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync, @@ -1796,7 +1796,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) .ias = ias, .oas = oas, .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, - .tlb = &arm_smmu_gather_ops, + .tlb = &arm_smmu_flush_ops, .iommu_dev = smmu->dev, }; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 64977c131ee6..dc08db347ef3 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -251,7 +251,7 @@ enum arm_smmu_domain_stage { struct arm_smmu_domain { struct arm_smmu_device *smmu; struct io_pgtable_ops *pgtbl_ops; - const struct iommu_gather_ops *tlb_ops; + const struct iommu_flush_ops *tlb_ops; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; bool non_strict; @@ -547,19 +547,19 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); } -static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = { +static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context_s1, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync_context, }; -static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = { +static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync_context, }; -static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = { +static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, .tlb_sync = arm_smmu_tlb_sync_vmid, diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index a62733c6a632..116f97ee991e 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ 
b/drivers/iommu/io-pgtable-arm-v7s.c @@ -817,7 +817,7 @@ static void dummy_tlb_sync(void *cookie) WARN_ON(cookie != cfg_cookie); } -static const struct iommu_gather_ops dummy_tlb_ops = { +static const struct iommu_flush_ops dummy_tlb_ops = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 0d6633921c1e..402f913b6f6d 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -1081,7 +1081,7 @@ static void dummy_tlb_sync(void *cookie) WARN_ON(cookie != cfg_cookie); } -static const struct iommu_gather_ops dummy_tlb_ops __initconst = { +static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ad0098c0c87c..2c14a2c65b22 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -367,7 +367,7 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, /* The hardware doesn't support selective TLB flush. */ } -static const struct iommu_gather_ops ipmmu_gather_ops = { +static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, .tlb_add_flush = ipmmu_tlb_add_flush, .tlb_sync = ipmmu_tlb_flush_all, @@ -480,7 +480,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; domain->cfg.ias = 32; domain->cfg.oas = 40; - domain->cfg.tlb = &ipmmu_gather_ops; + domain->cfg.tlb = &ipmmu_flush_ops; domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); domain->io_domain.geometry.force_aperture = true; /* diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index b25e2eb9e038..8b602384a385 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -178,7 +178,7 @@ static void __flush_iotlb_sync(void *cookie) */ } -static const struct iommu_gather_ops msm_iommu_gather_ops = { +static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_all = __flush_iotlb, .tlb_add_flush = __flush_iotlb_range, .tlb_sync = __flush_iotlb_sync, @@ -345,7 +345,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv) .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 32, - .tlb = &msm_iommu_gather_ops, + .tlb = &msm_iommu_flush_ops, .iommu_dev = priv->dev, }; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 82e4be4dfdaf..fed77658d67e 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -188,7 +188,7 @@ static void mtk_iommu_tlb_sync(void *cookie) } } -static const struct iommu_gather_ops mtk_iommu_gather_ops = { +static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, .tlb_sync = mtk_iommu_tlb_sync, @@ -267,7 +267,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 32, - .tlb = &mtk_iommu_gather_ops, + .tlb = &mtk_iommu_flush_ops, .iommu_dev = data->dev, }; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 34d0b9783b3e..fd9d9f4da735 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -164,7 +164,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, } } -static const struct iommu_gather_ops 
qcom_gather_ops = { +static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_all = qcom_iommu_tlb_inv_context, .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync, .tlb_sync = qcom_iommu_tlb_sync, @@ -215,7 +215,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 40, - .tlb = &qcom_gather_ops, + .tlb = &qcom_flush_ops, .iommu_dev = qcom_iommu->dev, }; diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index b5a450a3bb47..6292ea15d674 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -17,7 +17,7 @@ enum io_pgtable_fmt { }; /** - * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. + * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management. * * @tlb_flush_all: Synchronously invalidate the entire TLB context. * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. @@ -28,7 +28,7 @@ enum io_pgtable_fmt { * Note that these can all be called in atomic context and must therefore * not block. */ -struct iommu_gather_ops { +struct iommu_flush_ops { void (*tlb_flush_all)(void *cookie); void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie); @@ -84,7 +84,7 @@ struct io_pgtable_cfg { unsigned int ias; unsigned int oas; bool coherent_walk; - const struct iommu_gather_ops *tlb; + const struct iommu_flush_ops *tlb; struct device *iommu_dev; /* Low-level data specific to the table format */ -- cgit v1.2.3 From a7d20dc19d9ea7012227be5144353012ffa3ddc4 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:43:48 +0100 Subject: iommu: Introduce struct iommu_iotlb_gather for batching TLB flushes To permit batching of TLB flushes across multiple calls to the IOMMU driver's ->unmap() implementation, introduce a new structure for tracking the address range to be flushed and the granularity at which the flushing is required. This is hooked into the IOMMU API and its caller are updated to make use of the new structure. Subsequent patches will plumb this into the IOMMU drivers as well, but for now the gathering information is ignored. 
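As a caller-side illustration only (not part of this patch), the intended flow matches the dma-iommu and VFIO updates below: initialise a gather on the caller's stack, pass it to one or more iommu_unmap_fast() calls, then issue a single iommu_tlb_sync(). Here "domain", "iova" and "size" are assumed to be supplied by the caller:

	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&iotlb_gather);

	/* One or more unmaps can share the same gather structure... */
	unmapped = iommu_unmap_fast(domain, iova, size, &iotlb_gather);

	/* ...before a single sync, which also re-initialises the gather. */
	iommu_tlb_sync(domain, &iotlb_gather);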
Signed-off-by: Will Deacon --- drivers/iommu/dma-iommu.c | 9 +++++++-- drivers/iommu/iommu.c | 19 +++++++++++------- drivers/vfio/vfio_iommu_type1.c | 26 ++++++++++++++++--------- include/linux/iommu.h | 43 +++++++++++++++++++++++++++++++++++++---- 4 files changed, 75 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index a7f9c3edbcb2..80beb1f5994a 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -444,13 +444,18 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; size_t iova_off = iova_offset(iovad, dma_addr); + struct iommu_iotlb_gather iotlb_gather; + size_t unmapped; dma_addr -= iova_off; size = iova_align(iovad, size + iova_off); + iommu_iotlb_gather_init(&iotlb_gather); + + unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather); + WARN_ON(unmapped != size); - WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size); if (!cookie->fq_domain) - iommu_tlb_sync(domain); + iommu_tlb_sync(domain, &iotlb_gather); iommu_dma_free_iova(cookie, dma_addr, size); } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 6d7b25fe2474..d67222fdfe44 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1862,7 +1862,7 @@ EXPORT_SYMBOL_GPL(iommu_map); static size_t __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, - bool sync) + struct iommu_iotlb_gather *iotlb_gather) { const struct iommu_ops *ops = domain->ops; size_t unmapped_page, unmapped = 0; @@ -1910,9 +1910,6 @@ static size_t __iommu_unmap(struct iommu_domain *domain, unmapped += unmapped_page; } - if (sync && ops->iotlb_sync) - ops->iotlb_sync(domain); - trace_unmap(orig_iova, size, unmapped); return unmapped; } @@ -1920,14 +1917,22 @@ static size_t __iommu_unmap(struct iommu_domain *domain, size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) { - return __iommu_unmap(domain, iova, size, true); + struct iommu_iotlb_gather iotlb_gather; + size_t ret; + + iommu_iotlb_gather_init(&iotlb_gather); + ret = __iommu_unmap(domain, iova, size, &iotlb_gather); + iommu_tlb_sync(domain, &iotlb_gather); + + return ret; } EXPORT_SYMBOL_GPL(iommu_unmap); size_t iommu_unmap_fast(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long iova, size_t size, + struct iommu_iotlb_gather *iotlb_gather) { - return __iommu_unmap(domain, iova, size, false); + return __iommu_unmap(domain, iova, size, iotlb_gather); } EXPORT_SYMBOL_GPL(iommu_unmap_fast); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index fad7fd8c167c..ad830abe1021 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -650,12 +650,13 @@ unpin_exit: } static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain, - struct list_head *regions) + struct list_head *regions, + struct iommu_iotlb_gather *iotlb_gather) { long unlocked = 0; struct vfio_regions *entry, *next; - iommu_tlb_sync(domain->domain); + iommu_tlb_sync(domain->domain, iotlb_gather); list_for_each_entry_safe(entry, next, regions, list) { unlocked += vfio_unpin_pages_remote(dma, @@ -685,13 +686,15 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain, struct vfio_dma *dma, dma_addr_t *iova, size_t len, phys_addr_t phys, long *unlocked, struct list_head *unmapped_list, - int *unmapped_cnt) + int *unmapped_cnt, + struct 
iommu_iotlb_gather *iotlb_gather) { size_t unmapped = 0; struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (entry) { - unmapped = iommu_unmap_fast(domain->domain, *iova, len); + unmapped = iommu_unmap_fast(domain->domain, *iova, len, + iotlb_gather); if (!unmapped) { kfree(entry); @@ -711,8 +714,8 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain, * or in case of errors. */ if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) { - *unlocked += vfio_sync_unpin(dma, domain, - unmapped_list); + *unlocked += vfio_sync_unpin(dma, domain, unmapped_list, + iotlb_gather); *unmapped_cnt = 0; } @@ -743,6 +746,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, dma_addr_t iova = dma->iova, end = dma->iova + dma->size; struct vfio_domain *domain, *d; LIST_HEAD(unmapped_region_list); + struct iommu_iotlb_gather iotlb_gather; int unmapped_region_cnt = 0; long unlocked = 0; @@ -767,6 +771,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, cond_resched(); } + iommu_iotlb_gather_init(&iotlb_gather); while (iova < end) { size_t unmapped, len; phys_addr_t phys, next; @@ -795,7 +800,8 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, */ unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys, &unlocked, &unmapped_region_list, - &unmapped_region_cnt); + &unmapped_region_cnt, + &iotlb_gather); if (!unmapped) { unmapped = unmap_unpin_slow(domain, dma, &iova, len, phys, &unlocked); @@ -806,8 +812,10 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, dma->iommu_mapped = false; - if (unmapped_region_cnt) - unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list); + if (unmapped_region_cnt) { + unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list, + &iotlb_gather); + } if (do_accounting) { vfio_lock_acct(dma, -unlocked, true); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 1e21431262d9..aaf073010a9a 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -191,6 +191,23 @@ struct iommu_sva_ops { #ifdef CONFIG_IOMMU_API +/** + * struct iommu_iotlb_gather - Range information for a pending IOTLB flush + * + * @start: IOVA representing the start of the range to be flushed + * @end: IOVA representing the end of the range to be flushed (exclusive) + * @pgsize: The interval at which to perform the flush + * + * This structure is intended to be updated by multiple calls to the + * ->unmap() function in struct iommu_ops before eventually being passed + * into ->iotlb_sync(). 
+ */ +struct iommu_iotlb_gather { + unsigned long start; + unsigned long end; + size_t pgsize; +}; + /** * struct iommu_ops - iommu ops and capabilities * @capable: check capability @@ -375,6 +392,13 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev) return (struct iommu_device *)dev_get_drvdata(dev); } +static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) +{ + *gather = (struct iommu_iotlb_gather) { + .start = ULONG_MAX, + }; +} + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ @@ -399,7 +423,8 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size); extern size_t iommu_unmap_fast(struct iommu_domain *domain, - unsigned long iova, size_t size); + unsigned long iova, size_t size, + struct iommu_iotlb_gather *iotlb_gather); extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg,unsigned int nents, int prot); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); @@ -473,10 +498,13 @@ static inline void iommu_flush_tlb_all(struct iommu_domain *domain) domain->ops->flush_iotlb_all(domain); } -static inline void iommu_tlb_sync(struct iommu_domain *domain) +static inline void iommu_tlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather) { if (domain->ops->iotlb_sync) domain->ops->iotlb_sync(domain); + + iommu_iotlb_gather_init(iotlb_gather); } /* PCI device grouping function */ @@ -557,6 +585,7 @@ struct iommu_group {}; struct iommu_fwspec {}; struct iommu_device {}; struct iommu_fault_param {}; +struct iommu_iotlb_gather {}; static inline bool iommu_present(struct bus_type *bus) { @@ -611,7 +640,8 @@ static inline size_t iommu_unmap(struct iommu_domain *domain, } static inline size_t iommu_unmap_fast(struct iommu_domain *domain, - unsigned long iova, int gfp_order) + unsigned long iova, int gfp_order, + struct iommu_iotlb_gather *iotlb_gather) { return 0; } @@ -627,7 +657,8 @@ static inline void iommu_flush_tlb_all(struct iommu_domain *domain) { } -static inline void iommu_tlb_sync(struct iommu_domain *domain) +static inline void iommu_tlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather) { } @@ -812,6 +843,10 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev) return NULL; } +static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) +{ +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } -- cgit v1.2.3 From 56f8af5e9d38f120cba2c2adb0786fa2dbc901a4 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:44:06 +0100 Subject: iommu: Pass struct iommu_iotlb_gather to ->unmap() and ->iotlb_sync() To allow IOMMU drivers to batch up TLB flushing operations and postpone them until ->iotlb_sync() is called, extend the prototypes for the ->unmap() and ->iotlb_sync() IOMMU ops callbacks to take a pointer to the current iommu_iotlb_gather structure. All affected IOMMU drivers are updated, but there should be no functional change since the extra parameter is ignored for now. 
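Purely as a hypothetical sketch (not something this patch implements), a driver's ->iotlb_sync() could eventually consume the gathered range along these lines once later changes start populating it. foo_iommu_iotlb_sync() and foo_iommu_flush_range() are made-up names; the start/end/pgsize fields are those of struct iommu_iotlb_gather introduced earlier in this series:

	static void foo_iommu_iotlb_sync(struct iommu_domain *domain,
					 struct iommu_iotlb_gather *gather)
	{
		/* iommu_iotlb_gather_init() leaves start at ULONG_MAX. */
		if (gather->start == ULONG_MAX)
			return;

		/* Flush [start, end), end being exclusive, at pgsize granularity. */
		foo_iommu_flush_range(domain, gather->start,
				      gather->end - gather->start,
				      gather->pgsize);
	}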
Signed-off-by: Will Deacon --- drivers/iommu/amd_iommu.c | 11 +++++++++-- drivers/iommu/arm-smmu-v3.c | 7 ++++--- drivers/iommu/arm-smmu.c | 5 +++-- drivers/iommu/exynos-iommu.c | 3 ++- drivers/iommu/intel-iommu.c | 3 ++- drivers/iommu/iommu.c | 2 +- drivers/iommu/ipmmu-vmsa.c | 12 +++++++++--- drivers/iommu/msm_iommu.c | 2 +- drivers/iommu/mtk_iommu.c | 13 ++++++++++--- drivers/iommu/mtk_iommu_v1.c | 3 ++- drivers/iommu/omap-iommu.c | 2 +- drivers/iommu/qcom_iommu.c | 12 +++++++++--- drivers/iommu/rockchip-iommu.c | 2 +- drivers/iommu/s390-iommu.c | 3 ++- drivers/iommu/tegra-gart.c | 12 +++++++++--- drivers/iommu/tegra-smmu.c | 2 +- drivers/iommu/virtio-iommu.c | 5 +++-- include/linux/iommu.h | 7 ++++--- 18 files changed, 73 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index f93b148cf55e..29eeea914660 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3055,7 +3055,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, } static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, - size_t page_size) + size_t page_size, + struct iommu_iotlb_gather *gather) { struct protection_domain *domain = to_pdomain(dom); size_t unmap_size; @@ -3196,6 +3197,12 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain) domain_flush_complete(dom); } +static void amd_iommu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) +{ + amd_iommu_flush_iotlb_all(domain); +} + const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, @@ -3214,7 +3221,7 @@ const struct iommu_ops amd_iommu_ops = { .is_attach_deferred = amd_iommu_is_attach_deferred, .pgsize_bitmap = AMD_IOMMU_PGSIZES, .flush_iotlb_all = amd_iommu_flush_iotlb_all, - .iotlb_sync = amd_iommu_flush_iotlb_all, + .iotlb_sync = amd_iommu_iotlb_sync, }; /***************************************************************************** diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 7e137e1e28f1..80753b8ca054 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1985,8 +1985,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, return ops->map(ops, iova, paddr, size, prot); } -static size_t -arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) +static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size, struct iommu_iotlb_gather *gather) { int ret; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); @@ -2010,7 +2010,8 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) arm_smmu_tlb_inv_context(smmu_domain); } -static void arm_smmu_iotlb_sync(struct iommu_domain *domain) +static void arm_smmu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) { struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index dc08db347ef3..e535ae2a9e65 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1301,7 +1301,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, } static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; @@ -1329,7 +1329,8 @@ static 
void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) } } -static void arm_smmu_iotlb_sync(struct iommu_domain *domain) +static void arm_smmu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index b0c1e5f9daae..cf5af34cb681 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1130,7 +1130,8 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain } static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain, - unsigned long l_iova, size_t size) + unsigned long l_iova, size_t size, + struct iommu_iotlb_gather *gather) { struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index ac4172c02244..b9fb8d6ddc6e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -5147,7 +5147,8 @@ static int intel_iommu_map(struct iommu_domain *domain, } static size_t intel_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long iova, size_t size, + struct iommu_iotlb_gather *gather) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct page *freelist = NULL; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d67222fdfe44..70bfbcc09248 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1899,7 +1899,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain, while (unmapped < size) { size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); - unmapped_page = ops->unmap(domain, iova, pgsize); + unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather); if (!unmapped_page) break; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 2c14a2c65b22..a9332b893ce2 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -733,14 +733,14 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, } static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); return domain->iop->unmap(domain->iop, iova, size); } -static void ipmmu_iotlb_sync(struct iommu_domain *io_domain) +static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); @@ -748,6 +748,12 @@ static void ipmmu_iotlb_sync(struct iommu_domain *io_domain) ipmmu_tlb_flush_all(domain); } +static void ipmmu_iotlb_sync(struct iommu_domain *io_domain, + struct iommu_iotlb_gather *gather) +{ + ipmmu_flush_iotlb_all(io_domain); +} + static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, dma_addr_t iova) { @@ -957,7 +963,7 @@ static const struct iommu_ops ipmmu_ops = { .detach_dev = ipmmu_detach_device, .map = ipmmu_map, .unmap = ipmmu_unmap, - .flush_iotlb_all = ipmmu_iotlb_sync, + .flush_iotlb_all = ipmmu_flush_iotlb_all, .iotlb_sync = ipmmu_iotlb_sync, .iova_to_phys = ipmmu_iova_to_phys, .add_device = ipmmu_add_device, diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 8b602384a385..681ab3d3376d 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -509,7 +509,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long 
iova, } static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t len) + size_t len, struct iommu_iotlb_gather *gather) { struct msm_priv *priv = to_msm_priv(domain); unsigned long flags; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index fed77658d67e..c870f1674903 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -371,7 +371,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, } static size_t mtk_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long iova, size_t size, + struct iommu_iotlb_gather *gather) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); unsigned long flags; @@ -384,7 +385,13 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain, return unmapsz; } -static void mtk_iommu_iotlb_sync(struct iommu_domain *domain) +static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) +{ + mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); +} + +static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) { mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); } @@ -490,7 +497,7 @@ static const struct iommu_ops mtk_iommu_ops = { .detach_dev = mtk_iommu_detach_device, .map = mtk_iommu_map, .unmap = mtk_iommu_unmap, - .flush_iotlb_all = mtk_iommu_iotlb_sync, + .flush_iotlb_all = mtk_iommu_flush_iotlb_all, .iotlb_sync = mtk_iommu_iotlb_sync, .iova_to_phys = mtk_iommu_iova_to_phys, .add_device = mtk_iommu_add_device, diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index abeeac488372..7b92ddd5d9fd 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -324,7 +324,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, } static size_t mtk_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long iova, size_t size, + struct iommu_iotlb_gather *gather) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); unsigned long flags; diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index dfb961d8c21b..8039bc5ee425 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1149,7 +1149,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, } static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct omap_iommu_domain *omap_domain = to_omap_domain(domain); struct device *dev = omap_domain->dev; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index fd9d9f4da735..a7432991fa04 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -417,7 +417,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, } static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { size_t ret; unsigned long flags; @@ -441,7 +441,7 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova, return ret; } -static void qcom_iommu_iotlb_sync(struct iommu_domain *domain) +static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain) { struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops, @@ -454,6 +454,12 @@ static void qcom_iommu_iotlb_sync(struct iommu_domain *domain) pm_runtime_put_sync(qcom_domain->iommu->dev); } +static void 
qcom_iommu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) +{ + qcom_iommu_flush_iotlb_all(domain); +} + static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { @@ -581,7 +587,7 @@ static const struct iommu_ops qcom_iommu_ops = { .detach_dev = qcom_iommu_detach_dev, .map = qcom_iommu_map, .unmap = qcom_iommu_unmap, - .flush_iotlb_all = qcom_iommu_iotlb_sync, + .flush_iotlb_all = qcom_iommu_flush_iotlb_all, .iotlb_sync = qcom_iommu_iotlb_sync, .iova_to_phys = qcom_iommu_iova_to_phys, .add_device = qcom_iommu_add_device, diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index dc26d74d79c2..26290f310f90 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -794,7 +794,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, } static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 22d4db302c1c..3b0b18e23187 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -314,7 +314,8 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain, } static size_t s390_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long iova, size_t size, + struct iommu_iotlb_gather *gather) { struct s390_domain *s390_domain = to_s390_domain(domain); int flags = ZPCI_PTE_INVALID; diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index 6d40bc1b38bf..3924f7c05544 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -207,7 +207,7 @@ static inline int __gart_iommu_unmap(struct gart_device *gart, } static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t bytes) + size_t bytes, struct iommu_iotlb_gather *gather) { struct gart_device *gart = gart_handle; int err; @@ -273,11 +273,17 @@ static int gart_iommu_of_xlate(struct device *dev, return 0; } -static void gart_iommu_sync(struct iommu_domain *domain) +static void gart_iommu_sync_map(struct iommu_domain *domain) { FLUSH_GART_REGS(gart_handle); } +static void gart_iommu_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) +{ + gart_iommu_sync_map(domain); +} + static const struct iommu_ops gart_iommu_ops = { .capable = gart_iommu_capable, .domain_alloc = gart_iommu_domain_alloc, @@ -292,7 +298,7 @@ static const struct iommu_ops gart_iommu_ops = { .iova_to_phys = gart_iommu_iova_to_phys, .pgsize_bitmap = GART_IOMMU_PGSIZES, .of_xlate = gart_iommu_of_xlate, - .iotlb_sync_map = gart_iommu_sync, + .iotlb_sync_map = gart_iommu_sync_map, .iotlb_sync = gart_iommu_sync, }; diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index c4a652b227f8..7293fc3f796d 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -680,7 +680,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, } static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct tegra_smmu_as *as = to_smmu_as(domain); dma_addr_t pte_dma; diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 433f4d2ee956..5f9f91a4d7f3 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -742,7 
+742,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, } static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { int ret = 0; size_t unmapped; @@ -788,7 +788,8 @@ static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain, return paddr; } -static void viommu_iotlb_sync(struct iommu_domain *domain) +static void viommu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) { struct viommu_domain *vdomain = to_viommu_domain(domain); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index ad41aee55bc6..64ebaff33455 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -258,10 +258,11 @@ struct iommu_ops { int (*map)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size); + size_t size, struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); void (*iotlb_sync_map)(struct iommu_domain *domain); - void (*iotlb_sync)(struct iommu_domain *domain); + void (*iotlb_sync)(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); @@ -502,7 +503,7 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather) { if (domain->ops->iotlb_sync) - domain->ops->iotlb_sync(domain); + domain->ops->iotlb_sync(domain, iotlb_gather); iommu_iotlb_gather_init(iotlb_gather); } -- cgit v1.2.3 From 05aed9412b0bd0d9a985d94010c42ff0a5c6cc29 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:44:25 +0100 Subject: iommu/io-pgtable: Hook up ->tlb_flush_walk() and ->tlb_flush_leaf() in drivers Hook up ->tlb_flush_walk() and ->tlb_flush_leaf() in drivers using the io-pgtable API so that we can start making use of them in the page-table code. For now, they can just wrap the implementations of ->tlb_add_flush and ->tlb_sync pending future optimisation in each driver. 
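The general shape of the wrappers, shown here only as a sketch with made-up foo_tlb_* names standing in for a driver's existing ->tlb_add_flush and ->tlb_sync implementations (the per-driver hunks below are the authoritative versions):

	static void foo_tlb_flush_walk(unsigned long iova, size_t size,
				       size_t granule, void *cookie)
	{
		foo_tlb_add_flush(iova, size, granule, false, cookie);
		foo_tlb_sync(cookie);
	}

	static void foo_tlb_flush_leaf(unsigned long iova, size_t size,
				       size_t granule, void *cookie)
	{
		foo_tlb_add_flush(iova, size, granule, true, cookie);
		foo_tlb_sync(cookie);
	}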
Signed-off-by: Will Deacon --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 14 ++++++++++++++ drivers/iommu/arm-smmu-v3.c | 22 ++++++++++++++++++++++ drivers/iommu/arm-smmu.c | 24 ++++++++++++++++++++++++ drivers/iommu/ipmmu-vmsa.c | 8 ++++++++ drivers/iommu/msm_iommu.c | 16 ++++++++++++++++ drivers/iommu/mtk_iommu.c | 16 ++++++++++++++++ drivers/iommu/qcom_iommu.c | 16 ++++++++++++++++ 7 files changed, 116 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 17bceb11e708..651858147bd6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -257,8 +257,22 @@ static void mmu_tlb_sync_context(void *cookie) // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X } +static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, + void *cookie) +{ + mmu_tlb_sync_context(cookie); +} + +static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule, + void *cookie) +{ + mmu_tlb_sync_context(cookie); +} + static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_all = mmu_tlb_inv_context_s1, + .tlb_flush_walk = mmu_tlb_flush_walk, + .tlb_flush_leaf = mmu_tlb_flush_leaf, .tlb_add_flush = mmu_tlb_inv_range_nosync, .tlb_sync = mmu_tlb_sync_context, }; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 80753b8ca054..79819b003b07 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1603,8 +1603,30 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, } while (size -= granule); } +static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; + + arm_smmu_tlb_inv_range_nosync(iova, size, granule, false, cookie); + arm_smmu_cmdq_issue_sync(smmu); +} + +static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; + + arm_smmu_tlb_inv_range_nosync(iova, size, granule, true, cookie); + arm_smmu_cmdq_issue_sync(smmu); +} + static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync, }; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e535ae2a9e65..e9f01b860ae3 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -547,20 +547,44 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); } +static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + + smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, false, cookie); + smmu_domain->tlb_ops->tlb_sync(cookie); +} + +static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + + smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, true, cookie); + smmu_domain->tlb_ops->tlb_sync(cookie); +} + static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context_s1, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, 
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, .tlb_sync = arm_smmu_tlb_sync_vmid, }; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index a9332b893ce2..9cc7bcb7e39d 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -361,6 +361,12 @@ static void ipmmu_tlb_flush_all(void *cookie) ipmmu_tlb_invalidate(domain); } +static void ipmmu_tlb_flush(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + ipmmu_tlb_flush_all(cookie); +} + static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie) { @@ -369,6 +375,8 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, + .tlb_flush_walk = ipmmu_tlb_flush, + .tlb_flush_leaf = ipmmu_tlb_flush, .tlb_add_flush = ipmmu_tlb_add_flush, .tlb_sync = ipmmu_tlb_flush_all, }; diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 681ab3d3376d..64132093751a 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -178,8 +178,24 @@ static void __flush_iotlb_sync(void *cookie) */ } +static void __flush_iotlb_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + __flush_iotlb_range(iova, size, granule, false, cookie); + __flush_iotlb_sync(cookie); +} + +static void __flush_iotlb_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + __flush_iotlb_range(iova, size, granule, true, cookie); + __flush_iotlb_sync(cookie); +} + static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_all = __flush_iotlb, + .tlb_flush_walk = __flush_iotlb_walk, + .tlb_flush_leaf = __flush_iotlb_leaf, .tlb_add_flush = __flush_iotlb_range, .tlb_sync = __flush_iotlb_sync, }; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index c870f1674903..85a7176bf9ae 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -188,8 +188,24 @@ static void mtk_iommu_tlb_sync(void *cookie) } } +static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie); + mtk_iommu_tlb_sync(cookie); +} + +static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie); + mtk_iommu_tlb_sync(cookie); +} + static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, + .tlb_flush_walk = mtk_iommu_tlb_flush_walk, + .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf, .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, .tlb_sync = mtk_iommu_tlb_sync, }; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index a7432991fa04..643079e52e69 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -164,8 +164,24 @@ 
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, } } +static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie); + qcom_iommu_tlb_sync(cookie); +} + +static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie); + qcom_iommu_tlb_sync(cookie); +} + static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_all = qcom_iommu_tlb_inv_context, + .tlb_flush_walk = qcom_iommu_tlb_flush_walk, + .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf, .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync, .tlb_sync = qcom_iommu_tlb_sync, }; -- cgit v1.2.3 From 10b7a7d912697afd681a0bcfced9e05543aded35 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:44:32 +0100 Subject: iommu/io-pgtable-arm: Call ->tlb_flush_walk() and ->tlb_flush_leaf() Now that all IOMMU drivers using the io-pgtable API implement the ->tlb_flush_walk() and ->tlb_flush_leaf() callbacks, we can use them in the io-pgtable code instead of ->tlb_add_flush() immediately followed by ->tlb_sync(). Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 25 +++++++++++++++---------- drivers/iommu/io-pgtable-arm.c | 17 ++++++++++++----- include/linux/io-pgtable.h | 14 ++++++++++++++ 3 files changed, 41 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 116f97ee991e..8d4914fe73bc 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -493,9 +493,8 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, * a chance for anything to kick off a table walk for the new iova. 
*/ if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) { - io_pgtable_tlb_add_flush(iop, iova, size, - ARM_V7S_BLOCK_SIZE(2), false); - io_pgtable_tlb_sync(iop); + io_pgtable_tlb_flush_walk(iop, iova, size, + ARM_V7S_BLOCK_SIZE(2)); } else { wmb(); } @@ -541,8 +540,7 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg); size *= ARM_V7S_CONT_PAGES; - io_pgtable_tlb_add_flush(iop, iova, size, size, true); - io_pgtable_tlb_sync(iop); + io_pgtable_tlb_flush_leaf(iop, iova, size, size); return pte; } @@ -637,9 +635,8 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, for (i = 0; i < num_entries; i++) { if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) { /* Also flush any partial walks */ - io_pgtable_tlb_add_flush(iop, iova, blk_size, - ARM_V7S_BLOCK_SIZE(lvl + 1), false); - io_pgtable_tlb_sync(iop); + io_pgtable_tlb_flush_walk(iop, iova, blk_size, + ARM_V7S_BLOCK_SIZE(lvl + 1)); ptep = iopte_deref(pte[i], lvl); __arm_v7s_free_table(ptep, lvl + 1, data); } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { @@ -805,13 +802,19 @@ static void dummy_tlb_flush_all(void *cookie) WARN_ON(cookie != cfg_cookie); } -static void dummy_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, + void *cookie) { WARN_ON(cookie != cfg_cookie); WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } +static void dummy_tlb_add_flush(unsigned long iova, size_t size, + size_t granule, bool leaf, void *cookie) +{ + dummy_tlb_flush(iova, size, granule, cookie); +} + static void dummy_tlb_sync(void *cookie) { WARN_ON(cookie != cfg_cookie); @@ -819,6 +822,8 @@ static void dummy_tlb_sync(void *cookie) static const struct iommu_flush_ops dummy_tlb_ops = { .tlb_flush_all = dummy_tlb_flush_all, + .tlb_flush_walk = dummy_tlb_flush, + .tlb_flush_leaf = dummy_tlb_flush, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, }; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 402f913b6f6d..b58338c86323 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -611,9 +611,8 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, if (!iopte_leaf(pte, lvl, iop->fmt)) { /* Also flush any partial walks */ - io_pgtable_tlb_add_flush(iop, iova, size, - ARM_LPAE_GRANULE(data), false); - io_pgtable_tlb_sync(iop); + io_pgtable_tlb_flush_walk(iop, iova, size, + ARM_LPAE_GRANULE(data)); ptep = iopte_deref(pte, data); __arm_lpae_free_pgtable(data, lvl + 1, ptep); } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { @@ -1069,13 +1068,19 @@ static void dummy_tlb_flush_all(void *cookie) WARN_ON(cookie != cfg_cookie); } -static void dummy_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, + void *cookie) { WARN_ON(cookie != cfg_cookie); WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } +static void dummy_tlb_add_flush(unsigned long iova, size_t size, + size_t granule, bool leaf, void *cookie) +{ + dummy_tlb_flush(iova, size, granule, cookie); +} + static void dummy_tlb_sync(void *cookie) { WARN_ON(cookie != cfg_cookie); @@ -1083,6 +1088,8 @@ static void dummy_tlb_sync(void *cookie) static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, + .tlb_flush_walk = dummy_tlb_flush, + .tlb_flush_leaf = 
dummy_tlb_flush, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, }; diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 27275575b305..0618aac59e74 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -198,6 +198,20 @@ static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) iop->cfg.tlb->tlb_flush_all(iop->cookie); } +static inline void +io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova, + size_t size, size_t granule) +{ + iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); +} + +static inline void +io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova, + size_t size, size_t granule) +{ + iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie); +} + static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, unsigned long iova, size_t size, size_t granule, bool leaf) { -- cgit v1.2.3 From abfd6fe0cd535d31ee83b668be6eb59ce6a8469d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:44:41 +0100 Subject: iommu/io-pgtable: Replace ->tlb_add_flush() with ->tlb_add_page() The ->tlb_add_flush() callback in the io-pgtable API now looks a bit silly: - It takes a size and a granule, which are always the same - It takes a 'bool leaf', which is always true - It only ever flushes a single page With that in mind, replace it with an optional ->tlb_add_page() callback that drops the useless parameters. Signed-off-by: Will Deacon --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 5 -- drivers/iommu/arm-smmu-v3.c | 8 ++- drivers/iommu/arm-smmu.c | 88 +++++++++++++++++++++------------ drivers/iommu/io-pgtable-arm-v7s.c | 12 ++--- drivers/iommu/io-pgtable-arm.c | 11 ++--- drivers/iommu/ipmmu-vmsa.c | 7 --- drivers/iommu/msm_iommu.c | 7 ++- drivers/iommu/mtk_iommu.c | 8 ++- drivers/iommu/qcom_iommu.c | 8 ++- include/linux/io-pgtable.h | 22 ++++----- 10 files changed, 105 insertions(+), 71 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 651858147bd6..ff9af320cacc 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -247,10 +247,6 @@ static void mmu_tlb_inv_context_s1(void *cookie) mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM); } -static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) -{} - static void mmu_tlb_sync_context(void *cookie) { //struct panfrost_device *pfdev = cookie; @@ -273,7 +269,6 @@ static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_all = mmu_tlb_inv_context_s1, .tlb_flush_walk = mmu_tlb_flush_walk, .tlb_flush_leaf = mmu_tlb_flush_leaf, - .tlb_add_flush = mmu_tlb_inv_range_nosync, .tlb_sync = mmu_tlb_sync_context, }; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 79819b003b07..98c90a1b4b22 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1603,6 +1603,12 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, } while (size -= granule); } +static void arm_smmu_tlb_inv_page_nosync(unsigned long iova, size_t granule, + void *cookie) +{ + arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); +} + static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { @@ -1627,7 +1633,7 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context, .tlb_flush_walk = 
arm_smmu_tlb_inv_walk, .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, + .tlb_add_page = arm_smmu_tlb_inv_page_nosync, .tlb_sync = arm_smmu_tlb_sync, }; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e9f01b860ae3..f056164a94b0 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -248,10 +248,16 @@ enum arm_smmu_domain_stage { ARM_SMMU_DOMAIN_BYPASS, }; +struct arm_smmu_flush_ops { + struct iommu_flush_ops tlb; + void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule, + bool leaf, void *cookie) +}; + struct arm_smmu_domain { struct arm_smmu_device *smmu; struct io_pgtable_ops *pgtbl_ops; - const struct iommu_flush_ops *tlb_ops; + const struct arm_smmu_flush_ops *flush_ops; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; bool non_strict; @@ -551,42 +557,62 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; + const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; - smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, false, cookie); - smmu_domain->tlb_ops->tlb_sync(cookie); + ops->tlb_inv_range(iova, size, granule, false, cookie); + ops->tlb.tlb_sync(cookie); } static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, size_t granule, void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; + const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; + + ops->tlb_inv_range(iova, size, granule, true, cookie); + ops->tlb.tlb_sync(cookie); +} + +static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule, + void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; - smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, true, cookie); - smmu_domain->tlb_ops->tlb_sync(cookie); + ops->tlb_inv_range(iova, granule, granule, true, cookie); } -static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = { - .tlb_flush_all = arm_smmu_tlb_inv_context_s1, - .tlb_flush_walk = arm_smmu_tlb_inv_walk, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, - .tlb_sync = arm_smmu_tlb_sync_context, +static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = { + .tlb = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s1, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, + .tlb_add_page = arm_smmu_tlb_add_page, + .tlb_sync = arm_smmu_tlb_sync_context, + }, + .tlb_inv_range = arm_smmu_tlb_inv_range_nosync, }; -static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = { - .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_flush_walk = arm_smmu_tlb_inv_walk, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, - .tlb_sync = arm_smmu_tlb_sync_context, +static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = { + .tlb = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, + .tlb_add_page = arm_smmu_tlb_add_page, + .tlb_sync = arm_smmu_tlb_sync_context, + }, + .tlb_inv_range = arm_smmu_tlb_inv_range_nosync, }; -static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { - .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_flush_walk = arm_smmu_tlb_inv_walk, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, - .tlb_sync = arm_smmu_tlb_sync_vmid, +static const 
struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = { + .tlb = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, + .tlb_add_page = arm_smmu_tlb_add_page, + .tlb_sync = arm_smmu_tlb_sync_vmid, + }, + .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync, }; static irqreturn_t arm_smmu_context_fault(int irq, void *dev) @@ -866,7 +892,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, ias = min(ias, 32UL); oas = min(oas, 32UL); } - smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops; + smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops; break; case ARM_SMMU_DOMAIN_NESTED: /* @@ -886,9 +912,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, oas = min(oas, 40UL); } if (smmu->version == ARM_SMMU_V2) - smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2; + smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2; else - smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1; + smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1; break; default: ret = -EINVAL; @@ -917,7 +943,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, .ias = ias, .oas = oas, .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK, - .tlb = smmu_domain->tlb_ops, + .tlb = &smmu_domain->flush_ops->tlb, .iommu_dev = smmu->dev, }; @@ -1346,9 +1372,9 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; - if (smmu_domain->tlb_ops) { + if (smmu_domain->flush_ops) { arm_smmu_rpm_get(smmu); - smmu_domain->tlb_ops->tlb_flush_all(smmu_domain); + smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain); arm_smmu_rpm_put(smmu); } } @@ -1359,9 +1385,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain, struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; - if (smmu_domain->tlb_ops) { + if (smmu_domain->flush_ops) { arm_smmu_rpm_get(smmu); - smmu_domain->tlb_ops->tlb_sync(smmu_domain); + smmu_domain->flush_ops->tlb.tlb_sync(smmu_domain); arm_smmu_rpm_put(smmu); } } diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 8d4914fe73bc..b3f975c95f76 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -584,7 +584,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, return __arm_v7s_unmap(data, iova, size, 2, tablep); } - io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); + io_pgtable_tlb_add_page(&data->iop, iova, size); return size; } @@ -647,8 +647,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, */ smp_wmb(); } else { - io_pgtable_tlb_add_flush(iop, iova, blk_size, - blk_size, true); + io_pgtable_tlb_add_page(iop, iova, blk_size); } iova += blk_size; } @@ -809,10 +808,9 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } -static void dummy_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie) { - dummy_tlb_flush(iova, size, granule, cookie); + dummy_tlb_flush(iova, granule, granule, cookie); } static void dummy_tlb_sync(void *cookie) @@ -824,7 +822,7 @@ static const struct iommu_flush_ops dummy_tlb_ops = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, .tlb_flush_leaf = dummy_tlb_flush, - 
.tlb_add_flush = dummy_tlb_add_flush, + .tlb_add_page = dummy_tlb_add_page, .tlb_sync = dummy_tlb_sync, }; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index b58338c86323..a5c0db01533e 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -582,7 +582,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, tablep = iopte_deref(pte, data); } else if (unmap_idx >= 0) { - io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); + io_pgtable_tlb_add_page(&data->iop, iova, size); return size; } @@ -623,7 +623,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, */ smp_wmb(); } else { - io_pgtable_tlb_add_flush(iop, iova, size, size, true); + io_pgtable_tlb_add_page(iop, iova, size); } return size; @@ -1075,10 +1075,9 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } -static void dummy_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie) { - dummy_tlb_flush(iova, size, granule, cookie); + dummy_tlb_flush(iova, granule, granule, cookie); } static void dummy_tlb_sync(void *cookie) @@ -1090,7 +1089,7 @@ static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, .tlb_flush_leaf = dummy_tlb_flush, - .tlb_add_flush = dummy_tlb_add_flush, + .tlb_add_page = dummy_tlb_add_page, .tlb_sync = dummy_tlb_sync, }; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 9cc7bcb7e39d..c4da271af90e 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -367,17 +367,10 @@ static void ipmmu_tlb_flush(unsigned long iova, size_t size, ipmmu_tlb_flush_all(cookie); } -static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) -{ - /* The hardware doesn't support selective TLB flush. 
*/ -} - static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, .tlb_flush_walk = ipmmu_tlb_flush, .tlb_flush_leaf = ipmmu_tlb_flush, - .tlb_add_flush = ipmmu_tlb_add_flush, .tlb_sync = ipmmu_tlb_flush_all, }; diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 64132093751a..2cd83295a841 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -192,11 +192,16 @@ static void __flush_iotlb_leaf(unsigned long iova, size_t size, __flush_iotlb_sync(cookie); } +static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie) +{ + __flush_iotlb_range(iova, granule, granule, true, cookie); +} + static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_all = __flush_iotlb, .tlb_flush_walk = __flush_iotlb_walk, .tlb_flush_leaf = __flush_iotlb_leaf, - .tlb_add_flush = __flush_iotlb_range, + .tlb_add_page = __flush_iotlb_page, .tlb_sync = __flush_iotlb_sync, }; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 85a7176bf9ae..a0b4b4dc4b90 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -202,11 +202,17 @@ static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size, mtk_iommu_tlb_sync(cookie); } +static void mtk_iommu_tlb_flush_page_nosync(unsigned long iova, size_t granule, + void *cookie) +{ + mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie); +} + static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, .tlb_flush_walk = mtk_iommu_tlb_flush_walk, .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf, - .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, + .tlb_add_page = mtk_iommu_tlb_flush_page_nosync, .tlb_sync = mtk_iommu_tlb_sync, }; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 643079e52e69..7d8411dee4cf 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -178,11 +178,17 @@ static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size, qcom_iommu_tlb_sync(cookie); } +static void qcom_iommu_tlb_add_page(unsigned long iova, size_t granule, + void *cookie) +{ + qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); +} + static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_all = qcom_iommu_tlb_inv_context, .tlb_flush_walk = qcom_iommu_tlb_flush_walk, .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf, - .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync, + .tlb_add_page = qcom_iommu_tlb_add_page, .tlb_sync = qcom_iommu_tlb_sync, }; diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 0618aac59e74..99e04bd2baa1 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -25,12 +25,11 @@ enum io_pgtable_fmt { * address range. * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual * address range. - * @tlb_add_flush: Optional callback to queue up leaf TLB invalidation for a - * virtual address range. This function exists purely as an - * optimisation for IOMMUs that cannot batch TLB invalidation - * operations efficiently and are therefore better suited to - * issuing them early rather than deferring them until - * iommu_tlb_sync(). + * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a + * single page. This function exists purely as an optimisation + * for IOMMUs that cannot batch TLB invalidation operations + * efficiently and are therefore better suited to issuing them + * early rather than deferring them until iommu_tlb_sync(). 
* @tlb_sync: Ensure any queued TLB invalidation has taken effect, and * any corresponding page table updates are visible to the * IOMMU. @@ -44,8 +43,7 @@ struct iommu_flush_ops { void *cookie); void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule, void *cookie); - void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule, - bool leaf, void *cookie); + void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie); void (*tlb_sync)(void *cookie); }; @@ -212,10 +210,12 @@ io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova, iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie); } -static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, - unsigned long iova, size_t size, size_t granule, bool leaf) +static inline void +io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova, + size_t granule) { - iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); + if (iop->cfg.tlb->tlb_add_page) + iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie); } static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) -- cgit v1.2.3 From e953f7f2fa78d1c7fd064171f88457c6b1e21af9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:44:50 +0100 Subject: iommu/io-pgtable: Remove unused ->tlb_sync() callback The ->tlb_sync() callback is no longer used, so it can be removed. Signed-off-by: Will Deacon --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 1 - drivers/iommu/arm-smmu-v3.c | 8 -------- drivers/iommu/arm-smmu.c | 17 +++++++++-------- drivers/iommu/io-pgtable-arm-v7s.c | 6 ------ drivers/iommu/io-pgtable-arm.c | 6 ------ drivers/iommu/ipmmu-vmsa.c | 1 - drivers/iommu/msm_iommu.c | 20 +++++++------------- drivers/iommu/mtk_iommu.c | 1 - drivers/iommu/qcom_iommu.c | 1 - include/linux/io-pgtable.h | 9 --------- 10 files changed, 16 insertions(+), 54 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index ff9af320cacc..de22a2276e00 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -269,7 +269,6 @@ static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_all = mmu_tlb_inv_context_s1, .tlb_flush_walk = mmu_tlb_flush_walk, .tlb_flush_leaf = mmu_tlb_flush_leaf, - .tlb_sync = mmu_tlb_sync_context, }; static const char *access_type_name(struct panfrost_device *pfdev, diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 98c90a1b4b22..231093413ff9 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1545,13 +1545,6 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, } /* IO_PGTABLE API */ -static void arm_smmu_tlb_sync(void *cookie) -{ - struct arm_smmu_domain *smmu_domain = cookie; - - arm_smmu_cmdq_issue_sync(smmu_domain->smmu); -} - static void arm_smmu_tlb_inv_context(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; @@ -1634,7 +1627,6 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_walk = arm_smmu_tlb_inv_walk, .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_page = arm_smmu_tlb_inv_page_nosync, - .tlb_sync = arm_smmu_tlb_sync, }; /* IOMMU API */ diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index f056164a94b0..07a267c437d6 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -251,7 +251,8 @@ enum arm_smmu_domain_stage { struct arm_smmu_flush_ops { struct iommu_flush_ops tlb; void (*tlb_inv_range)(unsigned long iova, 
size_t size, size_t granule, - bool leaf, void *cookie) + bool leaf, void *cookie); + void (*tlb_sync)(void *cookie); }; struct arm_smmu_domain { @@ -539,7 +540,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears * almost negligible, but the benefit of getting the first one in as far ahead * of the sync as possible is significant, hence we don't just make this a - * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think. + * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think. */ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie) @@ -560,7 +561,7 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; ops->tlb_inv_range(iova, size, granule, false, cookie); - ops->tlb.tlb_sync(cookie); + ops->tlb_sync(cookie); } static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, @@ -570,7 +571,7 @@ static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; ops->tlb_inv_range(iova, size, granule, true, cookie); - ops->tlb.tlb_sync(cookie); + ops->tlb_sync(cookie); } static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule, @@ -588,9 +589,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = { .tlb_flush_walk = arm_smmu_tlb_inv_walk, .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_page = arm_smmu_tlb_add_page, - .tlb_sync = arm_smmu_tlb_sync_context, }, .tlb_inv_range = arm_smmu_tlb_inv_range_nosync, + .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = { @@ -599,9 +600,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_walk = arm_smmu_tlb_inv_walk, .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_page = arm_smmu_tlb_add_page, - .tlb_sync = arm_smmu_tlb_sync_context, }, .tlb_inv_range = arm_smmu_tlb_inv_range_nosync, + .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = { @@ -610,9 +611,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_flush_walk = arm_smmu_tlb_inv_walk, .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_page = arm_smmu_tlb_add_page, - .tlb_sync = arm_smmu_tlb_sync_vmid, }, .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync, + .tlb_sync = arm_smmu_tlb_sync_vmid, }; static irqreturn_t arm_smmu_context_fault(int irq, void *dev) @@ -1387,7 +1388,7 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain, if (smmu_domain->flush_ops) { arm_smmu_rpm_get(smmu); - smmu_domain->flush_ops->tlb.tlb_sync(smmu_domain); + smmu_domain->flush_ops->tlb_sync(smmu_domain); arm_smmu_rpm_put(smmu); } } diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index b3f975c95f76..203894fb6765 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -813,17 +813,11 @@ static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie) dummy_tlb_flush(iova, granule, granule, cookie); } -static void dummy_tlb_sync(void *cookie) -{ - WARN_ON(cookie != cfg_cookie); -} - static const struct iommu_flush_ops dummy_tlb_ops = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, .tlb_flush_leaf = dummy_tlb_flush, .tlb_add_page = dummy_tlb_add_page, - .tlb_sync = dummy_tlb_sync, 
}; #define __FAIL(ops) ({ \ diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index a5c0db01533e..f35516744965 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -1080,17 +1080,11 @@ static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie) dummy_tlb_flush(iova, granule, granule, cookie); } -static void dummy_tlb_sync(void *cookie) -{ - WARN_ON(cookie != cfg_cookie); -} - static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, .tlb_flush_leaf = dummy_tlb_flush, .tlb_add_page = dummy_tlb_add_page, - .tlb_sync = dummy_tlb_sync, }; static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index c4da271af90e..a2b8eff4c1f7 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -371,7 +371,6 @@ static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, .tlb_flush_walk = ipmmu_tlb_flush, .tlb_flush_leaf = ipmmu_tlb_flush, - .tlb_sync = ipmmu_tlb_flush_all, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 2cd83295a841..ccfc7ed230ef 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -168,28 +168,16 @@ fail: return; } -static void __flush_iotlb_sync(void *cookie) -{ - /* - * Nothing is needed here, the barrier to guarantee - * completion of the tlb sync operation is implicitly - * taken care when the iommu client does a writel before - * kick starting the other master. - */ -} - static void __flush_iotlb_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { __flush_iotlb_range(iova, size, granule, false, cookie); - __flush_iotlb_sync(cookie); } static void __flush_iotlb_leaf(unsigned long iova, size_t size, size_t granule, void *cookie) { __flush_iotlb_range(iova, size, granule, true, cookie); - __flush_iotlb_sync(cookie); } static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie) @@ -202,7 +190,6 @@ static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_walk = __flush_iotlb_walk, .tlb_flush_leaf = __flush_iotlb_leaf, .tlb_add_page = __flush_iotlb_page, - .tlb_sync = __flush_iotlb_sync, }; static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end) @@ -712,6 +699,13 @@ static struct iommu_ops msm_iommu_ops = { .detach_dev = msm_iommu_detach_dev, .map = msm_iommu_map, .unmap = msm_iommu_unmap, + /* + * Nothing is needed here, the barrier to guarantee + * completion of the tlb sync operation is implicitly + * taken care when the iommu client does a writel before + * kick starting the other master. 
+ */ + .iotlb_sync = NULL, .iova_to_phys = msm_iommu_iova_to_phys, .add_device = msm_iommu_add_device, .remove_device = msm_iommu_remove_device, diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index a0b4b4dc4b90..3785750bdb44 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -213,7 +213,6 @@ static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_walk = mtk_iommu_tlb_flush_walk, .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf, .tlb_add_page = mtk_iommu_tlb_flush_page_nosync, - .tlb_sync = mtk_iommu_tlb_sync, }; static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 7d8411dee4cf..0b8a6d6bb475 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -189,7 +189,6 @@ static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_walk = qcom_iommu_tlb_flush_walk, .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf, .tlb_add_page = qcom_iommu_tlb_add_page, - .tlb_sync = qcom_iommu_tlb_sync, }; static irqreturn_t qcom_iommu_fault(int irq, void *dev) diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 99e04bd2baa1..843310484fe2 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -30,9 +30,6 @@ enum io_pgtable_fmt { * for IOMMUs that cannot batch TLB invalidation operations * efficiently and are therefore better suited to issuing them * early rather than deferring them until iommu_tlb_sync(). - * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and - * any corresponding page table updates are visible to the - * IOMMU. * * Note that these can all be called in atomic context and must therefore * not block. @@ -44,7 +41,6 @@ struct iommu_flush_ops { void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule, void *cookie); void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie); - void (*tlb_sync)(void *cookie); }; /** @@ -218,11 +214,6 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova, iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie); } -static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) -{ - iop->cfg.tlb->tlb_sync(iop->cookie); -} - /** * struct io_pgtable_init_fns - Alloc/free a set of page tables for a * particular format. -- cgit v1.2.3 From a2d3a382d6c682e22b263c9e7f0d857c3fa6c9d6 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:44:58 +0100 Subject: iommu/io-pgtable: Pass struct iommu_iotlb_gather to ->unmap() Update the io-pgtable ->unmap() function to take an iommu_iotlb_gather pointer as an argument, and update the callers as appropriate. 
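To make the interface change concrete, the prototype in struct io_pgtable_ops grows a gather argument and drivers simply forward whatever the IOMMU core handed them. The sketch below is illustrative only: example_unmap() and example_domain_ops() are hypothetical names, not code from this patch, and io-pgtable itself does not consume the gather until a later patch in the series.

size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
		size_t size, struct iommu_iotlb_gather *gather);

static size_t example_unmap(struct iommu_domain *domain, unsigned long iova,
			    size_t size, struct iommu_iotlb_gather *gather)
{
	struct io_pgtable_ops *ops = example_domain_ops(domain); /* hypothetical lookup */

	/* Forward the caller's gather; callers with nothing to batch pass NULL. */
	return ops->unmap(ops, iova, size, gather);
}

Callers without a batching context, such as the io-pgtable selftests, pass NULL for the gather pointer, as the hunks below show.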
Signed-off-by: Will Deacon --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +- drivers/iommu/arm-smmu-v3.c | 2 +- drivers/iommu/arm-smmu.c | 2 +- drivers/iommu/io-pgtable-arm-v7s.c | 6 +++--- drivers/iommu/io-pgtable-arm.c | 7 +++---- drivers/iommu/ipmmu-vmsa.c | 2 +- drivers/iommu/msm_iommu.c | 2 +- drivers/iommu/mtk_iommu.c | 2 +- drivers/iommu/qcom_iommu.c | 2 +- include/linux/io-pgtable.h | 4 +++- 10 files changed, 16 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index de22a2276e00..6e8145c36e93 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -222,7 +222,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) size_t unmapped_page; size_t pgsize = get_pgsize(iova, len - unmapped_len); - unmapped_page = ops->unmap(ops, iova, pgsize); + unmapped_page = ops->unmap(ops, iova, pgsize, NULL); if (!unmapped_page) break; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 231093413ff9..8e2e53079f48 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2015,7 +2015,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, if (!ops) return 0; - ret = ops->unmap(ops, iova, size); + ret = ops->unmap(ops, iova, size, gather); if (ret && arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size)) return 0; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 07a267c437d6..f6689956ab6e 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1362,7 +1362,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, return 0; arm_smmu_rpm_get(smmu); - ret = ops->unmap(ops, iova, size); + ret = ops->unmap(ops, iova, size, gather); arm_smmu_rpm_put(smmu); return ret; diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 203894fb6765..a7776e982b6c 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -666,7 +666,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, } static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); @@ -892,7 +892,7 @@ static int __init arm_v7s_do_selftests(void) size = 1UL << __ffs(cfg.pgsize_bitmap); while (i < loopnr) { iova_start = i * SZ_16M; - if (ops->unmap(ops, iova_start + size, size) != size) + if (ops->unmap(ops, iova_start + size, size, NULL) != size) return __FAIL(ops); /* Remap of partial unmap */ @@ -910,7 +910,7 @@ static int __init arm_v7s_do_selftests(void) for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) { size = 1UL << i; - if (ops->unmap(ops, iova, size) != size) + if (ops->unmap(ops, iova, size, NULL) != size) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42)) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index f35516744965..325430f8a0a1 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -642,7 +641,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, } static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); arm_lpae_iopte 
*ptep = data->pgd; @@ -1167,7 +1166,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) /* Partial unmap */ size = 1UL << __ffs(cfg->pgsize_bitmap); - if (ops->unmap(ops, SZ_1G + size, size) != size) + if (ops->unmap(ops, SZ_1G + size, size, NULL) != size) return __FAIL(ops, i); /* Remap of partial unmap */ @@ -1182,7 +1181,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { size = 1UL << j; - if (ops->unmap(ops, iova, size) != size) + if (ops->unmap(ops, iova, size, NULL) != size) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42)) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index a2b8eff4c1f7..76a8ec343d53 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -737,7 +737,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - return domain->iop->unmap(domain->iop, iova, size); + return domain->iop->unmap(domain->iop, iova, size, gather); } static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain) diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index ccfc7ed230ef..8a0dcaf0a9e9 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -523,7 +523,7 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, unsigned long flags; spin_lock_irqsave(&priv->pgtlock, flags); - len = priv->iop->unmap(priv->iop, iova, len); + len = priv->iop->unmap(priv->iop, iova, len, gather); spin_unlock_irqrestore(&priv->pgtlock, flags); return len; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 3785750bdb44..b73cffd63262 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -400,7 +400,7 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain, size_t unmapsz; spin_lock_irqsave(&dom->pgtlock, flags); - unmapsz = dom->iop->unmap(dom->iop, iova, size); + unmapsz = dom->iop->unmap(dom->iop, iova, size, gather); spin_unlock_irqrestore(&dom->pgtlock, flags); return unmapsz; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 0b8a6d6bb475..48b288ef74b4 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -455,7 +455,7 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova, */ pm_runtime_get_sync(qcom_domain->iommu->dev); spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); - ret = ops->unmap(ops, iova, size); + ret = ops->unmap(ops, iova, size, gather); spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); pm_runtime_put_sync(qcom_domain->iommu->dev); diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 843310484fe2..fe27d93c8ad9 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -1,7 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IO_PGTABLE_H #define __IO_PGTABLE_H + #include +#include /* * Public API for use by IOMMU drivers @@ -136,7 +138,7 @@ struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int prot); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, - size_t size); + size_t size, struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); }; -- cgit v1.2.3 From 3951c41af4a65ba418e6b1b973d398552bedb84f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:45:15 +0100 Subject: 
iommu/io-pgtable: Pass struct iommu_iotlb_gather to ->tlb_add_page() With all the pieces in place, we can finally propagate the iommu_iotlb_gather structure from the call to unmap() down to the IOMMU drivers' implementation of ->tlb_add_page(). Currently everybody ignores it, but the machinery is now there to defer invalidation. Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 3 ++- drivers/iommu/arm-smmu.c | 3 ++- drivers/iommu/io-pgtable-arm-v7s.c | 23 ++++++++++++++--------- drivers/iommu/io-pgtable-arm.c | 22 ++++++++++++++-------- drivers/iommu/msm_iommu.c | 3 ++- drivers/iommu/mtk_iommu.c | 3 ++- drivers/iommu/qcom_iommu.c | 3 ++- include/linux/io-pgtable.h | 16 +++++++++------- 8 files changed, 47 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 8e2e53079f48..d1ebc7103065 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1596,7 +1596,8 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, } while (size -= granule); } -static void arm_smmu_tlb_inv_page_nosync(unsigned long iova, size_t granule, +static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie) { arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index f6689956ab6e..5598d0ff71a8 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -574,7 +574,8 @@ static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, ops->tlb_sync(cookie); } -static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule, +static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index a7776e982b6c..18e7d212c7de 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -362,7 +362,8 @@ static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl) return false; } -static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long, +static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, + struct iommu_iotlb_gather *, unsigned long, size_t, int, arm_v7s_iopte *); static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, @@ -383,7 +384,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, size_t sz = ARM_V7S_BLOCK_SIZE(lvl); tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl); - if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz, + if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz, sz, lvl, tblp) != sz)) return -EINVAL; } else if (ptep[i]) { @@ -545,6 +546,7 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, } static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, + struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, arm_v7s_iopte blk_pte, arm_v7s_iopte *ptep) @@ -581,14 +583,15 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, return 0; tablep = iopte_deref(pte, 1); - return __arm_v7s_unmap(data, iova, size, 2, tablep); + return __arm_v7s_unmap(data, gather, iova, size, 2, tablep); } - io_pgtable_tlb_add_page(&data->iop, iova, size); + io_pgtable_tlb_add_page(&data->iop, gather, iova, size); return size; } static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, + struct iommu_iotlb_gather *gather, unsigned 
long iova, size_t size, int lvl, arm_v7s_iopte *ptep) { @@ -647,7 +650,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, */ smp_wmb(); } else { - io_pgtable_tlb_add_page(iop, iova, blk_size); + io_pgtable_tlb_add_page(iop, gather, iova, blk_size); } iova += blk_size; } @@ -657,12 +660,13 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, * Insert a table at the next level to map the old region, * minus the part we want to unmap */ - return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep); + return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0], + ptep); } /* Keep on walkin' */ ptep = iopte_deref(pte[0], lvl); - return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep); + return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep); } static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, @@ -673,7 +677,7 @@ static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, if (WARN_ON(upper_32_bits(iova))) return 0; - return __arm_v7s_unmap(data, iova, size, 1, data->pgd); + return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd); } static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, @@ -808,7 +812,8 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } -static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie) +static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie) { dummy_tlb_flush(iova, granule, granule, cookie); } diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 325430f8a0a1..4c91359057c5 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -289,6 +289,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, } static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, + struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, int lvl, arm_lpae_iopte *ptep); @@ -334,8 +335,10 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); - if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) + if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) { + WARN_ON(1); return -EINVAL; + } } __arm_lpae_init_pte(data, paddr, prot, lvl, ptep); @@ -536,6 +539,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop) } static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, + struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, arm_lpae_iopte blk_pte, int lvl, arm_lpae_iopte *ptep) @@ -581,14 +585,15 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, tablep = iopte_deref(pte, data); } else if (unmap_idx >= 0) { - io_pgtable_tlb_add_page(&data->iop, iova, size); + io_pgtable_tlb_add_page(&data->iop, gather, iova, size); return size; } - return __arm_lpae_unmap(data, iova, size, lvl, tablep); + return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep); } static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, + struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, int lvl, arm_lpae_iopte *ptep) { @@ -622,7 +627,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, */ smp_wmb(); } else { - io_pgtable_tlb_add_page(iop, iova, size); + io_pgtable_tlb_add_page(iop, gather, iova, size); } return size; @@ -631,13 +636,13 @@ static 
size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, * Insert a table at the next level to map the old region, * minus the part we want to unmap */ - return arm_lpae_split_blk_unmap(data, iova, size, pte, + return arm_lpae_split_blk_unmap(data, gather, iova, size, pte, lvl + 1, ptep); } /* Keep on walkin' */ ptep = iopte_deref(pte, data); - return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep); + return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep); } static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, @@ -650,7 +655,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) return 0; - return __arm_lpae_unmap(data, iova, size, lvl, ptep); + return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep); } static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, @@ -1074,7 +1079,8 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } -static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie) +static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie) { dummy_tlb_flush(iova, granule, granule, cookie); } diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 8a0dcaf0a9e9..4c0be5b75c28 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -180,7 +180,8 @@ static void __flush_iotlb_leaf(unsigned long iova, size_t size, __flush_iotlb_range(iova, size, granule, true, cookie); } -static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie) +static void __flush_iotlb_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie) { __flush_iotlb_range(iova, granule, granule, true, cookie); } diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index b73cffd63262..0827d51936fa 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -202,7 +202,8 @@ static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size, mtk_iommu_tlb_sync(cookie); } -static void mtk_iommu_tlb_flush_page_nosync(unsigned long iova, size_t granule, +static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie) { mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie); diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 48b288ef74b4..eac760cdbb28 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -178,7 +178,8 @@ static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size, qcom_iommu_tlb_sync(cookie); } -static void qcom_iommu_tlb_add_page(unsigned long iova, size_t granule, +static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie) { qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index fe27d93c8ad9..6b1b8be3ebec 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -28,10 +28,10 @@ enum io_pgtable_fmt { * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual * address range. * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a - * single page. 
This function exists purely as an optimisation - * for IOMMUs that cannot batch TLB invalidation operations - * efficiently and are therefore better suited to issuing them - * early rather than deferring them until iommu_tlb_sync(). + * single page. IOMMUs that cannot batch TLB invalidation + * operations efficiently will typically issue them here, but + * others may decide to update the iommu_iotlb_gather structure + * and defer the invalidation until iommu_tlb_sync() instead. * * Note that these can all be called in atomic context and must therefore * not block. @@ -42,7 +42,8 @@ struct iommu_flush_ops { void *cookie); void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule, void *cookie); - void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie); + void (*tlb_add_page)(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie); }; /** @@ -209,11 +210,12 @@ io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova, } static inline void -io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova, +io_pgtable_tlb_add_page(struct io_pgtable *iop, + struct iommu_iotlb_gather * gather, unsigned long iova, size_t granule) { if (iop->cfg.tlb->tlb_add_page) - iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie); + iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie); } /** -- cgit v1.2.3 From 2a8868f16e6b1987cf43f1f46d2a12b7b6ddcd88 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 17:12:24 +0100 Subject: iommu/arm-smmu-v3: Separate s/w and h/w views of prod and cons indexes In preparation for rewriting the command queue insertion code to use a new algorithm, separate the software and hardware views of the prod and cons indexes so that manipulating the software state doesn't automatically update the hardware state at the same time. No functional change. Tested-by: Ganapatrao Kulkarni Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index d1ebc7103065..06f569e41d84 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -697,17 +697,13 @@ static bool queue_empty(struct arm_smmu_queue *q) Q_WRP(q, q->prod) == Q_WRP(q, q->cons); } -static void queue_sync_cons(struct arm_smmu_queue *q) +static void queue_sync_cons_in(struct arm_smmu_queue *q) { q->cons = readl_relaxed(q->cons_reg); } -static void queue_inc_cons(struct arm_smmu_queue *q) +static void queue_sync_cons_out(struct arm_smmu_queue *q) { - u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; - - q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); - /* * Ensure that all CPU accesses (reads and writes) to the queue * are complete before we update the cons pointer. 
@@ -716,7 +712,13 @@ static void queue_inc_cons(struct arm_smmu_queue *q) writel_relaxed(q->cons, q->cons_reg); } -static int queue_sync_prod(struct arm_smmu_queue *q) +static void queue_inc_cons(struct arm_smmu_queue *q) +{ + u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; + q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); +} + +static int queue_sync_prod_in(struct arm_smmu_queue *q) { int ret = 0; u32 prod = readl_relaxed(q->prod_reg); @@ -728,12 +730,15 @@ static int queue_sync_prod(struct arm_smmu_queue *q) return ret; } +static void queue_sync_prod_out(struct arm_smmu_queue *q) +{ + writel(q->prod, q->prod_reg); +} + static void queue_inc_prod(struct arm_smmu_queue *q) { u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1; - q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); - writel(q->prod, q->prod_reg); } /* @@ -750,7 +755,7 @@ static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe) ARM_SMMU_CMDQ_SYNC_TIMEOUT_US : ARM_SMMU_POLL_TIMEOUT_US); - while (queue_sync_cons(q), (sync ? !queue_empty(q) : queue_full(q))) { + while (queue_sync_cons_in(q), (sync ? !queue_empty(q) : queue_full(q))) { if (ktime_compare(ktime_get(), timeout) > 0) return -ETIMEDOUT; @@ -784,6 +789,7 @@ static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent) queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords); queue_inc_prod(q); + queue_sync_prod_out(q); return 0; } @@ -802,6 +808,7 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent) queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords); queue_inc_cons(q); + queue_sync_cons_out(q); return 0; } @@ -1322,7 +1329,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) * Not much we can do on overflow, so scream and pretend we're * trying harder. */ - if (queue_sync_prod(q) == -EOVERFLOW) + if (queue_sync_prod_in(q) == -EOVERFLOW) dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); } while (!queue_empty(q)); @@ -1379,7 +1386,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) while (!queue_remove_raw(q, evt)) arm_smmu_handle_ppr(smmu, evt); - if (queue_sync_prod(q) == -EOVERFLOW) + if (queue_sync_prod_in(q) == -EOVERFLOW) dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); } while (!queue_empty(q)); @@ -1563,8 +1570,9 @@ static void arm_smmu_tlb_inv_context(void *cookie) /* * NOTE: when io-pgtable is in non-strict mode, we may get here with * PTEs previously cleared by unmaps on the current CPU not yet visible - * to the SMMU. We are relying on the DSB implicit in queue_inc_prod() - * to guarantee those are observed before the TLBI. Do be careful, 007. + * to the SMMU. We are relying on the DSB implicit in + * queue_sync_prod_out() to guarantee those are observed before the + * TLBI. Do be careful, 007. */ arm_smmu_cmdq_issue_cmd(smmu, &cmd); arm_smmu_cmdq_issue_sync(smmu); -- cgit v1.2.3 From 8a073da07bac169601a1874606e09bdb62811978 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 17:15:50 +0100 Subject: iommu/arm-smmu-v3: Drop unused 'q' argument from Q_OVF macro The Q_OVF macro doesn't need to access the arm_smmu_queue structure, so drop the unused macro argument. No functional change. 
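Concretely, only the macro's parameter list changes; the overflow flag sits in a fixed bit of the 32-bit index word, so no per-queue information is needed. Before and after, restated from the hunk below:

#define Q_OVERFLOW_FLAG	(1 << 31)

/* Before */
#define Q_OVF(q, p)	((p) & Q_OVERFLOW_FLAG)

/* After */
#define Q_OVF(p)	((p) & Q_OVERFLOW_FLAG)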
Tested-by: Ganapatrao Kulkarni Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 06f569e41d84..3a8020795959 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -184,7 +184,7 @@ #define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1)) #define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift)) #define Q_OVERFLOW_FLAG (1 << 31) -#define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG) +#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) #define Q_ENT(q, p) ((q)->base + \ Q_IDX(q, p) * (q)->ent_dwords) @@ -715,7 +715,7 @@ static void queue_sync_cons_out(struct arm_smmu_queue *q) static void queue_inc_cons(struct arm_smmu_queue *q) { u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; - q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); + q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); } static int queue_sync_prod_in(struct arm_smmu_queue *q) @@ -723,7 +723,7 @@ static int queue_sync_prod_in(struct arm_smmu_queue *q) int ret = 0; u32 prod = readl_relaxed(q->prod_reg); - if (Q_OVF(q, prod) != Q_OVF(q, q->prod)) + if (Q_OVF(prod) != Q_OVF(q->prod)) ret = -EOVERFLOW; q->prod = prod; @@ -738,7 +738,7 @@ static void queue_sync_prod_out(struct arm_smmu_queue *q) static void queue_inc_prod(struct arm_smmu_queue *q) { u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1; - q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); + q->prod = Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); } /* @@ -1334,7 +1334,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) } while (!queue_empty(q)); /* Sync our overflow flag, as we believe we're up to speed */ - q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); + q->cons = Q_OVF(q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); return IRQ_HANDLED; } @@ -1391,7 +1391,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) } while (!queue_empty(q)); /* Sync our overflow flag, as we believe we're up to speed */ - q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); + q->cons = Q_OVF(q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); writel(q->cons, q->cons_reg); return IRQ_HANDLED; } -- cgit v1.2.3 From 52be86374f48a0d34015257f3e06965910b5b774 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 17:16:08 +0100 Subject: iommu/arm-smmu-v3: Move low-level queue fields out of arm_smmu_queue In preparation for rewriting the command queue insertion code to use a new algorithm, introduce a new arm_smmu_ll_queue structure which contains only the information necessary to perform queue arithmetic for a queue and will later be extended so that we can perform complex atomic manipulation on some of the fields. No functional change. 
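In outline, the queue bookkeeping is split so that the fields needed for index arithmetic can be copied and manipulated on their own. The sketch below is condensed from the hunk that follows; fields not touched by this patch are elided.

struct arm_smmu_ll_queue {
	u32			prod;
	u32			cons;
	u32			max_n_shift;
};

struct arm_smmu_queue {
	struct arm_smmu_ll_queue llq;	/* replaces the old prod/cons/max_n_shift */

	int			irq;	/* Wired interrupt */
	__le64			*base;
	u64			q_base;
	size_t			ent_dwords;

	u32 __iomem		*prod_reg;
	u32 __iomem		*cons_reg;
	/* ... remaining fields unchanged ... */
};

Accessors such as Q_IDX()/Q_WRP() and the prod/cons register updates are then routed through q->llq, as the rest of the diff shows.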
Tested-by: Ganapatrao Kulkarni Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 88 ++++++++++++++++++++++++--------------------- 1 file changed, 47 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 3a8020795959..cb1aa156974e 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -181,8 +181,8 @@ #define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1 #define ARM_SMMU_MEMATTR_OIWB 0xf -#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1)) -#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift)) +#define Q_IDX(q, p) ((p) & ((1 << (q)->llq.max_n_shift) - 1)) +#define Q_WRP(q, p) ((p) & (1 << (q)->llq.max_n_shift)) #define Q_OVERFLOW_FLAG (1 << 31) #define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) #define Q_ENT(q, p) ((q)->base + \ @@ -478,7 +478,14 @@ struct arm_smmu_cmdq_ent { }; }; +struct arm_smmu_ll_queue { + u32 prod; + u32 cons; + u32 max_n_shift; +}; + struct arm_smmu_queue { + struct arm_smmu_ll_queue llq; int irq; /* Wired interrupt */ __le64 *base; @@ -486,9 +493,6 @@ struct arm_smmu_queue { u64 q_base; size_t ent_dwords; - u32 max_n_shift; - u32 prod; - u32 cons; u32 __iomem *prod_reg; u32 __iomem *cons_reg; @@ -687,19 +691,19 @@ static void parse_driver_options(struct arm_smmu_device *smmu) /* Low-level queue manipulation functions */ static bool queue_full(struct arm_smmu_queue *q) { - return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && - Q_WRP(q, q->prod) != Q_WRP(q, q->cons); + return Q_IDX(q, q->llq.prod) == Q_IDX(q, q->llq.cons) && + Q_WRP(q, q->llq.prod) != Q_WRP(q, q->llq.cons); } static bool queue_empty(struct arm_smmu_queue *q) { - return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && - Q_WRP(q, q->prod) == Q_WRP(q, q->cons); + return Q_IDX(q, q->llq.prod) == Q_IDX(q, q->llq.cons) && + Q_WRP(q, q->llq.prod) == Q_WRP(q, q->llq.cons); } static void queue_sync_cons_in(struct arm_smmu_queue *q) { - q->cons = readl_relaxed(q->cons_reg); + q->llq.cons = readl_relaxed(q->cons_reg); } static void queue_sync_cons_out(struct arm_smmu_queue *q) @@ -709,13 +713,13 @@ static void queue_sync_cons_out(struct arm_smmu_queue *q) * are complete before we update the cons pointer. 
*/ mb(); - writel_relaxed(q->cons, q->cons_reg); + writel_relaxed(q->llq.cons, q->cons_reg); } static void queue_inc_cons(struct arm_smmu_queue *q) { - u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; - q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); + u32 cons = (Q_WRP(q, q->llq.cons) | Q_IDX(q, q->llq.cons)) + 1; + q->llq.cons = Q_OVF(q->llq.cons) | Q_WRP(q, cons) | Q_IDX(q, cons); } static int queue_sync_prod_in(struct arm_smmu_queue *q) @@ -723,22 +727,22 @@ static int queue_sync_prod_in(struct arm_smmu_queue *q) int ret = 0; u32 prod = readl_relaxed(q->prod_reg); - if (Q_OVF(prod) != Q_OVF(q->prod)) + if (Q_OVF(prod) != Q_OVF(q->llq.prod)) ret = -EOVERFLOW; - q->prod = prod; + q->llq.prod = prod; return ret; } static void queue_sync_prod_out(struct arm_smmu_queue *q) { - writel(q->prod, q->prod_reg); + writel(q->llq.prod, q->prod_reg); } static void queue_inc_prod(struct arm_smmu_queue *q) { - u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1; - q->prod = Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); + u32 prod = (Q_WRP(q, q->llq.prod) | Q_IDX(q, q->llq.prod)) + 1; + q->llq.prod = Q_OVF(q->llq.prod) | Q_WRP(q, prod) | Q_IDX(q, prod); } /* @@ -787,7 +791,7 @@ static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent) if (queue_full(q)) return -ENOSPC; - queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords); + queue_write(Q_ENT(q, q->llq.prod), ent, q->ent_dwords); queue_inc_prod(q); queue_sync_prod_out(q); return 0; @@ -806,7 +810,7 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent) if (queue_empty(q)) return -EAGAIN; - queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords); + queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); queue_inc_cons(q); queue_sync_cons_out(q); return 0; @@ -1334,7 +1338,8 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) } while (!queue_empty(q)); /* Sync our overflow flag, as we believe we're up to speed */ - q->cons = Q_OVF(q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); + q->llq.cons = Q_OVF(q->llq.prod) | Q_WRP(q, q->llq.cons) | + Q_IDX(q, q->llq.cons); return IRQ_HANDLED; } @@ -1391,8 +1396,9 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) } while (!queue_empty(q)); /* Sync our overflow flag, as we believe we're up to speed */ - q->cons = Q_OVF(q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); - writel(q->cons, q->cons_reg); + q->llq.cons = Q_OVF(q->llq.prod) | Q_WRP(q, q->llq.cons) | + Q_IDX(q, q->llq.cons); + writel(q->llq.cons, q->cons_reg); return IRQ_HANDLED; } @@ -2316,13 +2322,13 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, size_t qsz; do { - qsz = ((1 << q->max_n_shift) * dwords) << 3; + qsz = ((1 << q->llq.max_n_shift) * dwords) << 3; q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL); if (q->base || qsz < PAGE_SIZE) break; - q->max_n_shift--; + q->llq.max_n_shift--; } while (1); if (!q->base) { @@ -2334,7 +2340,7 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, if (!WARN_ON(q->base_dma & (qsz - 1))) { dev_info(smmu->dev, "allocated %u entries for %s\n", - 1 << q->max_n_shift, name); + 1 << q->llq.max_n_shift, name); } q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu); @@ -2343,9 +2349,9 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, q->q_base = Q_BASE_RWA; q->q_base |= q->base_dma & Q_BASE_ADDR_MASK; - q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->max_n_shift); + q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift); - q->prod = q->cons = 0; + q->llq.prod = q->llq.cons = 0; return 0; 
} @@ -2738,8 +2744,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Command queue */ writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); - writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD); - writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS); + writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); + writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); enables = CR0_CMDQEN; ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, @@ -2766,9 +2772,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Event queue */ writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE); - writel_relaxed(smmu->evtq.q.prod, + writel_relaxed(smmu->evtq.q.llq.prod, arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu)); - writel_relaxed(smmu->evtq.q.cons, + writel_relaxed(smmu->evtq.q.llq.cons, arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu)); enables |= CR0_EVTQEN; @@ -2783,9 +2789,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) if (smmu->features & ARM_SMMU_FEAT_PRI) { writeq_relaxed(smmu->priq.q.q_base, smmu->base + ARM_SMMU_PRIQ_BASE); - writel_relaxed(smmu->priq.q.prod, + writel_relaxed(smmu->priq.q.llq.prod, arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu)); - writel_relaxed(smmu->priq.q.cons, + writel_relaxed(smmu->priq.q.llq.cons, arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu)); enables |= CR0_PRIQEN; @@ -2939,18 +2945,18 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) } /* Queue sizes, capped to ensure natural alignment */ - smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_CMDQS, reg)); - if (!smmu->cmdq.q.max_n_shift) { + smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_CMDQS, reg)); + if (!smmu->cmdq.q.llq.max_n_shift) { /* Odd alignment restrictions on the base, so ignore for now */ dev_err(smmu->dev, "unit-length command queue not supported\n"); return -ENXIO; } - smmu->evtq.q.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_EVTQS, reg)); - smmu->priq.q.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_PRIQS, reg)); + smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_EVTQS, reg)); + smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_PRIQS, reg)); /* SID/SSID sizes */ smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg); -- cgit v1.2.3 From 7c288a5b27934281d9ea8b5807bc727268b7001a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 17:16:16 +0100 Subject: iommu/arm-smmu-v3: Operate directly on low-level queue where possible In preparation for rewriting the command queue insertion code to use a new algorithm, rework many of our queue macro accessors and manipulation functions so that they operate on the arm_smmu_ll_queue structure where possible. This will allow us to call these helpers on local variables without having to construct a full-blown arm_smmu_queue on the stack. No functional change. 
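To illustrate what the rework buys us, a helper can now be run against a stack-local snapshot of the producer/consumer state instead of the shared queue. The snippet below is a sketch using the reworked helpers and is not part of the diff; the function name is illustrative:

static bool cmdq_full_snapshot(struct arm_smmu_queue *q)
{
	/* Copy prod/cons into a local arm_smmu_ll_queue and query that. */
	struct arm_smmu_ll_queue llq = {
		.max_n_shift	= q->llq.max_n_shift,
		.prod		= READ_ONCE(q->llq.prod),
		.cons		= READ_ONCE(q->llq.cons),
	};

	return queue_full(&llq);
}

The command-queue rewrite in the next patch relies on exactly this pattern of building a local arm_smmu_ll_queue on the stack.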
Tested-by: Ganapatrao Kulkarni Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 58 ++++++++++++++++++++++++--------------------- 1 file changed, 31 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index cb1aa156974e..9ebb8b39a3b1 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -181,12 +181,13 @@ #define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1 #define ARM_SMMU_MEMATTR_OIWB 0xf -#define Q_IDX(q, p) ((p) & ((1 << (q)->llq.max_n_shift) - 1)) -#define Q_WRP(q, p) ((p) & (1 << (q)->llq.max_n_shift)) +#define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1)) +#define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift)) #define Q_OVERFLOW_FLAG (1 << 31) #define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) #define Q_ENT(q, p) ((q)->base + \ - Q_IDX(q, p) * (q)->ent_dwords) + Q_IDX(&((q)->llq), p) * \ + (q)->ent_dwords) #define Q_BASE_RWA (1UL << 62) #define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5) @@ -689,16 +690,16 @@ static void parse_driver_options(struct arm_smmu_device *smmu) } /* Low-level queue manipulation functions */ -static bool queue_full(struct arm_smmu_queue *q) +static bool queue_full(struct arm_smmu_ll_queue *q) { - return Q_IDX(q, q->llq.prod) == Q_IDX(q, q->llq.cons) && - Q_WRP(q, q->llq.prod) != Q_WRP(q, q->llq.cons); + return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && + Q_WRP(q, q->prod) != Q_WRP(q, q->cons); } -static bool queue_empty(struct arm_smmu_queue *q) +static bool queue_empty(struct arm_smmu_ll_queue *q) { - return Q_IDX(q, q->llq.prod) == Q_IDX(q, q->llq.cons) && - Q_WRP(q, q->llq.prod) == Q_WRP(q, q->llq.cons); + return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && + Q_WRP(q, q->prod) == Q_WRP(q, q->cons); } static void queue_sync_cons_in(struct arm_smmu_queue *q) @@ -716,10 +717,10 @@ static void queue_sync_cons_out(struct arm_smmu_queue *q) writel_relaxed(q->llq.cons, q->cons_reg); } -static void queue_inc_cons(struct arm_smmu_queue *q) +static void queue_inc_cons(struct arm_smmu_ll_queue *q) { - u32 cons = (Q_WRP(q, q->llq.cons) | Q_IDX(q, q->llq.cons)) + 1; - q->llq.cons = Q_OVF(q->llq.cons) | Q_WRP(q, cons) | Q_IDX(q, cons); + u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; + q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); } static int queue_sync_prod_in(struct arm_smmu_queue *q) @@ -739,10 +740,10 @@ static void queue_sync_prod_out(struct arm_smmu_queue *q) writel(q->llq.prod, q->prod_reg); } -static void queue_inc_prod(struct arm_smmu_queue *q) +static void queue_inc_prod(struct arm_smmu_ll_queue *q) { - u32 prod = (Q_WRP(q, q->llq.prod) | Q_IDX(q, q->llq.prod)) + 1; - q->llq.prod = Q_OVF(q->llq.prod) | Q_WRP(q, prod) | Q_IDX(q, prod); + u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1; + q->prod = Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); } /* @@ -759,7 +760,8 @@ static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe) ARM_SMMU_CMDQ_SYNC_TIMEOUT_US : ARM_SMMU_POLL_TIMEOUT_US); - while (queue_sync_cons_in(q), (sync ? !queue_empty(q) : queue_full(q))) { + while (queue_sync_cons_in(q), + (sync ? 
!queue_empty(&q->llq) : queue_full(&q->llq))) { if (ktime_compare(ktime_get(), timeout) > 0) return -ETIMEDOUT; @@ -788,11 +790,11 @@ static void queue_write(__le64 *dst, u64 *src, size_t n_dwords) static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent) { - if (queue_full(q)) + if (queue_full(&q->llq)) return -ENOSPC; queue_write(Q_ENT(q, q->llq.prod), ent, q->ent_dwords); - queue_inc_prod(q); + queue_inc_prod(&q->llq); queue_sync_prod_out(q); return 0; } @@ -807,11 +809,11 @@ static void queue_read(__le64 *dst, u64 *src, size_t n_dwords) static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent) { - if (queue_empty(q)) + if (queue_empty(&q->llq)) return -EAGAIN; queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); - queue_inc_cons(q); + queue_inc_cons(&q->llq); queue_sync_cons_out(q); return 0; } @@ -1316,6 +1318,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) int i; struct arm_smmu_device *smmu = dev; struct arm_smmu_queue *q = &smmu->evtq.q; + struct arm_smmu_ll_queue *llq = &q->llq; u64 evt[EVTQ_ENT_DWORDS]; do { @@ -1335,11 +1338,11 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) */ if (queue_sync_prod_in(q) == -EOVERFLOW) dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); - } while (!queue_empty(q)); + } while (!queue_empty(llq)); /* Sync our overflow flag, as we believe we're up to speed */ - q->llq.cons = Q_OVF(q->llq.prod) | Q_WRP(q, q->llq.cons) | - Q_IDX(q, q->llq.cons); + llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | + Q_IDX(llq, llq->cons); return IRQ_HANDLED; } @@ -1385,6 +1388,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) { struct arm_smmu_device *smmu = dev; struct arm_smmu_queue *q = &smmu->priq.q; + struct arm_smmu_ll_queue *llq = &q->llq; u64 evt[PRIQ_ENT_DWORDS]; do { @@ -1393,12 +1397,12 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) if (queue_sync_prod_in(q) == -EOVERFLOW) dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); - } while (!queue_empty(q)); + } while (!queue_empty(llq)); /* Sync our overflow flag, as we believe we're up to speed */ - q->llq.cons = Q_OVF(q->llq.prod) | Q_WRP(q, q->llq.cons) | - Q_IDX(q, q->llq.cons); - writel(q->llq.cons, q->cons_reg); + llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | + Q_IDX(llq, llq->cons); + queue_sync_cons_out(q); return IRQ_HANDLED; } -- cgit v1.2.3 From 587e6c10a7ce89a5924fdbeff2ec524fbd6a124b Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 17:16:25 +0100 Subject: iommu/arm-smmu-v3: Reduce contention during command-queue insertion The SMMU command queue is a bottleneck in large systems, thanks to the spin_lock which serialises accesses from all CPUs to the single queue supported by the hardware. Attempt to improve this situation by moving to a new algorithm for inserting commands into the queue, which is lock-free on the fast-path. 
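The reservation step at the heart of the new algorithm can be modelled in isolation: prod and cons share a single 64-bit word so that space for N commands is claimed with one compare-and-swap, and the queue's overflow bit doubles as an "owned" marker for whichever CPU must later publish the hardware prod pointer. The following is a simplified userspace model (fixed 256-entry queue, no wrap handling, ownership hand-off omitted), not the driver code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NENTS		256u
#define OWNED_FLAG	(1u << 31)

static _Atomic uint64_t llq_val;	/* cons in the high word, prod in the low word */

static bool claim_slots(uint32_t n, uint32_t *first, bool *owner)
{
	uint64_t old = atomic_load_explicit(&llq_val, memory_order_relaxed);
	uint64_t new;
	uint32_t prod, cons;

	do {
		prod = (uint32_t)old & ~OWNED_FLAG;
		cons = (uint32_t)(old >> 32);

		if (prod - cons + n > NENTS)
			return false;	/* full: the caller would poll for space */

		/* The first claimant after the previous owner becomes the new owner. */
		*owner = !((uint32_t)old & OWNED_FLAG);
		new = ((uint64_t)cons << 32) | (prod + n) | OWNED_FLAG;
	} while (!atomic_compare_exchange_weak_explicit(&llq_val, &old, new,
							memory_order_relaxed,
							memory_order_relaxed));

	*first = prod;	/* index of the first claimed slot */
	return true;
}

int main(void)
{
	uint32_t first;
	bool owner;

	if (claim_slots(4, &first, &owner))
		printf("claimed 4 slots at %u, owner=%d\n", first, owner);
	return 0;
}

In the real driver the owner clears the owned flag again once it has advanced the hardware prod pointer, which is how ownership is handed to the next batch of claimants.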
Tested-by: Ganapatrao Kulkarni Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 677 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 533 insertions(+), 144 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 9ebb8b39a3b1..202b4b6fc70a 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -183,7 +183,7 @@ #define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1)) #define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift)) -#define Q_OVERFLOW_FLAG (1 << 31) +#define Q_OVERFLOW_FLAG (1U << 31) #define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) #define Q_ENT(q, p) ((q)->base + \ Q_IDX(&((q)->llq), p) * \ @@ -307,6 +307,8 @@ #define CMDQ_ERR_CERROR_ABT_IDX 2 #define CMDQ_ERR_CERROR_ATC_INV_IDX 3 +#define CMDQ_PROD_OWNED_FLAG Q_OVERFLOW_FLAG + #define CMDQ_0_OP GENMASK_ULL(7, 0) #define CMDQ_0_SSV (1UL << 11) @@ -369,9 +371,8 @@ #define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12) /* High-level queue structures */ -#define ARM_SMMU_POLL_TIMEOUT_US 100 -#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US 1000000 /* 1s! */ -#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT 10 +#define ARM_SMMU_POLL_TIMEOUT_US 1000000 /* 1s! */ +#define ARM_SMMU_POLL_SPIN_COUNT 10 #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 @@ -473,15 +474,24 @@ struct arm_smmu_cmdq_ent { #define CMDQ_OP_CMD_SYNC 0x46 struct { - u32 msidata; u64 msiaddr; } sync; }; }; struct arm_smmu_ll_queue { - u32 prod; - u32 cons; + union { + u64 val; + struct { + u32 prod; + u32 cons; + }; + struct { + atomic_t prod; + atomic_t cons; + } atomic; + u8 __pad[SMP_CACHE_BYTES]; + } ____cacheline_aligned_in_smp; u32 max_n_shift; }; @@ -499,9 +509,18 @@ struct arm_smmu_queue { u32 __iomem *cons_reg; }; +struct arm_smmu_queue_poll { + ktime_t timeout; + unsigned int delay; + unsigned int spin_cnt; + bool wfe; +}; + struct arm_smmu_cmdq { struct arm_smmu_queue q; - spinlock_t lock; + atomic_long_t *valid_map; + atomic_t owner_prod; + atomic_t lock; }; struct arm_smmu_evtq { @@ -581,8 +600,6 @@ struct arm_smmu_device { int gerr_irq; int combined_irq; - u32 sync_nr; - u8 prev_cmd_opcode; unsigned long ias; /* IPA */ unsigned long oas; /* PA */ @@ -601,12 +618,6 @@ struct arm_smmu_device { struct arm_smmu_strtab_cfg strtab_cfg; - /* Hi16xx adds an extra 32 bits of goodness to its MSI payload */ - union { - u32 sync_count; - u64 padding; - }; - /* IOMMU core code handle */ struct iommu_device iommu; }; @@ -690,6 +701,21 @@ static void parse_driver_options(struct arm_smmu_device *smmu) } /* Low-level queue manipulation functions */ +static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n) +{ + u32 space, prod, cons; + + prod = Q_IDX(q, q->prod); + cons = Q_IDX(q, q->cons); + + if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons)) + space = (1 << q->max_n_shift) - (prod - cons); + else + space = cons - prod; + + return space >= n; +} + static bool queue_full(struct arm_smmu_ll_queue *q) { return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && @@ -702,9 +728,12 @@ static bool queue_empty(struct arm_smmu_ll_queue *q) Q_WRP(q, q->prod) == Q_WRP(q, q->cons); } -static void queue_sync_cons_in(struct arm_smmu_queue *q) +static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod) { - q->llq.cons = readl_relaxed(q->cons_reg); + return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) && + (Q_IDX(q, q->cons) > Q_IDX(q, prod))) || + ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) && + (Q_IDX(q, q->cons) <= Q_IDX(q, prod))); } static void queue_sync_cons_out(struct arm_smmu_queue *q) @@ -735,46 +764,34 @@ 
static int queue_sync_prod_in(struct arm_smmu_queue *q) return ret; } -static void queue_sync_prod_out(struct arm_smmu_queue *q) +static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n) { - writel(q->llq.prod, q->prod_reg); + u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n; + return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); } -static void queue_inc_prod(struct arm_smmu_ll_queue *q) +static void queue_poll_init(struct arm_smmu_device *smmu, + struct arm_smmu_queue_poll *qp) { - u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1; - q->prod = Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); + qp->delay = 1; + qp->spin_cnt = 0; + qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); + qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); } -/* - * Wait for the SMMU to consume items. If sync is true, wait until the queue - * is empty. Otherwise, wait until there is at least one free slot. - */ -static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe) +static int queue_poll(struct arm_smmu_queue_poll *qp) { - ktime_t timeout; - unsigned int delay = 1, spin_cnt = 0; - - /* Wait longer if it's a CMD_SYNC */ - timeout = ktime_add_us(ktime_get(), sync ? - ARM_SMMU_CMDQ_SYNC_TIMEOUT_US : - ARM_SMMU_POLL_TIMEOUT_US); - - while (queue_sync_cons_in(q), - (sync ? !queue_empty(&q->llq) : queue_full(&q->llq))) { - if (ktime_compare(ktime_get(), timeout) > 0) - return -ETIMEDOUT; + if (ktime_compare(ktime_get(), qp->timeout) > 0) + return -ETIMEDOUT; - if (wfe) { - wfe(); - } else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) { - cpu_relax(); - continue; - } else { - udelay(delay); - delay *= 2; - spin_cnt = 0; - } + if (qp->wfe) { + wfe(); + } else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) { + cpu_relax(); + } else { + udelay(qp->delay); + qp->delay *= 2; + qp->spin_cnt = 0; } return 0; @@ -788,17 +805,6 @@ static void queue_write(__le64 *dst, u64 *src, size_t n_dwords) *dst++ = cpu_to_le64(*src++); } -static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent) -{ - if (queue_full(&q->llq)) - return -ENOSPC; - - queue_write(Q_ENT(q, q->llq.prod), ent, q->ent_dwords); - queue_inc_prod(&q->llq); - queue_sync_prod_out(q); - return 0; -} - static void queue_read(__le64 *dst, u64 *src, size_t n_dwords) { int i; @@ -881,20 +887,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp); break; case CMDQ_OP_CMD_SYNC: - if (ent->sync.msiaddr) + if (ent->sync.msiaddr) { cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ); - else + cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; + } else { cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV); + } cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH); cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB); - /* - * Commands are written little-endian, but we want the SMMU to - * receive MSIData, and thus write it back to memory, in CPU - * byte order, so big-endian needs an extra byteswap here. 
- */ - cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, - cpu_to_le32(ent->sync.msidata)); - cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; break; default: return -ENOENT; @@ -903,6 +903,27 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) return 0; } +static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu, + u32 prod) +{ + struct arm_smmu_queue *q = &smmu->cmdq.q; + struct arm_smmu_cmdq_ent ent = { + .opcode = CMDQ_OP_CMD_SYNC, + }; + + /* + * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI + * payload, so the write will zero the entire command on that platform. + */ + if (smmu->features & ARM_SMMU_FEAT_MSI && + smmu->features & ARM_SMMU_FEAT_COHERENCY) { + ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) * + q->ent_dwords * 8; + } + + arm_smmu_cmdq_build_cmd(cmd, &ent); +} + static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) { static const char *cerror_str[] = { @@ -961,109 +982,440 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); } -static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd) +/* + * Command queue locking. + * This is a form of bastardised rwlock with the following major changes: + * + * - The only LOCK routines are exclusive_trylock() and shared_lock(). + * Neither have barrier semantics, and instead provide only a control + * dependency. + * + * - The UNLOCK routines are supplemented with shared_tryunlock(), which + * fails if the caller appears to be the last lock holder (yes, this is + * racy). All successful UNLOCK routines have RELEASE semantics. + */ +static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq) { - struct arm_smmu_queue *q = &smmu->cmdq.q; - bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); + int val; - smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]); + /* + * We can try to avoid the cmpxchg() loop by simply incrementing the + * lock counter. When held in exclusive state, the lock counter is set + * to INT_MIN so these increments won't hurt as the value will remain + * negative. + */ + if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) + return; + + do { + val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); + } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); +} + +static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq) +{ + (void)atomic_dec_return_release(&cmdq->lock); +} + +static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq) +{ + if (atomic_read(&cmdq->lock) == 1) + return false; + + arm_smmu_cmdq_shared_unlock(cmdq); + return true; +} + +#define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \ +({ \ + bool __ret; \ + local_irq_save(flags); \ + __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \ + if (!__ret) \ + local_irq_restore(flags); \ + __ret; \ +}) + +#define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \ +({ \ + atomic_set_release(&cmdq->lock, 0); \ + local_irq_restore(flags); \ +}) + + +/* + * Command queue insertion. + * This is made fiddly by our attempts to achieve some sort of scalability + * since there is one queue shared amongst all of the CPUs in the system. If + * you like mixed-size concurrency, dependency ordering and relaxed atomics, + * then you'll *love* this monstrosity. 
+ * + * The basic idea is to split the queue up into ranges of commands that are + * owned by a given CPU; the owner may not have written all of the commands + * itself, but is responsible for advancing the hardware prod pointer when + * the time comes. The algorithm is roughly: + * + * 1. Allocate some space in the queue. At this point we also discover + * whether the head of the queue is currently owned by another CPU, + * or whether we are the owner. + * + * 2. Write our commands into our allocated slots in the queue. + * + * 3. Mark our slots as valid in arm_smmu_cmdq.valid_map. + * + * 4. If we are an owner: + * a. Wait for the previous owner to finish. + * b. Mark the queue head as unowned, which tells us the range + * that we are responsible for publishing. + * c. Wait for all commands in our owned range to become valid. + * d. Advance the hardware prod pointer. + * e. Tell the next owner we've finished. + * + * 5. If we are inserting a CMD_SYNC (we may or may not have been an + * owner), then we need to stick around until it has completed: + * a. If we have MSIs, the SMMU can write back into the CMD_SYNC + * to clear the first 4 bytes. + * b. Otherwise, we spin waiting for the hardware cons pointer to + * advance past our command. + * + * The devil is in the details, particularly the use of locking for handling + * SYNC completion and freeing up space in the queue before we think that it is + * full. + */ +static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod, bool set) +{ + u32 swidx, sbidx, ewidx, ebidx; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + .prod = sprod, + }; - while (queue_insert_raw(q, cmd) == -ENOSPC) { - if (queue_poll_cons(q, false, wfe)) - dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); + ewidx = BIT_WORD(Q_IDX(&llq, eprod)); + ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG; + + while (llq.prod != eprod) { + unsigned long mask; + atomic_long_t *ptr; + u32 limit = BITS_PER_LONG; + + swidx = BIT_WORD(Q_IDX(&llq, llq.prod)); + sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG; + + ptr = &cmdq->valid_map[swidx]; + + if ((swidx == ewidx) && (sbidx < ebidx)) + limit = ebidx; + + mask = GENMASK(limit - 1, sbidx); + + /* + * The valid bit is the inverse of the wrap bit. This means + * that a zero-initialised queue is invalid and, after marking + * all entries as valid, they become invalid again when we + * wrap. 
+ */ + if (set) { + atomic_long_xor(mask, ptr); + } else { /* Poll */ + unsigned long valid; + + valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask; + atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid); + } + + llq.prod = queue_inc_prod_n(&llq, limit - sbidx); } } -static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, - struct arm_smmu_cmdq_ent *ent) +/* Mark all entries in the range [sprod, eprod) as valid */ +static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod) +{ + __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true); +} + +/* Wait for all entries in the range [sprod, eprod) to become valid */ +static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod) +{ + __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false); +} + +/* Wait for the command queue to become non-full */ +static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 cmd[CMDQ_ENT_DWORDS]; unsigned long flags; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + int ret = 0; - if (arm_smmu_cmdq_build_cmd(cmd, ent)) { - dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", - ent->opcode); - return; + /* + * Try to update our copy of cons by grabbing exclusive cmdq access. If + * that fails, spin until somebody else updates it for us. + */ + if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) { + WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); + arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags); + llq->val = READ_ONCE(cmdq->q.llq.val); + return 0; } - spin_lock_irqsave(&smmu->cmdq.lock, flags); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + queue_poll_init(smmu, &qp); + do { + llq->val = READ_ONCE(smmu->cmdq.q.llq.val); + if (!queue_full(llq)) + break; + + ret = queue_poll(&qp); + } while (!ret); + + return ret; } /* - * The difference between val and sync_idx is bounded by the maximum size of - * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic. + * Wait until the SMMU signals a CMD_SYNC completion MSI. + * Must be called with the cmdq lock held in some capacity. */ -static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx) +static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - ktime_t timeout; - u32 val; + int ret = 0; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); - timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US); - val = smp_cond_load_acquire(&smmu->sync_count, - (int)(VAL - sync_idx) >= 0 || - !ktime_before(ktime_get(), timeout)); + queue_poll_init(smmu, &qp); - return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0; + /* + * The MSI won't generate an event, since it's being written back + * into the command queue. + */ + qp.wfe = false; + smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp))); + llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1); + return ret; } -static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu) +/* + * Wait until the SMMU cons index passes llq->prod. + * Must be called with the cmdq lock held in some capacity. 
+ */ +static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 cmd[CMDQ_ENT_DWORDS]; - unsigned long flags; - struct arm_smmu_cmdq_ent ent = { - .opcode = CMDQ_OP_CMD_SYNC, - .sync = { - .msiaddr = virt_to_phys(&smmu->sync_count), - }, - }; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + u32 prod = llq->prod; + int ret = 0; - spin_lock_irqsave(&smmu->cmdq.lock, flags); + queue_poll_init(smmu, &qp); + llq->val = READ_ONCE(smmu->cmdq.q.llq.val); + do { + if (queue_consumed(llq, prod)) + break; - /* Piggy-back on the previous command if it's a SYNC */ - if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) { - ent.sync.msidata = smmu->sync_nr; - } else { - ent.sync.msidata = ++smmu->sync_nr; - arm_smmu_cmdq_build_cmd(cmd, &ent); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - } + ret = queue_poll(&qp); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + /* + * This needs to be a readl() so that our subsequent call + * to arm_smmu_cmdq_shared_tryunlock() can fail accurately. + * + * Specifically, we need to ensure that we observe all + * shared_lock()s by other CMD_SYNCs that share our owner, + * so that a failing call to tryunlock() means that we're + * the last one out and therefore we can safely advance + * cmdq->q.llq.cons. Roughly speaking: + * + * CPU 0 CPU1 CPU2 (us) + * + * if (sync) + * shared_lock(); + * + * dma_wmb(); + * set_valid_map(); + * + * if (owner) { + * poll_valid_map(); + * + * writel(prod_reg); + * + * readl(cons_reg); + * tryunlock(); + * + * Requires us to see CPU 0's shared_lock() acquisition. + */ + llq->cons = readl(cmdq->q.cons_reg); + } while (!ret); - return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata); + return ret; } -static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 cmd[CMDQ_ENT_DWORDS]; + if (smmu->features & ARM_SMMU_FEAT_MSI && + smmu->features & ARM_SMMU_FEAT_COHERENCY) + return __arm_smmu_cmdq_poll_until_msi(smmu, llq); + + return __arm_smmu_cmdq_poll_until_consumed(smmu, llq); +} + +static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, + u32 prod, int n) +{ + int i; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + .prod = prod, + }; + + for (i = 0; i < n; ++i) { + u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS]; + + prod = queue_inc_prod_n(&llq, i); + queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); + } +} + +static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, + u64 *cmds, int n, bool sync) +{ + u64 cmd_sync[CMDQ_ENT_DWORDS]; + u32 prod; unsigned long flags; - bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); - struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC }; - int ret; + bool owner; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + }, head = llq; + int ret = 0; - arm_smmu_cmdq_build_cmd(cmd, &ent); + /* 1. 
Allocate some space in the queue */ + local_irq_save(flags); + llq.val = READ_ONCE(cmdq->q.llq.val); + do { + u64 old; + + while (!queue_has_space(&llq, n + sync)) { + local_irq_restore(flags); + if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq)) + dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); + local_irq_save(flags); + } + + head.cons = llq.cons; + head.prod = queue_inc_prod_n(&llq, n + sync) | + CMDQ_PROD_OWNED_FLAG; + + old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); + if (old == llq.val) + break; + + llq.val = old; + } while (1); + owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG); + head.prod &= ~CMDQ_PROD_OWNED_FLAG; + llq.prod &= ~CMDQ_PROD_OWNED_FLAG; + + /* + * 2. Write our commands into the queue + * Dependency ordering from the cmpxchg() loop above. + */ + arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); + if (sync) { + prod = queue_inc_prod_n(&llq, n); + arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod); + queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); + + /* + * In order to determine completion of our CMD_SYNC, we must + * ensure that the queue can't wrap twice without us noticing. + * We achieve that by taking the cmdq lock as shared before + * marking our slot as valid. + */ + arm_smmu_cmdq_shared_lock(cmdq); + } - spin_lock_irqsave(&smmu->cmdq.lock, flags); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - ret = queue_poll_cons(&smmu->cmdq.q, true, wfe); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + /* 3. Mark our slots as valid, ensuring commands are visible first */ + dma_wmb(); + arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); + /* 4. If we are the owner, take control of the SMMU hardware */ + if (owner) { + /* a. Wait for previous owner to finish */ + atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); + + /* b. Stop gathering work by clearing the owned flag */ + prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG, + &cmdq->q.llq.atomic.prod); + prod &= ~CMDQ_PROD_OWNED_FLAG; + + /* + * c. Wait for any gathered work to be written to the queue. + * Note that we read our own entries so that we have the control + * dependency required by (d). + */ + arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod); + + /* + * d. Advance the hardware prod pointer + * Control dependency ordering from the entries becoming valid. + */ + writel_relaxed(prod, cmdq->q.prod_reg); + + /* + * e. Tell the next owner we're done + * Make sure we've updated the hardware first, so that we don't + * race to update prod and potentially move it backwards. + */ + atomic_set_release(&cmdq->owner_prod, prod); + } + + /* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */ + if (sync) { + llq.prod = queue_inc_prod_n(&llq, n); + ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq); + if (ret) { + dev_err_ratelimited(smmu->dev, + "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n", + llq.prod, + readl_relaxed(cmdq->q.prod_reg), + readl_relaxed(cmdq->q.cons_reg)); + } + + /* + * Try to unlock the cmq lock. 
This will fail if we're the last + * reader, in which case we can safely update cmdq->q.llq.cons + */ + if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) { + WRITE_ONCE(cmdq->q.llq.cons, llq.cons); + arm_smmu_cmdq_shared_unlock(cmdq); + } + } + + local_irq_restore(flags); return ret; } -static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq_ent *ent) { - int ret; - bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) && - (smmu->features & ARM_SMMU_FEAT_COHERENCY); + u64 cmd[CMDQ_ENT_DWORDS]; - ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu) - : __arm_smmu_cmdq_issue_sync(smmu); - if (ret) - dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n"); - return ret; + if (arm_smmu_cmdq_build_cmd(cmd, ent)) { + dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", + ent->opcode); + return -EINVAL; + } + + return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false); +} + +static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +{ + return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true); } /* Context descriptor manipulation functions */ @@ -1580,9 +1932,9 @@ static void arm_smmu_tlb_inv_context(void *cookie) /* * NOTE: when io-pgtable is in non-strict mode, we may get here with * PTEs previously cleared by unmaps on the current CPU not yet visible - * to the SMMU. We are relying on the DSB implicit in - * queue_sync_prod_out() to guarantee those are observed before the - * TLBI. Do be careful, 007. + * to the SMMU. We are relying on the dma_wmb() implicit during cmd + * insertion to guarantee those are observed before the TLBI. Do be + * careful, 007. */ arm_smmu_cmdq_issue_cmd(smmu, &cmd); arm_smmu_cmdq_issue_sync(smmu); @@ -2359,18 +2711,49 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, return 0; } +static void arm_smmu_cmdq_free_bitmap(void *data) +{ + unsigned long *bitmap = data; + bitmap_free(bitmap); +} + +static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu) +{ + int ret = 0; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + unsigned int nents = 1 << cmdq->q.llq.max_n_shift; + atomic_long_t *bitmap; + + atomic_set(&cmdq->owner_prod, 0); + atomic_set(&cmdq->lock, 0); + + bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL); + if (!bitmap) { + dev_err(smmu->dev, "failed to allocate cmdq bitmap\n"); + ret = -ENOMEM; + } else { + cmdq->valid_map = bitmap; + devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap); + } + + return ret; +} + static int arm_smmu_init_queues(struct arm_smmu_device *smmu) { int ret; /* cmdq */ - spin_lock_init(&smmu->cmdq.lock); ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD, ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS, "cmdq"); if (ret) return ret; + ret = arm_smmu_cmdq_init(smmu); + if (ret) + return ret; + /* evtq */ ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD, ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS, @@ -2951,9 +3334,15 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) /* Queue sizes, capped to ensure natural alignment */ smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, reg)); - if (!smmu->cmdq.q.llq.max_n_shift) { - /* Odd alignment restrictions on the base, so ignore for now */ - dev_err(smmu->dev, "unit-length command queue not supported\n"); + if (smmu->cmdq.q.llq.max_n_shift < ilog2(BITS_PER_LONG)) { + /* + * The cmdq valid_map relies on the total number of entries + * being a multiple of BITS_PER_LONG. 
There's also no way + * we can handle the weird alignment restrictions on the + * base pointer for a unit-length queue. + */ + dev_err(smmu->dev, "command queue size < %d entries not supported\n", + BITS_PER_LONG); return -ENXIO; } -- cgit v1.2.3 From 2af2e72b18b499fa36d3f7379fd010ff25d2a984 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 17:16:33 +0100 Subject: iommu/arm-smmu-v3: Defer TLB invalidation until ->iotlb_sync() Update the iommu_iotlb_gather structure passed to ->tlb_add_page() and use this information to defer all TLB invalidation until ->iotlb_sync(). This drastically reduces contention on the command queue, since we can insert our commands in batches rather than one-by-one. Tested-by: Ganapatrao Kulkarni Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 71 +++++++++++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 202b4b6fc70a..b36a99971401 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -309,6 +309,13 @@ #define CMDQ_PROD_OWNED_FLAG Q_OVERFLOW_FLAG +/* + * This is used to size the command queue and therefore must be at least + * BITS_PER_LONG so that the valid_map works correctly (it relies on the + * total number of queue entries being a multiple of BITS_PER_LONG). + */ +#define CMDQ_BATCH_ENTRIES BITS_PER_LONG + #define CMDQ_0_OP GENMASK_ULL(7, 0) #define CMDQ_0_SSV (1UL << 11) @@ -1940,15 +1947,17 @@ static void arm_smmu_tlb_inv_context(void *cookie) arm_smmu_cmdq_issue_sync(smmu); } -static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size, + size_t granule, bool leaf, + struct arm_smmu_domain *smmu_domain) { - struct arm_smmu_domain *smmu_domain = cookie; + u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS]; struct arm_smmu_device *smmu = smmu_domain->smmu; + unsigned long end = iova + size; + int i = 0; struct arm_smmu_cmdq_ent cmd = { .tlbi = { .leaf = leaf, - .addr = iova, }, }; @@ -1960,37 +1969,41 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; } - do { - arm_smmu_cmdq_issue_cmd(smmu, &cmd); - cmd.tlbi.addr += granule; - } while (size -= granule); + while (iova < end) { + if (i == CMDQ_BATCH_ENTRIES) { + arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, false); + i = 0; + } + + cmd.tlbi.addr = iova; + arm_smmu_cmdq_build_cmd(&cmds[i * CMDQ_ENT_DWORDS], &cmd); + iova += granule; + i++; + } + + arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true); } static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) { - arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); + struct arm_smmu_domain *smmu_domain = cookie; + struct iommu_domain *domain = &smmu_domain->domain; + + iommu_iotlb_gather_add_page(domain, gather, iova, granule); } static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { - struct arm_smmu_domain *smmu_domain = cookie; - struct arm_smmu_device *smmu = smmu_domain->smmu; - - arm_smmu_tlb_inv_range_nosync(iova, size, granule, false, cookie); - arm_smmu_cmdq_issue_sync(smmu); + arm_smmu_tlb_inv_range(iova, size, granule, false, cookie); } static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, size_t granule, void *cookie) { - struct arm_smmu_domain 
*smmu_domain = cookie; - struct arm_smmu_device *smmu = smmu_domain->smmu; - - arm_smmu_tlb_inv_range_nosync(iova, size, granule, true, cookie); - arm_smmu_cmdq_issue_sync(smmu); + arm_smmu_tlb_inv_range(iova, size, granule, true, cookie); } static const struct iommu_flush_ops arm_smmu_flush_ops = { @@ -2404,10 +2417,10 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) static void arm_smmu_iotlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *gather) { - struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - if (smmu) - arm_smmu_cmdq_issue_sync(smmu); + arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start, + gather->pgsize, true, smmu_domain); } static phys_addr_t @@ -3334,15 +3347,15 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) /* Queue sizes, capped to ensure natural alignment */ smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, reg)); - if (smmu->cmdq.q.llq.max_n_shift < ilog2(BITS_PER_LONG)) { + if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) { /* - * The cmdq valid_map relies on the total number of entries - * being a multiple of BITS_PER_LONG. There's also no way - * we can handle the weird alignment restrictions on the - * base pointer for a unit-length queue. + * We don't support splitting up batches, so one batch of + * commands plus an extra sync needs to fit inside the command + * queue. There's also no way we can handle the weird alignment + * restrictions on the base pointer for a unit-length queue. */ - dev_err(smmu->dev, "command queue size < %d entries not supported\n", - BITS_PER_LONG); + dev_err(smmu->dev, "command queue size <= %d entries not supported\n", + CMDQ_BATCH_ENTRIES); return -ENXIO; } -- cgit v1.2.3 From b9c6ff94e43a0ee053e0c1d983fba1ac4953b762 Mon Sep 17 00:00:00 2001 From: "Suthikulpanit, Suravee" Date: Tue, 23 Jul 2019 19:00:37 +0000 Subject: iommu/amd: Re-factor guest virtual APIC (de-)activation code Re-factore the logic for activate/deactivate guest virtual APIC mode (GAM) into helper functions, and export them for other drivers (e.g. SVM). to support run-time activate/deactivate of SVM AVIC. 
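The expected consumer looks roughly like the sketch below: once amd_ir_set_vcpu_affinity() has handed the amd_ir_data pointer back through amd_iommu_pi_data->ir_data, a hypervisor driver such as SVM AVIC can flip guest mode at run time. avic_toggle_guest_mode() is a hypothetical wrapper used only to illustrate the exported API:

#include <linux/amd-iommu.h>

/* Hypothetical wrapper, not part of this patch. */
static int avic_toggle_guest_mode(void *ir_data, bool activate)
{
	return activate ? amd_iommu_activate_guest_mode(ir_data)
			: amd_iommu_deactivate_guest_mode(ir_data);
}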
Cc: Joerg Roedel Signed-off-by: Suravee Suthikulpanit Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 85 +++++++++++++++++++++++++++++------------ drivers/iommu/amd_iommu_types.h | 9 +++++ include/linux/amd-iommu.h | 12 ++++++ 3 files changed, 82 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index b607a92791d3..008da21a2592 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -4313,13 +4313,62 @@ static const struct irq_domain_ops amd_ir_domain_ops = { .deactivate = irq_remapping_deactivate, }; +int amd_iommu_activate_guest_mode(void *data) +{ + struct amd_ir_data *ir_data = (struct amd_ir_data *)data; + struct irte_ga *entry = (struct irte_ga *) ir_data->entry; + + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || + !entry || entry->lo.fields_vapic.guest_mode) + return 0; + + entry->lo.val = 0; + entry->hi.val = 0; + + entry->lo.fields_vapic.guest_mode = 1; + entry->lo.fields_vapic.ga_log_intr = 1; + entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr; + entry->hi.fields.vector = ir_data->ga_vector; + entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; + + return modify_irte_ga(ir_data->irq_2_irte.devid, + ir_data->irq_2_irte.index, entry, NULL); +} +EXPORT_SYMBOL(amd_iommu_activate_guest_mode); + +int amd_iommu_deactivate_guest_mode(void *data) +{ + struct amd_ir_data *ir_data = (struct amd_ir_data *)data; + struct irte_ga *entry = (struct irte_ga *) ir_data->entry; + struct irq_cfg *cfg = ir_data->cfg; + + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || + !entry || !entry->lo.fields_vapic.guest_mode) + return 0; + + entry->lo.val = 0; + entry->hi.val = 0; + + entry->lo.fields_remap.dm = apic->irq_dest_mode; + entry->lo.fields_remap.int_type = apic->irq_delivery_mode; + entry->hi.fields.vector = cfg->vector; + entry->lo.fields_remap.destination = + APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); + entry->hi.fields.destination = + APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); + + return modify_irte_ga(ir_data->irq_2_irte.devid, + ir_data->irq_2_irte.index, entry, NULL); +} +EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode); + static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) { + int ret; struct amd_iommu *iommu; struct amd_iommu_pi_data *pi_data = vcpu_info; struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; struct amd_ir_data *ir_data = data->chip_data; - struct irte_ga *irte = (struct irte_ga *) ir_data->entry; struct irq_2_irte *irte_info = &ir_data->irq_2_irte; struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid); @@ -4330,6 +4379,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) if (!dev_data || !dev_data->use_vapic) return 0; + ir_data->cfg = irqd_cfg(data); pi_data->ir_data = ir_data; /* Note: @@ -4348,37 +4398,24 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) pi_data->prev_ga_tag = ir_data->cached_ga_tag; if (pi_data->is_guest_mode) { - /* Setting */ - irte->hi.fields.ga_root_ptr = (pi_data->base >> 12); - irte->hi.fields.vector = vcpu_pi_info->vector; - irte->lo.fields_vapic.ga_log_intr = 1; - irte->lo.fields_vapic.guest_mode = 1; - irte->lo.fields_vapic.ga_tag = pi_data->ga_tag; - - ir_data->cached_ga_tag = pi_data->ga_tag; + ir_data->ga_root_ptr = (pi_data->base >> 12); + ir_data->ga_vector = vcpu_pi_info->vector; + ir_data->ga_tag = pi_data->ga_tag; + ret = amd_iommu_activate_guest_mode(ir_data); + if (!ret) + ir_data->cached_ga_tag = pi_data->ga_tag; } else { - /* 
Un-Setting */ - struct irq_cfg *cfg = irqd_cfg(data); - - irte->hi.val = 0; - irte->lo.val = 0; - irte->hi.fields.vector = cfg->vector; - irte->lo.fields_remap.guest_mode = 0; - irte->lo.fields_remap.destination = - APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); - irte->hi.fields.destination = - APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); - irte->lo.fields_remap.int_type = apic->irq_delivery_mode; - irte->lo.fields_remap.dm = apic->irq_dest_mode; + ret = amd_iommu_deactivate_guest_mode(ir_data); /* * This communicates the ga_tag back to the caller * so that it can do all the necessary clean up. */ - ir_data->cached_ga_tag = 0; + if (!ret) + ir_data->cached_ga_tag = 0; } - return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data); + return ret; } diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 64edd5a9694c..9ac229e92b07 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -873,6 +873,15 @@ struct amd_ir_data { struct msi_msg msi_entry; void *entry; /* Pointer to union irte or struct irte_ga */ void *ref; /* Pointer to the actual irte */ + + /** + * Store information for activate/de-activate + * Guest virtual APIC mode during runtime. + */ + struct irq_cfg *cfg; + int ga_vector; + int ga_root_ptr; + int ga_tag; }; struct amd_irte_ops { diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 4a4d00646040..21e950e4ab62 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -184,6 +184,9 @@ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)); extern int amd_iommu_update_ga(int cpu, bool is_run, void *data); +extern int amd_iommu_activate_guest_mode(void *data); +extern int amd_iommu_deactivate_guest_mode(void *data); + #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ static inline int @@ -198,6 +201,15 @@ amd_iommu_update_ga(int cpu, bool is_run, void *data) return 0; } +static inline int amd_iommu_activate_guest_mode(void *data) +{ + return 0; +} + +static inline int amd_iommu_deactivate_guest_mode(void *data) +{ + return 0; +} #endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ #endif /* _ASM_X86_AMD_IOMMU_H */ -- cgit v1.2.3 From 086f9efae7e96ac2c829d33ca3d513caa3bbb225 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 30 Jul 2019 11:15:22 -0700 Subject: iommu: Remove dev_err() usage after platform_get_irq() We don't need dev_err() messages when platform_get_irq() fails now that platform_get_irq() prints an error message itself when something goes wrong. Let's remove these prints with a simple semantic patch. // @@ expression ret; struct platform_device *E; @@ ret = ( platform_get_irq(E, ...) | platform_get_irq_byname(E, ...) ); if ( \( ret < 0 \| ret <= 0 \) ) { ( -if (ret != -EPROBE_DEFER) -{ ... -dev_err(...); -... } | ... -dev_err(...); ) ... } // While we're here, remove braces on if statements that only have one statement (manually). 
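Condensed from the hunks below, the transformation looks like this in a typical probe function (the exact condition and return value vary per driver):

Before:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "failed to get irq\n");	/* redundant: the core already logs this */
		return irq;
	}

After:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;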
Cc: Joerg Roedel Cc: iommu@lists.linux-foundation.org Cc: Greg Kroah-Hartman Signed-off-by: Stephen Boyd Signed-off-by: Joerg Roedel --- drivers/iommu/exynos-iommu.c | 4 +--- drivers/iommu/msm_iommu.c | 1 - drivers/iommu/qcom_iommu.c | 4 +--- 3 files changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index b0c1e5f9daae..1934c16a5abc 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -583,10 +583,8 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) return PTR_ERR(data->sfrbase); irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - dev_err(dev, "Unable to find IRQ resource\n"); + if (irq <= 0) return irq; - } ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0, dev_name(dev), data); diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index b25e2eb9e038..3df9266abe65 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -750,7 +750,6 @@ static int msm_iommu_probe(struct platform_device *pdev) iommu->irq = platform_get_irq(pdev, 0); if (iommu->irq < 0) { - dev_err(iommu->dev, "could not get iommu irq\n"); ret = -ENODEV; goto fail; } diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 34d0b9783b3e..fb45486c6d14 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -696,10 +696,8 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev) return PTR_ERR(ctx->base); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (irq < 0) return -ENODEV; - } /* clear IRQs before registering fault handler, just in case the * boot-loader left us a surprise: -- cgit v1.2.3 From 3846a3b9511c5166082a93536d919a9c42abcd91 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 7 Aug 2019 11:26:45 +0300 Subject: iommu/omap: fix boot issue on remoteprocs with AMMU/Unicache Support has been added to the OMAP IOMMU driver to fix a boot hang issue on OMAP remoteprocs with AMMU/Unicache, caused by an improper AMMU/Unicache state upon initial deassertion of the processor reset. The issue is described in detail in the next three paragraphs. All the Cortex M3/M4 IPU processor subsystems in OMAP SoCs have a AMMU/Unicache IP that dictates the memory attributes for addresses seen by the processor cores. The AMMU/Unicache is configured/enabled by the SCACHE_CONFIG.BYPASS bit - a value of 1 enables the cache and mandates all addresses accessed by M3/M4 be defined in the AMMU. This bit is not programmable from the host processor. The M3/M4 boot sequence starts out with the AMMU/Unicache in disabled state, and SYS/BIOS programs the AMMU regions and enables the Unicache during one of its initial boot steps. This SCACHE_CONFIG.BYPASS bit is however enabled by default whenever a RET reset is applied to the IP, irrespective of whether it was previously enabled or not. The AMMU registers lose their context whenever this reset is applied. The reset is effective as long as the MMU portion of the subsystem is enabled and clocked. This behavior is common to all the IPU and DSP subsystems that have an AMMU/Unicache. The IPU boot sequence involves enabling and programming the MMU, and loading the processor and releasing the reset(s) for the processor. The PM setup code currently sets the target state for most of the power domains to RET. 
The L2 MMU can be enabled, programmed and accessed properly just fine with the domain in hardware supervised mode, while the power domain goes through a RET->ON->RET transition during the programming sequence. However, the ON->RET transition asserts a RET reset, and the SCACHE_CONFIG.BYPASS bit gets auto-set. An AMMU fault is thrown immediately when the M3/M4 core's reset is released since the first instruction address itself will not be defined in any valid AMMU regions. The ON->RET transition happens automatically on the power domain after enabling the iommu due to the hardware supervised mode. This patch adds and invokes the .set_pwrdm_constraint pdata ops, if present, during the OMAP IOMMU enable and disable functions to resolve the above boot hang issue. The ops will allow to invoke a mach-omap2 layer API pwrdm_set_next_pwrst() in a multi-arch kernel environment. The ops also returns the current power domain state while enforcing the constraint so that the driver can store it and use it to set back the power domain state while releasing the constraint. The pdata ops implementation restricts the target power domain to ON during enable, and back to the original power domain state during disable, and thereby eliminating the conditions for the boot issue. The implementation is effective only when the original power domain state is either RET or OFF, and is a no-op when it is ON or INACTIVE. The .set_pwrdm_constraint ops need to be plugged in pdata-quirks for the affected remote processors to be able to boot properly. Note that the current issue is seen only on kernels with the affected power domains programmed to enter RET. For eg., IPU1 on DRA7xx is in a separate domain and is susceptible to this bug, while the IPU2 subsystem is within CORE power domain, and CORE RET is not supported on this SoC. IPUs on OMAP4 and OMAP5 are also susceptible since they are in CORE power domain, and CORE RET is a valid power target on these SoCs. 
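For reference, the pdata-quirks hook-up that the changelog refers to looks roughly like the sketch below. The platform-data instance, the reset name and its association with a particular MMU node are illustrative; only omap_iommu_set_pwrdm_constraint (added by this patch) and the omap_device hardreset helpers come from existing code:

/* Sketch only: wired up per affected MMU in pdata-quirks. */
static struct iommu_platform_data dra7_ipu1_iommu_pdata = {
	.reset_name		= "mmu_cache",		/* illustrative */
	.assert_reset		= omap_device_assert_hardreset,
	.deassert_reset		= omap_device_deassert_hardreset,
	.set_pwrdm_constraint	= omap_iommu_set_pwrdm_constraint,
};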
Signed-off-by: Suman Anna Signed-off-by: Joerg Roedel --- arch/arm/mach-omap2/Makefile | 2 ++ arch/arm/mach-omap2/omap-iommu.c | 43 ++++++++++++++++++++++++++++++++ drivers/iommu/omap-iommu.c | 17 +++++++++++++ drivers/iommu/omap-iommu.h | 2 ++ include/linux/platform_data/iommu-omap.h | 2 ++ 5 files changed, 66 insertions(+) create mode 100644 arch/arm/mach-omap2/omap-iommu.c (limited to 'drivers') diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 600650551621..d4f11c5070ae 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -229,3 +229,5 @@ include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORC $(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h targets += pm-asm-offsets.s + +obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c new file mode 100644 index 000000000000..f1a6ece8108e --- /dev/null +++ b/arch/arm/mach-omap2/omap-iommu.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * OMAP IOMMU quirks for various TI SoCs + * + * Copyright (C) 2015-2019 Texas Instruments Incorporated - http://www.ti.com/ + * Suman Anna + */ + +#include +#include + +#include "omap_hwmod.h" +#include "omap_device.h" +#include "powerdomain.h" + +int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request, + u8 *pwrst) +{ + struct powerdomain *pwrdm; + struct omap_device *od; + u8 next_pwrst; + + od = to_omap_device(pdev); + if (!od) + return -ENODEV; + + if (od->hwmods_cnt != 1) + return -EINVAL; + + pwrdm = omap_hwmod_get_pwrdm(od->hwmods[0]); + if (!pwrdm) + return -EINVAL; + + if (request) + *pwrst = pwrdm_read_next_pwrst(pwrdm); + + if (*pwrst > PWRDM_POWER_RET) + return 0; + + next_pwrst = request ? 
PWRDM_POWER_ON : *pwrst; + + return pwrdm_set_next_pwrst(pwrdm, next_pwrst); +} diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index dfb961d8c21b..84b99d5841ae 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -190,6 +190,14 @@ static int iommu_enable(struct omap_iommu *obj) struct platform_device *pdev = to_platform_device(obj->dev); struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); + if (pdata && pdata->set_pwrdm_constraint) { + err = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst); + if (err) { + dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n", + err); + } + } + if (pdata && pdata->deassert_reset) { err = pdata->deassert_reset(pdev, pdata->reset_name); if (err) { @@ -209,6 +217,7 @@ static void iommu_disable(struct omap_iommu *obj) { struct platform_device *pdev = to_platform_device(obj->dev); struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); + int ret; omap2_iommu_disable(obj); @@ -216,6 +225,14 @@ static void iommu_disable(struct omap_iommu *obj) if (pdata && pdata->assert_reset) pdata->assert_reset(pdev, pdata->reset_name); + + if (pdata && pdata->set_pwrdm_constraint) { + ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst); + if (ret) { + dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n", + ret); + } + } } /* diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 09968a02d291..aac1ca65ef9d 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -78,6 +78,8 @@ struct omap_iommu { struct iommu_device iommu; struct iommu_group *group; + + u8 pwrst; }; /** diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index 44d913a7580c..1ed60265a20e 100644 --- a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h @@ -13,4 +13,6 @@ struct iommu_platform_data { const char *reset_name; int (*assert_reset)(struct platform_device *pdev, const char *name); int (*deassert_reset)(struct platform_device *pdev, const char *name); + int (*set_pwrdm_constraint)(struct platform_device *pdev, bool request, + u8 *pwrst); }; -- cgit v1.2.3 From db8918f61d51b87aaf7554ab7647ccdb7dbd739f Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 7 Aug 2019 11:26:47 +0300 Subject: iommu/omap: streamline enable/disable through runtime pm callbacks The OMAP IOMMU devices are typically present within the respective client processor subsystem and have their own dedicated hard-reset line. Enabling an IOMMU requires the reset line to be deasserted and the clocks to be enabled before programming the necessary IOMMU registers. The IOMMU disable sequence follow the reverse order of enabling. The OMAP IOMMU driver programs the reset lines through pdata ops to invoke the omap_device_assert/deassert_hardreset API. The clocks are managed through the pm_runtime framework, and the callbacks associated with the device's pm_domain, implemented in the omap_device layer. Streamline the enable and disable sequences in the OMAP IOMMU driver by implementing all the above operations within the runtime pm callbacks. All the OMAP devices have device pm_domain callbacks plugged in the omap_device layer for automatic runtime management of the clocks. 
Invoking the reset management functions within the runtime pm callbacks in OMAP IOMMU driver therefore requires that the default device's pm domain callbacks in the omap_device layer be reset, as the ordering sequence for managing the reset lines and clocks from the pm_domain callbacks don't gel well with the implementation in the IOMMU driver callbacks. The omap_device_enable/omap_device_idle functions are invoked through the newly added pdata ops. Consolidating all the device management sequences within the runtime pm callbacks allows the driver to easily support both system suspend/resume and runtime suspend/resume using common code. Signed-off-by: Suman Anna Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 139 ++++++++++++++++++++++++++++++++------------- 1 file changed, 99 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 84b99d5841ae..fbceae3c2ee7 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -186,53 +186,18 @@ static void omap2_iommu_disable(struct omap_iommu *obj) static int iommu_enable(struct omap_iommu *obj) { - int err; - struct platform_device *pdev = to_platform_device(obj->dev); - struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); - - if (pdata && pdata->set_pwrdm_constraint) { - err = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst); - if (err) { - dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n", - err); - } - } - - if (pdata && pdata->deassert_reset) { - err = pdata->deassert_reset(pdev, pdata->reset_name); - if (err) { - dev_err(obj->dev, "deassert_reset failed: %d\n", err); - return err; - } - } - - pm_runtime_get_sync(obj->dev); + int ret; - err = omap2_iommu_enable(obj); + ret = pm_runtime_get_sync(obj->dev); + if (ret < 0) + pm_runtime_put_noidle(obj->dev); - return err; + return ret < 0 ? ret : 0; } static void iommu_disable(struct omap_iommu *obj) { - struct platform_device *pdev = to_platform_device(obj->dev); - struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); - int ret; - - omap2_iommu_disable(obj); - pm_runtime_put_sync(obj->dev); - - if (pdata && pdata->assert_reset) - pdata->assert_reset(pdev, pdata->reset_name); - - if (pdata && pdata->set_pwrdm_constraint) { - ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst); - if (ret) { - dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n", - ret); - } - } } /* @@ -927,6 +892,85 @@ static void omap_iommu_detach(struct omap_iommu *obj) dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); } +/** + * omap_iommu_runtime_suspend - disable an iommu device + * @dev: iommu device + * + * This function performs all that is necessary to disable an + * IOMMU device, either during final detachment from a client + * device, or during system/runtime suspend of the device. This + * includes programming all the appropriate IOMMU registers, and + * managing the associated omap_hwmod's state and the device's + * reset line. 
+ **/ +static int omap_iommu_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iommu_platform_data *pdata = dev_get_platdata(dev); + struct omap_iommu *obj = to_iommu(dev); + int ret; + + omap2_iommu_disable(obj); + + if (pdata && pdata->device_idle) + pdata->device_idle(pdev); + + if (pdata && pdata->assert_reset) + pdata->assert_reset(pdev, pdata->reset_name); + + if (pdata && pdata->set_pwrdm_constraint) { + ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst); + if (ret) { + dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n", + ret); + } + } + + return 0; +} + +/** + * omap_iommu_runtime_resume - enable an iommu device + * @dev: iommu device + * + * This function performs all that is necessary to enable an + * IOMMU device, either during initial attachment to a client + * device, or during system/runtime resume of the device. This + * includes programming all the appropriate IOMMU registers, and + * managing the associated omap_hwmod's state and the device's + * reset line. + **/ +static int omap_iommu_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iommu_platform_data *pdata = dev_get_platdata(dev); + struct omap_iommu *obj = to_iommu(dev); + int ret = 0; + + if (pdata && pdata->set_pwrdm_constraint) { + ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst); + if (ret) { + dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n", + ret); + } + } + + if (pdata && pdata->deassert_reset) { + ret = pdata->deassert_reset(pdev, pdata->reset_name); + if (ret) { + dev_err(dev, "deassert_reset failed: %d\n", ret); + return ret; + } + } + + if (pdata && pdata->device_enable) + pdata->device_enable(pdev); + + ret = omap2_iommu_enable(obj); + + return ret; +} + static bool omap_iommu_can_register(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -1001,6 +1045,15 @@ static int omap_iommu_probe(struct platform_device *pdev) if (!obj) return -ENOMEM; + /* + * self-manage the ordering dependencies between omap_device_enable/idle + * and omap_device_assert/deassert_hardreset API + */ + if (pdev->dev.pm_domain) { + dev_dbg(&pdev->dev, "device pm_domain is being reset\n"); + pdev->dev.pm_domain = NULL; + } + obj->name = dev_name(&pdev->dev); obj->nr_tlb_entries = 32; err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries); @@ -1089,6 +1142,11 @@ static int omap_iommu_remove(struct platform_device *pdev) return 0; } +static const struct dev_pm_ops omap_iommu_pm_ops = { + SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend, + omap_iommu_runtime_resume, NULL) +}; + static const struct of_device_id omap_iommu_of_match[] = { { .compatible = "ti,omap2-iommu" }, { .compatible = "ti,omap4-iommu" }, @@ -1102,6 +1160,7 @@ static struct platform_driver omap_iommu_driver = { .remove = omap_iommu_remove, .driver = { .name = "omap-iommu", + .pm = &omap_iommu_pm_ops, .of_match_table = of_match_ptr(omap_iommu_of_match), }, }; -- cgit v1.2.3 From c3b44a063d3b4d9b5afabaf5ab2e3fc11518dc84 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 7 Aug 2019 11:26:48 +0300 Subject: iommu/omap: add logic to save/restore locked TLBs The MMUs provide a mechanism to lock TLB entries to avoid eviction and fetching of frequently used page table entries. These TLBs lose context when the MMUs are turned OFF. Add the logic to save and restore these locked TLBS during suspend and resume respectively. 
There are no locked TLBs during initial power ON, and they need not be saved during final shutdown. Signed-off-by: Suman Anna Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 61 +++++++++++++++++++++++++++++++++++++++++++--- drivers/iommu/omap-iommu.h | 3 +++ 2 files changed, 61 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index fbceae3c2ee7..7640f2bd7c81 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -883,15 +883,55 @@ static void omap_iommu_detach(struct omap_iommu *obj) dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE, DMA_TO_DEVICE); - iommu_disable(obj); obj->pd_dma = 0; obj->iopgd = NULL; + iommu_disable(obj); spin_unlock(&obj->iommu_lock); dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); } +static void omap_iommu_save_tlb_entries(struct omap_iommu *obj) +{ + struct iotlb_lock lock; + struct cr_regs cr; + struct cr_regs *tmp; + int i; + + /* check if there are any locked tlbs to save */ + iotlb_lock_get(obj, &lock); + obj->num_cr_ctx = lock.base; + if (!obj->num_cr_ctx) + return; + + tmp = obj->cr_ctx; + for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr) + * tmp++ = cr; +} + +static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj) +{ + struct iotlb_lock l; + struct cr_regs *tmp; + int i; + + /* no locked tlbs to restore */ + if (!obj->num_cr_ctx) + return; + + l.base = 0; + tmp = obj->cr_ctx; + for (i = 0; i < obj->num_cr_ctx; i++, tmp++) { + l.vict = i; + iotlb_lock_set(obj, &l); + iotlb_load_cr(obj, tmp); + } + l.base = obj->num_cr_ctx; + l.vict = i; + iotlb_lock_set(obj, &l); +} + /** * omap_iommu_runtime_suspend - disable an iommu device * @dev: iommu device @@ -901,7 +941,8 @@ static void omap_iommu_detach(struct omap_iommu *obj) * device, or during system/runtime suspend of the device. This * includes programming all the appropriate IOMMU registers, and * managing the associated omap_hwmod's state and the device's - * reset line. + * reset line. This function also saves the context of any + * locked TLBs if suspending. **/ static int omap_iommu_runtime_suspend(struct device *dev) { @@ -910,6 +951,10 @@ static int omap_iommu_runtime_suspend(struct device *dev) struct omap_iommu *obj = to_iommu(dev); int ret; + /* save the TLBs only during suspend, and not for power down */ + if (obj->domain && obj->iopgd) + omap_iommu_save_tlb_entries(obj); + omap2_iommu_disable(obj); if (pdata && pdata->device_idle) @@ -938,7 +983,8 @@ static int omap_iommu_runtime_suspend(struct device *dev) * device, or during system/runtime resume of the device. This * includes programming all the appropriate IOMMU registers, and * managing the associated omap_hwmod's state and the device's - * reset line. + * reset line. The function also restores any locked TLBs if + * resuming after a suspend. 
**/ static int omap_iommu_runtime_resume(struct device *dev) { @@ -966,6 +1012,10 @@ static int omap_iommu_runtime_resume(struct device *dev) if (pdata && pdata->device_enable) pdata->device_enable(pdev); + /* restore the TLBs only during resume, and not for power up */ + if (obj->domain) + omap_iommu_restore_tlb_entries(obj); + ret = omap2_iommu_enable(obj); return ret; @@ -1066,6 +1116,11 @@ static int omap_iommu_probe(struct platform_device *pdev) obj->dev = &pdev->dev; obj->ctx = (void *)obj + sizeof(*obj); + obj->cr_ctx = devm_kzalloc(&pdev->dev, + sizeof(*obj->cr_ctx) * obj->nr_tlb_entries, + GFP_KERNEL); + if (!obj->cr_ctx) + return -ENOMEM; spin_lock_init(&obj->iommu_lock); spin_lock_init(&obj->page_table_lock); diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index aac1ca65ef9d..1d15aa857634 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -73,6 +73,9 @@ struct omap_iommu { void *ctx; /* iommu context: registres saved area */ + struct cr_regs *cr_ctx; + u32 num_cr_ctx; + int has_bus_err_back; u32 id; -- cgit v1.2.3 From c4206c4e190bd272e4e86389613f2cded4609ba1 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 7 Aug 2019 11:26:49 +0300 Subject: iommu/omap: Add system suspend/resume support The MMU registers for the remote processors lose their context in Open Switch Retention (OSWR) or device OFF modes. Hence, the context of the IOMMU needs to be saved before it is put into any of these lower power state (OSWR/OFF) and restored before it is powered up to ON again. The IOMMUs need to be active as long as the client devices that are present behind the IOMMU are active. This patch adds the dev_pm_ops callbacks to provide the system suspend/resume functionality through the appropriate runtime PM callbacks. The PM runtime_resume and runtime_suspend callbacks are already used to enable, configure and disable the IOMMUs during the attaching and detaching of the client devices to the IOMMUs, and the new PM callbacks reuse the same code by invoking the pm_runtime_force_suspend() and pm_runtime_force_resume() API. The functionality in dev_pm_ops .prepare() checks if the IOMMU device was already runtime suspended, and skips invoking the suspend/resume PM callbacks. The suspend/resume PM callbacks are plugged in through the 'late' pm ops to ensure that the IOMMU devices will be suspended only after its master devices (remoteproc devices) are suspended and restored before them. NOTE: There are two other existing API, omap_iommu_save_ctx() and omap_iommu_restore_ctx(). These are left as is to support suspend/resume of devices on legacy OMAP3 SoC. Signed-off-by: Suman Anna Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 7640f2bd7c81..ef62ac9057bb 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -65,6 +65,9 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom) /** * omap_iommu_save_ctx - Save registers for pm off-mode support * @dev: client device + * + * This should be treated as an deprecated API. It is preserved only + * to maintain existing functionality for OMAP3 ISP driver. 
**/ void omap_iommu_save_ctx(struct device *dev) { @@ -92,6 +95,9 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); /** * omap_iommu_restore_ctx - Restore registers for pm off-mode support * @dev: client device + * + * This should be treated as an deprecated API. It is preserved only + * to maintain existing functionality for OMAP3 ISP driver. **/ void omap_iommu_restore_ctx(struct device *dev) { @@ -1021,6 +1027,23 @@ static int omap_iommu_runtime_resume(struct device *dev) return ret; } +/** + * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation + * @dev: iommu device + * + * This function performs the necessary checks to determine if the IOMMU + * device needs suspending or not. The function checks if the runtime_pm + * status of the device is suspended, and returns 1 in that case. This + * results in the PM core to skip invoking any of the Sleep PM callbacks + * (suspend, suspend_late, resume, resume_early etc). + */ +static int omap_iommu_prepare(struct device *dev) +{ + if (pm_runtime_status_suspended(dev)) + return 1; + return 0; +} + static bool omap_iommu_can_register(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -1198,6 +1221,9 @@ static int omap_iommu_remove(struct platform_device *pdev) } static const struct dev_pm_ops omap_iommu_pm_ops = { + .prepare = omap_iommu_prepare, + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend, omap_iommu_runtime_resume, NULL) }; -- cgit v1.2.3 From d9c4d8a6cc0f852adf3829fbe40e2e3f6213b0c6 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 7 Aug 2019 11:26:50 +0300 Subject: iommu/omap: introduce new API for runtime suspend/resume control This patch adds support for the OMAP IOMMUs to be suspended during the auto suspend/resume of the OMAP remoteproc devices. The remote processors are auto-suspended after a certain period of idleness or inactivity. This is done by introducing two new APIs, omap_iommu_domain_deactivate() and omap_iommu_domain_activate(), to allow the client users/master devices of the IOMMU devices to deactivate and activate the IOMMU devices from their runtime suspend/resume operations. There is no API exposed by the IOMMU layer at present, and so these new APIs are added directly in the OMAP IOMMU driver to minimize framework changes. The APIs simply decrement and increment the runtime usage count of the IOMMU devices and let the context be saved/restored using the existing runtime pm callbacks. Signed-off-by: Suman Anna Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/omap-iommu.h | 8 +++++++ 2 files changed, 66 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index ef62ac9057bb..ecf14001beed 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -938,6 +938,64 @@ static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj) iotlb_lock_set(obj, &l); } +/** + * omap_iommu_domain_deactivate - deactivate attached iommu devices + * @domain: iommu domain attached to the target iommu device + * + * This API allows the client devices of IOMMU devices to suspend + * the IOMMUs they control at runtime, after they are idled and + * suspended all activity. System Suspend will leverage the PM + * driver late callbacks.
+ **/ +int omap_iommu_domain_deactivate(struct iommu_domain *domain) +{ + struct omap_iommu_domain *omap_domain = to_omap_domain(domain); + struct omap_iommu_device *iommu; + struct omap_iommu *oiommu; + int i; + + if (!omap_domain->dev) + return 0; + + iommu = omap_domain->iommus; + iommu += (omap_domain->num_iommus - 1); + for (i = 0; i < omap_domain->num_iommus; i++, iommu--) { + oiommu = iommu->iommu_dev; + pm_runtime_put_sync(oiommu->dev); + } + + return 0; +} +EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate); + +/** + * omap_iommu_domain_activate - activate attached iommu devices + * @domain: iommu domain attached to the target iommu device + * + * This API allows the client devices of IOMMU devices to resume the + * IOMMUs they control at runtime, before they can resume operations. + * System Resume will leverage the PM driver late callbacks. + **/ +int omap_iommu_domain_activate(struct iommu_domain *domain) +{ + struct omap_iommu_domain *omap_domain = to_omap_domain(domain); + struct omap_iommu_device *iommu; + struct omap_iommu *oiommu; + int i; + + if (!omap_domain->dev) + return 0; + + iommu = omap_domain->iommus; + for (i = 0; i < omap_domain->num_iommus; i++, iommu++) { + oiommu = iommu->iommu_dev; + pm_runtime_get_sync(oiommu->dev); + } + + return 0; +} +EXPORT_SYMBOL_GPL(omap_iommu_domain_activate); + /** * omap_iommu_runtime_suspend - disable an iommu device * @dev: iommu device diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h index 153bf25b4df3..36b645726813 100644 --- a/include/linux/omap-iommu.h +++ b/include/linux/omap-iommu.h @@ -10,12 +10,20 @@ #ifndef _OMAP_IOMMU_H_ #define _OMAP_IOMMU_H_ +struct iommu_domain; + #ifdef CONFIG_OMAP_IOMMU extern void omap_iommu_save_ctx(struct device *dev); extern void omap_iommu_restore_ctx(struct device *dev); + +int omap_iommu_domain_deactivate(struct iommu_domain *domain); +int omap_iommu_domain_activate(struct iommu_domain *domain); #else static inline void omap_iommu_save_ctx(struct device *dev) {} static inline void omap_iommu_restore_ctx(struct device *dev) {} + +static inline int omap_iommu_domain_deactivate(struct iommu_domain *domain) {} +static inline int omap_iommu_domain_activate(struct iommu_domain *domain) {} #endif #endif -- cgit v1.2.3 From 604629bcb5057d778839652f8f38d23734f2fe1d Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 7 Aug 2019 11:26:51 +0300 Subject: iommu/omap: add support for late attachment of iommu devices The current implementation of the OMAP IOMMU driver enforces a strict ordering of device probe, initiated by the iommu and followed by remoteproc later. This doesn't work too well with the new setup done with the ti-sysc changes, which may have the devices probed in pretty much any order. To overcome this limitation, if the iommu has not been probed yet when a consumer tries to attach to it, add the device to an orphan device list, which will be parsed during iommu probe to see if any orphan devices should be attached.
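For readers who want the bookkeeping in isolation, here is a minimal sketch of the orphan-list pattern (illustrative only, not the driver code; the names and the attach() hook are made up):

#include <linux/device.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct orphan {
        struct device *dev;
        struct list_head node;
};

static LIST_HEAD(orphans);
static DEFINE_SPINLOCK(orphans_lock);

/* Consumer arrived before the provider: remember it for later. */
static void orphan_record(struct device *dev)
{
        struct orphan *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (!o)
                return;
        o->dev = dev;
        spin_lock(&orphans_lock);
        list_add(&o->node, &orphans);
        spin_unlock(&orphans_lock);
}

/* Provider probed: retry every recorded consumer, drop the ones that stick. */
static void orphan_reclaim(int (*attach)(struct device *dev))
{
        struct orphan *o, *tmp;

        list_for_each_entry_safe(o, tmp, &orphans, node) {
                if (!attach(o->dev)) {
                        list_del(&o->node);
                        kfree(o);
                }
        }
}

The patch below applies the same idea with an orphan_dev_list that is walked at the end of omap_iommu_probe().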
Signed-off-by: Tero Kristo Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 56 +++++++++++++++++++++++++++++++++++++++++++--- drivers/iommu/omap-iommu.h | 4 +++- 2 files changed, 56 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index ecf14001beed..3c7ba517dbc3 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -35,6 +35,15 @@ static const struct iommu_ops omap_iommu_ops; +struct orphan_dev { + struct device *dev; + struct list_head node; +}; + +static LIST_HEAD(orphan_dev_list); + +static DEFINE_SPINLOCK(orphan_lock); + #define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev)) /* bitmap of the page sizes currently supported */ @@ -53,6 +62,8 @@ static const struct iommu_ops omap_iommu_ops; static struct platform_driver omap_iommu_driver; static struct kmem_cache *iopte_cachep; +static int _omap_iommu_add_device(struct device *dev); + /** * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain * @dom: generic iommu domain handle @@ -1166,6 +1177,7 @@ static int omap_iommu_probe(struct platform_device *pdev) struct omap_iommu *obj; struct resource *res; struct device_node *of = pdev->dev.of_node; + struct orphan_dev *orphan_dev, *tmp; if (!of) { pr_err("%s: only DT-based devices are supported\n", __func__); @@ -1249,6 +1261,14 @@ static int omap_iommu_probe(struct platform_device *pdev) dev_info(&pdev->dev, "%s registered\n", obj->name); + list_for_each_entry_safe(orphan_dev, tmp, &orphan_dev_list, node) { + err = _omap_iommu_add_device(orphan_dev->dev); + if (!err) { + list_del(&orphan_dev->node); + kfree(orphan_dev); + } + } + return 0; out_sysfs: @@ -1638,7 +1658,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, return ret; } -static int omap_iommu_add_device(struct device *dev) +static int _omap_iommu_add_device(struct device *dev) { struct omap_iommu_arch_data *arch_data, *tmp; struct omap_iommu *oiommu; @@ -1647,6 +1667,8 @@ static int omap_iommu_add_device(struct device *dev) struct platform_device *pdev; int num_iommus, i; int ret; + struct orphan_dev *orphan_dev; + unsigned long flags; /* * Allocate the archdata iommu structure for DT-based devices. 
@@ -1678,10 +1700,26 @@ static int omap_iommu_add_device(struct device *dev) } pdev = of_find_device_by_node(np); - if (WARN_ON(!pdev)) { + if (!pdev) { of_node_put(np); kfree(arch_data); - return -EINVAL; + spin_lock_irqsave(&orphan_lock, flags); + list_for_each_entry(orphan_dev, &orphan_dev_list, + node) { + if (orphan_dev->dev == dev) + break; + } + spin_unlock_irqrestore(&orphan_lock, flags); + + if (orphan_dev && orphan_dev->dev == dev) + return -EPROBE_DEFER; + + orphan_dev = kzalloc(sizeof(*orphan_dev), GFP_KERNEL); + orphan_dev->dev = dev; + spin_lock_irqsave(&orphan_lock, flags); + list_add(&orphan_dev->node, &orphan_dev_list); + spin_unlock_irqrestore(&orphan_lock, flags); + return -EPROBE_DEFER; } oiommu = platform_get_drvdata(pdev); @@ -1692,6 +1730,7 @@ static int omap_iommu_add_device(struct device *dev) } tmp->iommu_dev = oiommu; + tmp->dev = &pdev->dev; of_node_put(np); } @@ -1726,6 +1765,17 @@ static int omap_iommu_add_device(struct device *dev) return 0; } +static int omap_iommu_add_device(struct device *dev) +{ + int ret; + + ret = _omap_iommu_add_device(dev); + if (ret == -EPROBE_DEFER) + return 0; + + return ret; +} + static void omap_iommu_remove_device(struct device *dev) { struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 1d15aa857634..18ee713ede78 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -87,7 +87,8 @@ struct omap_iommu { /** * struct omap_iommu_arch_data - omap iommu private data - * @iommu_dev: handle of the iommu device + * @iommu_dev: handle of the OMAP iommu device + * @dev: handle of the iommu device * * This is an omap iommu private data object, which binds an iommu user * to its iommu device. This object should be placed at the iommu user's @@ -96,6 +97,7 @@ struct omap_iommu { */ struct omap_iommu_arch_data { struct omap_iommu *iommu_dev; + struct device *dev; }; struct cr_regs { -- cgit v1.2.3 From 1432ebbd60033f48ae24302524f9dca592103804 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 7 Aug 2019 11:26:52 +0300 Subject: iommu/omap: remove pm_runtime_irq_safe flag for OMAP IOMMUs This is not needed for anything, and it prevents proper PM transitions for parent devices, which is bad in the case of ti-sysc; this effectively kills PM completely. Thus, remove the flag. Signed-off-by: Tero Kristo Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 3c7ba517dbc3..99a9ff3e7f71 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1254,7 +1254,6 @@ static int omap_iommu_probe(struct platform_device *pdev) goto out_sysfs; } - pm_runtime_irq_safe(obj->dev); pm_runtime_enable(obj->dev); omap_iommu_debugfs_add(obj); -- cgit v1.2.3 From 7991eb39eedc2f8565568f02877f7e22c4f89f98 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Mon, 12 Aug 2019 12:32:46 +0200 Subject: iommu/exynos: Remove __init annotation from exynos_sysmmu_probe() The Exynos SYSMMU driver supports deferred probe, which happens when the clocks needed by this driver are not yet available. Typically the subsequent calls to the driver's ->probe() happen before the init section is freed, but this is not really guaranteed. To make it safe, remove the __init annotation from the exynos_sysmmu_probe() function.
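The hazard being avoided: a deferred probe is retried from a workqueue, potentially after free_initmem() has discarded .init.text, so a probe routine that can be retried must live in regular .text. A minimal sketch of the safe shape (hypothetical "foo" driver and "aclk" clock name, purely illustrative and not from this series):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        /* May return -EPROBE_DEFER; the driver core calls foo_probe() again later. */
        struct clk *clk = devm_clk_get(&pdev->dev, "aclk");

        if (IS_ERR(clk))
                return PTR_ERR(clk);

        return 0;
}

static struct platform_driver foo_driver = {
        .probe  = foo_probe,            /* no __init: must survive past boot */
        .driver = { .name = "foo" },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL v2");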
Signed-off-by: Marek Szyprowski Signed-off-by: Joerg Roedel --- drivers/iommu/exynos-iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index b0c1e5f9daae..a48236c1d5cb 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -566,7 +566,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, static const struct iommu_ops exynos_iommu_ops; -static int __init exynos_sysmmu_probe(struct platform_device *pdev) +static int exynos_sysmmu_probe(struct platform_device *pdev) { int irq, ret; struct device *dev = &pdev->dev; -- cgit v1.2.3 From 24ce0bab260b59964629bb4d9570f0548b89e490 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Fri, 16 Aug 2019 17:58:37 -0500 Subject: iommu/omap: Use the correct type for SLAB_HWCACHE_ALIGN The macro SLAB_HWCACHE_ALIGN is of type slab_flags_t, but is currently assigned in the OMAP IOMMU driver using a unsigned long variable. This generates a sparse warning around the type check. Fix this by defining the variable flags using the correct type. Signed-off-by: Suman Anna Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 99a9ff3e7f71..8645e9b175a3 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1818,7 +1818,7 @@ static const struct iommu_ops omap_iommu_ops = { static int __init omap_iommu_init(void) { struct kmem_cache *p; - const unsigned long flags = SLAB_HWCACHE_ALIGN; + const slab_flags_t flags = SLAB_HWCACHE_ALIGN; size_t align = 1 << 10; /* L2 pagetable alignement */ struct device_node *np; int ret; -- cgit v1.2.3 From 353b325047fdfd437dc2afaee89fc66de3657b7e Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:21 +0100 Subject: iommu/arm-smmu: Mask TLBI address correctly The less said about "~12UL" the better. Oh dear. We get away with it due to calling constraints that mean IOVAs are implicitly at least page-aligned to begin with, but still; oh dear. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 64977c131ee6..d60ee292ecee 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -504,7 +504,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { - iova &= ~12UL; + iova = (iova >> 12) << 12; iova |= cfg->asid; do { writel_relaxed(iova, reg); -- cgit v1.2.3 From a5b396ce5001a8b8c8594c7fa90b0df61d6b3272 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:22 +0100 Subject: iommu/qcom: Mask TLBI addresses correctly As with arm-smmu from whence this code was borrowed, the IOVAs passed in here happen to be at least page-aligned anyway, but still; oh dear. 
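A quick illustration of why the old mask is wrong, runnable as plain userspace C (the address is arbitrary): decimal 12 is 0b1100, so "& ~12UL" clears only bits 2 and 3 rather than the low 12 bits.

#include <stdio.h>

int main(void)
{
        unsigned long iova = 0x12345678UL;

        printf("iova & ~12UL       = 0x%lx\n", iova & ~12UL);         /* 0x12345670: still not page-aligned */
        printf("(iova >> 12) << 12 = 0x%lx\n", (iova >> 12) << 12);   /* 0x12345000: low 12 bits cleared */
        return 0;
}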
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/qcom_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 34d0b9783b3e..bed948c3058a 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -155,7 +155,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); size_t s = size; - iova &= ~12UL; + iova = (iova >> 12) << 12; iova |= ctx->asid; do { iommu_writel(ctx, reg, iova); -- cgit v1.2.3 From 0caf5f4e84fe0a870917984abf1f0eb29fa6e375 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:23 +0100 Subject: iommu/arm-smmu: Convert GR0 registers to bitfields FIELD_PREP remains a terrible name, but the overall simplification will make further work on this stuff that much more manageable. This also serves as an audit of the header, wherein we can impose a consistent grouping and ordering of the offset and field definitions Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-regs.h | 126 +++++++++++++++++++----------------------- drivers/iommu/arm-smmu.c | 51 ++++++++--------- 2 files changed, 84 insertions(+), 93 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h index 1c278f7ae888..351ab09c7d4f 100644 --- a/drivers/iommu/arm-smmu-regs.h +++ b/drivers/iommu/arm-smmu-regs.h @@ -10,111 +10,101 @@ #ifndef _ARM_SMMU_REGS_H #define _ARM_SMMU_REGS_H +#include + /* Configuration registers */ #define ARM_SMMU_GR0_sCR0 0x0 -#define sCR0_CLIENTPD (1 << 0) -#define sCR0_GFRE (1 << 1) -#define sCR0_GFIE (1 << 2) -#define sCR0_EXIDENABLE (1 << 3) -#define sCR0_GCFGFRE (1 << 4) -#define sCR0_GCFGFIE (1 << 5) -#define sCR0_USFCFG (1 << 10) -#define sCR0_VMIDPNE (1 << 11) -#define sCR0_PTM (1 << 12) -#define sCR0_FB (1 << 13) -#define sCR0_VMID16EN (1 << 31) -#define sCR0_BSU_SHIFT 14 -#define sCR0_BSU_MASK 0x3 +#define sCR0_VMID16EN BIT(31) +#define sCR0_BSU GENMASK(15, 14) +#define sCR0_FB BIT(13) +#define sCR0_PTM BIT(12) +#define sCR0_VMIDPNE BIT(11) +#define sCR0_USFCFG BIT(10) +#define sCR0_GCFGFIE BIT(5) +#define sCR0_GCFGFRE BIT(4) +#define sCR0_EXIDENABLE BIT(3) +#define sCR0_GFIE BIT(2) +#define sCR0_GFRE BIT(1) +#define sCR0_CLIENTPD BIT(0) /* Auxiliary Configuration register */ #define ARM_SMMU_GR0_sACR 0x10 /* Identification registers */ #define ARM_SMMU_GR0_ID0 0x20 +#define ID0_S1TS BIT(30) +#define ID0_S2TS BIT(29) +#define ID0_NTS BIT(28) +#define ID0_SMS BIT(27) +#define ID0_ATOSNS BIT(26) +#define ID0_PTFS_NO_AARCH32 BIT(25) +#define ID0_PTFS_NO_AARCH32S BIT(24) +#define ID0_NUMIRPT GENMASK(23, 16) +#define ID0_CTTW BIT(14) +#define ID0_NUMSIDB GENMASK(12, 9) +#define ID0_EXIDS BIT(8) +#define ID0_NUMSMRG GENMASK(7, 0) + #define ARM_SMMU_GR0_ID1 0x24 +#define ID1_PAGESIZE BIT(31) +#define ID1_NUMPAGENDXB GENMASK(30, 28) +#define ID1_NUMS2CB GENMASK(23, 16) +#define ID1_NUMCB GENMASK(7, 0) + #define ARM_SMMU_GR0_ID2 0x28 +#define ID2_VMID16 BIT(15) +#define ID2_PTFS_64K BIT(14) +#define ID2_PTFS_16K BIT(13) +#define ID2_PTFS_4K BIT(12) +#define ID2_UBS GENMASK(11, 8) +#define ID2_OAS GENMASK(7, 4) +#define ID2_IAS GENMASK(3, 0) + #define ARM_SMMU_GR0_ID3 0x2c #define ARM_SMMU_GR0_ID4 0x30 #define ARM_SMMU_GR0_ID5 0x34 #define ARM_SMMU_GR0_ID6 0x38 + #define ARM_SMMU_GR0_ID7 0x3c +#define ID7_MAJOR GENMASK(7, 4) +#define ID7_MINOR GENMASK(3, 0) + #define 
ARM_SMMU_GR0_sGFSR 0x48 #define ARM_SMMU_GR0_sGFSYNR0 0x50 #define ARM_SMMU_GR0_sGFSYNR1 0x54 #define ARM_SMMU_GR0_sGFSYNR2 0x58 -#define ID0_S1TS (1 << 30) -#define ID0_S2TS (1 << 29) -#define ID0_NTS (1 << 28) -#define ID0_SMS (1 << 27) -#define ID0_ATOSNS (1 << 26) -#define ID0_PTFS_NO_AARCH32 (1 << 25) -#define ID0_PTFS_NO_AARCH32S (1 << 24) -#define ID0_CTTW (1 << 14) -#define ID0_NUMIRPT_SHIFT 16 -#define ID0_NUMIRPT_MASK 0xff -#define ID0_NUMSIDB_SHIFT 9 -#define ID0_NUMSIDB_MASK 0xf -#define ID0_EXIDS (1 << 8) -#define ID0_NUMSMRG_SHIFT 0 -#define ID0_NUMSMRG_MASK 0xff - -#define ID1_PAGESIZE (1 << 31) -#define ID1_NUMPAGENDXB_SHIFT 28 -#define ID1_NUMPAGENDXB_MASK 7 -#define ID1_NUMS2CB_SHIFT 16 -#define ID1_NUMS2CB_MASK 0xff -#define ID1_NUMCB_SHIFT 0 -#define ID1_NUMCB_MASK 0xff - -#define ID2_OAS_SHIFT 4 -#define ID2_OAS_MASK 0xf -#define ID2_IAS_SHIFT 0 -#define ID2_IAS_MASK 0xf -#define ID2_UBS_SHIFT 8 -#define ID2_UBS_MASK 0xf -#define ID2_PTFS_4K (1 << 12) -#define ID2_PTFS_16K (1 << 13) -#define ID2_PTFS_64K (1 << 14) -#define ID2_VMID16 (1 << 15) - -#define ID7_MAJOR_SHIFT 4 -#define ID7_MAJOR_MASK 0xf - /* Global TLB invalidation */ #define ARM_SMMU_GR0_TLBIVMID 0x64 #define ARM_SMMU_GR0_TLBIALLNSNH 0x68 #define ARM_SMMU_GR0_TLBIALLH 0x6c #define ARM_SMMU_GR0_sTLBGSYNC 0x70 + #define ARM_SMMU_GR0_sTLBGSTATUS 0x74 -#define sTLBGSTATUS_GSACTIVE (1 << 0) +#define sTLBGSTATUS_GSACTIVE BIT(0) /* Stream mapping registers */ #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) -#define SMR_VALID (1 << 31) -#define SMR_MASK_SHIFT 16 -#define SMR_ID_SHIFT 0 +#define SMR_VALID BIT(31) +#define SMR_MASK GENMASK(31, 16) +#define SMR_ID GENMASK(15, 0) #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) -#define S2CR_CBNDX_SHIFT 0 -#define S2CR_CBNDX_MASK 0xff -#define S2CR_EXIDVALID (1 << 10) -#define S2CR_TYPE_SHIFT 16 -#define S2CR_TYPE_MASK 0x3 -enum arm_smmu_s2cr_type { - S2CR_TYPE_TRANS, - S2CR_TYPE_BYPASS, - S2CR_TYPE_FAULT, -}; - -#define S2CR_PRIVCFG_SHIFT 24 -#define S2CR_PRIVCFG_MASK 0x3 +#define S2CR_PRIVCFG GENMASK(25, 24) enum arm_smmu_s2cr_privcfg { S2CR_PRIVCFG_DEFAULT, S2CR_PRIVCFG_DIPAN, S2CR_PRIVCFG_UNPRIV, S2CR_PRIVCFG_PRIV, }; +#define S2CR_TYPE GENMASK(17, 16) +enum arm_smmu_s2cr_type { + S2CR_TYPE_TRANS, + S2CR_TYPE_BYPASS, + S2CR_TYPE_FAULT, +}; +#define S2CR_EXIDVALID BIT(10) +#define S2CR_CBNDX GENMASK(7, 0) /* Context bank attribute registers */ #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index d60ee292ecee..105015798c06 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -1019,7 +1020,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) { struct arm_smmu_smr *smr = smmu->smrs + idx; - u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT; + u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask); if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) reg |= SMR_VALID; @@ -1029,9 +1030,9 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) { struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; - u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT | - (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT | - (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT; + u32 reg = 
FIELD_PREP(S2CR_TYPE, s2cr->type) | + FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) | + FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg); if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && smmu->smrs[idx].valid) @@ -1063,15 +1064,15 @@ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu) * bits are set, so check each one separately. We can reject * masters later if they try to claim IDs outside these masks. */ - smr = smmu->streamid_mask << SMR_ID_SHIFT; + smr = FIELD_PREP(SMR_ID, smmu->streamid_mask); writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); - smmu->streamid_mask = smr >> SMR_ID_SHIFT; + smmu->streamid_mask = FIELD_GET(SMR_ID, smr); - smr = smmu->streamid_mask << SMR_MASK_SHIFT; + smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask); writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); - smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT; + smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr); } static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) @@ -1140,8 +1141,8 @@ static int arm_smmu_master_alloc_smes(struct device *dev) mutex_lock(&smmu->stream_map_mutex); /* Figure out a viable stream map entry allocation */ for_each_cfg_sme(fwspec, i, idx) { - u16 sid = fwspec->ids[i]; - u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; + u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]); + u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]); if (idx != INVALID_SMENDX) { ret = -EEXIST; @@ -1466,8 +1467,8 @@ static int arm_smmu_add_device(struct device *dev) ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) { - u16 sid = fwspec->ids[i]; - u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; + u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]); + u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]); if (sid & ~smmu->streamid_mask) { dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", @@ -1648,12 +1649,12 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) u32 mask, fwid = 0; if (args->args_count > 0) - fwid |= (u16)args->args[0]; + fwid |= FIELD_PREP(SMR_ID, args->args[0]); if (args->args_count > 1) - fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; + fwid |= FIELD_PREP(SMR_MASK, args->args[1]); else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) - fwid |= (u16)mask << SMR_MASK_SHIFT; + fwid |= FIELD_PREP(SMR_MASK, mask); return iommu_fwspec_add_ids(dev, &fwid, 1); } @@ -1728,7 +1729,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) * bit is only present in MMU-500r2 onwards. 
*/ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7); - major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK; + major = FIELD_GET(ID7_MAJOR, reg); reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR); if (major >= 2) reg &= ~ARM_MMU500_ACR_CACHE_LOCK; @@ -1780,7 +1781,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) reg &= ~sCR0_FB; /* Don't upgrade barriers */ - reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); + reg &= ~(sCR0_BSU); if (smmu->features & ARM_SMMU_FEAT_VMID16) reg |= sCR0_VMID16EN; @@ -1879,12 +1880,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->features |= ARM_SMMU_FEAT_EXIDS; size = 1 << 16; } else { - size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); + size = 1 << FIELD_GET(ID0_NUMSIDB, id); } smmu->streamid_mask = size - 1; if (id & ID0_SMS) { smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; - size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK; + size = FIELD_GET(ID0_NUMSMRG, id); if (size == 0) { dev_err(smmu->dev, "stream-matching supported, but no SMRs present!\n"); @@ -1923,15 +1924,15 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; /* Check for size mismatch of SMMU address space from mapped region */ - size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); + size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1); size <<= smmu->pgshift; if (smmu->cb_base != gr0_base + size) dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n", size * 2, (smmu->cb_base - gr0_base) * 2); - smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; - smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; + smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id); + smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id); if (smmu->num_s2_context_banks > smmu->num_context_banks) { dev_err(smmu->dev, "impossible number of S2 context banks!\n"); return -ENODEV; @@ -1957,11 +1958,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) /* ID2 */ id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); - size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); + size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id)); smmu->ipa_size = size; /* The output mask is also applied for bypass */ - size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); + size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id)); smmu->pa_size = size; if (id & ID2_VMID16) @@ -1981,7 +1982,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) if (smmu->version == ARM_SMMU_V1_64K) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; } else { - size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; + size = FIELD_GET(ID2_UBS, id); smmu->va_size = arm_smmu_id_size_to_bits(size); if (id & ID2_PTFS_4K) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K; -- cgit v1.2.3 From 5114e96cb27ed71559f2b674bb3c6ee14eafe9f9 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:24 +0100 Subject: iommu/arm-smmu: Convert GR1 registers to bitfields As for GR0, use the bitfield helpers to make GR1 usage a little cleaner, and use it as an opportunity to audit and tidy the definitions. This tweaks the handling of CBAR types to match what we did for S2CR a while back, and fixes a couple of names which didn't quite match the latest architecture spec (IHI0062D.c). 
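For anyone less familiar with the bitfield helpers these conversions rely on, a self-contained sketch of the pattern (EXAMPLE_* is a made-up register layout, not an SMMU one): GENMASK() describes the field once, and FIELD_PREP()/FIELD_GET() derive the shift from the mask at compile time.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_IRPTNDX         GENMASK(31, 24)
#define EXAMPLE_TYPE            GENMASK(17, 16)

static u32 example_pack(u32 type, u32 irptndx)
{
        /* old style: (type & 0x3) << 16 | (irptndx & 0xff) << 24 */
        return FIELD_PREP(EXAMPLE_TYPE, type) |
               FIELD_PREP(EXAMPLE_IRPTNDX, irptndx);
}

static u32 example_unpack_type(u32 reg)
{
        /* old style: (reg >> 16) & 0x3 */
        return FIELD_GET(EXAMPLE_TYPE, reg);
}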
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-regs.h | 33 ++++++++++++++------------------- drivers/iommu/arm-smmu.c | 18 +++++++++--------- 2 files changed, 23 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h index 351ab09c7d4f..8522330ee624 100644 --- a/drivers/iommu/arm-smmu-regs.h +++ b/drivers/iommu/arm-smmu-regs.h @@ -108,30 +108,25 @@ enum arm_smmu_s2cr_type { /* Context bank attribute registers */ #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) -#define CBAR_VMID_SHIFT 0 -#define CBAR_VMID_MASK 0xff -#define CBAR_S1_BPSHCFG_SHIFT 8 -#define CBAR_S1_BPSHCFG_MASK 3 -#define CBAR_S1_BPSHCFG_NSH 3 -#define CBAR_S1_MEMATTR_SHIFT 12 -#define CBAR_S1_MEMATTR_MASK 0xf +#define CBAR_IRPTNDX GENMASK(31, 24) +#define CBAR_TYPE GENMASK(17, 16) +enum arm_smmu_cbar_type { + CBAR_TYPE_S2_TRANS, + CBAR_TYPE_S1_TRANS_S2_BYPASS, + CBAR_TYPE_S1_TRANS_S2_FAULT, + CBAR_TYPE_S1_TRANS_S2_TRANS, +}; +#define CBAR_S1_MEMATTR GENMASK(15, 12) #define CBAR_S1_MEMATTR_WB 0xf -#define CBAR_TYPE_SHIFT 16 -#define CBAR_TYPE_MASK 0x3 -#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT) -#define CBAR_IRPTNDX_SHIFT 24 -#define CBAR_IRPTNDX_MASK 0xff +#define CBAR_S1_BPSHCFG GENMASK(9, 8) +#define CBAR_S1_BPSHCFG_NSH 3 +#define CBAR_VMID GENMASK(7, 0) #define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2)) #define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) -#define CBA2R_RW64_32BIT (0 << 0) -#define CBA2R_RW64_64BIT (1 << 0) -#define CBA2R_VMID_SHIFT 16 -#define CBA2R_VMID_MASK 0xffff +#define CBA2R_VMID16 GENMASK(31, 16) +#define CBA2R_VA64 BIT(0) #define ARM_SMMU_CB_SCTLR 0x0 #define ARM_SMMU_CB_ACTLR 0x4 diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 105015798c06..293a95b0d682 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -237,7 +237,7 @@ struct arm_smmu_cfg { u16 asid; u16 vmid; }; - u32 cbar; + enum arm_smmu_cbar_type cbar; enum arm_smmu_context_fmt fmt; }; #define INVALID_IRPTNDX 0xff @@ -692,31 +692,31 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) /* CBA2R */ if (smmu->version > ARM_SMMU_V1) { if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) - reg = CBA2R_RW64_64BIT; + reg = CBA2R_VA64; else - reg = CBA2R_RW64_32BIT; + reg = 0; /* 16-bit VMIDs live in CBA2R */ if (smmu->features & ARM_SMMU_FEAT_VMID16) - reg |= cfg->vmid << CBA2R_VMID_SHIFT; + reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid); writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx)); } /* CBAR */ - reg = cfg->cbar; + reg = FIELD_PREP(CBAR_TYPE, cfg->cbar); if (smmu->version < ARM_SMMU_V2) - reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT; + reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx); /* * Use the weakest shareability/memory types, so they are * overridden by the ttbcr/pte. 
*/ if (stage1) { - reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | - (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); + reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) | + FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB); } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { /* 8-bit VMIDs live in CBAR */ - reg |= cfg->vmid << CBAR_VMID_SHIFT; + reg |= FIELD_PREP(CBAR_VMID, cfg->vmid); } writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx)); -- cgit v1.2.3 From 620565a76bb31657045a6d4cc37f9df0b4ab5f56 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:25 +0100 Subject: iommu/arm-smmu: Convert context bank registers to bitfields Finish the final part of the job, once again updating some names to match the current spec. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-regs.h | 86 ++++++++++++++++++++++--------------------- drivers/iommu/arm-smmu.c | 16 ++++---- drivers/iommu/qcom_iommu.c | 13 ++++--- 3 files changed, 59 insertions(+), 56 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h index 8522330ee624..a8e288192285 100644 --- a/drivers/iommu/arm-smmu-regs.h +++ b/drivers/iommu/arm-smmu-regs.h @@ -129,19 +129,59 @@ enum arm_smmu_cbar_type { #define CBA2R_VA64 BIT(0) #define ARM_SMMU_CB_SCTLR 0x0 +#define SCTLR_S1_ASIDPNE BIT(12) +#define SCTLR_CFCFG BIT(7) +#define SCTLR_CFIE BIT(6) +#define SCTLR_CFRE BIT(5) +#define SCTLR_E BIT(4) +#define SCTLR_AFE BIT(2) +#define SCTLR_TRE BIT(1) +#define SCTLR_M BIT(0) + #define ARM_SMMU_CB_ACTLR 0x4 + #define ARM_SMMU_CB_RESUME 0x8 -#define ARM_SMMU_CB_TTBCR2 0x10 +#define RESUME_TERMINATE BIT(0) + +#define ARM_SMMU_CB_TCR2 0x10 +#define TCR2_SEP GENMASK(17, 15) +#define TCR2_SEP_UPSTREAM 0x7 +#define TCR2_AS BIT(4) + #define ARM_SMMU_CB_TTBR0 0x20 #define ARM_SMMU_CB_TTBR1 0x28 -#define ARM_SMMU_CB_TTBCR 0x30 +#define TTBRn_ASID GENMASK_ULL(63, 48) + +#define ARM_SMMU_CB_TCR 0x30 #define ARM_SMMU_CB_CONTEXTIDR 0x34 #define ARM_SMMU_CB_S1_MAIR0 0x38 #define ARM_SMMU_CB_S1_MAIR1 0x3c + #define ARM_SMMU_CB_PAR 0x50 +#define CB_PAR_F BIT(0) + #define ARM_SMMU_CB_FSR 0x58 +#define FSR_MULTI BIT(31) +#define FSR_SS BIT(30) +#define FSR_UUT BIT(8) +#define FSR_ASF BIT(7) +#define FSR_TLBLKF BIT(6) +#define FSR_TLBMCF BIT(5) +#define FSR_EF BIT(4) +#define FSR_PF BIT(3) +#define FSR_AFF BIT(2) +#define FSR_TF BIT(1) + +#define FSR_IGN (FSR_AFF | FSR_ASF | \ + FSR_TLBMCF | FSR_TLBLKF) +#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ + FSR_EF | FSR_PF | FSR_TF | FSR_IGN) + #define ARM_SMMU_CB_FAR 0x60 + #define ARM_SMMU_CB_FSYNR0 0x68 +#define FSYNR0_WNR BIT(4) + #define ARM_SMMU_CB_S1_TLBIVA 0x600 #define ARM_SMMU_CB_S1_TLBIASID 0x610 #define ARM_SMMU_CB_S1_TLBIVAL 0x620 @@ -150,46 +190,8 @@ enum arm_smmu_cbar_type { #define ARM_SMMU_CB_TLBSYNC 0x7f0 #define ARM_SMMU_CB_TLBSTATUS 0x7f4 #define ARM_SMMU_CB_ATS1PR 0x800 -#define ARM_SMMU_CB_ATSR 0x8f0 - -#define SCTLR_S1_ASIDPNE (1 << 12) -#define SCTLR_CFCFG (1 << 7) -#define SCTLR_CFIE (1 << 6) -#define SCTLR_CFRE (1 << 5) -#define SCTLR_E (1 << 4) -#define SCTLR_AFE (1 << 2) -#define SCTLR_TRE (1 << 1) -#define SCTLR_M (1 << 0) - -#define CB_PAR_F (1 << 0) - -#define ATSR_ACTIVE (1 << 0) - -#define RESUME_RETRY (0 << 0) -#define RESUME_TERMINATE (1 << 0) - -#define TTBCR2_SEP_SHIFT 15 -#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) -#define TTBCR2_AS (1 << 4) - -#define TTBRn_ASID_SHIFT 48 -#define FSR_MULTI (1 << 31) -#define FSR_SS (1 << 30) -#define FSR_UUT (1 << 
8) -#define FSR_ASF (1 << 7) -#define FSR_TLBLKF (1 << 6) -#define FSR_TLBMCF (1 << 5) -#define FSR_EF (1 << 4) -#define FSR_PF (1 << 3) -#define FSR_AFF (1 << 2) -#define FSR_TF (1 << 1) - -#define FSR_IGN (FSR_AFF | FSR_ASF | \ - FSR_TLBMCF | FSR_TLBLKF) -#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ - FSR_EF | FSR_PF | FSR_TF | FSR_IGN) - -#define FSYNR0_WNR (1 << 4) +#define ARM_SMMU_CB_ATSR 0x8f0 +#define ATSR_ACTIVE BIT(0) #endif /* _ARM_SMMU_REGS_H */ diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 293a95b0d682..a877de006d02 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -628,16 +628,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, cb->cfg = cfg; - /* TTBCR */ + /* TCR */ if (stage1) { if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr; } else { cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr; cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; - cb->tcr[1] |= TTBCR2_SEP_UPSTREAM; + cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM); if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) - cb->tcr[1] |= TTBCR2_AS; + cb->tcr[1] |= TCR2_AS; } } else { cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; @@ -650,9 +650,9 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; } else { cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; - cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT; + cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid); cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; - cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT; + cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid); } } else { cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; @@ -721,13 +721,13 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx)); /* - * TTBCR + * TCR * We must write this before the TTBRs, since it determines the * access behaviour of some fields (in particular, ASID[15:8]). 
*/ if (stage1 && smmu->version > ARM_SMMU_V1) - writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2); - writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR); + writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TCR2); + writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TCR); /* TTBRs */ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index bed948c3058a..60a125dd7300 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -247,16 +248,16 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, /* TTBRs */ iommu_writeq(ctx, ARM_SMMU_CB_TTBR0, pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] | - ((u64)ctx->asid << TTBRn_ASID_SHIFT)); + FIELD_PREP(TTBRn_ASID, ctx->asid)); iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] | - ((u64)ctx->asid << TTBRn_ASID_SHIFT)); + FIELD_PREP(TTBRn_ASID, ctx->asid)); - /* TTBCR */ - iommu_writel(ctx, ARM_SMMU_CB_TTBCR2, + /* TCR */ + iommu_writel(ctx, ARM_SMMU_CB_TCR2, (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) | - TTBCR2_SEP_UPSTREAM); - iommu_writel(ctx, ARM_SMMU_CB_TTBCR, + FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM)); + iommu_writel(ctx, ARM_SMMU_CB_TCR, pgtbl_cfg.arm_lpae_s1_cfg.tcr); /* MAIRs (stage-1 only) */ -- cgit v1.2.3 From 490325e0c123b2f1f393a679805c580ee69cd2f6 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:26 +0100 Subject: iommu/arm-smmu: Rework cb_base handling To keep register-access quirks manageable, we want to structure things to avoid needing too many individual overrides. It seems fairly clean to have a single interface which handles both global and context registers in terms of the architectural pages, so the first preparatory step is to rework cb_base into a page number rather than an absolute address. 
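To make the new arithmetic concrete, a small worked example with made-up ID register values (runnable as plain C; the driver itself derives pgshift from ID1.PAGESIZE and the page count from ID1.NUMPAGENDXB, as in the probe code above):

#include <stdio.h>

int main(void)
{
        unsigned int pgshift = 12;                      /* 4K SMMU pages */
        unsigned int numpagendxb = 7;                   /* from ID1 */
        unsigned int numpage = 1 << (numpagendxb + 1);  /* pages in the global space */

        /* the MMIO resource should cover the global space plus the context banks */
        printf("expected region size = 0x%x\n", 2 * numpage << pgshift);   /* 0x200000 */
        /* context bank n then lives at page (numpage + n), in the upper half */
        printf("CB(0) offset         = 0x%x\n", (numpage + 0) << pgshift); /* 0x100000 */
        return 0;
}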
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index a877de006d02..19126230c780 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -95,7 +95,7 @@ #endif /* Translation context bank */ -#define ARM_SMMU_CB(smmu, n) ((smmu)->cb_base + ((n) << (smmu)->pgshift)) +#define ARM_SMMU_CB(smmu, n) ((smmu)->base + (((smmu)->numpage + (n)) << (smmu)->pgshift)) #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 @@ -168,8 +168,8 @@ struct arm_smmu_device { struct device *dev; void __iomem *base; - void __iomem *cb_base; - unsigned long pgshift; + unsigned int numpage; + unsigned int pgshift; #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) #define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1) @@ -1815,7 +1815,7 @@ static int arm_smmu_id_size_to_bits(int size) static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) { - unsigned long size; + unsigned int size; void __iomem *gr0_base = ARM_SMMU_GR0(smmu); u32 id; bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; @@ -1899,7 +1899,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) return -ENOMEM; dev_notice(smmu->dev, - "\tstream matching with %lu register groups", size); + "\tstream matching with %u register groups", size); } /* s2cr->type == 0 means translation, so initialise explicitly */ smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), @@ -1925,11 +1925,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) /* Check for size mismatch of SMMU address space from mapped region */ size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1); - size <<= smmu->pgshift; - if (smmu->cb_base != gr0_base + size) + if (smmu->numpage != 2 * size << smmu->pgshift) dev_warn(smmu->dev, - "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n", - size * 2, (smmu->cb_base - gr0_base) * 2); + "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n", + 2 * size << smmu->pgshift, smmu->numpage); + /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */ + smmu->numpage = size; smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id); smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id); @@ -2200,7 +2201,11 @@ static int arm_smmu_device_probe(struct platform_device *pdev) smmu->base = devm_ioremap_resource(dev, res); if (IS_ERR(smmu->base)) return PTR_ERR(smmu->base); - smmu->cb_base = smmu->base + resource_size(res) / 2; + /* + * The resource size should effectively match the value of SMMU_TOP; + * stash that temporarily until we know PAGESIZE to validate it with. + */ + smmu->numpage = resource_size(res); num_irqs = 0; while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { -- cgit v1.2.3 From 71e8a8cdaff995a46e3e186a736636747cbd2f50 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:27 +0100 Subject: iommu/arm-smmu: Split arm_smmu_tlb_inv_range_nosync() Since we now use separate iommu_gather_ops for stage 1 and stage 2 contexts, we may as well divide up the monolithic callback into its respective stage 1 and stage 2 parts. 
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 64 +++++++++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 19126230c780..5b12e96d7878 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -490,46 +490,54 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie) arm_smmu_tlb_sync_global(smmu); } -static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size, + size_t granule, bool leaf, void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; - void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); + void __iomem *reg = ARM_SMMU_CB(smmu, cfg->cbndx); - if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) wmb(); - if (stage1) { - reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; - - if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { - iova = (iova >> 12) << 12; - iova |= cfg->asid; - do { - writel_relaxed(iova, reg); - iova += granule; - } while (size -= granule); - } else { - iova >>= 12; - iova |= (u64)cfg->asid << 48; - do { - writeq_relaxed(iova, reg); - iova += granule >> 12; - } while (size -= granule); - } + reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; + + if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { + iova = (iova >> 12) << 12; + iova |= cfg->asid; + do { + writel_relaxed(iova, reg); + iova += granule; + } while (size -= granule); } else { - reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : - ARM_SMMU_CB_S2_TLBIIPAS2; iova >>= 12; + iova |= (u64)cfg->asid << 48; do { - smmu_write_atomic_lq(iova, reg); + writeq_relaxed(iova, reg); iova += granule >> 12; } while (size -= granule); } } +static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, + size_t granule, bool leaf, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; + void __iomem *reg = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); + + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + wmb(); + + reg += leaf ? 
ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2; + iova >>= 12; + do { + smmu_write_atomic_lq(iova, reg); + iova += granule >> 12; + } while (size -= granule); +} + /* * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears * almost negligible, but the benefit of getting the first one in as far ahead @@ -550,13 +558,13 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context_s1, - .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, + .tlb_add_flush = arm_smmu_tlb_inv_range_s1, .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, + .tlb_add_flush = arm_smmu_tlb_inv_range_s2, .tlb_sync = arm_smmu_tlb_sync_context, }; -- cgit v1.2.3 From 6100576284e9df139477817e1a207b5fae1a3df8 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:28 +0100 Subject: iommu/arm-smmu: Get rid of weird "atomic" write The smmu_write_atomic_lq oddity made some sense when the context format was effectively tied to CONFIG_64BIT, but these days it's simpler to just pick an explicit access size based on the format for the one-and-a-half times we actually care. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 5b12e96d7878..24b4de1a4185 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -83,17 +83,6 @@ ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ ? 0x400 : 0)) -/* - * Some 64-bit registers only make sense to write atomically, but in such - * cases all the data relevant to AArch32 formats lies within the lower word, - * therefore this actually makes more sense than it might first appear. - */ -#ifdef CONFIG_64BIT -#define smmu_write_atomic_lq writeq_relaxed -#else -#define smmu_write_atomic_lq writel_relaxed -#endif - /* Translation context bank */ #define ARM_SMMU_CB(smmu, n) ((smmu)->base + (((smmu)->numpage + (n)) << (smmu)->pgshift)) @@ -533,7 +522,10 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, reg += leaf ? 
ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2; iova >>= 12; do { - smmu_write_atomic_lq(iova, reg); + if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64) + writeq_relaxed(iova, reg); + else + writel_relaxed(iova, reg); iova += granule >> 12; } while (size -= granule); } @@ -1371,11 +1363,10 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); spin_lock_irqsave(&smmu_domain->cb_lock, flags); - /* ATS1 registers can only be written atomically */ va = iova & ~0xfffUL; - if (smmu->version == ARM_SMMU_V2) - smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR); - else /* Register is only 32-bit in v1 */ + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) + writeq_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR); + else writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR); if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, -- cgit v1.2.3 From aadbf2143aeb61368f4b9240b4f8960a0d08c473 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:29 +0100 Subject: iommu/arm-smmu: Abstract GR1 accesses Introduce some register access abstractions which we will later use to encapsulate various quirks. GR1 is the easiest page to start with. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 24b4de1a4185..d612dda2889f 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -71,7 +71,6 @@ /* SMMU global address space */ #define ARM_SMMU_GR0(smmu) ((smmu)->base) -#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift)) /* * SMMU global address space with conditional offset to access secure @@ -250,6 +249,29 @@ struct arm_smmu_domain { struct iommu_domain domain; }; +static void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) +{ + return smmu->base + (n << smmu->pgshift); +} + +static u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset) +{ + return readl_relaxed(arm_smmu_page(smmu, page) + offset); +} + +static void arm_smmu_writel(struct arm_smmu_device *smmu, int page, int offset, + u32 val) +{ + writel_relaxed(val, arm_smmu_page(smmu, page) + offset); +} + +#define ARM_SMMU_GR1 1 + +#define arm_smmu_gr1_read(s, o) \ + arm_smmu_readl((s), ARM_SMMU_GR1, (o)) +#define arm_smmu_gr1_write(s, o, v) \ + arm_smmu_writel((s), ARM_SMMU_GR1, (o), (v)) + struct arm_smmu_option_prop { u32 opt; const char *prop; @@ -574,7 +596,6 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_cfg *cfg = &smmu_domain->cfg; struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *gr1_base = ARM_SMMU_GR1(smmu); void __iomem *cb_base; cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); @@ -585,7 +606,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); - cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx)); + cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx)); dev_err_ratelimited(smmu->dev, "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", @@ -676,7 +697,7 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) bool stage1; struct arm_smmu_cb *cb = &smmu->cbs[idx]; struct arm_smmu_cfg 
*cfg = cb->cfg; - void __iomem *cb_base, *gr1_base; + void __iomem *cb_base; cb_base = ARM_SMMU_CB(smmu, idx); @@ -686,7 +707,6 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) return; } - gr1_base = ARM_SMMU_GR1(smmu); stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; /* CBA2R */ @@ -699,7 +719,7 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) if (smmu->features & ARM_SMMU_FEAT_VMID16) reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid); - writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx)); + arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg); } /* CBAR */ @@ -718,7 +738,7 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) /* 8-bit VMIDs live in CBAR */ reg |= FIELD_PREP(CBAR_VMID, cfg->vmid); } - writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx)); + arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg); /* * TCR -- cgit v1.2.3 From 19713fd40df8e65759e836129671be5f6f21c626 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:30 +0100 Subject: iommu/arm-smmu: Abstract context bank accesses Context bank accesses are fiddly enough to deserve a number of extra helpers to keep the callsites looking sane, even though there are only one or two of each. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 138 +++++++++++++++++++++++++---------------------- 1 file changed, 73 insertions(+), 65 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index d612dda2889f..e72554f334ee 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -82,9 +82,6 @@ ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ ? 0x400 : 0)) -/* Translation context bank */ -#define ARM_SMMU_CB(smmu, n) ((smmu)->base + (((smmu)->numpage + (n)) << (smmu)->pgshift)) - #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 @@ -265,13 +262,34 @@ static void arm_smmu_writel(struct arm_smmu_device *smmu, int page, int offset, writel_relaxed(val, arm_smmu_page(smmu, page) + offset); } +static u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset) +{ + return readq_relaxed(arm_smmu_page(smmu, page) + offset); +} + +static void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, int offset, + u64 val) +{ + writeq_relaxed(val, arm_smmu_page(smmu, page) + offset); +} + #define ARM_SMMU_GR1 1 +#define ARM_SMMU_CB(s, n) ((s)->numpage + (n)) #define arm_smmu_gr1_read(s, o) \ arm_smmu_readl((s), ARM_SMMU_GR1, (o)) #define arm_smmu_gr1_write(s, o, v) \ arm_smmu_writel((s), ARM_SMMU_GR1, (o), (v)) +#define arm_smmu_cb_read(s, n, o) \ + arm_smmu_readl((s), ARM_SMMU_CB((s), (n)), (o)) +#define arm_smmu_cb_write(s, n, o, v) \ + arm_smmu_writel((s), ARM_SMMU_CB((s), (n)), (o), (v)) +#define arm_smmu_cb_readq(s, n, o) \ + arm_smmu_readq((s), ARM_SMMU_CB((s), (n)), (o)) +#define arm_smmu_cb_writeq(s, n, o, v) \ + arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v)) + struct arm_smmu_option_prop { u32 opt; const char *prop; @@ -427,15 +445,17 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx) } /* Wait for any pending TLB invalidations to complete */ -static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, - void __iomem *sync, void __iomem *status) +static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page, + int sync, int status) { unsigned int spin_cnt, delay; + u32 reg; - writel_relaxed(QCOM_DUMMY_VAL, sync); + arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL); for (delay = 
1; delay < TLB_LOOP_TIMEOUT; delay *= 2) { for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { - if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE)) + reg = arm_smmu_readl(smmu, page, status); + if (!(reg & sTLBGSTATUS_GSACTIVE)) return; cpu_relax(); } @@ -447,12 +467,11 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) { - void __iomem *base = ARM_SMMU_GR0(smmu); unsigned long flags; spin_lock_irqsave(&smmu->global_sync_lock, flags); - __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, - base + ARM_SMMU_GR0_sTLBGSTATUS); + __arm_smmu_tlb_sync(smmu, 0, ARM_SMMU_GR0_sTLBGSYNC, + ARM_SMMU_GR0_sTLBGSTATUS); spin_unlock_irqrestore(&smmu->global_sync_lock, flags); } @@ -460,12 +479,11 @@ static void arm_smmu_tlb_sync_context(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); unsigned long flags; spin_lock_irqsave(&smmu_domain->cb_lock, flags); - __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, - base + ARM_SMMU_CB_TLBSTATUS); + __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx), + ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS); spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); } @@ -479,14 +497,13 @@ static void arm_smmu_tlb_sync_vmid(void *cookie) static void arm_smmu_tlb_inv_context_s1(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); - /* - * NOTE: this is not a relaxed write; it needs to guarantee that PTEs - * cleared by the current CPU are visible to the SMMU before the TLBI. + * The TLBI write may be relaxed, so ensure that PTEs cleared by the + * current CPU are visible beforehand. */ - writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID); + wmb(); + arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx, + ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid); arm_smmu_tlb_sync_context(cookie); } @@ -507,25 +524,25 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size, struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - void __iomem *reg = ARM_SMMU_CB(smmu, cfg->cbndx); + int reg, idx = cfg->cbndx; if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) wmb(); - reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; + reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { iova = (iova >> 12) << 12; iova |= cfg->asid; do { - writel_relaxed(iova, reg); + arm_smmu_cb_write(smmu, idx, reg, iova); iova += granule; } while (size -= granule); } else { iova >>= 12; iova |= (u64)cfg->asid << 48; do { - writeq_relaxed(iova, reg); + arm_smmu_cb_writeq(smmu, idx, reg, iova); iova += granule >> 12; } while (size -= granule); } @@ -536,18 +553,18 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *reg = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); + int reg, idx = smmu_domain->cfg.cbndx; if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) wmb(); - reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2; + reg = leaf ? 
ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2; iova >>= 12; do { if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64) - writeq_relaxed(iova, reg); + arm_smmu_cb_writeq(smmu, idx, reg, iova); else - writel_relaxed(iova, reg); + arm_smmu_cb_write(smmu, idx, reg, iova); iova += granule >> 12; } while (size -= granule); } @@ -594,25 +611,22 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) unsigned long iova; struct iommu_domain *domain = dev; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *cb_base; - - cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); - fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); + int idx = smmu_domain->cfg.cbndx; + fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); if (!(fsr & FSR_FAULT)) return IRQ_NONE; - fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); - iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); - cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx)); + fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0); + iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR); + cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx)); dev_err_ratelimited(smmu->dev, "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", - fsr, iova, fsynr, cbfrsynra, cfg->cbndx); + fsr, iova, fsynr, cbfrsynra, idx); - writel(fsr, cb_base + ARM_SMMU_CB_FSR); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); return IRQ_HANDLED; } @@ -697,13 +711,10 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) bool stage1; struct arm_smmu_cb *cb = &smmu->cbs[idx]; struct arm_smmu_cfg *cfg = cb->cfg; - void __iomem *cb_base; - - cb_base = ARM_SMMU_CB(smmu, idx); /* Unassigned context banks only need disabling */ if (!cfg) { - writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0); return; } @@ -746,24 +757,25 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) * access behaviour of some fields (in particular, ASID[15:8]). 
*/ if (stage1 && smmu->version > ARM_SMMU_V1) - writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TCR2); - writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TCR); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]); /* TTBRs */ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { - writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); - writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0); - writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]); } else { - writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0); + arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); if (stage1) - writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1); + arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1, + cb->ttbr[1]); } /* MAIRs (stage-1 only) */ if (stage1) { - writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0); - writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]); } /* SCTLR */ @@ -773,7 +785,7 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) reg |= SCTLR_E; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg); } static int arm_smmu_init_domain_context(struct iommu_domain *domain, @@ -1370,27 +1382,25 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, struct arm_smmu_cfg *cfg = &smmu_domain->cfg; struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; struct device *dev = smmu->dev; - void __iomem *cb_base; + void __iomem *reg; u32 tmp; u64 phys; unsigned long va, flags; - int ret; + int ret, idx = cfg->cbndx; ret = arm_smmu_rpm_get(smmu); if (ret < 0) return 0; - cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); - spin_lock_irqsave(&smmu_domain->cb_lock, flags); va = iova & ~0xfffUL; if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) - writeq_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR); + arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va); else - writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va); - if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, - !(tmp & ATSR_ACTIVE), 5, 50)) { + reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR; + if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) { spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); dev_err(dev, "iova to phys timed out on %pad. 
Falling back to software table walk.\n", @@ -1398,7 +1408,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, return ops->iova_to_phys(ops, iova); } - phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR); + phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR); spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); if (phys & CB_PAR_F) { dev_err(dev, "translation fault!\n"); @@ -1762,18 +1772,16 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) /* Make sure all context banks are disabled and clear CB_FSR */ for (i = 0; i < smmu->num_context_banks; ++i) { - void __iomem *cb_base = ARM_SMMU_CB(smmu, i); - arm_smmu_write_context_bank(smmu, i); - writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); + arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT); /* * Disable MMU-500's not-particularly-beneficial next-page * prefetcher for the sake of errata #841119 and #826419. */ if (smmu->model == ARM_MMU500) { - reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR); + reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); reg &= ~ARM_MMU500_ACTLR_CPRE; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR); + arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg); } } -- cgit v1.2.3 From 00320ce6505821b405992c241ebe4c79f178bf8f Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:31 +0100 Subject: iommu/arm-smmu: Abstract GR0 accesses Clean up the remaining accesses to GR0 registers, so that everything is now neatly abstracted. This folds up the Non-Secure alias quirk as the first step towards moving it out of the way entirely. Although GR0 does technically contain some 64-bit registers (sGFAR and the weird SMMUv2 HYPC and MONC stuff), they're not ones we have any need to access. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 106 ++++++++++++++++++++++++++--------------------- 1 file changed, 58 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e72554f334ee..e9fd9117109e 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -69,19 +69,6 @@ /* Maximum number of context banks per SMMU */ #define ARM_SMMU_MAX_CBS 128 -/* SMMU global address space */ -#define ARM_SMMU_GR0(smmu) ((smmu)->base) - -/* - * SMMU global address space with conditional offset to access secure - * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448, - * nsGFSYNR0: 0x450) - */ -#define ARM_SMMU_GR0_NS(smmu) \ - ((smmu)->base + \ - ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ - ? 
0x400 : 0)) - #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 @@ -246,6 +233,21 @@ struct arm_smmu_domain { struct iommu_domain domain; }; +static int arm_smmu_gr0_ns(int offset) +{ + switch(offset) { + case ARM_SMMU_GR0_sCR0: + case ARM_SMMU_GR0_sACR: + case ARM_SMMU_GR0_sGFSR: + case ARM_SMMU_GR0_sGFSYNR0: + case ARM_SMMU_GR0_sGFSYNR1: + case ARM_SMMU_GR0_sGFSYNR2: + return offset + 0x400; + default: + return offset; + } +} + static void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) { return smmu->base + (n << smmu->pgshift); @@ -253,12 +255,18 @@ static void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) static u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset) { + if ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) && page == 0) + offset = arm_smmu_gr0_ns(offset); + return readl_relaxed(arm_smmu_page(smmu, page) + offset); } static void arm_smmu_writel(struct arm_smmu_device *smmu, int page, int offset, u32 val) { + if ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) && page == 0) + offset = arm_smmu_gr0_ns(offset); + writel_relaxed(val, arm_smmu_page(smmu, page) + offset); } @@ -273,9 +281,15 @@ static void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, int offset, writeq_relaxed(val, arm_smmu_page(smmu, page) + offset); } +#define ARM_SMMU_GR0 0 #define ARM_SMMU_GR1 1 #define ARM_SMMU_CB(s, n) ((s)->numpage + (n)) +#define arm_smmu_gr0_read(s, o) \ + arm_smmu_readl((s), ARM_SMMU_GR0, (o)) +#define arm_smmu_gr0_write(s, o, v) \ + arm_smmu_writel((s), ARM_SMMU_GR0, (o), (v)) + #define arm_smmu_gr1_read(s, o) \ arm_smmu_readl((s), ARM_SMMU_GR1, (o)) #define arm_smmu_gr1_write(s, o, v) \ @@ -470,7 +484,7 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) unsigned long flags; spin_lock_irqsave(&smmu->global_sync_lock, flags); - __arm_smmu_tlb_sync(smmu, 0, ARM_SMMU_GR0_sTLBGSYNC, + __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC, ARM_SMMU_GR0_sTLBGSTATUS); spin_unlock_irqrestore(&smmu->global_sync_lock, flags); } @@ -511,10 +525,10 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *base = ARM_SMMU_GR0(smmu); - /* NOTE: see above */ - writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); + /* See above */ + wmb(); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); arm_smmu_tlb_sync_global(smmu); } @@ -579,12 +593,12 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; - void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu); + struct arm_smmu_device *smmu = smmu_domain->smmu; - if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) wmb(); - writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); } static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = { @@ -634,12 +648,11 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) { u32 gfsr, gfsynr0, gfsynr1, gfsynr2; struct arm_smmu_device *smmu = dev; - void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu); - gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); - gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); - gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); - gfsynr2 = readl_relaxed(gr0_base + 
ARM_SMMU_GR0_sGFSYNR2); + gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR); + gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0); + gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1); + gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2); if (!gfsr) return IRQ_NONE; @@ -650,7 +663,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", gfsr, gfsynr0, gfsynr1, gfsynr2); - writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr); return IRQ_HANDLED; } @@ -1056,7 +1069,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) reg |= SMR_VALID; - writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg); } static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) @@ -1069,7 +1082,7 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && smmu->smrs[idx].valid) reg |= S2CR_EXIDVALID; - writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx)); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg); } static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) @@ -1085,7 +1098,6 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) */ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu) { - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); u32 smr; if (!smmu->smrs) @@ -1097,13 +1109,13 @@ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu) * masters later if they try to claim IDs outside these masks. */ smr = FIELD_PREP(SMR_ID, smmu->streamid_mask); - writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); - smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr); + smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0)); smmu->streamid_mask = FIELD_GET(SMR_ID, smr); smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask); - writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); - smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr); + smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0)); smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr); } @@ -1736,13 +1748,12 @@ static struct iommu_ops arm_smmu_ops = { static void arm_smmu_device_reset(struct arm_smmu_device *smmu) { - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); int i; u32 reg, major; /* clear global FSR */ - reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); - writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); + reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg); /* * Reset stream mapping groups: Initial values mark all SMRn as @@ -1757,9 +1768,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK * bit is only present in MMU-500r2 onwards. */ - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7); + reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7); major = FIELD_GET(ID7_MAJOR, reg); - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR); + reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR); if (major >= 2) reg &= ~ARM_MMU500_ACR_CACHE_LOCK; /* @@ -1767,7 +1778,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) * TLB entries for reduced latency. 
*/ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN; - writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg); } /* Make sure all context banks are disabled and clear CB_FSR */ @@ -1786,10 +1797,10 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) } /* Invalidate the TLB, just in case */ - writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH); - writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL); - reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); + reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0); /* Enable fault reporting */ reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); @@ -1818,7 +1829,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) /* Push the button */ arm_smmu_tlb_sync_global(smmu); - writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg); } static int arm_smmu_id_size_to_bits(int size) @@ -1843,7 +1854,6 @@ static int arm_smmu_id_size_to_bits(int size) static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) { unsigned int size; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); u32 id; bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; int i; @@ -1853,7 +1863,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->version == ARM_SMMU_V2 ? 2 : 1); /* ID0 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); + id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0); /* Restrict available stages based on module parameter */ if (force_stage == 1) @@ -1947,7 +1957,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) } /* ID1 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); + id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1); smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; /* Check for size mismatch of SMMU address space from mapped region */ @@ -1985,7 +1995,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) return -ENOMEM; /* ID2 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); + id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2); size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id)); smmu->ipa_size = size; @@ -2372,7 +2382,7 @@ static void arm_smmu_device_shutdown(struct platform_device *pdev) arm_smmu_rpm_get(smmu); /* Turn the thing off */ - writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD); arm_smmu_rpm_put(smmu); if (pm_runtime_enabled(smmu->dev)) -- cgit v1.2.3 From c5fc64881f073504581ac55132e30f02ee61320b Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:32 +0100 Subject: iommu/arm-smmu: Rename arm-smmu-regs.h We're about to start using it for more than just register definitions, so generalise the name. 
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-regs.h | 197 ------------------------------------------ drivers/iommu/arm-smmu.c | 2 +- drivers/iommu/arm-smmu.h | 197 ++++++++++++++++++++++++++++++++++++++++++ drivers/iommu/qcom_iommu.c | 2 +- 4 files changed, 199 insertions(+), 199 deletions(-) delete mode 100644 drivers/iommu/arm-smmu-regs.h create mode 100644 drivers/iommu/arm-smmu.h (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h deleted file mode 100644 index a8e288192285..000000000000 --- a/drivers/iommu/arm-smmu-regs.h +++ /dev/null @@ -1,197 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * IOMMU API for ARM architected SMMU implementations. - * - * Copyright (C) 2013 ARM Limited - * - * Author: Will Deacon - */ - -#ifndef _ARM_SMMU_REGS_H -#define _ARM_SMMU_REGS_H - -#include - -/* Configuration registers */ -#define ARM_SMMU_GR0_sCR0 0x0 -#define sCR0_VMID16EN BIT(31) -#define sCR0_BSU GENMASK(15, 14) -#define sCR0_FB BIT(13) -#define sCR0_PTM BIT(12) -#define sCR0_VMIDPNE BIT(11) -#define sCR0_USFCFG BIT(10) -#define sCR0_GCFGFIE BIT(5) -#define sCR0_GCFGFRE BIT(4) -#define sCR0_EXIDENABLE BIT(3) -#define sCR0_GFIE BIT(2) -#define sCR0_GFRE BIT(1) -#define sCR0_CLIENTPD BIT(0) - -/* Auxiliary Configuration register */ -#define ARM_SMMU_GR0_sACR 0x10 - -/* Identification registers */ -#define ARM_SMMU_GR0_ID0 0x20 -#define ID0_S1TS BIT(30) -#define ID0_S2TS BIT(29) -#define ID0_NTS BIT(28) -#define ID0_SMS BIT(27) -#define ID0_ATOSNS BIT(26) -#define ID0_PTFS_NO_AARCH32 BIT(25) -#define ID0_PTFS_NO_AARCH32S BIT(24) -#define ID0_NUMIRPT GENMASK(23, 16) -#define ID0_CTTW BIT(14) -#define ID0_NUMSIDB GENMASK(12, 9) -#define ID0_EXIDS BIT(8) -#define ID0_NUMSMRG GENMASK(7, 0) - -#define ARM_SMMU_GR0_ID1 0x24 -#define ID1_PAGESIZE BIT(31) -#define ID1_NUMPAGENDXB GENMASK(30, 28) -#define ID1_NUMS2CB GENMASK(23, 16) -#define ID1_NUMCB GENMASK(7, 0) - -#define ARM_SMMU_GR0_ID2 0x28 -#define ID2_VMID16 BIT(15) -#define ID2_PTFS_64K BIT(14) -#define ID2_PTFS_16K BIT(13) -#define ID2_PTFS_4K BIT(12) -#define ID2_UBS GENMASK(11, 8) -#define ID2_OAS GENMASK(7, 4) -#define ID2_IAS GENMASK(3, 0) - -#define ARM_SMMU_GR0_ID3 0x2c -#define ARM_SMMU_GR0_ID4 0x30 -#define ARM_SMMU_GR0_ID5 0x34 -#define ARM_SMMU_GR0_ID6 0x38 - -#define ARM_SMMU_GR0_ID7 0x3c -#define ID7_MAJOR GENMASK(7, 4) -#define ID7_MINOR GENMASK(3, 0) - -#define ARM_SMMU_GR0_sGFSR 0x48 -#define ARM_SMMU_GR0_sGFSYNR0 0x50 -#define ARM_SMMU_GR0_sGFSYNR1 0x54 -#define ARM_SMMU_GR0_sGFSYNR2 0x58 - -/* Global TLB invalidation */ -#define ARM_SMMU_GR0_TLBIVMID 0x64 -#define ARM_SMMU_GR0_TLBIALLNSNH 0x68 -#define ARM_SMMU_GR0_TLBIALLH 0x6c -#define ARM_SMMU_GR0_sTLBGSYNC 0x70 - -#define ARM_SMMU_GR0_sTLBGSTATUS 0x74 -#define sTLBGSTATUS_GSACTIVE BIT(0) - -/* Stream mapping registers */ -#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) -#define SMR_VALID BIT(31) -#define SMR_MASK GENMASK(31, 16) -#define SMR_ID GENMASK(15, 0) - -#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) -#define S2CR_PRIVCFG GENMASK(25, 24) -enum arm_smmu_s2cr_privcfg { - S2CR_PRIVCFG_DEFAULT, - S2CR_PRIVCFG_DIPAN, - S2CR_PRIVCFG_UNPRIV, - S2CR_PRIVCFG_PRIV, -}; -#define S2CR_TYPE GENMASK(17, 16) -enum arm_smmu_s2cr_type { - S2CR_TYPE_TRANS, - S2CR_TYPE_BYPASS, - S2CR_TYPE_FAULT, -}; -#define S2CR_EXIDVALID BIT(10) -#define S2CR_CBNDX GENMASK(7, 0) - -/* Context bank attribute registers */ -#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) -#define CBAR_IRPTNDX 
GENMASK(31, 24) -#define CBAR_TYPE GENMASK(17, 16) -enum arm_smmu_cbar_type { - CBAR_TYPE_S2_TRANS, - CBAR_TYPE_S1_TRANS_S2_BYPASS, - CBAR_TYPE_S1_TRANS_S2_FAULT, - CBAR_TYPE_S1_TRANS_S2_TRANS, -}; -#define CBAR_S1_MEMATTR GENMASK(15, 12) -#define CBAR_S1_MEMATTR_WB 0xf -#define CBAR_S1_BPSHCFG GENMASK(9, 8) -#define CBAR_S1_BPSHCFG_NSH 3 -#define CBAR_VMID GENMASK(7, 0) - -#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2)) - -#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) -#define CBA2R_VMID16 GENMASK(31, 16) -#define CBA2R_VA64 BIT(0) - -#define ARM_SMMU_CB_SCTLR 0x0 -#define SCTLR_S1_ASIDPNE BIT(12) -#define SCTLR_CFCFG BIT(7) -#define SCTLR_CFIE BIT(6) -#define SCTLR_CFRE BIT(5) -#define SCTLR_E BIT(4) -#define SCTLR_AFE BIT(2) -#define SCTLR_TRE BIT(1) -#define SCTLR_M BIT(0) - -#define ARM_SMMU_CB_ACTLR 0x4 - -#define ARM_SMMU_CB_RESUME 0x8 -#define RESUME_TERMINATE BIT(0) - -#define ARM_SMMU_CB_TCR2 0x10 -#define TCR2_SEP GENMASK(17, 15) -#define TCR2_SEP_UPSTREAM 0x7 -#define TCR2_AS BIT(4) - -#define ARM_SMMU_CB_TTBR0 0x20 -#define ARM_SMMU_CB_TTBR1 0x28 -#define TTBRn_ASID GENMASK_ULL(63, 48) - -#define ARM_SMMU_CB_TCR 0x30 -#define ARM_SMMU_CB_CONTEXTIDR 0x34 -#define ARM_SMMU_CB_S1_MAIR0 0x38 -#define ARM_SMMU_CB_S1_MAIR1 0x3c - -#define ARM_SMMU_CB_PAR 0x50 -#define CB_PAR_F BIT(0) - -#define ARM_SMMU_CB_FSR 0x58 -#define FSR_MULTI BIT(31) -#define FSR_SS BIT(30) -#define FSR_UUT BIT(8) -#define FSR_ASF BIT(7) -#define FSR_TLBLKF BIT(6) -#define FSR_TLBMCF BIT(5) -#define FSR_EF BIT(4) -#define FSR_PF BIT(3) -#define FSR_AFF BIT(2) -#define FSR_TF BIT(1) - -#define FSR_IGN (FSR_AFF | FSR_ASF | \ - FSR_TLBMCF | FSR_TLBLKF) -#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ - FSR_EF | FSR_PF | FSR_TF | FSR_IGN) - -#define ARM_SMMU_CB_FAR 0x60 - -#define ARM_SMMU_CB_FSYNR0 0x68 -#define FSYNR0_WNR BIT(4) - -#define ARM_SMMU_CB_S1_TLBIVA 0x600 -#define ARM_SMMU_CB_S1_TLBIASID 0x610 -#define ARM_SMMU_CB_S1_TLBIVAL 0x620 -#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 -#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 -#define ARM_SMMU_CB_TLBSYNC 0x7f0 -#define ARM_SMMU_CB_TLBSTATUS 0x7f4 -#define ARM_SMMU_CB_ATS1PR 0x800 - -#define ARM_SMMU_CB_ATSR 0x8f0 -#define ATSR_ACTIVE BIT(0) - -#endif /* _ARM_SMMU_REGS_H */ diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e9fd9117109e..f3b8301a3059 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -46,7 +46,7 @@ #include #include -#include "arm-smmu-regs.h" +#include "arm-smmu.h" /* * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h new file mode 100644 index 000000000000..ccc3097a4247 --- /dev/null +++ b/drivers/iommu/arm-smmu.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * IOMMU API for ARM architected SMMU implementations. 
+ * + * Copyright (C) 2013 ARM Limited + * + * Author: Will Deacon + */ + +#ifndef _ARM_SMMU_H +#define _ARM_SMMU_H + +#include + +/* Configuration registers */ +#define ARM_SMMU_GR0_sCR0 0x0 +#define sCR0_VMID16EN BIT(31) +#define sCR0_BSU GENMASK(15, 14) +#define sCR0_FB BIT(13) +#define sCR0_PTM BIT(12) +#define sCR0_VMIDPNE BIT(11) +#define sCR0_USFCFG BIT(10) +#define sCR0_GCFGFIE BIT(5) +#define sCR0_GCFGFRE BIT(4) +#define sCR0_EXIDENABLE BIT(3) +#define sCR0_GFIE BIT(2) +#define sCR0_GFRE BIT(1) +#define sCR0_CLIENTPD BIT(0) + +/* Auxiliary Configuration register */ +#define ARM_SMMU_GR0_sACR 0x10 + +/* Identification registers */ +#define ARM_SMMU_GR0_ID0 0x20 +#define ID0_S1TS BIT(30) +#define ID0_S2TS BIT(29) +#define ID0_NTS BIT(28) +#define ID0_SMS BIT(27) +#define ID0_ATOSNS BIT(26) +#define ID0_PTFS_NO_AARCH32 BIT(25) +#define ID0_PTFS_NO_AARCH32S BIT(24) +#define ID0_NUMIRPT GENMASK(23, 16) +#define ID0_CTTW BIT(14) +#define ID0_NUMSIDB GENMASK(12, 9) +#define ID0_EXIDS BIT(8) +#define ID0_NUMSMRG GENMASK(7, 0) + +#define ARM_SMMU_GR0_ID1 0x24 +#define ID1_PAGESIZE BIT(31) +#define ID1_NUMPAGENDXB GENMASK(30, 28) +#define ID1_NUMS2CB GENMASK(23, 16) +#define ID1_NUMCB GENMASK(7, 0) + +#define ARM_SMMU_GR0_ID2 0x28 +#define ID2_VMID16 BIT(15) +#define ID2_PTFS_64K BIT(14) +#define ID2_PTFS_16K BIT(13) +#define ID2_PTFS_4K BIT(12) +#define ID2_UBS GENMASK(11, 8) +#define ID2_OAS GENMASK(7, 4) +#define ID2_IAS GENMASK(3, 0) + +#define ARM_SMMU_GR0_ID3 0x2c +#define ARM_SMMU_GR0_ID4 0x30 +#define ARM_SMMU_GR0_ID5 0x34 +#define ARM_SMMU_GR0_ID6 0x38 + +#define ARM_SMMU_GR0_ID7 0x3c +#define ID7_MAJOR GENMASK(7, 4) +#define ID7_MINOR GENMASK(3, 0) + +#define ARM_SMMU_GR0_sGFSR 0x48 +#define ARM_SMMU_GR0_sGFSYNR0 0x50 +#define ARM_SMMU_GR0_sGFSYNR1 0x54 +#define ARM_SMMU_GR0_sGFSYNR2 0x58 + +/* Global TLB invalidation */ +#define ARM_SMMU_GR0_TLBIVMID 0x64 +#define ARM_SMMU_GR0_TLBIALLNSNH 0x68 +#define ARM_SMMU_GR0_TLBIALLH 0x6c +#define ARM_SMMU_GR0_sTLBGSYNC 0x70 + +#define ARM_SMMU_GR0_sTLBGSTATUS 0x74 +#define sTLBGSTATUS_GSACTIVE BIT(0) + +/* Stream mapping registers */ +#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) +#define SMR_VALID BIT(31) +#define SMR_MASK GENMASK(31, 16) +#define SMR_ID GENMASK(15, 0) + +#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) +#define S2CR_PRIVCFG GENMASK(25, 24) +enum arm_smmu_s2cr_privcfg { + S2CR_PRIVCFG_DEFAULT, + S2CR_PRIVCFG_DIPAN, + S2CR_PRIVCFG_UNPRIV, + S2CR_PRIVCFG_PRIV, +}; +#define S2CR_TYPE GENMASK(17, 16) +enum arm_smmu_s2cr_type { + S2CR_TYPE_TRANS, + S2CR_TYPE_BYPASS, + S2CR_TYPE_FAULT, +}; +#define S2CR_EXIDVALID BIT(10) +#define S2CR_CBNDX GENMASK(7, 0) + +/* Context bank attribute registers */ +#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) +#define CBAR_IRPTNDX GENMASK(31, 24) +#define CBAR_TYPE GENMASK(17, 16) +enum arm_smmu_cbar_type { + CBAR_TYPE_S2_TRANS, + CBAR_TYPE_S1_TRANS_S2_BYPASS, + CBAR_TYPE_S1_TRANS_S2_FAULT, + CBAR_TYPE_S1_TRANS_S2_TRANS, +}; +#define CBAR_S1_MEMATTR GENMASK(15, 12) +#define CBAR_S1_MEMATTR_WB 0xf +#define CBAR_S1_BPSHCFG GENMASK(9, 8) +#define CBAR_S1_BPSHCFG_NSH 3 +#define CBAR_VMID GENMASK(7, 0) + +#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2)) + +#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) +#define CBA2R_VMID16 GENMASK(31, 16) +#define CBA2R_VA64 BIT(0) + +#define ARM_SMMU_CB_SCTLR 0x0 +#define SCTLR_S1_ASIDPNE BIT(12) +#define SCTLR_CFCFG BIT(7) +#define SCTLR_CFIE BIT(6) +#define SCTLR_CFRE BIT(5) +#define SCTLR_E BIT(4) +#define SCTLR_AFE BIT(2) +#define SCTLR_TRE 
BIT(1) +#define SCTLR_M BIT(0) + +#define ARM_SMMU_CB_ACTLR 0x4 + +#define ARM_SMMU_CB_RESUME 0x8 +#define RESUME_TERMINATE BIT(0) + +#define ARM_SMMU_CB_TCR2 0x10 +#define TCR2_SEP GENMASK(17, 15) +#define TCR2_SEP_UPSTREAM 0x7 +#define TCR2_AS BIT(4) + +#define ARM_SMMU_CB_TTBR0 0x20 +#define ARM_SMMU_CB_TTBR1 0x28 +#define TTBRn_ASID GENMASK_ULL(63, 48) + +#define ARM_SMMU_CB_TCR 0x30 +#define ARM_SMMU_CB_CONTEXTIDR 0x34 +#define ARM_SMMU_CB_S1_MAIR0 0x38 +#define ARM_SMMU_CB_S1_MAIR1 0x3c + +#define ARM_SMMU_CB_PAR 0x50 +#define CB_PAR_F BIT(0) + +#define ARM_SMMU_CB_FSR 0x58 +#define FSR_MULTI BIT(31) +#define FSR_SS BIT(30) +#define FSR_UUT BIT(8) +#define FSR_ASF BIT(7) +#define FSR_TLBLKF BIT(6) +#define FSR_TLBMCF BIT(5) +#define FSR_EF BIT(4) +#define FSR_PF BIT(3) +#define FSR_AFF BIT(2) +#define FSR_TF BIT(1) + +#define FSR_IGN (FSR_AFF | FSR_ASF | \ + FSR_TLBMCF | FSR_TLBLKF) +#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ + FSR_EF | FSR_PF | FSR_TF | FSR_IGN) + +#define ARM_SMMU_CB_FAR 0x60 + +#define ARM_SMMU_CB_FSYNR0 0x68 +#define FSYNR0_WNR BIT(4) + +#define ARM_SMMU_CB_S1_TLBIVA 0x600 +#define ARM_SMMU_CB_S1_TLBIASID 0x610 +#define ARM_SMMU_CB_S1_TLBIVAL 0x620 +#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 +#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 +#define ARM_SMMU_CB_TLBSYNC 0x7f0 +#define ARM_SMMU_CB_TLBSTATUS 0x7f4 +#define ARM_SMMU_CB_ATS1PR 0x800 + +#define ARM_SMMU_CB_ATSR 0x8f0 +#define ATSR_ACTIVE BIT(0) + +#endif /* _ARM_SMMU_H */ diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 60a125dd7300..a2062d13584f 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -33,7 +33,7 @@ #include #include -#include "arm-smmu-regs.h" +#include "arm-smmu.h" #define SMMU_INTR_SEL_NS 0x2000 -- cgit v1.2.3 From fc058d37b3450db3e146d475f85e6afd51888997 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:33 +0100 Subject: iommu/arm-smmu: Add implementation infrastructure Add some nascent infrastructure for handling implementation-specific details outside the flow of the architectural code. This will allow us to keep mutually-incompatible vendor-specific hooks in their own files where the respective interested parties can maintain them with minimal chance of conflicts. As somewhat of a template, we'll start with a general place to collect the relatively trivial existing quirks. 
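A brief sketch of where this is headed, for orientation: the patches that follow add a struct arm_smmu_impl of per-SMMU hooks and an smmu->impl pointer, so a vendor quirk ends up plugging in roughly as below. The vendor and model names are placeholders, not code from this series:

	/* Hypothetical quirk collected in arm-smmu-impl.c (names illustrative) */
	static const struct arm_smmu_impl example_impl = {
		/* override register accessors, add probe fixups, etc. */
	};

	struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
	{
		if (smmu->model == EXAMPLE_SMMUV2)	/* hypothetical model value */
			smmu->impl = &example_impl;

		return smmu;
	}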
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- MAINTAINERS | 3 +- drivers/iommu/Makefile | 2 +- drivers/iommu/arm-smmu-impl.c | 13 +++++++ drivers/iommu/arm-smmu.c | 82 ++------------------------------------- drivers/iommu/arm-smmu.h | 89 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 108 insertions(+), 81 deletions(-) create mode 100644 drivers/iommu/arm-smmu-impl.c (limited to 'drivers') diff --git a/MAINTAINERS b/MAINTAINERS index 783569e3c4b4..289fb0654c8b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1350,8 +1350,7 @@ M: Will Deacon R: Robin Murphy L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: drivers/iommu/arm-smmu.c -F: drivers/iommu/arm-smmu-v3.c +F: drivers/iommu/arm-smmu* F: drivers/iommu/io-pgtable-arm.c F: drivers/iommu/io-pgtable-arm-v7s.c diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index f13f36ae1af6..a2729aadd300 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o -obj-$(CONFIG_ARM_SMMU) += arm-smmu.o +obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c new file mode 100644 index 000000000000..efeb6d78da17 --- /dev/null +++ b/drivers/iommu/arm-smmu-impl.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Miscellaneous Arm SMMU implementation and integration quirks +// Copyright (C) 2019 Arm Limited + +#define pr_fmt(fmt) "arm-smmu: " fmt + +#include "arm-smmu.h" + + +struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) +{ + return smmu; +} diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index f3b8301a3059..1e8153182830 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -29,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -41,7 +39,6 @@ #include #include #include -#include #include #include @@ -66,9 +63,6 @@ #define TLB_LOOP_TIMEOUT 1000000 /* 1s! 
*/ #define TLB_SPIN_COUNT 10 -/* Maximum number of context banks per SMMU */ -#define ARM_SMMU_MAX_CBS 128 - #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 @@ -86,19 +80,6 @@ module_param(disable_bypass, bool, S_IRUGO); MODULE_PARM_DESC(disable_bypass, "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU."); -enum arm_smmu_arch_version { - ARM_SMMU_V1, - ARM_SMMU_V1_64K, - ARM_SMMU_V2, -}; - -enum arm_smmu_implementation { - GENERIC_SMMU, - ARM_MMU500, - CAVIUM_SMMUV2, - QCOM_SMMUV2, -}; - struct arm_smmu_s2cr { struct iommu_group *group; int count; @@ -136,65 +117,6 @@ struct arm_smmu_master_cfg { #define for_each_cfg_sme(fw, i, idx) \ for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i) -struct arm_smmu_device { - struct device *dev; - - void __iomem *base; - unsigned int numpage; - unsigned int pgshift; - -#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) -#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1) -#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) -#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) -#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) -#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5) -#define ARM_SMMU_FEAT_VMID16 (1 << 6) -#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7) -#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8) -#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9) -#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10) -#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11) -#define ARM_SMMU_FEAT_EXIDS (1 << 12) - u32 features; - -#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) - u32 options; - enum arm_smmu_arch_version version; - enum arm_smmu_implementation model; - - u32 num_context_banks; - u32 num_s2_context_banks; - DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS); - struct arm_smmu_cb *cbs; - atomic_t irptndx; - - u32 num_mapping_groups; - u16 streamid_mask; - u16 smr_mask_mask; - struct arm_smmu_smr *smrs; - struct arm_smmu_s2cr *s2crs; - struct mutex stream_map_mutex; - - unsigned long va_size; - unsigned long ipa_size; - unsigned long pa_size; - unsigned long pgsize_bitmap; - - u32 num_global_irqs; - u32 num_context_irqs; - unsigned int *irqs; - struct clk_bulk_data *clks; - int num_clks; - - u32 cavium_id_base; /* Specific to Cavium */ - - spinlock_t global_sync_lock; - - /* IOMMU core code handle */ - struct iommu_device iommu; -}; - enum arm_smmu_context_fmt { ARM_SMMU_CTX_FMT_NONE, ARM_SMMU_CTX_FMT_AARCH64, @@ -2233,6 +2155,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (err) return err; + smmu = arm_smmu_impl_init(smmu); + if (IS_ERR(smmu)) + return PTR_ERR(smmu); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ioaddr = res->start; smmu->base = devm_ioremap_resource(dev, res); diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index ccc3097a4247..6fea0b0b7e51 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -10,7 +10,14 @@ #ifndef _ARM_SMMU_H #define _ARM_SMMU_H +#include #include +#include +#include +#include +#include +#include +#include /* Configuration registers */ #define ARM_SMMU_GR0_sCR0 0x0 @@ -194,4 +201,86 @@ enum arm_smmu_cbar_type { #define ARM_SMMU_CB_ATSR 0x8f0 #define ATSR_ACTIVE BIT(0) + +/* Maximum number of context banks per SMMU */ +#define ARM_SMMU_MAX_CBS 128 + + +/* Shared driver definitions */ +enum arm_smmu_arch_version { + ARM_SMMU_V1, + ARM_SMMU_V1_64K, + ARM_SMMU_V2, +}; + +enum arm_smmu_implementation { + GENERIC_SMMU, + ARM_MMU500, + CAVIUM_SMMUV2, + 
QCOM_SMMUV2, +}; + +struct arm_smmu_device { + struct device *dev; + + void __iomem *base; + unsigned int numpage; + unsigned int pgshift; + +#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) +#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1) +#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) +#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) +#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) +#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5) +#define ARM_SMMU_FEAT_VMID16 (1 << 6) +#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7) +#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8) +#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9) +#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10) +#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11) +#define ARM_SMMU_FEAT_EXIDS (1 << 12) + u32 features; + +#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) + u32 options; + enum arm_smmu_arch_version version; + enum arm_smmu_implementation model; + + u32 num_context_banks; + u32 num_s2_context_banks; + DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS); + struct arm_smmu_cb *cbs; + atomic_t irptndx; + + u32 num_mapping_groups; + u16 streamid_mask; + u16 smr_mask_mask; + struct arm_smmu_smr *smrs; + struct arm_smmu_s2cr *s2crs; + struct mutex stream_map_mutex; + + unsigned long va_size; + unsigned long ipa_size; + unsigned long pa_size; + unsigned long pgsize_bitmap; + + u32 num_global_irqs; + u32 num_context_irqs; + unsigned int *irqs; + struct clk_bulk_data *clks; + int num_clks; + + u32 cavium_id_base; /* Specific to Cavium */ + + spinlock_t global_sync_lock; + + /* IOMMU core code handle */ + struct iommu_device iommu; +}; + + +/* Implementation details, yay! */ +struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu); + #endif /* _ARM_SMMU_H */ -- cgit v1.2.3 From 6d7dff62afb0c7a880860148a8984d0cddc6e589 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:34 +0100 Subject: iommu/arm-smmu: Move Secure access quirk to implementation Move detection of the Secure access quirk to its new home, trimming it down in the process - time has proven that boolean DT flags are neither ideal nor necessarily sufficient, so it's highly unlikely we'll ever add more, let alone enough to justify the frankly overengineered parsing machinery. 
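In short, all that survives of the options-parsing machinery is a single of_property_read_bool() check in arm_smmu_impl_init(), which swaps in Non-Secure-alias accessors for the GR0 page. A condensed view of the core of the change (the full version is in the diff below):

	if (of_property_read_bool(smmu->dev->of_node,
				  "calxeda,smmu-secure-config-access"))
		smmu->impl = &calxeda_impl;	/* GR0 accesses redirected to the +0x400 aliases */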
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-impl.c | 44 ++++++++++++++++++++ drivers/iommu/arm-smmu.c | 97 ------------------------------------------- drivers/iommu/arm-smmu.h | 72 +++++++++++++++++++++++++++++++- 3 files changed, 114 insertions(+), 99 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c index efeb6d78da17..0657c85580cb 100644 --- a/drivers/iommu/arm-smmu-impl.c +++ b/drivers/iommu/arm-smmu-impl.c @@ -4,10 +4,54 @@ #define pr_fmt(fmt) "arm-smmu: " fmt +#include + #include "arm-smmu.h" +static int arm_smmu_gr0_ns(int offset) +{ + switch(offset) { + case ARM_SMMU_GR0_sCR0: + case ARM_SMMU_GR0_sACR: + case ARM_SMMU_GR0_sGFSR: + case ARM_SMMU_GR0_sGFSYNR0: + case ARM_SMMU_GR0_sGFSYNR1: + case ARM_SMMU_GR0_sGFSYNR2: + return offset + 0x400; + default: + return offset; + } +} + +static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page, + int offset) +{ + if (page == ARM_SMMU_GR0) + offset = arm_smmu_gr0_ns(offset); + return readl_relaxed(arm_smmu_page(smmu, page) + offset); +} + +static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page, + int offset, u32 val) +{ + if (page == ARM_SMMU_GR0) + offset = arm_smmu_gr0_ns(offset); + writel_relaxed(val, arm_smmu_page(smmu, page) + offset); +} + +/* Since we don't care for sGFAR, we can do without 64-bit accessors */ +const struct arm_smmu_impl calxeda_impl = { + .read_reg = arm_smmu_read_ns, + .write_reg = arm_smmu_write_ns, +}; + + struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) { + if (of_property_read_bool(smmu->dev->of_node, + "calxeda,smmu-secure-config-access")) + smmu->impl = &calxeda_impl; + return smmu; } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 1e8153182830..432d781f05f3 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -155,91 +155,10 @@ struct arm_smmu_domain { struct iommu_domain domain; }; -static int arm_smmu_gr0_ns(int offset) -{ - switch(offset) { - case ARM_SMMU_GR0_sCR0: - case ARM_SMMU_GR0_sACR: - case ARM_SMMU_GR0_sGFSR: - case ARM_SMMU_GR0_sGFSYNR0: - case ARM_SMMU_GR0_sGFSYNR1: - case ARM_SMMU_GR0_sGFSYNR2: - return offset + 0x400; - default: - return offset; - } -} - -static void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) -{ - return smmu->base + (n << smmu->pgshift); -} - -static u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset) -{ - if ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) && page == 0) - offset = arm_smmu_gr0_ns(offset); - - return readl_relaxed(arm_smmu_page(smmu, page) + offset); -} - -static void arm_smmu_writel(struct arm_smmu_device *smmu, int page, int offset, - u32 val) -{ - if ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) && page == 0) - offset = arm_smmu_gr0_ns(offset); - - writel_relaxed(val, arm_smmu_page(smmu, page) + offset); -} - -static u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset) -{ - return readq_relaxed(arm_smmu_page(smmu, page) + offset); -} - -static void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, int offset, - u64 val) -{ - writeq_relaxed(val, arm_smmu_page(smmu, page) + offset); -} - -#define ARM_SMMU_GR0 0 -#define ARM_SMMU_GR1 1 -#define ARM_SMMU_CB(s, n) ((s)->numpage + (n)) - -#define arm_smmu_gr0_read(s, o) \ - arm_smmu_readl((s), ARM_SMMU_GR0, (o)) -#define arm_smmu_gr0_write(s, o, v) \ - arm_smmu_writel((s), ARM_SMMU_GR0, (o), (v)) - -#define arm_smmu_gr1_read(s, o) \ - arm_smmu_readl((s), 
ARM_SMMU_GR1, (o)) -#define arm_smmu_gr1_write(s, o, v) \ - arm_smmu_writel((s), ARM_SMMU_GR1, (o), (v)) - -#define arm_smmu_cb_read(s, n, o) \ - arm_smmu_readl((s), ARM_SMMU_CB((s), (n)), (o)) -#define arm_smmu_cb_write(s, n, o, v) \ - arm_smmu_writel((s), ARM_SMMU_CB((s), (n)), (o), (v)) -#define arm_smmu_cb_readq(s, n, o) \ - arm_smmu_readq((s), ARM_SMMU_CB((s), (n)), (o)) -#define arm_smmu_cb_writeq(s, n, o, v) \ - arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v)) - -struct arm_smmu_option_prop { - u32 opt; - const char *prop; -}; - static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0); static bool using_legacy_binding, using_generic_binding; -static struct arm_smmu_option_prop arm_smmu_options[] = { - { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, - { 0, NULL}, -}; - static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu) { if (pm_runtime_enabled(smmu->dev)) @@ -259,20 +178,6 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) return container_of(dom, struct arm_smmu_domain, domain); } -static void parse_driver_options(struct arm_smmu_device *smmu) -{ - int i = 0; - - do { - if (of_property_read_bool(smmu->dev->of_node, - arm_smmu_options[i].prop)) { - smmu->options |= arm_smmu_options[i].opt; - dev_notice(smmu->dev, "option %s\n", - arm_smmu_options[i].prop); - } - } while (arm_smmu_options[++i].opt); -} - static struct device_node *dev_get_dev_node(struct device *dev) { if (dev_is_pci(dev)) { @@ -2091,8 +1996,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, smmu->version = data->version; smmu->model = data->model; - parse_driver_options(smmu); - legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); if (legacy_binding && !using_generic_binding) { if (!using_legacy_binding) diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index 6fea0b0b7e51..d4fd29d70705 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -242,10 +242,9 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_EXIDS (1 << 12) u32 features; -#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) - u32 options; enum arm_smmu_arch_version version; enum arm_smmu_implementation model; + const struct arm_smmu_impl *impl; u32 num_context_banks; u32 num_s2_context_banks; @@ -281,6 +280,75 @@ struct arm_smmu_device { /* Implementation details, yay! 
*/ +struct arm_smmu_impl { + u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset); + void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset, + u32 val); + u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset); + void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset, + u64 val); +}; + +static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) +{ + return smmu->base + (n << smmu->pgshift); +} + +static inline u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset) +{ + if (smmu->impl && unlikely(smmu->impl->read_reg)) + return smmu->impl->read_reg(smmu, page, offset); + return readl_relaxed(arm_smmu_page(smmu, page) + offset); +} + +static inline void arm_smmu_writel(struct arm_smmu_device *smmu, int page, + int offset, u32 val) +{ + if (smmu->impl && unlikely(smmu->impl->write_reg)) + smmu->impl->write_reg(smmu, page, offset, val); + else + writel_relaxed(val, arm_smmu_page(smmu, page) + offset); +} + +static inline u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset) +{ + if (smmu->impl && unlikely(smmu->impl->read_reg64)) + return smmu->impl->read_reg64(smmu, page, offset); + return readq_relaxed(arm_smmu_page(smmu, page) + offset); +} + +static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, + int offset, u64 val) +{ + if (smmu->impl && unlikely(smmu->impl->write_reg64)) + smmu->impl->write_reg64(smmu, page, offset, val); + else + writeq_relaxed(val, arm_smmu_page(smmu, page) + offset); +} + +#define ARM_SMMU_GR0 0 +#define ARM_SMMU_GR1 1 +#define ARM_SMMU_CB(s, n) ((s)->numpage + (n)) + +#define arm_smmu_gr0_read(s, o) \ + arm_smmu_readl((s), ARM_SMMU_GR0, (o)) +#define arm_smmu_gr0_write(s, o, v) \ + arm_smmu_writel((s), ARM_SMMU_GR0, (o), (v)) + +#define arm_smmu_gr1_read(s, o) \ + arm_smmu_readl((s), ARM_SMMU_GR1, (o)) +#define arm_smmu_gr1_write(s, o, v) \ + arm_smmu_writel((s), ARM_SMMU_GR1, (o), (v)) + +#define arm_smmu_cb_read(s, n, o) \ + arm_smmu_readl((s), ARM_SMMU_CB((s), (n)), (o)) +#define arm_smmu_cb_write(s, n, o, v) \ + arm_smmu_writel((s), ARM_SMMU_CB((s), (n)), (o), (v)) +#define arm_smmu_cb_readq(s, n, o) \ + arm_smmu_readq((s), ARM_SMMU_CB((s), (n)), (o)) +#define arm_smmu_cb_writeq(s, n, o, v) \ + arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v)) + struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu); #endif /* _ARM_SMMU_H */ -- cgit v1.2.3 From 3995e18689fda2434863bdf2a8cd19acea4e61f0 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:35 +0100 Subject: iommu/arm-smmu: Add configuration implementation hook Probing the ID registers and setting up the SMMU configuration is an area where overrides and workarounds may well be needed. Indeed, the Cavium workaround detection lives there at the moment, so let's break that out. 
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-impl.c | 34 ++++++++++++++++++++++++++++++++++ drivers/iommu/arm-smmu.c | 17 +++-------------- drivers/iommu/arm-smmu.h | 1 + 3 files changed, 38 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c index 0657c85580cb..696417908793 100644 --- a/drivers/iommu/arm-smmu-impl.c +++ b/drivers/iommu/arm-smmu-impl.c @@ -47,8 +47,42 @@ const struct arm_smmu_impl calxeda_impl = { }; +static int cavium_cfg_probe(struct arm_smmu_device *smmu) +{ + static atomic_t context_count = ATOMIC_INIT(0); + /* + * Cavium CN88xx erratum #27704. + * Ensure ASID and VMID allocation is unique across all SMMUs in + * the system. + */ + smmu->cavium_id_base = atomic_fetch_add(smmu->num_context_banks, + &context_count); + dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n"); + + return 0; +} + +const struct arm_smmu_impl cavium_impl = { + .cfg_probe = cavium_cfg_probe, +}; + + struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) { + /* + * We will inevitably have to combine model-specific implementation + * quirks with platform-specific integration quirks, but everything + * we currently support happens to work out as straightforward + * mutually-exclusive assignments. + */ + switch (smmu->model) { + case CAVIUM_SMMUV2: + smmu->impl = &cavium_impl; + break; + default: + break; + } + if (of_property_read_bool(smmu->dev->of_node, "calxeda,smmu-secure-config-access")) smmu->impl = &calxeda_impl; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 432d781f05f3..362b6b5a28ee 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -155,8 +155,6 @@ struct arm_smmu_domain { struct iommu_domain domain; }; -static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0); - static bool using_legacy_binding, using_generic_binding; static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu) @@ -1804,18 +1802,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) } dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", smmu->num_context_banks, smmu->num_s2_context_banks); - /* - * Cavium CN88xx erratum #27704. - * Ensure ASID and VMID allocation is unique across all SMMUs in - * the system. 
- */ - if (smmu->model == CAVIUM_SMMUV2) { - smmu->cavium_id_base = - atomic_add_return(smmu->num_context_banks, - &cavium_smmu_context_count); - smmu->cavium_id_base -= smmu->num_context_banks; - dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n"); - } smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks, sizeof(*smmu->cbs), GFP_KERNEL); if (!smmu->cbs) @@ -1884,6 +1870,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", smmu->ipa_size, smmu->pa_size); + if (smmu->impl && smmu->impl->cfg_probe) + return smmu->impl->cfg_probe(smmu); + return 0; } diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index d4fd29d70705..f4e90f33fce2 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -287,6 +287,7 @@ struct arm_smmu_impl { u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset); void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset, u64 val); + int (*cfg_probe)(struct arm_smmu_device *smmu); }; static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) -- cgit v1.2.3 From 62b993a36e4c2d60669beb3d1afe038ed44a41ec Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:36 +0100 Subject: iommu/arm-smmu: Add reset implementation hook Reset is an activity rife with implementation-defined poking. Add a corresponding hook, and use it to encapsulate the existing MMU-500 details. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-impl.c | 49 +++++++++++++++++++++++++++++++++++++++++++ drivers/iommu/arm-smmu.c | 39 ++++------------------------------ drivers/iommu/arm-smmu.h | 1 + 3 files changed, 54 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c index 696417908793..4dc8b1c4befb 100644 --- a/drivers/iommu/arm-smmu-impl.c +++ b/drivers/iommu/arm-smmu-impl.c @@ -4,6 +4,7 @@ #define pr_fmt(fmt) "arm-smmu: " fmt +#include #include #include "arm-smmu.h" @@ -67,6 +68,51 @@ const struct arm_smmu_impl cavium_impl = { }; +#define ARM_MMU500_ACTLR_CPRE (1 << 1) + +#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) +#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10) +#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8) + +static int arm_mmu500_reset(struct arm_smmu_device *smmu) +{ + u32 reg, major; + int i; + /* + * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before + * writes to the context bank ACTLRs will stick. And we just hope that + * Secure has also cleared SACR.CACHE_LOCK for this to take effect... + */ + reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7); + major = FIELD_GET(ID7_MAJOR, reg); + reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR); + if (major >= 2) + reg &= ~ARM_MMU500_ACR_CACHE_LOCK; + /* + * Allow unmatched Stream IDs to allocate bypass + * TLB entries for reduced latency. + */ + reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN; + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg); + + /* + * Disable MMU-500's not-particularly-beneficial next-page + * prefetcher for the sake of errata #841119 and #826419. 
+ */ + for (i = 0; i < smmu->num_context_banks; ++i) { + reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); + reg &= ~ARM_MMU500_ACTLR_CPRE; + arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg); + } + + return 0; +} + +const struct arm_smmu_impl arm_mmu500_impl = { + .reset = arm_mmu500_reset, +}; + + struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) { /* @@ -76,6 +122,9 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) * mutually-exclusive assignments. */ switch (smmu->model) { + case ARM_MMU500: + smmu->impl = &arm_mmu500_impl; + break; case CAVIUM_SMMUV2: smmu->impl = &cavium_impl; break; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 362b6b5a28ee..fc98992d120d 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -54,12 +54,6 @@ */ #define QCOM_DUMMY_VAL -1 -#define ARM_MMU500_ACTLR_CPRE (1 << 1) - -#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) -#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10) -#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8) - #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ #define TLB_SPIN_COUNT 10 @@ -1574,7 +1568,7 @@ static struct iommu_ops arm_smmu_ops = { static void arm_smmu_device_reset(struct arm_smmu_device *smmu) { int i; - u32 reg, major; + u32 reg; /* clear global FSR */ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR); @@ -1587,38 +1581,10 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) for (i = 0; i < smmu->num_mapping_groups; ++i) arm_smmu_write_sme(smmu, i); - if (smmu->model == ARM_MMU500) { - /* - * Before clearing ARM_MMU500_ACTLR_CPRE, need to - * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK - * bit is only present in MMU-500r2 onwards. - */ - reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7); - major = FIELD_GET(ID7_MAJOR, reg); - reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR); - if (major >= 2) - reg &= ~ARM_MMU500_ACR_CACHE_LOCK; - /* - * Allow unmatched Stream IDs to allocate bypass - * TLB entries for reduced latency. - */ - reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN; - arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg); - } - /* Make sure all context banks are disabled and clear CB_FSR */ for (i = 0; i < smmu->num_context_banks; ++i) { arm_smmu_write_context_bank(smmu, i); arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT); - /* - * Disable MMU-500's not-particularly-beneficial next-page - * prefetcher for the sake of errata #841119 and #826419. 
- */ - if (smmu->model == ARM_MMU500) { - reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); - reg &= ~ARM_MMU500_ACTLR_CPRE; - arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg); - } } /* Invalidate the TLB, just in case */ @@ -1652,6 +1618,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) if (smmu->features & ARM_SMMU_FEAT_EXIDS) reg |= sCR0_EXIDENABLE; + if (smmu->impl && smmu->impl->reset) + smmu->impl->reset(smmu); + /* Push the button */ arm_smmu_tlb_sync_global(smmu); arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg); diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index f4e90f33fce2..ddafe872a396 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -288,6 +288,7 @@ struct arm_smmu_impl { void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset, u64 val); int (*cfg_probe)(struct arm_smmu_device *smmu); + int (*reset)(struct arm_smmu_device *smmu); }; static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) -- cgit v1.2.3 From ba7e4a08bbf7441664b3d140671db8d08ea15f22 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Aug 2019 19:37:37 +0100 Subject: iommu/arm-smmu: Add context init implementation hook Allocating and initialising a context for a domain is another point where certain implementations are known to want special behaviour. Currently the other half of the Cavium workaround comes into play here, so let's finish the job to get the whole thing right out of the way. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-impl.c | 42 +++++++++++++++++++++++++++++++---- drivers/iommu/arm-smmu.c | 51 ++++++++----------------------------------- drivers/iommu/arm-smmu.h | 42 +++++++++++++++++++++++++++++++++-- 3 files changed, 87 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c index 4dc8b1c4befb..e22e9004f449 100644 --- a/drivers/iommu/arm-smmu-impl.c +++ b/drivers/iommu/arm-smmu-impl.c @@ -48,25 +48,60 @@ const struct arm_smmu_impl calxeda_impl = { }; +struct cavium_smmu { + struct arm_smmu_device smmu; + u32 id_base; +}; + static int cavium_cfg_probe(struct arm_smmu_device *smmu) { static atomic_t context_count = ATOMIC_INIT(0); + struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu); /* * Cavium CN88xx erratum #27704. * Ensure ASID and VMID allocation is unique across all SMMUs in * the system. 
*/ - smmu->cavium_id_base = atomic_fetch_add(smmu->num_context_banks, - &context_count); + cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count); dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n"); return 0; } +int cavium_init_context(struct arm_smmu_domain *smmu_domain) +{ + struct cavium_smmu *cs = container_of(smmu_domain->smmu, + struct cavium_smmu, smmu); + + if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) + smmu_domain->cfg.vmid += cs->id_base; + else + smmu_domain->cfg.asid += cs->id_base; + + return 0; +} + const struct arm_smmu_impl cavium_impl = { .cfg_probe = cavium_cfg_probe, + .init_context = cavium_init_context, }; +struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu) +{ + struct cavium_smmu *cs; + + cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL); + if (!cs) + return ERR_PTR(-ENOMEM); + + cs->smmu = *smmu; + cs->smmu.impl = &cavium_impl; + + devm_kfree(smmu->dev, smmu); + + return &cs->smmu; +} + #define ARM_MMU500_ACTLR_CPRE (1 << 1) @@ -126,8 +161,7 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) smmu->impl = &arm_mmu500_impl; break; case CAVIUM_SMMUV2: - smmu->impl = &cavium_impl; - break; + return cavium_smmu_impl_init(smmu); default: break; } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index fc98992d120d..b8628e2ab579 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -111,44 +110,6 @@ struct arm_smmu_master_cfg { #define for_each_cfg_sme(fw, i, idx) \ for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i) -enum arm_smmu_context_fmt { - ARM_SMMU_CTX_FMT_NONE, - ARM_SMMU_CTX_FMT_AARCH64, - ARM_SMMU_CTX_FMT_AARCH32_L, - ARM_SMMU_CTX_FMT_AARCH32_S, -}; - -struct arm_smmu_cfg { - u8 cbndx; - u8 irptndx; - union { - u16 asid; - u16 vmid; - }; - enum arm_smmu_cbar_type cbar; - enum arm_smmu_context_fmt fmt; -}; -#define INVALID_IRPTNDX 0xff - -enum arm_smmu_domain_stage { - ARM_SMMU_DOMAIN_S1 = 0, - ARM_SMMU_DOMAIN_S2, - ARM_SMMU_DOMAIN_NESTED, - ARM_SMMU_DOMAIN_BYPASS, -}; - -struct arm_smmu_domain { - struct arm_smmu_device *smmu; - struct io_pgtable_ops *pgtbl_ops; - const struct iommu_gather_ops *tlb_ops; - struct arm_smmu_cfg cfg; - enum arm_smmu_domain_stage stage; - bool non_strict; - struct mutex init_mutex; /* Protects smmu pointer */ - spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */ - struct iommu_domain domain; -}; - static bool using_legacy_binding, using_generic_binding; static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu) @@ -749,9 +710,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, } if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) - cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base; + cfg->vmid = cfg->cbndx + 1; else - cfg->asid = cfg->cbndx + smmu->cavium_id_base; + cfg->asid = cfg->cbndx; + + smmu_domain->smmu = smmu; + if (smmu->impl && smmu->impl->init_context) { + ret = smmu->impl->init_context(smmu_domain); + if (ret) + goto out_unlock; + } pgtbl_cfg = (struct io_pgtable_cfg) { .pgsize_bitmap = smmu->pgsize_bitmap, @@ -765,7 +733,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, if (smmu_domain->non_strict) pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; - smmu_domain->smmu = smmu; pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) { ret = -ENOMEM; diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index 
ddafe872a396..611ed742e56f 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -270,14 +271,50 @@ struct arm_smmu_device { struct clk_bulk_data *clks; int num_clks; - u32 cavium_id_base; /* Specific to Cavium */ - spinlock_t global_sync_lock; /* IOMMU core code handle */ struct iommu_device iommu; }; +enum arm_smmu_context_fmt { + ARM_SMMU_CTX_FMT_NONE, + ARM_SMMU_CTX_FMT_AARCH64, + ARM_SMMU_CTX_FMT_AARCH32_L, + ARM_SMMU_CTX_FMT_AARCH32_S, +}; + +struct arm_smmu_cfg { + u8 cbndx; + u8 irptndx; + union { + u16 asid; + u16 vmid; + }; + enum arm_smmu_cbar_type cbar; + enum arm_smmu_context_fmt fmt; +}; +#define INVALID_IRPTNDX 0xff + +enum arm_smmu_domain_stage { + ARM_SMMU_DOMAIN_S1 = 0, + ARM_SMMU_DOMAIN_S2, + ARM_SMMU_DOMAIN_NESTED, + ARM_SMMU_DOMAIN_BYPASS, +}; + +struct arm_smmu_domain { + struct arm_smmu_device *smmu; + struct io_pgtable_ops *pgtbl_ops; + const struct iommu_gather_ops *tlb_ops; + struct arm_smmu_cfg cfg; + enum arm_smmu_domain_stage stage; + bool non_strict; + struct mutex init_mutex; /* Protects smmu pointer */ + spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */ + struct iommu_domain domain; +}; + /* Implementation details, yay! */ struct arm_smmu_impl { @@ -289,6 +326,7 @@ struct arm_smmu_impl { u64 val); int (*cfg_probe)(struct arm_smmu_device *smmu); int (*reset)(struct arm_smmu_device *smmu); + int (*init_context)(struct arm_smmu_domain *smmu_domain); }; static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) -- cgit v1.2.3 From 4b67f1ddcf23d6dc4b5a3a486b758cb91f725e72 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2019 10:58:03 +0100 Subject: iommu/arm-smmu: Make private implementation details static Many of the device-specific implementation details in 'arm-smmu-impl.c' are exposed to other compilation units. Whilst we may require this in the future, let's make it all 'static' for now so that we can expose things on a case-by-case basic. 
Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-impl.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c index e22e9004f449..5c87a38620c4 100644 --- a/drivers/iommu/arm-smmu-impl.c +++ b/drivers/iommu/arm-smmu-impl.c @@ -42,7 +42,7 @@ static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page, } /* Since we don't care for sGFAR, we can do without 64-bit accessors */ -const struct arm_smmu_impl calxeda_impl = { +static const struct arm_smmu_impl calxeda_impl = { .read_reg = arm_smmu_read_ns, .write_reg = arm_smmu_write_ns, }; @@ -68,7 +68,7 @@ static int cavium_cfg_probe(struct arm_smmu_device *smmu) return 0; } -int cavium_init_context(struct arm_smmu_domain *smmu_domain) +static int cavium_init_context(struct arm_smmu_domain *smmu_domain) { struct cavium_smmu *cs = container_of(smmu_domain->smmu, struct cavium_smmu, smmu); @@ -81,12 +81,12 @@ int cavium_init_context(struct arm_smmu_domain *smmu_domain) return 0; } -const struct arm_smmu_impl cavium_impl = { +static const struct arm_smmu_impl cavium_impl = { .cfg_probe = cavium_cfg_probe, .init_context = cavium_init_context, }; -struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu) +static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu) { struct cavium_smmu *cs; @@ -143,7 +143,7 @@ static int arm_mmu500_reset(struct arm_smmu_device *smmu) return 0; } -const struct arm_smmu_impl arm_mmu500_impl = { +static const struct arm_smmu_impl arm_mmu500_impl = { .reset = arm_mmu500_reset, }; -- cgit v1.2.3 From d720e64150c79d14f4faf931604faa1f0755134d Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 20 Aug 2019 12:38:49 +0100 Subject: iommu/arm-smmu: Ensure 64-bit I/O accessors are available on 32-bit CPU As part of the grand SMMU driver refactoring effort, the I/O register accessors were moved into 'arm-smmu.h' in commit 6d7dff62afb0 ("iommu/arm-smmu: Move Secure access quirk to implementation"). On 32-bit architectures (such as ARM), the 64-bit accessors are defined in 'linux/io-64-nonatomic-hi-lo.h', so include this header to fix the build. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 1 - drivers/iommu/arm-smmu.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index b8628e2ab579..523a88842e7f 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index 611ed742e56f..ac9eac966cf5 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From 05cbaf4ddd02b69d78d43481e3813b4579572f71 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2019 13:25:36 +0100 Subject: iommu/arm-smmu-v3: Document ordering guarantees of command insertion It turns out that we've always relied on some subtle ordering guarantees when inserting commands into the SMMUv3 command queue. With the recent changes to elide locking when possible, these guarantees become more subtle and even more important. Add a comment documented the barrier semantics of command insertion so that we don't have to derive the behaviour from scratch each time it comes up on the list. 
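For illustration, a caller that has just updated an STE in memory can rely on these guarantees roughly as follows (the function below is a sketch, not code from the driver, although the command structure and issue helpers are the existing ones):

static void example_sync_ste(struct arm_smmu_device *smmu, __le64 *ste,
			     u64 val, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	/* Plain store: the dma_wmb() before command publication orders it */
	WRITE_ONCE(ste[0], cpu_to_le64(val));

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);	/* SMMU observes the new STE */
	arm_smmu_cmdq_issue_sync(smmu);		/* wait for completion */
}

No explicit barrier is needed between the STE write and the command insertion; that is exactly the first guarantee spelled out in the comment added below.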
Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index b36a99971401..3402b1bc8e94 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1286,6 +1286,22 @@ static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, } } +/* + * This is the actual insertion function, and provides the following + * ordering guarantees to callers: + * + * - There is a dma_wmb() before publishing any commands to the queue. + * This can be relied upon to order prior writes to data structures + * in memory (such as a CD or an STE) before the command. + * + * - On completion of a CMD_SYNC, there is a control dependency. + * This can be relied upon to order subsequent writes to memory (e.g. + * freeing an IOVA) after completion of the CMD_SYNC. + * + * - Command insertion is totally ordered, so if two CPUs each race to + * insert their own list of commands then all of the commands from one + * CPU will appear before any of the commands from the other CPU. + */ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, u64 *cmds, int n, bool sync) { -- cgit v1.2.3 From b5e86196b83fd68e065a7c811ab8925fb0dc3893 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 21 Aug 2019 14:17:00 +0100 Subject: iommu/arm-smmu-v3: Disable detection of ATS and PRI Detecting the ATS capability of the SMMU at probe time introduces a spinlock into the ->unmap() fast path, even when ATS is not actually in use. Furthermore, the ATC invalidation that exists is broken, as it occurs before invalidation of the main SMMU TLB which leaves a window where the ATC can be repopulated with stale entries. Given that ATS is both a new feature and a specialist sport, disable it for now whilst we fix it properly in subsequent patches. Since PRI requires ATS, disable that too. Cc: Fixes: 9ce27afc0830 ("iommu/arm-smmu-v3: Add support for PCI ATS") Acked-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 3402b1bc8e94..7a368059cd7d 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -3295,11 +3295,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) } /* Boolean feature flags */ +#if 0 /* ATS invalidation is slow and broken */ if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI) smmu->features |= ARM_SMMU_FEAT_PRI; if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS) smmu->features |= ARM_SMMU_FEAT_ATS; +#endif if (reg & IDR0_SEV) smmu->features |= ARM_SMMU_FEAT_SEV; -- cgit v1.2.3 From f75d8e33df91ee66fc12820d0da1454b76ebcff9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2019 17:32:18 +0100 Subject: iommu/arm-smmu-v3: Remove boolean bitfield for 'ats_enabled' flag There's really no need for this to be a bitfield, particularly as we don't have bitwise addressing on arm64. 
Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 7a368059cd7d..2be11a11bb8b 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -637,7 +637,7 @@ struct arm_smmu_master { struct list_head domain_head; u32 *sids; unsigned int num_sids; - bool ats_enabled :1; + bool ats_enabled; }; /* SMMU private data for an IOMMU domain */ -- cgit v1.2.3 From 7314ca8699e1defd6f2883f203c0e828a4f51f9f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 21 Aug 2019 12:38:15 +0100 Subject: iommu/arm-smmu-v3: Don't issue CMD_SYNC for zero-length invalidations Calling arm_smmu_tlb_inv_range() with a size of zero, perhaps due to an empty 'iommu_iotlb_gather' structure, should be a NOP. Elide the CMD_SYNC when there is no invalidation to be performed. Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 2be11a11bb8b..b7b3b0ff8ed6 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1977,6 +1977,9 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size, }, }; + if (!size) + return; + if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { cmd.opcode = CMDQ_OP_TLBI_NH_VA; cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid; -- cgit v1.2.3 From bfff88ec1afefb76669d29da9fc90d5ad7c53ec3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2019 14:28:59 +0100 Subject: iommu/arm-smmu-v3: Rework enabling/disabling of ATS for PCI masters To prevent any potential issues arising from speculative Address Translation Requests from an ATS-enabled PCIe endpoint, rework our ATS enabling/disabling logic so that we enable ATS at the SMMU before we enable it at the endpoint, and disable things in the opposite order. 
Reviewed-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 47 +++++++++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index b7b3b0ff8ed6..d7c65dfe42dc 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2286,44 +2286,52 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master) } } -static int arm_smmu_enable_ats(struct arm_smmu_master *master) +static bool arm_smmu_ats_supported(struct arm_smmu_master *master) { - int ret; - size_t stu; struct pci_dev *pdev; struct arm_smmu_device *smmu = master->smmu; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) || !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled()) - return -ENXIO; + return false; pdev = to_pci_dev(master->dev); - if (pdev->untrusted) - return -EPERM; + return !pdev->untrusted && pdev->ats_cap; +} - /* Smallest Translation Unit: log2 of the smallest supported granule */ - stu = __ffs(smmu->pgsize_bitmap); +static void arm_smmu_enable_ats(struct arm_smmu_master *master) +{ + size_t stu; + struct pci_dev *pdev; + struct arm_smmu_device *smmu = master->smmu; - ret = pci_enable_ats(pdev, stu); - if (ret) - return ret; + /* Don't enable ATS at the endpoint if it's not enabled in the STE */ + if (!master->ats_enabled) + return; - master->ats_enabled = true; - return 0; + /* Smallest Translation Unit: log2 of the smallest supported granule */ + stu = __ffs(smmu->pgsize_bitmap); + pdev = to_pci_dev(master->dev); + if (pci_enable_ats(pdev, stu)) + dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); } static void arm_smmu_disable_ats(struct arm_smmu_master *master) { struct arm_smmu_cmdq_ent cmd; - if (!master->ats_enabled || !dev_is_pci(master->dev)) + if (!master->ats_enabled) return; + pci_disable_ats(to_pci_dev(master->dev)); + /* + * Ensure ATS is disabled at the endpoint before we issue the + * ATC invalidation via the SMMU. 
+ */ + wmb(); arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd); arm_smmu_atc_inv_master(master, &cmd); - pci_disable_ats(to_pci_dev(master->dev)); - master->ats_enabled = false; } static void arm_smmu_detach_dev(struct arm_smmu_master *master) @@ -2338,10 +2346,10 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master) list_del(&master->domain_head); spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); + arm_smmu_disable_ats(master); master->domain = NULL; + master->ats_enabled = false; arm_smmu_install_ste_for_dev(master); - - arm_smmu_disable_ats(master); } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) @@ -2386,12 +2394,13 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS) - arm_smmu_enable_ats(master); + master->ats_enabled = arm_smmu_ats_supported(master); if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg); arm_smmu_install_ste_for_dev(master); + arm_smmu_enable_ats(master); out_unlock: mutex_unlock(&smmu_domain->init_mutex); return ret; -- cgit v1.2.3 From 353e3cf8590cf182a9f42e67993de3aca91e8090 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2019 15:12:12 +0100 Subject: iommu/arm-smmu-v3: Fix ATC invalidation ordering wrt main TLBs When invalidating the ATC for an PCIe endpoint using ATS, we must take care to complete invalidation of the main SMMU TLBs beforehand, otherwise the device could immediately repopulate its ATC with stale translations. Hooking the ATC invalidation into ->unmap() as we currently do does the exact opposite: it ensures that the ATC is invalidated *before* the main TLBs, which is bogus. Move ATC invalidation into the actual (leaf) invalidation routines so that it is always called after completing main TLB invalidation. Reviewed-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index d7c65dfe42dc..ca504a60312d 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1961,6 +1961,7 @@ static void arm_smmu_tlb_inv_context(void *cookie) */ arm_smmu_cmdq_issue_cmd(smmu, &cmd); arm_smmu_cmdq_issue_sync(smmu); + arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0); } static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size, @@ -1969,7 +1970,7 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size, { u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS]; struct arm_smmu_device *smmu = smmu_domain->smmu; - unsigned long end = iova + size; + unsigned long start = iova, end = iova + size; int i = 0; struct arm_smmu_cmdq_ent cmd = { .tlbi = { @@ -2001,6 +2002,12 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size, } arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true); + + /* + * Unfortunately, this can't be leaf-only since we may have + * zapped an entire table. 
+ */ + arm_smmu_atc_inv_domain(smmu_domain, 0, start, size); } static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather, @@ -2420,18 +2427,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *gather) { - int ret; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; if (!ops) return 0; - ret = ops->unmap(ops, iova, size, gather); - if (ret && arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size)) - return 0; - - return ret; + return ops->unmap(ops, iova, size, gather); } static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) -- cgit v1.2.3 From cdb8a3c3463563b7bdb6f653bf4b0ffa3a95f366 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2019 16:28:54 +0100 Subject: iommu/arm-smmu-v3: Avoid locking on invalidation path when not using ATS When ATS is not in use, we can avoid taking the 'devices_lock' for the domain on the invalidation path by simply caching the number of ATS masters currently attached. The fiddly part is handling a concurrent ->attach() of an ATS-enabled master to a domain that is being invalidated, but we can handle this using an 'smp_mb()' to ensure that our check of the count is ordered after completion of our prior TLB invalidation. This also makes our ->attach() and ->detach() flows symmetric wrt ATS interactions. Acked-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index ca504a60312d..0e43529d55fe 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -654,6 +654,7 @@ struct arm_smmu_domain { struct io_pgtable_ops *pgtbl_ops; bool non_strict; + atomic_t nr_ats_masters; enum arm_smmu_domain_stage stage; union { @@ -1926,6 +1927,23 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) return 0; + /* + * Ensure that we've completed prior invalidation of the main TLBs + * before we read 'nr_ats_masters' in case of a concurrent call to + * arm_smmu_enable_ats(): + * + * // unmap() // arm_smmu_enable_ats() + * TLBI+SYNC atomic_inc(&nr_ats_masters); + * smp_mb(); [...] + * atomic_read(&nr_ats_masters); pci_enable_ats() // writel() + * + * Ensures that we always see the incremented 'nr_ats_masters' count if + * ATS was enabled at the PCI device before completion of the TLBI. 
+ */ + smp_mb(); + if (!atomic_read(&smmu_domain->nr_ats_masters)) + return 0; + arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd); spin_lock_irqsave(&smmu_domain->devices_lock, flags); @@ -2312,6 +2330,7 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master) size_t stu; struct pci_dev *pdev; struct arm_smmu_device *smmu = master->smmu; + struct arm_smmu_domain *smmu_domain = master->domain; /* Don't enable ATS at the endpoint if it's not enabled in the STE */ if (!master->ats_enabled) @@ -2320,6 +2339,9 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master) /* Smallest Translation Unit: log2 of the smallest supported granule */ stu = __ffs(smmu->pgsize_bitmap); pdev = to_pci_dev(master->dev); + + atomic_inc(&smmu_domain->nr_ats_masters); + arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0); if (pci_enable_ats(pdev, stu)) dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); } @@ -2327,6 +2349,7 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master) static void arm_smmu_disable_ats(struct arm_smmu_master *master) { struct arm_smmu_cmdq_ent cmd; + struct arm_smmu_domain *smmu_domain = master->domain; if (!master->ats_enabled) return; @@ -2339,6 +2362,7 @@ static void arm_smmu_disable_ats(struct arm_smmu_master *master) wmb(); arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd); arm_smmu_atc_inv_master(master, &cmd); + atomic_dec(&smmu_domain->nr_ats_masters); } static void arm_smmu_detach_dev(struct arm_smmu_master *master) @@ -2349,11 +2373,12 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master) if (!smmu_domain) return; + arm_smmu_disable_ats(master); + spin_lock_irqsave(&smmu_domain->devices_lock, flags); list_del(&master->domain_head); spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - arm_smmu_disable_ats(master); master->domain = NULL; master->ats_enabled = false; arm_smmu_install_ste_for_dev(master); @@ -2396,10 +2421,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) master->domain = smmu_domain; - spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_add(&master->domain_head, &smmu_domain->devices); - spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS) master->ats_enabled = arm_smmu_ats_supported(master); @@ -2407,7 +2428,13 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg); arm_smmu_install_ste_for_dev(master); + + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + list_add(&master->domain_head, &smmu_domain->devices); + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); + arm_smmu_enable_ats(master); + out_unlock: mutex_unlock(&smmu_domain->init_mutex); return ret; -- cgit v1.2.3 From a91bcc2b65370e7debf1fc26b93a4c2a54433220 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 21 Aug 2019 18:41:27 +0100 Subject: Revert "iommu/arm-smmu-v3: Disable detection of ATS and PRI" This reverts commit b5e86196b83fd68e065a7c811ab8925fb0dc3893. Now that ATC invalidation is performed in the correct places and without incurring a locking overhead for non-ATS systems, we can re-enable the corresponding SMMU feature detection. 
Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 0e43529d55fe..b8049ea2e455 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -3336,13 +3336,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) } /* Boolean feature flags */ -#if 0 /* ATS invalidation is slow and broken */ if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI) smmu->features |= ARM_SMMU_FEAT_PRI; if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS) smmu->features |= ARM_SMMU_FEAT_ATS; -#endif if (reg & IDR0_SEV) smmu->features |= ARM_SMMU_FEAT_SEV; -- cgit v1.2.3 From faf1498993cdf65fd3a624b7653bc91909135a55 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 19 Aug 2019 15:22:46 +0200 Subject: iommu: Remember when default domain type was set on kernel command line Introduce an extensible concept to remember when certain configuration settings for the IOMMU code have been set on the kernel command line. This will be used later to prevent overwriting these settings with other defaults. Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 70bfbcc09248..0ae34cca0d4a 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -32,6 +32,7 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA; #endif static bool iommu_dma_strict __read_mostly = true; +static u32 iommu_cmd_line __read_mostly; struct iommu_group { struct kobject kobj; @@ -68,6 +69,18 @@ static const char * const iommu_group_resv_type_string[] = { [IOMMU_RESV_SW_MSI] = "msi", }; +#define IOMMU_CMD_LINE_DMA_API BIT(0) + +static void iommu_set_cmd_line_dma_api(void) +{ + iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; +} + +static bool __maybe_unused iommu_cmd_line_dma_api(void) +{ + return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API); +} + #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ struct iommu_group_attribute iommu_group_attr_##_name = \ __ATTR(_name, _mode, _show, _store) @@ -165,6 +178,8 @@ static int __init iommu_set_def_domain_type(char *str) if (ret) return ret; + iommu_set_cmd_line_dma_api(); + iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; return 0; } -- cgit v1.2.3 From 8a69961c7f7583742ab9064feab5ea533a6b1b97 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 19 Aug 2019 15:22:47 +0200 Subject: iommu: Add helpers to set/get default domain type Add a couple of functions to allow changing the default domain type from architecture code and a function for iommu drivers to request whether the default domain is passthrough. 
Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 22 ++++++++++++++++++++++ include/linux/iommu.h | 16 ++++++++++++++++ 2 files changed, 38 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 0ae34cca0d4a..c5e0fc5ffe8b 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2213,6 +2213,28 @@ int iommu_request_dma_domain_for_dev(struct device *dev) return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA); } +void iommu_set_default_passthrough(bool cmd_line) +{ + if (cmd_line) + iommu_set_cmd_line_dma_api(); + + iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; +} + +void iommu_set_default_translated(bool cmd_line) +{ + if (cmd_line) + iommu_set_cmd_line_dma_api(); + + iommu_def_domain_type = IOMMU_DOMAIN_DMA; +} + +bool iommu_default_passthrough(void) +{ + return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; +} +EXPORT_SYMBOL_GPL(iommu_default_passthrough); + const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) { const struct iommu_ops *ops = NULL; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 64ebaff33455..29bac5345563 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -436,6 +436,9 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern int iommu_request_dm_for_dev(struct device *dev); extern int iommu_request_dma_domain_for_dev(struct device *dev); +extern void iommu_set_default_passthrough(bool cmd_line); +extern void iommu_set_default_translated(bool cmd_line); +extern bool iommu_default_passthrough(void); extern struct iommu_resv_region * iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, enum iommu_resv_type type); @@ -736,6 +739,19 @@ static inline int iommu_request_dma_domain_for_dev(struct device *dev) return -ENODEV; } +static inline void iommu_set_default_passthrough(bool cmd_line) +{ +} + +static inline void iommu_set_default_translated(bool cmd_line) +{ +} + +static inline bool iommu_default_passthrough(void) +{ + return true; +} + static inline int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { -- cgit v1.2.3 From adab0b07cbbc73f9fc338e4fc1749714dd093a7c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 19 Aug 2019 15:22:48 +0200 Subject: iommu: Use Functions to set default domain type in iommu_set_def_domain_type() There are functions now to set the default domain type which take care of updating other necessary state. Don't open-code it in iommu_set_def_domain_type() and use those functions instead. Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index c5e0fc5ffe8b..544f44fc08e4 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -178,9 +178,11 @@ static int __init iommu_set_def_domain_type(char *str) if (ret) return ret; - iommu_set_cmd_line_dma_api(); + if (pt) + iommu_set_default_passthrough(true); + else + iommu_set_default_translated(true); - iommu_def_domain_type = pt ? 
IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; return 0; } early_param("iommu.passthrough", iommu_set_def_domain_type); -- cgit v1.2.3 From cc7c8ad9736b5db994b84cbb1347972d28852d3b Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 19 Aug 2019 15:22:49 +0200 Subject: iommu/amd: Request passthrough mode from IOMMU core Get rid of the iommu_pass_through variable and request passthrough mode via the new iommu core function. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 29eeea914660..07512d08dd00 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -436,7 +436,7 @@ static int iommu_init_device(struct device *dev) * invalid address), we ignore the capability for the device so * it'll be forced to go into translation mode. */ - if ((iommu_pass_through || !amd_iommu_force_isolation) && + if ((iommu_default_passthrough() || !amd_iommu_force_isolation) && dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) { struct amd_iommu *iommu; @@ -2226,7 +2226,7 @@ static int amd_iommu_add_device(struct device *dev) BUG_ON(!dev_data); - if (iommu_pass_through || dev_data->iommu_v2) + if (dev_data->iommu_v2) iommu_request_dm_for_dev(dev); /* Domains are initialized for this device - have a look what we ended up with */ @@ -2805,7 +2805,7 @@ int __init amd_iommu_init_api(void) int __init amd_iommu_init_dma_ops(void) { - swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0; + swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0; iommu_detected = 1; if (amd_iommu_unmap_flush) -- cgit v1.2.3 From 6b9a7d3a466fffc97c4b133852d3dd7905510dd6 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 19 Aug 2019 15:22:50 +0200 Subject: iommu/vt-d: Request passthrough mode from IOMMU core Get rid of the iommu_pass_through variable and request passthrough mode via the new iommu core function. Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index d6003b391e2c..587337534b76 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3267,7 +3267,7 @@ static int __init init_dmars(void) iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); } - if (iommu_pass_through) + if (iommu_default_passthrough()) iommu_identity_mapping |= IDENTMAP_ALL; #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA -- cgit v1.2.3 From 5fa9e7c5fa50f27afd71f2f6373179739bfa4034 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 19 Aug 2019 15:22:53 +0200 Subject: iommu: Print default domain type on boot Introduce a subsys_initcall for IOMMU code and use it to print the default domain type at boot. Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 544f44fc08e4..31a66c4600bc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -93,12 +93,40 @@ struct iommu_group_attribute iommu_group_attr_##_name = \ static LIST_HEAD(iommu_device_list); static DEFINE_SPINLOCK(iommu_device_lock); +/* + * Use a function instead of an array here because the domain-type is a + * bit-field, so an array would waste memory. 
+ */ +static const char *iommu_domain_type_str(unsigned int t) +{ + switch (t) { + case IOMMU_DOMAIN_BLOCKED: + return "Blocked"; + case IOMMU_DOMAIN_IDENTITY: + return "Passthrough"; + case IOMMU_DOMAIN_UNMANAGED: + return "Unmanaged"; + case IOMMU_DOMAIN_DMA: + return "Translated"; + default: + return "Unknown"; + } +} + +static int __init iommu_subsys_init(void) +{ + pr_info("Default domain type: %s\n", + iommu_domain_type_str(iommu_def_domain_type)); + + return 0; +} +subsys_initcall(iommu_subsys_init); + int iommu_device_register(struct iommu_device *iommu) { spin_lock(&iommu_device_lock); list_add_tail(&iommu->list, &iommu_device_list); spin_unlock(&iommu_device_lock); - return 0; } -- cgit v1.2.3 From 22bb182c839d8ef6c08cf548feb0451c429216d8 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 19 Aug 2019 15:22:54 +0200 Subject: iommu: Set default domain type at runtime Set the default domain-type at runtime, not at compile-time. This keeps default domain type setting in one place when we have to change it at runtime. Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 31a66c4600bc..9ad1b0af2306 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -26,11 +26,8 @@ static struct kset *iommu_group_kset; static DEFINE_IDA(iommu_group_ida); -#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH -static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; -#else -static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA; -#endif + +static unsigned int iommu_def_domain_type __read_mostly; static bool iommu_dma_strict __read_mostly = true; static u32 iommu_cmd_line __read_mostly; @@ -76,7 +73,7 @@ static void iommu_set_cmd_line_dma_api(void) iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; } -static bool __maybe_unused iommu_cmd_line_dma_api(void) +static bool iommu_cmd_line_dma_api(void) { return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API); } @@ -115,8 +112,18 @@ static const char *iommu_domain_type_str(unsigned int t) static int __init iommu_subsys_init(void) { - pr_info("Default domain type: %s\n", - iommu_domain_type_str(iommu_def_domain_type)); + bool cmd_line = iommu_cmd_line_dma_api(); + + if (!cmd_line) { + if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) + iommu_set_default_passthrough(false); + else + iommu_set_default_translated(false); + } + + pr_info("Default domain type: %s %s\n", + iommu_domain_type_str(iommu_def_domain_type), + cmd_line ? "(set via kernel command line)" : ""); return 0; } -- cgit v1.2.3 From 2cc13bb4f59fa7e66acf5b1b78bdf97d73d3416a Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 19 Aug 2019 15:22:55 +0200 Subject: iommu: Disable passthrough mode when SME is active Using Passthrough mode when SME is active causes certain devices to use the SWIOTLB bounce buffer. The bounce buffer code has an upper limit of 256kb for the size of DMA allocations, which is too small for certain devices and causes them to fail. With this patch we enable IOMMU by default when SME is active in the system, making the default configuration work for more systems than it does now. Users that don't want IOMMUs to be enabled still can disable them with kernel parameters. 
Reviewed-by: Tom Lendacky Tested-by: Tom Lendacky Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 9ad1b0af2306..0f585b614657 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -119,6 +119,11 @@ static int __init iommu_subsys_init(void) iommu_set_default_passthrough(false); else iommu_set_default_translated(false); + + if (iommu_default_passthrough() && sme_active()) { + pr_info("SME detected - Disabling default IOMMU Passthrough\n"); + iommu_set_default_translated(false); + } } pr_info("Default domain type: %s %s\n", -- cgit v1.2.3 From 93d051550ee02eaff9a2541d825605a7bd778027 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Wed, 21 Aug 2019 13:10:04 +0800 Subject: iommu/amd: Override wrong IVRS IOAPIC on Raven Ridge systems Raven Ridge systems may have malfunction touchpad or hang at boot if incorrect IVRS IOAPIC is provided by BIOS. Users already found correct "ivrs_ioapic=" values, let's put them inside kernel to workaround buggy BIOS. BugLink: https://bugs.launchpad.net/bugs/1795292 BugLink: https://bugs.launchpad.net/bugs/1837688 Reported-by: kbuild test robot Signed-off-by: Kai-Heng Feng Signed-off-by: Joerg Roedel --- drivers/iommu/Makefile | 2 +- drivers/iommu/amd_iommu.h | 14 ++++++ drivers/iommu/amd_iommu_init.c | 5 ++- drivers/iommu/amd_iommu_quirks.c | 92 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 111 insertions(+), 2 deletions(-) create mode 100644 drivers/iommu/amd_iommu.h create mode 100644 drivers/iommu/amd_iommu_quirks.c (limited to 'drivers') diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index f13f36ae1af6..c6a277e69848 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o -obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o +obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h new file mode 100644 index 000000000000..12d540d9b59b --- /dev/null +++ b/drivers/iommu/amd_iommu.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef AMD_IOMMU_H +#define AMD_IOMMU_H + +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line); + +#ifdef CONFIG_DMI +void amd_iommu_apply_ivrs_quirks(void); +#else +static void amd_iommu_apply_ivrs_quirks(void) { } +#endif + +#endif diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 4413aa67000e..568c52317757 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -32,6 +32,7 @@ #include #include +#include "amd_iommu.h" #include "amd_iommu_proto.h" #include "amd_iommu_types.h" #include "irq_remapping.h" @@ -1002,7 +1003,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, set_iommu_for_device(iommu, devid); } -static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) { struct devid_map *entry; struct list_head *list; @@ -1153,6 +1154,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, if (ret) return ret; + 
amd_iommu_apply_ivrs_quirks(); + /* * First save the recommended feature enable bits from ACPI */ diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c new file mode 100644 index 000000000000..c235f79b7a20 --- /dev/null +++ b/drivers/iommu/amd_iommu_quirks.c @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * Quirks for AMD IOMMU + * + * Copyright (C) 2019 Kai-Heng Feng + */ + +#ifdef CONFIG_DMI +#include + +#include "amd_iommu.h" + +#define IVHD_SPECIAL_IOAPIC 1 + +struct ivrs_quirk_entry { + u8 id; + u16 devid; +}; + +enum { + DELL_INSPIRON_7375 = 0, + DELL_LATITUDE_5495, + LENOVO_IDEAPAD_330S_15ARR, +}; + +static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = { + /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */ + [DELL_INSPIRON_7375] = { + { .id = 4, .devid = 0xa0 }, + { .id = 5, .devid = 0x2 }, + {} + }, + /* ivrs_ioapic[4]=00:14.0 */ + [DELL_LATITUDE_5495] = { + { .id = 4, .devid = 0xa0 }, + {} + }, + /* ivrs_ioapic[32]=00:14.0 */ + [LENOVO_IDEAPAD_330S_15ARR] = { + { .id = 32, .devid = 0xa0 }, + {} + }, + {} +}; + +static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d) +{ + const struct ivrs_quirk_entry *i; + + for (i = d->driver_data; i->id != 0 && i->devid != 0; i++) + add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0); + + return 0; +} + +static const struct dmi_system_id ivrs_quirks[] __initconst = { + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Dell Inspiron 7375", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375], + }, + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Dell Latitude 5495", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495], + }, + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Lenovo ideapad 330S-15ARR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "81FB"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR], + }, + {} +}; + +void __init amd_iommu_apply_ivrs_quirks(void) +{ + dmi_check_system(ivrs_quirks); +} +#endif -- cgit v1.2.3 From 3d708895325b78506e8daf00ef31549476e8586a Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Wed, 28 Aug 2019 17:39:43 -0400 Subject: iommu/amd: Silence warnings under memory pressure When running heavy memory pressure workloads, the system is throwing endless warnings, smartpqi 0000:23:00.0: AMD-Vi: IOMMU mapping error in map_sg (io-pages: 5 reason: -12) Hardware name: HPE ProLiant DL385 Gen10/ProLiant DL385 Gen10, BIOS A40 07/10/2019 swapper/10: page allocation failure: order:0, mode:0xa20(GFP_ATOMIC), nodemask=(null),cpuset=/,mems_allowed=0,4 Call Trace: dump_stack+0x62/0x9a warn_alloc.cold.43+0x8a/0x148 __alloc_pages_nodemask+0x1a5c/0x1bb0 get_zeroed_page+0x16/0x20 iommu_map_page+0x477/0x540 map_sg+0x1ce/0x2f0 scsi_dma_map+0xc6/0x160 pqi_raid_submit_scsi_cmd_with_io_request+0x1c3/0x470 [smartpqi] do_IRQ+0x81/0x170 common_interrupt+0xf/0xf because the allocation could fail from iommu_map_page(), and the volume of this call could be huge which may generate a lot of serial console output and cosumes all CPUs. Fix it by silencing the warning in this call site, and there is still a dev_err() later to notify the failure. 
Signed-off-by: Qian Cai Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 008da21a2592..d365ee449a2c 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2547,7 +2547,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, bus_addr = address + s->dma_address + (j << PAGE_SHIFT); phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT); - ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC); + ret = iommu_map_page(domain, bus_addr, phys_addr, + PAGE_SIZE, prot, + GFP_ATOMIC | __GFP_NOWARN); if (ret) goto out_unmap; -- cgit v1.2.3 From 0d87308cca2c124f9bce02383f1d9632c9be89c4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 28 Aug 2019 06:13:38 -0700 Subject: iommu/iova: Avoid false sharing on fq_timer_on In commit 14bd9a607f90 ("iommu/iova: Separate atomic variables to improve performance") Jinyu Qi identified that the atomic_cmpxchg() in queue_iova() was causing a performance loss and moved critical fields so that the false sharing would not impact them. However, avoiding the false sharing in the first place seems easy. We should attempt the atomic_cmpxchg() no more than 100 times per second. Adding an atomic_read() will keep the cache line mostly shared. This false sharing came with commit 9a005a800ae8 ("iommu/iova: Add flush timer"). Signed-off-by: Eric Dumazet Fixes: 9a005a800ae8 ('iommu/iova: Add flush timer') Cc: Jinyu Qi Cc: Joerg Roedel Acked-by: Robin Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/iova.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 3e1a8a675572..41c605b0058f 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -577,7 +577,9 @@ void queue_iova(struct iova_domain *iovad, spin_unlock_irqrestore(&fq->lock, flags); - if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0) + /* Avoid false sharing as much as possible. */ + if (!atomic_read(&iovad->fq_timer_on) && + !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1)) mod_timer(&iovad->fq_timer, jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT)); } -- cgit v1.2.3 From 2c70010867f164d1b30e787e360e05d10cc40046 Mon Sep 17 00:00:00 2001 From: Nadav Amit Date: Tue, 20 Aug 2019 01:53:17 -0700 Subject: iommu/vt-d: Fix wrong analysis whether devices share the same bus set_msi_sid_cb() is used to determine whether device aliases share the same bus, but it can provide false indications that aliases use the same bus when in fact they do not. The reason is that set_msi_sid_cb() assumes that pdev is fixed, while actually pci_for_each_dma_alias() can call fn() when pdev is set to a subordinate device. As a result, running a VM on ESX with VT-d emulation enabled can result in log warnings such as: DMAR: [INTR-REMAP] Request device [00:11.0] fault index 3b [fault reason 38] Blocked an interrupt request due to source-id verification failure This seems to cause additional ATA errors such as: ata3.00: qc timeout (cmd 0xa1) ata3.00: failed to IDENTIFY (I/O error, err_mask=0x4) These timeouts also cause boot to be much longer and other errors. Fix it by comparing the alias with the previous one instead.
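The fix works because the u16 alias handed to the callback is a complete requester ID with the bus number in its upper byte, so consecutive aliases can be compared directly and pdev never needs to be trusted. A self-contained sketch of that bookkeeping (plain user-space C; the struct and BUS_NUM() macro below only mirror the idea of PCI_BUS_NUM() and the kernel's set_msi_sid_data, they are not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

#define BUS_NUM(x) (((x) >> 8) & 0xff) /* modeled after PCI_BUS_NUM() */

struct sid_data {
        uint16_t alias; /* previous alias seen */
        int count;
        int busmatch_count;
};

/* Called once per DMA alias; compares only against the previous alias. */
static void see_alias(struct sid_data *d, uint16_t alias)
{
        if (d->count == 0 || BUS_NUM(alias) == BUS_NUM(d->alias))
                d->busmatch_count++;
        d->alias = alias;
        d->count++;
}

int main(void)
{
        struct sid_data d = { 0 };
        uint16_t aliases[] = { 0x0200, 0x0208 }; /* 02:00.0 and 02:01.0 */

        for (int i = 0; i < 2; i++)
                see_alias(&d, aliases[i]);

        /* count == busmatch_count: every alias sits on the same bus */
        printf("count=%d busmatch=%d\n", d.count, d.busmatch_count);
        return 0;
}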
Fixes: 3f0c625c6ae71 ("iommu/vt-d: Allow interrupts from the entire bus for aliased devices") Cc: stable@vger.kernel.org Cc: Logan Gunthorpe Cc: David Woodhouse Cc: Joerg Roedel Cc: Jacob Pan Signed-off-by: Nadav Amit Reviewed-by: Logan Gunthorpe Signed-off-by: Joerg Roedel --- drivers/iommu/intel_irq_remapping.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 4786ca061e31..81e43c1df7ec 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -376,13 +376,13 @@ static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque) { struct set_msi_sid_data *data = opaque; + if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias)) + data->busmatch_count++; + data->pdev = pdev; data->alias = alias; data->count++; - if (PCI_BUS_NUM(alias) == pdev->bus->number) - data->busmatch_count++; - return 0; } -- cgit v1.2.3 From 4dbd258ff63e0597ee8fb44d277c6c701f5019d9 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Wed, 21 Aug 2019 14:09:40 +0200 Subject: iommu: Revisit iommu_insert_resv_region() implementation Current implementation is recursive and in case of allocation failure the existing @regions list is altered. A non recursive version looks better for maintainability and simplifies the error handling. We use a separate stack for overlapping segment merging. The elements are sorted by start address and then by type, if their start address match. Note this new implementation may change the region order of appearance in /sys/kernel/iommu_groups//reserved_regions files but this order has never been documented, see commit bc7d12b91bd3 ("iommu: Implement reserved_regions iommu-group sysfs file"). Signed-off-by: Eric Auger Reviewed-by: Christoph Hellwig Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 96 +++++++++++++++++++++++++-------------------------- 1 file changed, 47 insertions(+), 49 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 0f585b614657..b6980938882e 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -286,60 +286,58 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) * @new: new region to insert * @regions: list of regions * - * The new element is sorted by address with respect to the other - * regions of the same type. In case it overlaps with another - * region of the same type, regions are merged. In case it - * overlaps with another region of different type, regions are - * not merged. + * Elements are sorted by start address and overlapping segments + * of the same type are merged. 
*/ -static int iommu_insert_resv_region(struct iommu_resv_region *new, - struct list_head *regions) +int iommu_insert_resv_region(struct iommu_resv_region *new, + struct list_head *regions) { - struct iommu_resv_region *region; - phys_addr_t start = new->start; - phys_addr_t end = new->start + new->length - 1; - struct list_head *pos = regions->next; - - while (pos != regions) { - struct iommu_resv_region *entry = - list_entry(pos, struct iommu_resv_region, list); - phys_addr_t a = entry->start; - phys_addr_t b = entry->start + entry->length - 1; - int type = entry->type; - - if (end < a) { - goto insert; - } else if (start > b) { - pos = pos->next; - } else if ((start >= a) && (end <= b)) { - if (new->type == type) - return 0; - else - pos = pos->next; + struct iommu_resv_region *iter, *tmp, *nr, *top; + LIST_HEAD(stack); + + nr = iommu_alloc_resv_region(new->start, new->length, + new->prot, new->type); + if (!nr) + return -ENOMEM; + + /* First add the new element based on start address sorting */ + list_for_each_entry(iter, regions, list) { + if (nr->start < iter->start || + (nr->start == iter->start && nr->type <= iter->type)) + break; + } + list_add_tail(&nr->list, &iter->list); + + /* Merge overlapping segments of type nr->type in @regions, if any */ + list_for_each_entry_safe(iter, tmp, regions, list) { + phys_addr_t top_end, iter_end = iter->start + iter->length - 1; + + /* no merge needed on elements of different types than @nr */ + if (iter->type != nr->type) { + list_move_tail(&iter->list, &stack); + continue; + } + + /* look for the last stack element of same type as @iter */ + list_for_each_entry_reverse(top, &stack, list) + if (top->type == iter->type) + goto check_overlap; + + list_move_tail(&iter->list, &stack); + continue; + +check_overlap: + top_end = top->start + top->length - 1; + + if (iter->start > top_end + 1) { + list_move_tail(&iter->list, &stack); } else { - if (new->type == type) { - phys_addr_t new_start = min(a, start); - phys_addr_t new_end = max(b, end); - int ret; - - list_del(&entry->list); - entry->start = new_start; - entry->length = new_end - new_start + 1; - ret = iommu_insert_resv_region(entry, regions); - kfree(entry); - return ret; - } else { - pos = pos->next; - } + top->length = max(top_end, iter_end) - top->start + 1; + list_del(&iter->list); + kfree(iter); } } -insert: - region = iommu_alloc_resv_region(new->start, new->length, - new->prot, new->type); - if (!region) - return -ENOMEM; - - list_add_tail(®ion->list, pos); + list_splice(&stack, regions); return 0; } -- cgit v1.2.3 From cecdce9d7eed33ce24da8dd4a2151b68d5c17865 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:47 +0800 Subject: iommu/mediatek: Use a struct as the platform data Use a struct as the platform special data instead of the enumeration. This is a prepare patch for adding mt8183 iommu support. Signed-off-by: Yong Wu Reviewed-by: Matthias Brugger Reviewed-by: Evan Green Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 24 ++++++++++++++++-------- drivers/iommu/mtk_iommu.h | 6 +++++- 2 files changed, 21 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 82e4be4dfdaf..c6e6dc343771 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -46,7 +46,7 @@ #define REG_MMU_CTRL_REG 0x110 #define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4) #define F_MMU_TF_PROTECT_SEL_SHIFT(data) \ - ((data)->m4u_plat == M4U_MT2712 ? 4 : 5) + ((data)->plat_data->m4u_plat == M4U_MT2712 ? 
4 : 5) /* It's named by F_MMU_TF_PROT_SEL in mt2712. */ #define F_MMU_TF_PROTECT_SEL(prot, data) \ (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data)) @@ -512,7 +512,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) } regval = F_MMU_TF_PROTECT_SEL(2, data); - if (data->m4u_plat == M4U_MT8173) + if (data->plat_data->m4u_plat == M4U_MT8173) regval |= F_MMU_PREFETCH_RT_REPLACE_MOD; writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); @@ -533,14 +533,14 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) F_INT_PRETETCH_TRANSATION_FIFO_FAULT; writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL); - if (data->m4u_plat == M4U_MT8173) + if (data->plat_data->m4u_plat == M4U_MT8173) regval = (data->protect_base >> 1) | (data->enable_4GB << 31); else regval = lower_32_bits(data->protect_base) | upper_32_bits(data->protect_base); writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR); - if (data->enable_4GB && data->m4u_plat != M4U_MT8173) { + if (data->enable_4GB && data->plat_data->m4u_plat != M4U_MT8173) { /* * If 4GB mode is enabled, the validate PA range is from * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30]. @@ -551,7 +551,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) writel_relaxed(0, data->base + REG_MMU_DCM_DIS); /* It's MISC control register whose default value is ok except mt8173.*/ - if (data->m4u_plat == M4U_MT8173) + if (data->plat_data->m4u_plat == M4U_MT8173) writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE); if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, @@ -584,7 +584,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (!data) return -ENOMEM; data->dev = dev; - data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev); + data->plat_data = of_device_get_match_data(dev); /* Protect memory. HW will access here while translation fault.*/ protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL); @@ -732,9 +732,17 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) }; +static const struct mtk_iommu_plat_data mt2712_data = { + .m4u_plat = M4U_MT2712, +}; + +static const struct mtk_iommu_plat_data mt8173_data = { + .m4u_plat = M4U_MT8173, +}; + static const struct of_device_id mtk_iommu_of_ids[] = { - { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712}, - { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173}, + { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, + { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, {} }; diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 59337323db58..9725b083c62f 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -32,6 +32,10 @@ enum mtk_iommu_plat { M4U_MT8173, }; +struct mtk_iommu_plat_data { + enum mtk_iommu_plat m4u_plat; +}; + struct mtk_iommu_domain; struct mtk_iommu_data { @@ -48,7 +52,7 @@ struct mtk_iommu_data { bool tlb_flush_active; struct iommu_device iommu; - enum mtk_iommu_plat m4u_plat; + const struct mtk_iommu_plat_data *plat_data; struct list_head list; }; -- cgit v1.2.3 From 2e9b0908f5aa26336b2a39091db1a10402054128 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:48 +0800 Subject: memory: mtk-smi: Use a general config_port interface The config_port of mt2712 and mt8183 are the same. Use a general config_port interface instead. 
In addition, in mt2712, larb8 and larb9 are the bdpsys larbs, which are not normal larbs; their register space differs from the normal one, thus we can not call the general config_port for them. In mt8183, IPU0/1 and CCU connect with smi-common directly; they also are not normal larbs. Hence, we add a "larb_direct_to_common_mask" for these larbs which connect to smi-common directly. This is also a preparing patch for adding mt8183 SMI support. Signed-off-by: Yong Wu Reviewed-by: Matthias Brugger Reviewed-by: Evan Green Signed-off-by: Joerg Roedel --- drivers/memory/mtk-smi.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 42ab43affbff..14f70cf00210 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -45,6 +45,7 @@ struct mtk_smi_larb_gen { bool need_larbid; int port_in_larb[MTK_LARB_NR_MAX + 1]; void (*config_port)(struct device *); + unsigned int larb_direct_to_common_mask; }; struct mtk_smi { @@ -168,17 +169,13 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data) return -ENODEV; } -static void mtk_smi_larb_config_port_mt2712(struct device *dev) +static void mtk_smi_larb_config_port_gen2_general(struct device *dev) { struct mtk_smi_larb *larb = dev_get_drvdata(dev); u32 reg; int i; - /* - * larb 8/9 is the bdpsys larb, the iommu_en is enabled defaultly. - * Don't need to set it again. - */ - if (larb->larbid == 8 || larb->larbid == 9) + if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask) return; for_each_set_bit(i, (unsigned long *)larb->mmu, 32) { @@ -253,7 +250,8 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = { static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = { .need_larbid = true, - .config_port = mtk_smi_larb_config_port_mt2712, + .config_port = mtk_smi_larb_config_port_gen2_general, + .larb_direct_to_common_mask = BIT(8) | BIT(9), /* bdpsys */ }; static const struct of_device_id mtk_smi_larb_of_ids[] = { -- cgit v1.2.3 From 42d42c76f8d2225aaed55997949d37424906e188 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:49 +0800 Subject: memory: mtk-smi: Use a struct for the platform data for smi-common Use a struct as the platform-specific data instead of the enumeration. There is also a minor change: the position of the "enum mtk_smi_gen" definition is moved, because "struct mtk_smi_common_plat" must be defined before it is referred to. This is a preparing patch for mt8183.
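The payoff of the struct is that each later per-SoC difference becomes one more field in the platform data rather than another comparison against an enum value scattered through the code. A rough user-space model of the match-data pattern (in the kernel this is the of_device_id .data pointer retrieved with of_device_get_match_data(); the lookup function below is only a stand-in):

#include <stdio.h>
#include <string.h>

enum mtk_smi_gen { SMI_GEN1, SMI_GEN2 };

struct smi_common_plat {
        enum mtk_smi_gen gen;
        int has_gals; /* later SoCs just add fields here */
};

struct of_id {
        const char *compatible;
        const struct smi_common_plat *data;
};

static const struct smi_common_plat gen1 = { .gen = SMI_GEN1 };
static const struct smi_common_plat gen2 = { .gen = SMI_GEN2 };

static const struct of_id ids[] = {
        { "mediatek,mt2701-smi-common", &gen1 },
        { "mediatek,mt2712-smi-common", &gen2 },
        { NULL, NULL },
};

/* stand-in for of_device_get_match_data() */
static const struct smi_common_plat *get_match_data(const char *compat)
{
        for (const struct of_id *id = ids; id->compatible; id++)
                if (!strcmp(id->compatible, compat))
                        return id->data;
        return NULL;
}

int main(void)
{
        const struct smi_common_plat *plat =
                get_match_data("mediatek,mt2712-smi-common");

        if (plat)
                printf("gen2: %d\n", plat->gen == SMI_GEN2);
        return 0;
}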
Signed-off-by: Yong Wu Reviewed-by: Matthias Brugger Reviewed-by: Evan Green Signed-off-by: Joerg Roedel --- drivers/memory/mtk-smi.c | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 14f70cf00210..47df7d04009f 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -41,6 +41,15 @@ #define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4)) #define F_MMU_EN BIT(0) +enum mtk_smi_gen { + MTK_SMI_GEN1, + MTK_SMI_GEN2 +}; + +struct mtk_smi_common_plat { + enum mtk_smi_gen gen; +}; + struct mtk_smi_larb_gen { bool need_larbid; int port_in_larb[MTK_LARB_NR_MAX + 1]; @@ -53,6 +62,8 @@ struct mtk_smi { struct clk *clk_apb, *clk_smi; struct clk *clk_async; /*only needed by mt2701*/ void __iomem *smi_ao_base; + + const struct mtk_smi_common_plat *plat; }; struct mtk_smi_larb { /* larb: local arbiter */ @@ -64,11 +75,6 @@ struct mtk_smi_larb { /* larb: local arbiter */ u32 *mmu; }; -enum mtk_smi_gen { - MTK_SMI_GEN1, - MTK_SMI_GEN2 -}; - static int mtk_smi_enable(const struct mtk_smi *smi) { int ret; @@ -343,18 +349,26 @@ static struct platform_driver mtk_smi_larb_driver = { } }; +static const struct mtk_smi_common_plat mtk_smi_common_gen1 = { + .gen = MTK_SMI_GEN1, +}; + +static const struct mtk_smi_common_plat mtk_smi_common_gen2 = { + .gen = MTK_SMI_GEN2, +}; + static const struct of_device_id mtk_smi_common_of_ids[] = { { .compatible = "mediatek,mt8173-smi-common", - .data = (void *)MTK_SMI_GEN2 + .data = &mtk_smi_common_gen2, }, { .compatible = "mediatek,mt2701-smi-common", - .data = (void *)MTK_SMI_GEN1 + .data = &mtk_smi_common_gen1, }, { .compatible = "mediatek,mt2712-smi-common", - .data = (void *)MTK_SMI_GEN2 + .data = &mtk_smi_common_gen2, }, {} }; @@ -364,13 +378,13 @@ static int mtk_smi_common_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct mtk_smi *common; struct resource *res; - enum mtk_smi_gen smi_gen; int ret; common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); if (!common) return -ENOMEM; common->dev = dev; + common->plat = of_device_get_match_data(dev); common->clk_apb = devm_clk_get(dev, "apb"); if (IS_ERR(common->clk_apb)) @@ -386,8 +400,7 @@ static int mtk_smi_common_probe(struct platform_device *pdev) * clock into emi clock domain, but for mtk smi gen2, there's no smi ao * base. */ - smi_gen = (enum mtk_smi_gen)of_device_get_match_data(dev); - if (smi_gen == MTK_SMI_GEN1) { + if (common->plat->gen == MTK_SMI_GEN1) { res = platform_get_resource(pdev, IORESOURCE_MEM, 0); common->smi_ao_base = devm_ioremap_resource(dev, res); if (IS_ERR(common->smi_ao_base)) -- cgit v1.2.3 From 76ce65464fcd2c21db84391572b7938b716aceb0 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:50 +0800 Subject: iommu/mediatek: Fix iova_to_phys PA start for 4GB mode In M4U 4GB mode, the physical address is remapped as below: CPU Physical address: ==================== 0 1G 2G 3G 4G 5G |---A---|---B---|---C---|---D---|---E---| +--I/O--+------------Memory-------------+ IOMMU output physical address: ============================= 4G 5G 6G 7G 8G |---E---|---B---|---C---|---D---| +------------Memory-------------+ The Region 'A'(I/O) can not be mapped by M4U; For Region 'B'/'C'/'D', the bit32 of the CPU physical address always is needed to set, and for Region 'E', the CPU physical address keep as is. something looks like this: CPU PA -> M4U OUTPUT PA 0x4000_0000 0x1_4000_0000 (Add bit32) 0x8000_0000 0x1_8000_0000 ... 
0xc000_0000 0x1_c000_0000 ... 0x1_0000_0000 0x1_0000_0000 (No change) Additionally, the iommu consumers always use the CPU phyiscal address. The PA in the iova_to_phys that is got from v7s always is u32, But from the CPU point of view, PA only need add BIT(32) when PA < 0x4000_0000. Fixes: 30e2fccf9512 ("iommu/mediatek: Enlarge the validate PA range for 4GB mode") Signed-off-by: Yong Wu Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index c6e6dc343771..9ba270620767 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -107,6 +107,30 @@ struct mtk_iommu_domain { static const struct iommu_ops mtk_iommu_ops; +/* + * In M4U 4GB mode, the physical address is remapped as below: + * + * CPU Physical address: + * ==================== + * + * 0 1G 2G 3G 4G 5G + * |---A---|---B---|---C---|---D---|---E---| + * +--I/O--+------------Memory-------------+ + * + * IOMMU output physical address: + * ============================= + * + * 4G 5G 6G 7G 8G + * |---E---|---B---|---C---|---D---| + * +------------Memory-------------+ + * + * The Region 'A'(I/O) can NOT be mapped by M4U; For Region 'B'/'C'/'D', the + * bit32 of the CPU physical address always is needed to set, and for Region + * 'E', the CPU physical address keep as is. + * Additionally, The iommu consumers always use the CPU phyiscal address. + */ +#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x40000000 + static LIST_HEAD(m4ulist); /* List all the M4U HWs */ #define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list) @@ -401,7 +425,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, pa = dom->iop->iova_to_phys(dom->iop, iova); spin_unlock_irqrestore(&dom->pgtlock, flags); - if (data->enable_4GB) + if (data->enable_4GB && pa < MTK_IOMMU_4GB_MODE_REMAP_BASE) pa |= BIT_ULL(32); return pa; -- cgit v1.2.3 From 5950b9541b509037aeb9e7b95f88a692bf3f46a3 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:51 +0800 Subject: iommu/io-pgtable-arm-v7s: Add paddr_to_iopte and iopte_to_paddr helpers Add two helper functions: paddr_to_iopte and iopte_to_paddr. 
Signed-off-by: Yong Wu Reviewed-by: Robin Murphy Reviewed-by: Evan Green Signed-off-by: Joerg Roedel --- drivers/iommu/io-pgtable-arm-v7s.c | 45 ++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 0fc8dfab2abf..72f1880e0c9d 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -169,18 +169,38 @@ struct arm_v7s_io_pgtable { spinlock_t split_lock; }; +static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl); + static dma_addr_t __arm_v7s_dma_addr(void *pages) { return (dma_addr_t)virt_to_phys(pages); } -static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl) +static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl, + struct io_pgtable_cfg *cfg) { + return paddr & ARM_V7S_LVL_MASK(lvl); +} + +static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl, + struct io_pgtable_cfg *cfg) +{ + arm_v7s_iopte mask; + if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) - pte &= ARM_V7S_TABLE_MASK; + mask = ARM_V7S_TABLE_MASK; + else if (arm_v7s_pte_is_cont(pte, lvl)) + mask = ARM_V7S_LVL_MASK(lvl) * ARM_V7S_CONT_PAGES; else - pte &= ARM_V7S_LVL_MASK(lvl); - return phys_to_virt(pte); + mask = ARM_V7S_LVL_MASK(lvl); + + return pte & mask; +} + +static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl, + struct arm_v7s_io_pgtable *data) +{ + return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg)); } static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, @@ -396,7 +416,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, if (num_entries > 1) pte = arm_v7s_pte_to_cont(pte, lvl); - pte |= paddr & ARM_V7S_LVL_MASK(lvl); + pte |= paddr_to_iopte(paddr, lvl, cfg); __arm_v7s_set_pte(ptep, pte, num_entries, cfg); return 0; @@ -462,7 +482,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, } if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { - cptep = iopte_deref(pte, lvl); + cptep = iopte_deref(pte, lvl, data); } else if (pte) { /* We require an unmap first */ WARN_ON(!selftest_running); @@ -512,7 +532,8 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop) arm_v7s_iopte pte = data->pgd[i]; if (ARM_V7S_PTE_IS_TABLE(pte, 1)) - __arm_v7s_free_table(iopte_deref(pte, 1), 2, data); + __arm_v7s_free_table(iopte_deref(pte, 1, data), + 2, data); } __arm_v7s_free_table(data->pgd, 1, data); kmem_cache_destroy(data->l2_tables); @@ -582,7 +603,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, if (!ARM_V7S_PTE_IS_TABLE(pte, 1)) return 0; - tablep = iopte_deref(pte, 1); + tablep = iopte_deref(pte, 1, data); return __arm_v7s_unmap(data, iova, size, 2, tablep); } @@ -641,7 +662,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, io_pgtable_tlb_add_flush(iop, iova, blk_size, ARM_V7S_BLOCK_SIZE(lvl + 1), false); io_pgtable_tlb_sync(iop); - ptep = iopte_deref(pte[i], lvl); + ptep = iopte_deref(pte[i], lvl, data); __arm_v7s_free_table(ptep, lvl + 1, data); } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { /* @@ -666,7 +687,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, } /* Keep on walkin' */ - ptep = iopte_deref(pte[0], lvl); + ptep = iopte_deref(pte[0], lvl, data); return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep); } @@ -692,7 +713,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, do { ptep += ARM_V7S_LVL_IDX(iova, ++lvl); pte = READ_ONCE(*ptep); - ptep = iopte_deref(pte, lvl); + ptep = 
iopte_deref(pte, lvl, data); } while (ARM_V7S_PTE_IS_TABLE(pte, lvl)); if (!ARM_V7S_PTE_IS_VALID(pte)) @@ -701,7 +722,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, mask = ARM_V7S_LVL_MASK(lvl); if (arm_v7s_pte_is_cont(pte, lvl)) mask *= ARM_V7S_CONT_PAGES; - return (pte & mask) | (iova & ~mask); + return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask); } static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, -- cgit v1.2.3 From 7f315c9da9f5e5f9ba720ca4b5e04e5ee91833f9 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:52 +0800 Subject: iommu/io-pgtable-arm-v7s: Use ias/oas to check the valid iova/pa Use ias/oas to check the valid iova/pa. Synchronize this checking with io-pgtable-arm.c. Signed-off-by: Yong Wu Acked-by: Will Deacon Signed-off-by: Joerg Roedel --- drivers/iommu/io-pgtable-arm-v7s.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 72f1880e0c9d..fa1b38f4c271 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -504,7 +504,8 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, if (!(prot & (IOMMU_READ | IOMMU_WRITE))) return 0; - if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr))) + if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || + paddr >= (1ULL << data->iop.cfg.oas))) return -ERANGE; ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); -- cgit v1.2.3 From 73d50811bc91d2a173213a78b6b43ac762f6cc54 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:53 +0800 Subject: iommu/io-pgtable-arm-v7s: Rename the quirk from MTK_4GB to MTK_EXT In previous mt2712/mt8173, MediaTek extend the v7s to support 4GB dram. But in the latest mt8183, We extend it to support the PA up to 34bit. Then the "MTK_4GB" name is not so fit, This patch only change the quirk name to "MTK_EXT". Signed-off-by: Yong Wu Acked-by: Will Deacon Signed-off-by: Joerg Roedel --- drivers/iommu/io-pgtable-arm-v7s.c | 6 +++--- drivers/iommu/mtk_iommu.c | 2 +- include/linux/io-pgtable.h | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index fa1b38f4c271..77cc1eb1243b 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -315,7 +315,7 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl, if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) pte |= ARM_V7S_ATTR_NS_SECTION; - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB) + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT) pte |= ARM_V7S_ATTR_MTK_4GB; return pte; @@ -737,12 +737,12 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_PERMS | IO_PGTABLE_QUIRK_TLBI_ON_MAP | - IO_PGTABLE_QUIRK_ARM_MTK_4GB | + IO_PGTABLE_QUIRK_ARM_MTK_EXT | IO_PGTABLE_QUIRK_NON_STRICT)) return NULL; /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. 
*/ - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB && + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS)) return NULL; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 9ba270620767..62edce75c366 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -296,7 +296,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) }; if (data->enable_4GB) - dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB; + dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_EXT; dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data); if (!dom->iop) { diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index b5a450a3bb47..915fb7303aa3 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -65,7 +65,7 @@ struct io_pgtable_cfg { * (unmapped) entries but the hardware might do so anyway, perform * TLB maintenance when mapping as well as when unmapping. * - * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all + * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) Set bit 9 in all * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit * when the SoC is in "4GB mode" and they can only access the high * remap of DRAM (0x1_00000000 to 0x1_ffffffff). @@ -77,7 +77,7 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) - #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3) + #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3) #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4) unsigned long quirks; unsigned long pgsize_bitmap; -- cgit v1.2.3 From 4c019de653237674d38cf2b3119153b144ffe173 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:54 +0800 Subject: iommu/io-pgtable-arm-v7s: Extend to support PA[33:32] for MediaTek MediaTek extend the arm v7s descriptor to support up to 34 bits PA where the bit32 and bit33 are encoded in the bit9 and bit4 of the PTE respectively. Meanwhile the iova still is 32bits. Regarding whether the pagetable address could be over 4GB, the mt8183 support it while the previous mt8173 don't, thus keep it as is. 
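In other words, two otherwise-unused bits of the 32-bit PTE carry PA[32] and PA[33] while the IOVA itself stays 32-bit, and the paddr_to_iopte()/iopte_to_paddr() helpers added earlier in this series keep the change local. A simplified stand-alone model of the encoding (level-1 sections only; all attribute bits other than the two MediaTek extension bits are ignored here):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define BIT_ULL(n)              (1ULL << (n))
#define SECTION_MASK            0xfff00000u /* level-1 section: PA[31:20] */
#define ATTR_MTK_PA_BIT32       0x200       /* PTE bit 9 */
#define ATTR_MTK_PA_BIT33       0x010       /* PTE bit 4 */

/* Pack an up-to-34-bit physical address into a 32-bit v7s-style PTE. */
static uint32_t pack_paddr(uint64_t paddr)
{
        uint32_t pte = paddr & SECTION_MASK;

        if (paddr & BIT_ULL(32))
                pte |= ATTR_MTK_PA_BIT32;
        if (paddr & BIT_ULL(33))
                pte |= ATTR_MTK_PA_BIT33;
        return pte;
}

static uint64_t unpack_paddr(uint32_t pte)
{
        uint64_t paddr = pte & SECTION_MASK;

        if (pte & ATTR_MTK_PA_BIT32)
                paddr |= BIT_ULL(32);
        if (pte & ATTR_MTK_PA_BIT33)
                paddr |= BIT_ULL(33);
        return paddr;
}

int main(void)
{
        uint64_t pa = 0x140000000ULL; /* a PA above 4GB: bit32 is set */
        uint32_t pte = pack_paddr(pa);

        printf("pa=%#" PRIx64 " pte=%#x back=%#" PRIx64 "\n",
               pa, pte, unpack_paddr(pte));
        return 0;
}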
Signed-off-by: Yong Wu Acked-by: Will Deacon Signed-off-by: Joerg Roedel --- drivers/iommu/io-pgtable-arm-v7s.c | 40 +++++++++++++++++++++++++++++++------- include/linux/io-pgtable.h | 7 +++---- 2 files changed, 36 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 77cc1eb1243b..545287148147 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -112,7 +112,9 @@ #define ARM_V7S_TEX_MASK 0x7 #define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT) -#define ARM_V7S_ATTR_MTK_4GB BIT(9) /* MTK extend it for 4GB mode */ +/* MediaTek extend the two bits for PA 32bit/33bit */ +#define ARM_V7S_ATTR_MTK_PA_BIT32 BIT(9) +#define ARM_V7S_ATTR_MTK_PA_BIT33 BIT(4) /* *well, except for TEX on level 2 large pages, of course :( */ #define ARM_V7S_CONT_PAGE_TEX_SHIFT 6 @@ -176,16 +178,32 @@ static dma_addr_t __arm_v7s_dma_addr(void *pages) return (dma_addr_t)virt_to_phys(pages); } +static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg) +{ + return IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) && + (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT); +} + static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl, struct io_pgtable_cfg *cfg) { - return paddr & ARM_V7S_LVL_MASK(lvl); + arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl); + + if (!arm_v7s_is_mtk_enabled(cfg)) + return pte; + + if (paddr & BIT_ULL(32)) + pte |= ARM_V7S_ATTR_MTK_PA_BIT32; + if (paddr & BIT_ULL(33)) + pte |= ARM_V7S_ATTR_MTK_PA_BIT33; + return pte; } static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl, struct io_pgtable_cfg *cfg) { arm_v7s_iopte mask; + phys_addr_t paddr; if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) mask = ARM_V7S_TABLE_MASK; @@ -194,7 +212,15 @@ static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl, else mask = ARM_V7S_LVL_MASK(lvl); - return pte & mask; + paddr = pte & mask; + if (!arm_v7s_is_mtk_enabled(cfg)) + return paddr; + + if (pte & ARM_V7S_ATTR_MTK_PA_BIT32) + paddr |= BIT_ULL(32); + if (pte & ARM_V7S_ATTR_MTK_PA_BIT33) + paddr |= BIT_ULL(33); + return paddr; } static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl, @@ -315,9 +341,6 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl, if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) pte |= ARM_V7S_ATTR_NS_SECTION; - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT) - pte |= ARM_V7S_ATTR_MTK_4GB; - return pte; } @@ -731,7 +754,10 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, { struct arm_v7s_io_pgtable *data; - if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS) + if (cfg->ias > ARM_V7S_ADDR_BITS) + return NULL; + + if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS)) return NULL; if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 915fb7303aa3..a2a52c349fe4 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -65,10 +65,9 @@ struct io_pgtable_cfg { * (unmapped) entries but the hardware might do so anyway, perform * TLB maintenance when mapping as well as when unmapping. * - * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) Set bit 9 in all - * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit - * when the SoC is in "4GB mode" and they can only access the high - * remap of DRAM (0x1_00000000 to 0x1_ffffffff). 
+ * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend + * to support up to 34 bits PA where the bit32 and bit33 are + * encoded in the bit9 and bit4 of the PTE respectively. * * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs * on unmap, for DMA domains using the flush queue mechanism for -- cgit v1.2.3 From b4dad40e4f35bbf2393f35f4492acf799eb8136d Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:55 +0800 Subject: iommu/mediatek: Adjust the PA for the 4GB Mode After extending the v7s support PA[33:32] for MediaTek, we have to adjust the PA ourself for the 4GB mode. In the 4GB Mode, the PA will remap like this: CPU PA -> M4U output PA 0x4000_0000 0x1_4000_0000 (Add bit32) 0x8000_0000 0x1_8000_0000 ... 0xc000_0000 0x1_c000_0000 ... 0x1_0000_0000 0x1_0000_0000 (No change) 1) Always add bit32 for CPU PA in ->map. 2) Discard the bit32 in iova_to_phys if PA > 0x1_4000_0000 since the iommu consumer always use the CPU PA. Besides, the "oas" always is set to 34 since v7s has already supported our case. Both mt2712 and mt8173 support this "4GB mode" while the mt8183 don't. The PA in mt8183 won't remap. Signed-off-by: Yong Wu Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 26 ++++++++++++++++---------- drivers/iommu/mtk_iommu.h | 1 + 2 files changed, 17 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 62edce75c366..b5a40a4b40c5 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -129,7 +129,7 @@ static const struct iommu_ops mtk_iommu_ops; * 'E', the CPU physical address keep as is. * Additionally, The iommu consumers always use the CPU phyiscal address. */ -#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x40000000 +#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL static LIST_HEAD(m4ulist); /* List all the M4U HWs */ @@ -287,17 +287,15 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) dom->cfg = (struct io_pgtable_cfg) { .quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_PERMS | - IO_PGTABLE_QUIRK_TLBI_ON_MAP, + IO_PGTABLE_QUIRK_TLBI_ON_MAP | + IO_PGTABLE_QUIRK_ARM_MTK_EXT, .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap, .ias = 32, - .oas = 32, + .oas = 34, .tlb = &mtk_iommu_gather_ops, .iommu_dev = data->dev, }; - if (data->enable_4GB) - dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_EXT; - dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data); if (!dom->iop) { dev_err(data->dev, "Failed to alloc io pgtable\n"); @@ -383,12 +381,16 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); + struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); unsigned long flags; int ret; + /* The "4GB mode" M4U physically can not use the lower remap of Dram. 
*/ + if (data->enable_4GB) + paddr |= BIT_ULL(32); + spin_lock_irqsave(&dom->pgtlock, flags); - ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32), - size, prot); + ret = dom->iop->map(dom->iop, iova, paddr, size, prot); spin_unlock_irqrestore(&dom->pgtlock, flags); return ret; @@ -425,8 +427,8 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, pa = dom->iop->iova_to_phys(dom->iop, iova); spin_unlock_irqrestore(&dom->pgtlock, flags); - if (data->enable_4GB && pa < MTK_IOMMU_4GB_MODE_REMAP_BASE) - pa |= BIT_ULL(32); + if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE) + pa &= ~BIT_ULL(32); return pa; } @@ -618,6 +620,8 @@ static int mtk_iommu_probe(struct platform_device *pdev) /* Whether the current dram is over 4GB */ data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT)); + if (!data->plat_data->has_4gb_mode) + data->enable_4GB = false; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->base = devm_ioremap_resource(dev, res); @@ -758,10 +762,12 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = { static const struct mtk_iommu_plat_data mt2712_data = { .m4u_plat = M4U_MT2712, + .has_4gb_mode = true, }; static const struct mtk_iommu_plat_data mt8173_data = { .m4u_plat = M4U_MT8173, + .has_4gb_mode = true, }; static const struct of_device_id mtk_iommu_of_ids[] = { diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 9725b083c62f..c281c01b2d5b 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -34,6 +34,7 @@ enum mtk_iommu_plat { struct mtk_iommu_plat_data { enum mtk_iommu_plat m4u_plat; + bool has_4gb_mode; }; struct mtk_iommu_domain; -- cgit v1.2.3 From 2aa4c2597cd67139697eb3095d5b6d61074c7ffc Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:56 +0800 Subject: iommu/mediatek: Add bclk can be supported optionally In some SoCs, M4U doesn't have its "bclk", it will use the EMI clock instead which has always been enabled when entering kernel. Currently mt2712 and mt8173 have this bclk while mt8183 doesn't. This also is a preparing patch for mt8183. 
Signed-off-by: Yong Wu Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 10 +++++++--- drivers/iommu/mtk_iommu.h | 3 +++ 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index b5a40a4b40c5..4df3cb430b43 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -633,9 +633,11 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (data->irq < 0) return data->irq; - data->bclk = devm_clk_get(dev, "bclk"); - if (IS_ERR(data->bclk)) - return PTR_ERR(data->bclk); + if (data->plat_data->has_bclk) { + data->bclk = devm_clk_get(dev, "bclk"); + if (IS_ERR(data->bclk)) + return PTR_ERR(data->bclk); + } larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL); @@ -763,11 +765,13 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = { static const struct mtk_iommu_plat_data mt2712_data = { .m4u_plat = M4U_MT2712, .has_4gb_mode = true, + .has_bclk = true, }; static const struct mtk_iommu_plat_data mt8173_data = { .m4u_plat = M4U_MT8173, .has_4gb_mode = true, + .has_bclk = true, }; static const struct of_device_id mtk_iommu_of_ids[] = { diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index c281c01b2d5b..821172bd6c52 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -35,6 +35,9 @@ enum mtk_iommu_plat { struct mtk_iommu_plat_data { enum mtk_iommu_plat m4u_plat; bool has_4gb_mode; + + /* HW will use the EMI clock if there isn't the "bclk". */ + bool has_bclk; }; struct mtk_iommu_domain; -- cgit v1.2.3 From b3e5eee76548f621bb1c3e9dab4a4ff98bebf0de Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:57 +0800 Subject: iommu/mediatek: Add larb-id remapped support The larb-id may be remapped in the smi-common, this means the larb-id reported in the mtk_iommu_isr isn't the real larb-id, Take mt8183 as a example: M4U | --------------------------------------------- | SMI common | -0-----7-----5-----6-----1-----2------3-----4- <- Id remapped | | | | | | | | larb0 larb1 IPU0 IPU1 larb4 larb5 larb6 CCU disp vdec img cam venc img cam As above, larb0 connects with the id 0 in smi-common. larb1 connects with the id 7 in smi-common. ... If the larb-id reported in the isr is 7, actually it's larb1(vdec). In order to output the right larb-id in the isr, we add a larb-id remapping relationship in this patch. If there is no this larb-id remapping in some SoCs, use the linear mapping array instead. This also is a preparing patch for mt8183. Signed-off-by: Yong Wu Reviewed-by: Nicolas Boichat Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 4 ++++ drivers/iommu/mtk_iommu.h | 2 ++ 2 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 4df3cb430b43..34f020367ebe 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -236,6 +236,8 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) fault_larb = F_MMU0_INT_ID_LARB_ID(regval); fault_port = F_MMU0_INT_ID_PORT_ID(regval); + fault_larb = data->plat_data->larbid_remap[fault_larb]; + if (report_iommu_fault(&dom->domain, data->dev, fault_iova, write ? 
IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) { dev_err_ratelimited( @@ -766,12 +768,14 @@ static const struct mtk_iommu_plat_data mt2712_data = { .m4u_plat = M4U_MT2712, .has_4gb_mode = true, .has_bclk = true, + .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, }; static const struct mtk_iommu_plat_data mt8173_data = { .m4u_plat = M4U_MT8173, .has_4gb_mode = true, .has_bclk = true, + .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */ }; static const struct of_device_id mtk_iommu_of_ids[] = { diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 821172bd6c52..d1a1d8887a0e 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -38,6 +38,8 @@ struct mtk_iommu_plat_data { /* HW will use the EMI clock if there isn't the "bclk". */ bool has_bclk; + + unsigned char larbid_remap[MTK_LARB_NR_MAX]; }; struct mtk_iommu_domain; -- cgit v1.2.3 From acb3c92a61306a8bc3b6bb8ed72977201affdd9a Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:58 +0800 Subject: iommu/mediatek: Refine protect memory definition The protect memory setting is a little different in the different SoCs. In the register REG_MMU_CTRL_REG(0x110), the TF_PROT(translation fault protect) shift bit is normally 4 while it shift 5 bits only in the mt8173. This patch delete the complex MACRO and use a common if-else instead. Signed-off-by: Yong Wu Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 34f020367ebe..947a8c6b87dc 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -44,12 +44,9 @@ #define REG_MMU_DCM_DIS 0x050 #define REG_MMU_CTRL_REG 0x110 +#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4) #define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4) -#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \ - ((data)->plat_data->m4u_plat == M4U_MT2712 ? 4 : 5) -/* It's named by F_MMU_TF_PROT_SEL in mt2712. */ -#define F_MMU_TF_PROTECT_SEL(prot, data) \ - (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data)) +#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173 (2 << 5) #define REG_MMU_IVRP_PADDR 0x114 @@ -539,9 +536,11 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) return ret; } - regval = F_MMU_TF_PROTECT_SEL(2, data); if (data->plat_data->m4u_plat == M4U_MT8173) - regval |= F_MMU_PREFETCH_RT_REPLACE_MOD; + regval = F_MMU_PREFETCH_RT_REPLACE_MOD | + F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173; + else + regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR; writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); regval = F_L2_MULIT_HIT_EN | -- cgit v1.2.3 From 50822b0b948fabfc8bcee9a89d031c276b135506 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:01:59 +0800 Subject: iommu/mediatek: Move reset_axi into plat_data In mt8173 and mt8183, 0x48 is REG_MMU_STANDARD_AXI_MODE while it is REG_MMU_CTRL in the other SoCs, and the bits meaning is completely different with the REG_MMU_STANDARD_AXI_MODE. This patch moves this property to plat_data, it's also a preparing patch for mt8183. 
Signed-off-by: Yong Wu Reviewed-by: Nicolas Boichat Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 4 ++-- drivers/iommu/mtk_iommu.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 947a8c6b87dc..b43f36aba0a1 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -577,8 +577,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) } writel_relaxed(0, data->base + REG_MMU_DCM_DIS); - /* It's MISC control register whose default value is ok except mt8173.*/ - if (data->plat_data->m4u_plat == M4U_MT8173) + if (data->plat_data->reset_axi) writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE); if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, @@ -774,6 +773,7 @@ static const struct mtk_iommu_plat_data mt8173_data = { .m4u_plat = M4U_MT8173, .has_4gb_mode = true, .has_bclk = true, + .reset_axi = true, .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */ }; diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index d1a1d8887a0e..8d3b525b8752 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -38,7 +38,7 @@ struct mtk_iommu_plat_data { /* HW will use the EMI clock if there isn't the "bclk". */ bool has_bclk; - + bool reset_axi; unsigned char larbid_remap[MTK_LARB_NR_MAX]; }; -- cgit v1.2.3 From 2b326d8b1d2b443c3dad96f54c6b279559f87751 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:02:00 +0800 Subject: iommu/mediatek: Move vld_pa_rng into plat_data Both mt8173 and mt8183 don't have this vld_pa_rng(valid physical address range) register while mt2712 have. Move it into the plat_data. Signed-off-by: Yong Wu Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 3 ++- drivers/iommu/mtk_iommu.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index b43f36aba0a1..eaf6a23f0248 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -567,7 +567,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) upper_32_bits(data->protect_base); writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR); - if (data->enable_4GB && data->plat_data->m4u_plat != M4U_MT8173) { + if (data->enable_4GB && data->plat_data->has_vld_pa_rng) { /* * If 4GB mode is enabled, the validate PA range is from * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30]. @@ -766,6 +766,7 @@ static const struct mtk_iommu_plat_data mt2712_data = { .m4u_plat = M4U_MT2712, .has_4gb_mode = true, .has_bclk = true, + .has_vld_pa_rng = true, .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, }; diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 8d3b525b8752..973d6e077e43 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -38,6 +38,7 @@ struct mtk_iommu_plat_data { /* HW will use the EMI clock if there isn't the "bclk". */ bool has_bclk; + bool has_vld_pa_rng; bool reset_axi; unsigned char larbid_remap[MTK_LARB_NR_MAX]; }; -- cgit v1.2.3 From 64fea74a0d205ac9aff39997e5fe9d64115a4bd3 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:02:01 +0800 Subject: memory: mtk-smi: Add gals support In some SoCs like mt8183, SMI add GALS(Global Async Local Sync) module which can help synchronize for the modules in different clock frequency. 
It can be seen as a "asynchronous fifo". This is a example diagram: M4U | ---------- | | gals0-rx gals1-rx | | | | gals0-tx gals1-tx | | ------------ SMI Common ------------ | +-----+--------+-----+- ... | | | | | gals-rx gals-rx | | | | | | | | | | gals-tx gals-tx | | | | | larb1 larb2 larb3 larb4 GALS only help transfer the command/data while it doesn't have the configuring register, thus it has the special "smi" clock and doesn't have the "apb" clock. From the diagram above, we add "gals0" and "gals1" clocks for smi-common and add a "gals" clock for smi-larb. This patch adds gals clock supporting in the SMI. Note that some larbs may still don't have the "gals" clock like larb1 and larb4 above. This is also a preparing patch for mt8183 which has GALS. CC: Matthias Brugger Signed-off-by: Yong Wu Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/memory/mtk-smi.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'drivers') diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 47df7d04009f..53bd379e1935 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -48,6 +48,7 @@ enum mtk_smi_gen { struct mtk_smi_common_plat { enum mtk_smi_gen gen; + bool has_gals; }; struct mtk_smi_larb_gen { @@ -55,11 +56,13 @@ struct mtk_smi_larb_gen { int port_in_larb[MTK_LARB_NR_MAX + 1]; void (*config_port)(struct device *); unsigned int larb_direct_to_common_mask; + bool has_gals; }; struct mtk_smi { struct device *dev; struct clk *clk_apb, *clk_smi; + struct clk *clk_gals0, *clk_gals1; struct clk *clk_async; /*only needed by mt2701*/ void __iomem *smi_ao_base; @@ -91,8 +94,20 @@ static int mtk_smi_enable(const struct mtk_smi *smi) if (ret) goto err_disable_apb; + ret = clk_prepare_enable(smi->clk_gals0); + if (ret) + goto err_disable_smi; + + ret = clk_prepare_enable(smi->clk_gals1); + if (ret) + goto err_disable_gals0; + return 0; +err_disable_gals0: + clk_disable_unprepare(smi->clk_gals0); +err_disable_smi: + clk_disable_unprepare(smi->clk_smi); err_disable_apb: clk_disable_unprepare(smi->clk_apb); err_put_pm: @@ -102,6 +117,8 @@ err_put_pm: static void mtk_smi_disable(const struct mtk_smi *smi) { + clk_disable_unprepare(smi->clk_gals1); + clk_disable_unprepare(smi->clk_gals0); clk_disable_unprepare(smi->clk_smi); clk_disable_unprepare(smi->clk_apb); pm_runtime_put_sync(smi->dev); @@ -302,6 +319,15 @@ static int mtk_smi_larb_probe(struct platform_device *pdev) larb->smi.clk_smi = devm_clk_get(dev, "smi"); if (IS_ERR(larb->smi.clk_smi)) return PTR_ERR(larb->smi.clk_smi); + + if (larb->larb_gen->has_gals) { + /* The larbs may still haven't gals even if the SoC support.*/ + larb->smi.clk_gals0 = devm_clk_get(dev, "gals"); + if (PTR_ERR(larb->smi.clk_gals0) == -ENOENT) + larb->smi.clk_gals0 = NULL; + else if (IS_ERR(larb->smi.clk_gals0)) + return PTR_ERR(larb->smi.clk_gals0); + } larb->smi.dev = dev; if (larb->larb_gen->need_larbid) { @@ -394,6 +420,16 @@ static int mtk_smi_common_probe(struct platform_device *pdev) if (IS_ERR(common->clk_smi)) return PTR_ERR(common->clk_smi); + if (common->plat->has_gals) { + common->clk_gals0 = devm_clk_get(dev, "gals0"); + if (IS_ERR(common->clk_gals0)) + return PTR_ERR(common->clk_gals0); + + common->clk_gals1 = devm_clk_get(dev, "gals1"); + if (IS_ERR(common->clk_gals1)) + return PTR_ERR(common->clk_gals1); + } + /* * for mtk smi gen 1, we need to get the ao(always on) base to config * m4u port, and we need to enable the aync clock for transform the smi -- 
cgit v1.2.3 From 907ba6a195991adb2a3edf4aff0d8dbb308d4d97 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:02:02 +0800 Subject: iommu/mediatek: Add mt8183 IOMMU support The M4U IP blocks in mt8183 is MediaTek's generation2 M4U which use the ARM Short-descriptor like mt8173, and most of the HW registers are the same. Here list main differences between mt8183 and mt8173/mt2712: 1) mt8183 has only one M4U HW like mt8173 while mt2712 has two. 2) mt8183 don't have the "bclk" clock, it use the EMI clock instead. 3) mt8183 can support the dram over 4GB, but it doesn't call this "4GB mode". 4) mt8183 pgtable base register(0x0) extend bit[1:0] which represent the bit[33:32] in the physical address of the pgtable base, But the standard ttbr0[1] means the S bit which is enabled defaultly, Hence, we add a mask. 5) mt8183 HW has a GALS modules, SMI should enable "has_gals" support. 6) mt8183 need reset_axi like mt8173. 7) the larb-id in smi-common is remapped. M4U should add its larbid_remap. Signed-off-by: Yong Wu Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 15 ++++++++++++--- drivers/iommu/mtk_iommu.h | 1 + drivers/memory/mtk-smi.c | 20 ++++++++++++++++++++ 3 files changed, 33 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index eaf6a23f0248..ee3a664beeb3 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -28,6 +28,7 @@ #include "mtk_iommu.h" #define REG_MMU_PT_BASE_ADDR 0x000 +#define MMU_PT_ADDR_MASK GENMASK(31, 7) #define REG_MMU_INVALIDATE 0x020 #define F_ALL_INVLD 0x2 @@ -357,7 +358,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, /* Update the pgtable base address register of the M4U HW */ if (!data->m4u_dom) { data->m4u_dom = dom; - writel(dom->cfg.arm_v7s_cfg.ttbr[0], + writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK, data->base + REG_MMU_PT_BASE_ADDR); } @@ -737,6 +738,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); struct mtk_iommu_suspend_reg *reg = &data->reg; + struct mtk_iommu_domain *m4u_dom = data->m4u_dom; void __iomem *base = data->base; int ret; @@ -752,8 +754,8 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR); - if (data->m4u_dom) - writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], + if (m4u_dom) + writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR); return 0; } @@ -778,9 +780,16 @@ static const struct mtk_iommu_plat_data mt8173_data = { .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. 
*/ }; +static const struct mtk_iommu_plat_data mt8183_data = { + .m4u_plat = M4U_MT8183, + .reset_axi = true, + .larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1}, +}; + static const struct of_device_id mtk_iommu_of_ids[] = { { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, + { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, {} }; diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 973d6e077e43..6b1f833696a6 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -30,6 +30,7 @@ enum mtk_iommu_plat { M4U_MT2701, M4U_MT2712, M4U_MT8173, + M4U_MT8183, }; struct mtk_iommu_plat_data { diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 53bd379e1935..3dd05de50c6c 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -277,6 +277,13 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = { .larb_direct_to_common_mask = BIT(8) | BIT(9), /* bdpsys */ }; +static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = { + .has_gals = true, + .config_port = mtk_smi_larb_config_port_gen2_general, + .larb_direct_to_common_mask = BIT(2) | BIT(3) | BIT(7), + /* IPU0 | IPU1 | CCU */ +}; + static const struct of_device_id mtk_smi_larb_of_ids[] = { { .compatible = "mediatek,mt8173-smi-larb", @@ -290,6 +297,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = { .compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712 }, + { + .compatible = "mediatek,mt8183-smi-larb", + .data = &mtk_smi_larb_mt8183 + }, {} }; @@ -383,6 +394,11 @@ static const struct mtk_smi_common_plat mtk_smi_common_gen2 = { .gen = MTK_SMI_GEN2, }; +static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = { + .gen = MTK_SMI_GEN2, + .has_gals = true, +}; + static const struct of_device_id mtk_smi_common_of_ids[] = { { .compatible = "mediatek,mt8173-smi-common", @@ -396,6 +412,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = { .compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2, }, + { + .compatible = "mediatek,mt8183-smi-common", + .data = &mtk_smi_common_mt8183, + }, {} }; -- cgit v1.2.3 From 15a01f4c60607ad888faf0386070181f9c97577f Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:02:03 +0800 Subject: iommu/mediatek: Add mmu1 support Normally the M4U HW connect EMI with smi. the diagram is like below: EMI | M4U | smi-common | ----------------- | | | | ... larb0 larb1 larb2 larb3 Actually there are 2 mmu cells in the M4U HW, like this diagram: EMI --------- | | mmu0 mmu1 <- M4U | | --------- | smi-common | ----------------- | | | | ... larb0 larb1 larb2 larb3 This patch add support for mmu1. In order to get better performance, we could adjust some larbs go to mmu1 while the others still go to mmu0. This is controlled by a SMI COMMON register SMI_BUS_SEL(0x220). mt2712, mt8173 and mt8183 M4U HW all have 2 mmu cells. the default value of that register is 0 which means all the larbs go to mmu0 defaultly. This is a preparing patch for adjusting SMI_BUS_SEL for mt8183. 
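Since both MMUs report through the same interrupt status register, every fault type now needs a two-bit mask (one bit per MMU) and the ISR selects the mmu0 or mmu1 register bank by looking at which half of the status word is set. A small stand-alone model of that decode (the bit layout follows the patch; the register read is replaced by a simulated constant):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)                          (1u << (n))
/* mmu0 | mmu1 */
#define INT_TRANSLATION_FAULT           (BIT(0) | BIT(7))
#define REG_MMU0_FAULT_MASK             0x007fu /* GENMASK(6, 0) */
#define REG_MMU1_FAULT_MASK             0x3f80u /* GENMASK(13, 7) */

/* Decide which MMU raised the fault from the shared status word. */
static int faulting_mmu(uint32_t int_state)
{
        return (int_state & REG_MMU0_FAULT_MASK) ? 0 : 1;
}

int main(void)
{
        uint32_t int_state = BIT(7); /* translation fault signalled by mmu1 */

        if (int_state & INT_TRANSLATION_FAULT)
                printf("translation fault on mmu%d\n", faulting_mmu(int_state));
        return 0;
}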
Signed-off-by: Yong Wu Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 46 +++++++++++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index ee3a664beeb3..470de8bf576f 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -64,26 +64,32 @@ #define F_INT_CLR_BIT BIT(12) #define REG_MMU_INT_MAIN_CONTROL 0x124 -#define F_INT_TRANSLATION_FAULT BIT(0) -#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1) -#define F_INT_INVALID_PA_FAULT BIT(2) -#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3) -#define F_INT_TLB_MISS_FAULT BIT(4) -#define F_INT_MISS_TRANSACTION_FIFO_FAULT BIT(5) -#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT BIT(6) + /* mmu0 | mmu1 */ +#define F_INT_TRANSLATION_FAULT (BIT(0) | BIT(7)) +#define F_INT_MAIN_MULTI_HIT_FAULT (BIT(1) | BIT(8)) +#define F_INT_INVALID_PA_FAULT (BIT(2) | BIT(9)) +#define F_INT_ENTRY_REPLACEMENT_FAULT (BIT(3) | BIT(10)) +#define F_INT_TLB_MISS_FAULT (BIT(4) | BIT(11)) +#define F_INT_MISS_TRANSACTION_FIFO_FAULT (BIT(5) | BIT(12)) +#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT (BIT(6) | BIT(13)) #define REG_MMU_CPE_DONE 0x12C #define REG_MMU_FAULT_ST1 0x134 +#define F_REG_MMU0_FAULT_MASK GENMASK(6, 0) +#define F_REG_MMU1_FAULT_MASK GENMASK(13, 7) -#define REG_MMU_FAULT_VA 0x13c +#define REG_MMU0_FAULT_VA 0x13c #define F_MMU_FAULT_VA_WRITE_BIT BIT(1) #define F_MMU_FAULT_VA_LAYER_BIT BIT(0) -#define REG_MMU_INVLD_PA 0x140 -#define REG_MMU_INT_ID 0x150 -#define F_MMU0_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) -#define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) +#define REG_MMU0_INVLD_PA 0x140 +#define REG_MMU1_FAULT_VA 0x144 +#define REG_MMU1_INVLD_PA 0x148 +#define REG_MMU0_INT_ID 0x150 +#define REG_MMU1_INT_ID 0x154 +#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) +#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) #define MTK_PROTECT_PA_ALIGN 128 @@ -226,13 +232,19 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) /* Read error info from registers */ int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1); - fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA); + if (int_state & F_REG_MMU0_FAULT_MASK) { + regval = readl_relaxed(data->base + REG_MMU0_INT_ID); + fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA); + fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA); + } else { + regval = readl_relaxed(data->base + REG_MMU1_INT_ID); + fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA); + fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA); + } layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT; write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT; - fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA); - regval = readl_relaxed(data->base + REG_MMU_INT_ID); - fault_larb = F_MMU0_INT_ID_LARB_ID(regval); - fault_port = F_MMU0_INT_ID_PORT_ID(regval); + fault_larb = F_MMU_INT_ID_LARB_ID(regval); + fault_port = F_MMU_INT_ID_PORT_ID(regval); fault_larb = data->plat_data->larbid_remap[fault_larb]; -- cgit v1.2.3 From 4f0a1a1ae35197e9b7a6d11b4e881bcfe973ad8f Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:02:04 +0800 Subject: memory: mtk-smi: Invoke pm runtime_callback to enable clocks This patch only move the clk_prepare_enable and config_port into the runtime suspend/resume callback. It doesn't change the code content and sequence. This is a preparing patch for adjusting SMI_BUS_SEL for mt8183. 
(SMI_BUS_SEL need to be restored after smi-common resume every time.) Also it gives a chance to get rid of mtk_smi_larb_get/put which could be a next topic. CC: Matthias Brugger Signed-off-by: Yong Wu Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/memory/mtk-smi.c | 113 ++++++++++++++++++++++++++++++----------------- 1 file changed, 72 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 3dd05de50c6c..2bb55b864fa9 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -78,17 +78,13 @@ struct mtk_smi_larb { /* larb: local arbiter */ u32 *mmu; }; -static int mtk_smi_enable(const struct mtk_smi *smi) +static int mtk_smi_clk_enable(const struct mtk_smi *smi) { int ret; - ret = pm_runtime_get_sync(smi->dev); - if (ret < 0) - return ret; - ret = clk_prepare_enable(smi->clk_apb); if (ret) - goto err_put_pm; + return ret; ret = clk_prepare_enable(smi->clk_smi); if (ret) @@ -110,59 +106,28 @@ err_disable_smi: clk_disable_unprepare(smi->clk_smi); err_disable_apb: clk_disable_unprepare(smi->clk_apb); -err_put_pm: - pm_runtime_put_sync(smi->dev); return ret; } -static void mtk_smi_disable(const struct mtk_smi *smi) +static void mtk_smi_clk_disable(const struct mtk_smi *smi) { clk_disable_unprepare(smi->clk_gals1); clk_disable_unprepare(smi->clk_gals0); clk_disable_unprepare(smi->clk_smi); clk_disable_unprepare(smi->clk_apb); - pm_runtime_put_sync(smi->dev); } int mtk_smi_larb_get(struct device *larbdev) { - struct mtk_smi_larb *larb = dev_get_drvdata(larbdev); - const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen; - struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev); - int ret; + int ret = pm_runtime_get_sync(larbdev); - /* Enable the smi-common's power and clocks */ - ret = mtk_smi_enable(common); - if (ret) - return ret; - - /* Enable the larb's power and clocks */ - ret = mtk_smi_enable(&larb->smi); - if (ret) { - mtk_smi_disable(common); - return ret; - } - - /* Configure the iommu info for this larb */ - larb_gen->config_port(larbdev); - - return 0; + return (ret < 0) ? ret : 0; } EXPORT_SYMBOL_GPL(mtk_smi_larb_get); void mtk_smi_larb_put(struct device *larbdev) { - struct mtk_smi_larb *larb = dev_get_drvdata(larbdev); - struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev); - - /* - * Don't de-configure the iommu info for this larb since there may be - * several modules in this larb. - * The iommu info will be reset after power off. - */ - - mtk_smi_disable(&larb->smi); - mtk_smi_disable(common); + pm_runtime_put_sync(larbdev); } EXPORT_SYMBOL_GPL(mtk_smi_larb_put); @@ -377,12 +342,52 @@ static int mtk_smi_larb_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused mtk_smi_larb_resume(struct device *dev) +{ + struct mtk_smi_larb *larb = dev_get_drvdata(dev); + const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen; + int ret; + + /* Power on smi-common. 
*/ + ret = pm_runtime_get_sync(larb->smi_common_dev); + if (ret < 0) { + dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret); + return ret; + } + + ret = mtk_smi_clk_enable(&larb->smi); + if (ret < 0) { + dev_err(dev, "Failed to enable clock(%d).\n", ret); + pm_runtime_put_sync(larb->smi_common_dev); + return ret; + } + + /* Configure the basic setting for this larb */ + larb_gen->config_port(dev); + + return 0; +} + +static int __maybe_unused mtk_smi_larb_suspend(struct device *dev) +{ + struct mtk_smi_larb *larb = dev_get_drvdata(dev); + + mtk_smi_clk_disable(&larb->smi); + pm_runtime_put_sync(larb->smi_common_dev); + return 0; +} + +static const struct dev_pm_ops smi_larb_pm_ops = { + SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL) +}; + static struct platform_driver mtk_smi_larb_driver = { .probe = mtk_smi_larb_probe, .remove = mtk_smi_larb_remove, .driver = { .name = "mtk-smi-larb", .of_match_table = mtk_smi_larb_of_ids, + .pm = &smi_larb_pm_ops, } }; @@ -481,12 +486,38 @@ static int mtk_smi_common_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused mtk_smi_common_resume(struct device *dev) +{ + struct mtk_smi *common = dev_get_drvdata(dev); + int ret; + + ret = mtk_smi_clk_enable(common); + if (ret) { + dev_err(common->dev, "Failed to enable clock(%d).\n", ret); + return ret; + } + return 0; +} + +static int __maybe_unused mtk_smi_common_suspend(struct device *dev) +{ + struct mtk_smi *common = dev_get_drvdata(dev); + + mtk_smi_clk_disable(common); + return 0; +} + +static const struct dev_pm_ops smi_common_pm_ops = { + SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL) +}; + static struct platform_driver mtk_smi_common_driver = { .probe = mtk_smi_common_probe, .remove = mtk_smi_common_remove, .driver = { .name = "mtk-smi-common", .of_match_table = mtk_smi_common_of_ids, + .pm = &smi_common_pm_ops, } }; -- cgit v1.2.3 From 567e58cf96dda2cc1d8dbdafbc1c089b5ea2be25 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:02:05 +0800 Subject: memory: mtk-smi: Add bus_sel for mt8183 There are 2 mmu cells in a M4U HW. we could adjust some larbs entering mmu0 or mmu1 to balance the bandwidth via the smi-common register SMI_BUS_SEL(0x220)(Each larb occupy 2 bits). In mt8183, For better performance, we switch larb1/2/5/7 to enter mmu1 while the others still keep enter mmu0. In mt8173 and mt2712, we don't get the performance issue, Keep its default value(0x0), that means all the larbs enter mmu0. Note: smi gen1(mt2701/mt7623) don't have this bus_sel. And, the base of smi-common is completely different with smi_ao_base of gen1, thus I add new variable for that. CC: Matthias Brugger Signed-off-by: Yong Wu Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/memory/mtk-smi.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 2bb55b864fa9..289e59543a04 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -41,6 +41,12 @@ #define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4)) #define F_MMU_EN BIT(0) +/* SMI COMMON */ +#define SMI_BUS_SEL 0x220 +#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1) +/* All are MMU0 defaultly. Only specialize mmu1 here. 
*/ +#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid)) + enum mtk_smi_gen { MTK_SMI_GEN1, MTK_SMI_GEN2 @@ -49,6 +55,7 @@ enum mtk_smi_gen { struct mtk_smi_common_plat { enum mtk_smi_gen gen; bool has_gals; + u32 bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */ }; struct mtk_smi_larb_gen { @@ -64,8 +71,10 @@ struct mtk_smi { struct clk *clk_apb, *clk_smi; struct clk *clk_gals0, *clk_gals1; struct clk *clk_async; /*only needed by mt2701*/ - void __iomem *smi_ao_base; - + union { + void __iomem *smi_ao_base; /* only for gen1 */ + void __iomem *base; /* only for gen2 */ + }; const struct mtk_smi_common_plat *plat; }; @@ -402,6 +411,8 @@ static const struct mtk_smi_common_plat mtk_smi_common_gen2 = { static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = { .gen = MTK_SMI_GEN2, .has_gals = true, + .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) | + F_MMU1_LARB(7), }; static const struct of_device_id mtk_smi_common_of_ids[] = { @@ -474,6 +485,11 @@ static int mtk_smi_common_probe(struct platform_device *pdev) ret = clk_prepare_enable(common->clk_async); if (ret) return ret; + } else { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + common->base = devm_ioremap_resource(dev, res); + if (IS_ERR(common->base)) + return PTR_ERR(common->base); } pm_runtime_enable(dev); platform_set_drvdata(pdev, common); @@ -489,6 +505,7 @@ static int mtk_smi_common_remove(struct platform_device *pdev) static int __maybe_unused mtk_smi_common_resume(struct device *dev) { struct mtk_smi *common = dev_get_drvdata(dev); + u32 bus_sel = common->plat->bus_sel; int ret; ret = mtk_smi_clk_enable(common); @@ -496,6 +513,9 @@ static int __maybe_unused mtk_smi_common_resume(struct device *dev) dev_err(common->dev, "Failed to enable clock(%d).\n", ret); return ret; } + + if (common->plat->gen == MTK_SMI_GEN2 && bus_sel) + writel(bus_sel, common->base + SMI_BUS_SEL); return 0; } -- cgit v1.2.3 From b9475b3471f81a60a727886230b0903be72dc96d Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:02:06 +0800 Subject: iommu/mediatek: Fix VLD_PA_RNG register backup when suspend The register VLD_PA_RNG(0x118) was forgot to backup while adding 4GB mode support for mt2712. this patch add it. 
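
As a side note, the save/restore pattern this fix completes can be sketched as below. The names (suspend_reg, hw_vld_pa_rng, iommu_suspend/resume) are illustrative stand-ins, not the driver's symbols: any register whose contents are lost while the block is clock- or power-gated must be captured on suspend and written back on resume, otherwise it silently reverts to its reset value, which is what happened to VLD_PA_RNG before this patch.

    /* Standalone sketch with illustrative names, not the driver's code. */
    struct suspend_reg {
            unsigned int vld_pa_rng;
    };

    static unsigned int hw_vld_pa_rng;   /* stands in for REG_MMU_VLD_PA_RNG */

    static void iommu_suspend(struct suspend_reg *r)
    {
            r->vld_pa_rng = hw_vld_pa_rng;   /* readl_relaxed() in the driver */
    }

    static void iommu_resume(const struct suspend_reg *r)
    {
            hw_vld_pa_rng = r->vld_pa_rng;   /* writel_relaxed() in the driver */
    }
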
Fixes: 30e2fccf9512 ("iommu/mediatek: Enlarge the validate PA range for 4GB mode") Signed-off-by: Yong Wu Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 2 ++ drivers/iommu/mtk_iommu.h | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 470de8bf576f..5d5341c85650 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -742,6 +742,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev) reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR); + reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG); clk_disable_unprepare(data->bclk); return 0; } @@ -766,6 +767,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR); + writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG); if (m4u_dom) writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR); diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 6b1f833696a6..56b579c5a088 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -24,6 +24,7 @@ struct mtk_iommu_suspend_reg { u32 int_control0; u32 int_main_control; u32 ivrp_paddr; + u32 vld_pa_rng; }; enum mtk_iommu_plat { -- cgit v1.2.3 From ec2da07ca1202552d87fb01b238d46642817da2b Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:02:07 +0800 Subject: memory: mtk-smi: Get rid of need_larbid The "mediatek,larb-id" has already been parsed in MTK IOMMU driver. It's no need to parse it again in SMI driver. Only clean some codes. This patch is fit for all the current mt2701, mt2712, mt7623, mt8173 and mt8183. After this patch, the "mediatek,larb-id" only be needed for mt2712 which have 2 M4Us. In the other SoCs, we can get the larb-id from M4U in which the larbs in the "mediatek,larbs" always are ordered. Correspondingly, the larb_nr in the "struct mtk_smi_iommu" could also be deleted. 
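
For illustration, a minimal standalone sketch of the idea: once the larbs are listed in order in "mediatek,larbs", the larb id is simply the index at which a larb's device appears in the table the M4U driver builds, so the explicit "mediatek,larb-id" property becomes redundant outside mt2712. The array of strings below is a hypothetical stand-in for the larb device pointers; it is not the driver's data structure.

    #include <stdio.h>

    #define MTK_LARB_NR_MAX 16

    int main(void)
    {
            const char *larb_dev[MTK_LARB_NR_MAX] = { "larb0", "larb1", "larb2" };
            const char *this_larb = larb_dev[2];   /* the larb being bound */
            int i;

            for (i = 0; i < MTK_LARB_NR_MAX; i++) {
                    if (larb_dev[i] == this_larb) {
                            printf("larbid = %d\n", i);   /* prints 2 */
                            break;
                    }
            }
            return 0;
    }
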
CC: Matthias Brugger Signed-off-by: Yong Wu Reviewed-by: Evan Green Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 1 - drivers/iommu/mtk_iommu_v1.c | 2 -- drivers/memory/mtk-smi.c | 26 ++------------------------ include/soc/mediatek/smi.h | 1 - 4 files changed, 2 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 5d5341c85650..cc81de27c5fe 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -656,7 +656,6 @@ static int mtk_iommu_probe(struct platform_device *pdev) "mediatek,larbs", NULL); if (larb_nr < 0) return larb_nr; - data->smi_imu.larb_nr = larb_nr; for (i = 0; i < larb_nr; i++) { struct device_node *larbnode; diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index abeeac488372..3922358d13bf 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -616,8 +616,6 @@ static int mtk_iommu_probe(struct platform_device *pdev) larb_nr++; } - data->smi_imu.larb_nr = larb_nr; - platform_set_drvdata(pdev, data); ret = mtk_iommu_hw_init(data); diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 289e59543a04..d6dc62fd89fe 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -59,7 +59,6 @@ struct mtk_smi_common_plat { }; struct mtk_smi_larb_gen { - bool need_larbid; int port_in_larb[MTK_LARB_NR_MAX + 1]; void (*config_port)(struct device *); unsigned int larb_direct_to_common_mask; @@ -147,18 +146,9 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data) struct mtk_smi_iommu *smi_iommu = data; unsigned int i; - if (larb->larb_gen->need_larbid) { - larb->mmu = &smi_iommu->larb_imu[larb->larbid].mmu; - return 0; - } - - /* - * If there is no larbid property, Loop to find the corresponding - * iommu information. - */ - for (i = 0; i < smi_iommu->larb_nr; i++) { + for (i = 0; i < MTK_LARB_NR_MAX; i++) { if (dev == smi_iommu->larb_imu[i].dev) { - /* The 'mmu' may be updated in iommu-attach/detach. 
*/ + larb->larbid = i; larb->mmu = &smi_iommu->larb_imu[i].mmu; return 0; } @@ -237,7 +227,6 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = { }; static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = { - .need_larbid = true, .port_in_larb = { LARB0_PORT_OFFSET, LARB1_PORT_OFFSET, LARB2_PORT_OFFSET, LARB3_PORT_OFFSET @@ -246,7 +235,6 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = { }; static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = { - .need_larbid = true, .config_port = mtk_smi_larb_config_port_gen2_general, .larb_direct_to_common_mask = BIT(8) | BIT(9), /* bdpsys */ }; @@ -285,7 +273,6 @@ static int mtk_smi_larb_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *smi_node; struct platform_device *smi_pdev; - int err; larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL); if (!larb) @@ -315,15 +302,6 @@ static int mtk_smi_larb_probe(struct platform_device *pdev) } larb->smi.dev = dev; - if (larb->larb_gen->need_larbid) { - err = of_property_read_u32(dev->of_node, "mediatek,larb-id", - &larb->larbid); - if (err) { - dev_err(dev, "missing larbid property\n"); - return err; - } - } - smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0); if (!smi_node) return -EINVAL; diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h index 79b74ced9d91..6f0b00cc73ea 100644 --- a/include/soc/mediatek/smi.h +++ b/include/soc/mediatek/smi.h @@ -21,7 +21,6 @@ struct mtk_smi_larb_iommu { }; struct mtk_smi_iommu { - unsigned int larb_nr; struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; }; -- cgit v1.2.3 From 1ee9feb2c9f893b893c900d2492c6a01dca680f3 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Sat, 24 Aug 2019 11:02:08 +0800 Subject: iommu/mediatek: Clean up struct mtk_smi_iommu Remove the "struct mtk_smi_iommu" to simplify the code since it has only one item in it right now. Signed-off-by: Yong Wu Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 4 ++-- drivers/iommu/mtk_iommu.h | 6 +++--- drivers/iommu/mtk_iommu_v1.c | 4 ++-- drivers/memory/mtk-smi.c | 6 +++--- include/soc/mediatek/smi.h | 4 ---- 5 files changed, 10 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index cc81de27c5fe..400066d0d37b 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -278,7 +278,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data, for (i = 0; i < fwspec->num_ids; ++i) { larbid = MTK_M4U_TO_LARB(fwspec->ids[i]); portid = MTK_M4U_TO_PORT(fwspec->ids[i]); - larb_mmu = &data->smi_imu.larb_imu[larbid]; + larb_mmu = &data->larb_imu[larbid]; dev_dbg(dev, "%s iommu port: %d\n", enable ? 
"enable" : "disable", portid); @@ -680,7 +680,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) of_node_put(larbnode); return -EPROBE_DEFER; } - data->smi_imu.larb_imu[id].dev = &plarbdev->dev; + data->larb_imu[id].dev = &plarbdev->dev; component_match_add_release(dev, &match, release_of, compare_of, larbnode); diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 56b579c5a088..fc0f16eabacd 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -56,7 +56,6 @@ struct mtk_iommu_data { struct mtk_iommu_suspend_reg reg; struct mtk_iommu_domain *m4u_dom; struct iommu_group *m4u_group; - struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ bool enable_4GB; bool tlb_flush_active; @@ -64,6 +63,7 @@ struct mtk_iommu_data { const struct mtk_iommu_plat_data *plat_data; struct list_head list; + struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; }; static inline int compare_of(struct device *dev, void *data) @@ -80,14 +80,14 @@ static inline int mtk_iommu_bind(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); - return component_bind_all(dev, &data->smi_imu); + return component_bind_all(dev, &data->larb_imu); } static inline void mtk_iommu_unbind(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); - component_unbind_all(dev, &data->smi_imu); + component_unbind_all(dev, &data->larb_imu); } #endif diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 3922358d13bf..860926c65903 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -206,7 +206,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data, for (i = 0; i < fwspec->num_ids; ++i) { larbid = mt2701_m4u_to_larb(fwspec->ids[i]); portid = mt2701_m4u_to_port(fwspec->ids[i]); - larb_mmu = &data->smi_imu.larb_imu[larbid]; + larb_mmu = &data->larb_imu[larbid]; dev_dbg(dev, "%s iommu port: %d\n", enable ? "enable" : "disable", portid); @@ -610,7 +610,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) } } - data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev; + data->larb_imu[larb_nr].dev = &plarbdev->dev; component_match_add_release(dev, &match, release_of, compare_of, larb_spec.np); larb_nr++; diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index d6dc62fd89fe..439d7d886873 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -143,13 +143,13 @@ static int mtk_smi_larb_bind(struct device *dev, struct device *master, void *data) { struct mtk_smi_larb *larb = dev_get_drvdata(dev); - struct mtk_smi_iommu *smi_iommu = data; + struct mtk_smi_larb_iommu *larb_mmu = data; unsigned int i; for (i = 0; i < MTK_LARB_NR_MAX; i++) { - if (dev == smi_iommu->larb_imu[i].dev) { + if (dev == larb_mmu[i].dev) { larb->larbid = i; - larb->mmu = &smi_iommu->larb_imu[i].mmu; + larb->mmu = &larb_mmu[i].mmu; return 0; } } diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h index 6f0b00cc73ea..5a34b87d89e3 100644 --- a/include/soc/mediatek/smi.h +++ b/include/soc/mediatek/smi.h @@ -20,10 +20,6 @@ struct mtk_smi_larb_iommu { unsigned int mmu; }; -struct mtk_smi_iommu { - struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; -}; - /* * mtk_smi_larb_get: Enable the power domain and clocks for this local arbiter. * It also initialize some basic setting(like iommu). 
-- cgit v1.2.3 From 6b0c54e7f2715997c366e8374209bc74259b0a59 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Sat, 24 Aug 2019 09:47:12 +0800 Subject: iommu/dma: Fix for dereferencing before null checking The cookie is dereferenced before null checking in the function iommu_dma_init_domain. This patch moves the dereferencing after the null checking. Fixes: fdbe574eb693 ("iommu/dma: Allow MSI-only cookies") Signed-off-by: Yunsheng Lin Signed-off-by: Joerg Roedel --- drivers/iommu/dma-iommu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 80beb1f5994a..8ce9db9c2cf6 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -303,13 +303,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size, struct device *dev) { struct iommu_dma_cookie *cookie = domain->iova_cookie; - struct iova_domain *iovad = &cookie->iovad; unsigned long order, base_pfn; + struct iova_domain *iovad; int attr; if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; + iovad = &cookie->iovad; + /* Use the smallest supported page size for IOVA granularity */ order = __ffs(domain->pgsize_bitmap); base_pfn = max_t(unsigned long, 1, base >> order); -- cgit v1.2.3 From d127bc9be856098cc2410c1266ed64e258bc5377 Mon Sep 17 00:00:00 2001 From: Tom Murphy Date: Mon, 26 Aug 2019 05:48:21 +0100 Subject: iommu: Remove wrong default domain comments These comments are wrong. request_default_domain_for_dev doesn't just handle direct mapped domains. Signed-off-by: Tom Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index b6980938882e..66cfacaa483d 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2200,7 +2200,6 @@ request_default_domain_for_dev(struct device *dev, unsigned long type) mutex_lock(&group->mutex); - /* Check if the default domain is already direct mapped */ ret = 0; if (group->default_domain && group->default_domain->type == type) goto out; @@ -2210,7 +2209,6 @@ request_default_domain_for_dev(struct device *dev, unsigned long type) if (iommu_group_device_count(group) != 1) goto out; - /* Allocate a direct mapped domain */ ret = -ENOMEM; domain = __iommu_domain_alloc(dev->bus, type); if (!domain) @@ -2225,7 +2223,7 @@ request_default_domain_for_dev(struct device *dev, unsigned long type) iommu_group_create_direct_mappings(group, dev); - /* Make the direct mapped domain the default for this group */ + /* Make the domain the default for this group */ if (group->default_domain) iommu_domain_free(group->default_domain); group->default_domain = domain; -- cgit v1.2.3 From 8758553791dfb31ec2640f6b69df748720c0b1a4 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 29 Aug 2019 23:03:27 -0500 Subject: iommu/qcom: Use struct_size() helper One of the more common cases of allocation size calculations is finding the size of a structure that has a zero-sized array at the end, along with memory for some number of elements for that array. For example: struct qcom_iommu_dev { ... struct qcom_iommu_ctx *ctxs[0]; /* indexed by asid-1 */ }; Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. 
So, replace the following form: sizeof(*qcom_iommu) + (max_asid * sizeof(qcom_iommu->ctxs[0])) with: struct_size(qcom_iommu, ctxs, max_asid) Also, notice that, in this case, variable sz is not necessary, hence it is removed. This code was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Signed-off-by: Joerg Roedel --- drivers/iommu/qcom_iommu.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 34d0b9783b3e..41cf9bd5a600 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -775,7 +775,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) struct qcom_iommu_dev *qcom_iommu; struct device *dev = &pdev->dev; struct resource *res; - int ret, sz, max_asid = 0; + int ret, max_asid = 0; /* find the max asid (which is 1:1 to ctx bank idx), so we know how * many child ctx devices we have: @@ -783,9 +783,8 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) for_each_child_of_node(dev->of_node, child) max_asid = max(max_asid, get_asid(child)); - sz = sizeof(*qcom_iommu) + (max_asid * sizeof(qcom_iommu->ctxs[0])); - - qcom_iommu = devm_kzalloc(dev, sz, GFP_KERNEL); + qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid), + GFP_KERNEL); if (!qcom_iommu) return -ENOMEM; qcom_iommu->num_ctxs = max_asid; -- cgit v1.2.3 From 097a7df2e3af1b716cd39ce90001a2c372488e58 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 3 Sep 2019 14:50:56 +0800 Subject: iommu/arm-smmu-v3: Fix build error without CONFIG_PCI_ATS If CONFIG_PCI_ATS is not set, building fails: drivers/iommu/arm-smmu-v3.c: In function arm_smmu_ats_supported: drivers/iommu/arm-smmu-v3.c:2325:35: error: struct pci_dev has no member named ats_cap; did you mean msi_cap? return !pdev->untrusted && pdev->ats_cap; ^~~~~~~ ats_cap should only used when CONFIG_PCI_ATS is defined, so use #ifdef block to guard this. Fixes: bfff88ec1afe ("iommu/arm-smmu-v3: Rework enabling/disabling of ATS for PCI masters") Signed-off-by: YueHaibing Signed-off-by: Joerg Roedel --- drivers/iommu/arm-smmu-v3.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 29b773e186c8..4aa414843557 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2311,6 +2311,7 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master) } } +#ifdef CONFIG_PCI_ATS static bool arm_smmu_ats_supported(struct arm_smmu_master *master) { struct pci_dev *pdev; @@ -2324,6 +2325,12 @@ static bool arm_smmu_ats_supported(struct arm_smmu_master *master) pdev = to_pci_dev(master->dev); return !pdev->untrusted && pdev->ats_cap; } +#else +static bool arm_smmu_ats_supported(struct arm_smmu_master *master) +{ + return false; +} +#endif static void arm_smmu_enable_ats(struct arm_smmu_master *master) { -- cgit v1.2.3 From 2896ba40d0becdb72b45f096cad70633abc014f6 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 3 Sep 2019 15:15:44 +0200 Subject: iommu: Don't use sme_active() in generic code Switch to the generic function mem_encrypt_active() because sme_active() is x86 specific and can't be called from generic code on other platforms than x86. 
Fixes: 2cc13bb4f59f ("iommu: Disable passthrough mode when SME is active") Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 66cfacaa483d..d658c7c6a2ab 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -120,8 +120,8 @@ static int __init iommu_subsys_init(void) else iommu_set_default_translated(false); - if (iommu_default_passthrough() && sme_active()) { - pr_info("SME detected - Disabling default IOMMU Passthrough\n"); + if (iommu_default_passthrough() && mem_encrypt_active()) { + pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); iommu_set_default_translated(false); } } -- cgit v1.2.3 From 5ca54fdc9b1e6a8b3c03eaa8d1b6b4f406e90ad8 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 4 Sep 2019 14:08:01 +0200 Subject: iommu/ipmmu-vmsa: Move IMTTBCR_SL0_TWOBIT_* to restore sort order Move the recently added IMTTBCR_SL0_TWOBIT_* definitions up, to make sure all IMTTBCR register bit definitions are sorted by decreasing bit index. Add comments to make it clear that they exist on R-Car Gen3 only. Fixes: c295f504fb5a38ab ("iommu/ipmmu-vmsa: Allow two bit SL0") Signed-off-by: Geert Uytterhoeven Reviewed-by: Simon Horman Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ad0098c0c87c..7a7318955859 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -145,15 +145,14 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) #define IMTTBCR_IRGN0_WT (2 << 8) #define IMTTBCR_IRGN0_WB (3 << 8) #define IMTTBCR_IRGN0_MASK (3 << 8) +#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) /* R-Car Gen3 only */ +#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) /* R-Car Gen3 only */ +#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */ #define IMTTBCR_SL0_LVL_2 (0 << 4) #define IMTTBCR_SL0_LVL_1 (1 << 4) #define IMTTBCR_TSZ0_MASK (7 << 0) #define IMTTBCR_TSZ0_SHIFT O -#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) -#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) -#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) - #define IMBUSCR 0x000c #define IMBUSCR_DVM (1 << 2) #define IMBUSCR_BUSSEL_SYS (0 << 0) -- cgit v1.2.3 From 3623002f0f761efd7eb38cfa5af1d2319365a8fd Mon Sep 17 00:00:00 2001 From: Hai Nguyen Pham Date: Wed, 4 Sep 2019 14:08:02 +0200 Subject: iommu/ipmmu-vmsa: Disable cache snoop transactions on R-Car Gen3 According to the Hardware Manual Errata for Rev. 1.50 of April 10, 2019, cache snoop transactions for page table walk requests are not supported on R-Car Gen3. Hence, this patch removes setting these fields in the IMTTBCR register, since it will have no effect, and adds comments to the register bit definitions, to make it clear they apply to R-Car Gen2 only. 
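
For illustration, a standalone sketch of what the TTBCR setup below boils down to: R-Car Gen2 keeps the shareability/cacheability attributes for page-table walks, while R-Car Gen3 programs only EAE and the SL0 field. The bit values are copied from the header definitions in this file; the printed numbers are simply the OR of those constants and the main() harness exists only for this example.

    #include <stdio.h>

    #define IMTTBCR_EAE                     (1U << 31)
    #define IMTTBCR_SH0_INNER_SHAREABLE     (3U << 12)  /* R-Car Gen2 only */
    #define IMTTBCR_ORGN0_WB_WA             (1U << 10)  /* R-Car Gen2 only */
    #define IMTTBCR_IRGN0_WB_WA             (1U << 8)   /* R-Car Gen2 only */
    #define IMTTBCR_SL0_TWOBIT_LVL_1        (2U << 6)   /* R-Car Gen3 only */
    #define IMTTBCR_SL0_LVL_1               (1U << 4)

    int main(void)
    {
            unsigned int gen2 = IMTTBCR_EAE | IMTTBCR_SL0_LVL_1 |
                                IMTTBCR_SH0_INNER_SHAREABLE |
                                IMTTBCR_ORGN0_WB_WA | IMTTBCR_IRGN0_WB_WA;
            unsigned int gen3 = IMTTBCR_EAE | IMTTBCR_SL0_TWOBIT_LVL_1;

            /* Gen2 keeps cache-snoop attributes for table walks;
             * Gen3 drops them since cache snoop is not supported. */
            printf("Gen2 IMTTBCR = 0x%08x\n", gen2);   /* 0x80003510 */
            printf("Gen3 IMTTBCR = 0x%08x\n", gen3);   /* 0x80000080 */
            return 0;
    }
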
Signed-off-by: Hai Nguyen Pham [geert: Reword, add comments] Signed-off-by: Geert Uytterhoeven Reviewed-by: Simon Horman Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 71 +++++++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 7a7318955859..49db9d682548 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -49,6 +49,7 @@ struct ipmmu_features { bool setup_imbuscr; bool twobit_imttbcr_sl0; bool reserved_context; + bool cache_snoop; }; struct ipmmu_vmsa_device { @@ -115,36 +116,36 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) #define IMTTBCR 0x0008 #define IMTTBCR_EAE (1 << 31) #define IMTTBCR_PMB (1 << 30) -#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) -#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) -#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) -#define IMTTBCR_SH1_MASK (3 << 28) -#define IMTTBCR_ORGN1_NC (0 << 26) -#define IMTTBCR_ORGN1_WB_WA (1 << 26) -#define IMTTBCR_ORGN1_WT (2 << 26) -#define IMTTBCR_ORGN1_WB (3 << 26) -#define IMTTBCR_ORGN1_MASK (3 << 26) -#define IMTTBCR_IRGN1_NC (0 << 24) -#define IMTTBCR_IRGN1_WB_WA (1 << 24) -#define IMTTBCR_IRGN1_WT (2 << 24) -#define IMTTBCR_IRGN1_WB (3 << 24) -#define IMTTBCR_IRGN1_MASK (3 << 24) +#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) /* R-Car Gen2 only */ +#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) /* R-Car Gen2 only */ +#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) /* R-Car Gen2 only */ +#define IMTTBCR_SH1_MASK (3 << 28) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN1_NC (0 << 26) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN1_WB_WA (1 << 26) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN1_WT (2 << 26) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN1_WB (3 << 26) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN1_MASK (3 << 26) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN1_NC (0 << 24) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN1_WB_WA (1 << 24) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN1_WT (2 << 24) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN1_WB (3 << 24) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN1_MASK (3 << 24) /* R-Car Gen2 only */ #define IMTTBCR_TSZ1_MASK (7 << 16) #define IMTTBCR_TSZ1_SHIFT 16 -#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) -#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) -#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) -#define IMTTBCR_SH0_MASK (3 << 12) -#define IMTTBCR_ORGN0_NC (0 << 10) -#define IMTTBCR_ORGN0_WB_WA (1 << 10) -#define IMTTBCR_ORGN0_WT (2 << 10) -#define IMTTBCR_ORGN0_WB (3 << 10) -#define IMTTBCR_ORGN0_MASK (3 << 10) -#define IMTTBCR_IRGN0_NC (0 << 8) -#define IMTTBCR_IRGN0_WB_WA (1 << 8) -#define IMTTBCR_IRGN0_WT (2 << 8) -#define IMTTBCR_IRGN0_WB (3 << 8) -#define IMTTBCR_IRGN0_MASK (3 << 8) +#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) /* R-Car Gen2 only */ +#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) /* R-Car Gen2 only */ +#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */ +#define IMTTBCR_SH0_MASK (3 << 12) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN0_NC (0 << 10) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN0_WT (2 << 10) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN0_WB (3 << 10) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN0_MASK (3 << 10) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN0_NC (0 << 8) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN0_WT (2 << 8) /* R-Car Gen2 only */ +#define 
IMTTBCR_IRGN0_WB (3 << 8) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN0_MASK (3 << 8) /* R-Car Gen2 only */ #define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) /* R-Car Gen3 only */ #define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) /* R-Car Gen3 only */ #define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */ @@ -421,17 +422,19 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain) /* * TTBCR - * We use long descriptors with inner-shareable WBWA tables and allocate - * the whole 32-bit VA space to TTBR0. + * We use long descriptors and allocate the whole 32-bit VA space to + * TTBR0. */ if (domain->mmu->features->twobit_imttbcr_sl0) tmp = IMTTBCR_SL0_TWOBIT_LVL_1; else tmp = IMTTBCR_SL0_LVL_1; - ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | - IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | - IMTTBCR_IRGN0_WB_WA | tmp); + if (domain->mmu->features->cache_snoop) + tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | + IMTTBCR_IRGN0_WB_WA; + + ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp); /* MAIR0 */ ipmmu_ctx_write_root(domain, IMMAIR0, @@ -987,6 +990,7 @@ static const struct ipmmu_features ipmmu_features_default = { .setup_imbuscr = true, .twobit_imttbcr_sl0 = false, .reserved_context = false, + .cache_snoop = true, }; static const struct ipmmu_features ipmmu_features_rcar_gen3 = { @@ -997,6 +1001,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = { .setup_imbuscr = false, .twobit_imttbcr_sl0 = true, .reserved_context = true, + .cache_snoop = false, }; static const struct of_device_id ipmmu_of_ids[] = { -- cgit v1.2.3 From 96088a203a0b6c7564bc14167eec89c5427fd18e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 6 Sep 2019 17:15:38 +0200 Subject: iommu/omap: Mark pm functions __maybe_unused The runtime_pm functions are unused when CONFIG_PM is disabled: drivers/iommu/omap-iommu.c:1022:12: error: unused function 'omap_iommu_runtime_suspend' [-Werror,-Wunused-function] static int omap_iommu_runtime_suspend(struct device *dev) drivers/iommu/omap-iommu.c:1064:12: error: unused function 'omap_iommu_runtime_resume' [-Werror,-Wunused-function] static int omap_iommu_runtime_resume(struct device *dev) Mark them as __maybe_unused to let gcc silently drop them instead of warning. Fixes: db8918f61d51 ("iommu/omap: streamline enable/disable through runtime pm callbacks") Signed-off-by: Arnd Bergmann Acked-by: Suman Anna Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 8645e9b175a3..4879c8c4d198 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1019,7 +1019,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_domain_activate); * reset line. This function also saves the context of any * locked TLBs if suspending. **/ -static int omap_iommu_runtime_suspend(struct device *dev) +static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct iommu_platform_data *pdata = dev_get_platdata(dev); @@ -1061,7 +1061,7 @@ static int omap_iommu_runtime_suspend(struct device *dev) * reset line. The function also restores any locked TLBs if * resuming after a suspend. 
**/ -static int omap_iommu_runtime_resume(struct device *dev) +static __maybe_unused int omap_iommu_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct iommu_platform_data *pdata = dev_get_platdata(dev); -- cgit v1.2.3 From 3fc1ca00653db6371585e3c21c4b873b2f20e60a Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 6 Sep 2019 14:14:48 +0800 Subject: swiotlb: Split size parameter to map/unmap APIs This splits the size parameter to swiotlb_tbl_map_single() and swiotlb_tbl_unmap_single() into an alloc_size and a mapping_size parameter, where the latter one is rounded up to the iommu page size. Suggested-by: Christoph Hellwig Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Signed-off-by: Joerg Roedel --- drivers/xen/swiotlb-xen.c | 8 ++++---- include/linux/swiotlb.h | 8 ++++++-- kernel/dma/direct.c | 2 +- kernel/dma/swiotlb.c | 34 +++++++++++++++++++++------------- 4 files changed, 32 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index ae1df496bf38..adcabd9473eb 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -386,8 +386,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, */ trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); - map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir, - attrs); + map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, + size, size, dir, attrs); if (map == (phys_addr_t)DMA_MAPPING_ERROR) return DMA_MAPPING_ERROR; @@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, * Ensure that the address returned is DMA'ble */ if (unlikely(!dma_capable(dev, dev_addr, size))) { - swiotlb_tbl_unmap_single(dev, map, size, dir, + swiotlb_tbl_unmap_single(dev, map, size, size, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); return DMA_MAPPING_ERROR; } @@ -433,7 +433,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, /* NOTE: We use dev_addr here, not paddr! 
*/ if (is_xen_swiotlb_buffer(dev_addr)) - swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs); + swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs); } static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 361f62bb4a8e..cde3dc18e21a 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -46,13 +46,17 @@ enum dma_sync_target { extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, - phys_addr_t phys, size_t size, + phys_addr_t phys, + size_t mapping_size, + size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, + size_t mapping_size, + size_t alloc_size, + enum dma_data_direction dir, unsigned long attrs); extern void swiotlb_tbl_sync_single(struct device *hwdev, diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 795c9b095d75..a7f2a0163426 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -297,7 +297,7 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, dma_direct_sync_single_for_cpu(dev, addr, size, dir); if (unlikely(is_swiotlb_buffer(phys))) - swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); + swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs); } EXPORT_SYMBOL(dma_direct_unmap_page); diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 9de232229063..796a44f8ef5a 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -444,7 +444,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr, phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, - phys_addr_t orig_addr, size_t size, + phys_addr_t orig_addr, + size_t mapping_size, + size_t alloc_size, enum dma_data_direction dir, unsigned long attrs) { @@ -464,6 +466,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, pr_warn_once("%s is active and system is using DMA bounce buffers\n", sme_active() ? "SME" : "SEV"); + if (mapping_size > alloc_size) { + dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)", + mapping_size, alloc_size); + return (phys_addr_t)DMA_MAPPING_ERROR; + } + mask = dma_get_seg_boundary(hwdev); tbl_dma_addr &= mask; @@ -471,8 +479,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; /* - * Carefully handle integer overflow which can occur when mask == ~0UL. - */ + * Carefully handle integer overflow which can occur when mask == ~0UL. + */ max_slots = mask + 1 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); @@ -481,8 +489,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, * For mappings greater than or equal to a page, we limit the stride * (and hence alignment) to a page size. 
*/ - nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; - if (size >= PAGE_SIZE) + nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; + if (alloc_size >= PAGE_SIZE) stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); else stride = 1; @@ -547,7 +555,7 @@ not_found: spin_unlock_irqrestore(&io_tlb_lock, flags); if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", - size, io_tlb_nslabs, tmp_io_tlb_used); + alloc_size, io_tlb_nslabs, tmp_io_tlb_used); return (phys_addr_t)DMA_MAPPING_ERROR; found: io_tlb_used += nslots; @@ -562,7 +570,7 @@ found: io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT); if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE); + swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE); return tlb_addr; } @@ -571,11 +579,11 @@ found: * tlb_addr is the physical address of the bounce buffer to unmap. */ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) + size_t mapping_size, size_t alloc_size, + enum dma_data_direction dir, unsigned long attrs) { unsigned long flags; - int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; + int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; phys_addr_t orig_addr = io_tlb_orig_addr[index]; @@ -585,7 +593,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, if (orig_addr != INVALID_PHYS_ADDR && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) - swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE); + swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE); /* * Return the buffer to the free list by setting the corresponding @@ -665,14 +673,14 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr, /* Oh well, have to allocate and map a bounce buffer. */ *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start), - *phys, size, dir, attrs); + *phys, size, size, dir, attrs); if (*phys == (phys_addr_t)DMA_MAPPING_ERROR) return false; /* Ensure that the address returned is DMA'ble */ *dma_addr = __phys_to_dma(dev, *phys); if (unlikely(!dma_capable(dev, *dma_addr, size))) { - swiotlb_tbl_unmap_single(dev, *phys, size, dir, + swiotlb_tbl_unmap_single(dev, *phys, size, size, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); return false; } -- cgit v1.2.3 From e5e04d051979dbd636a99099b7a595093c50a4bc Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 6 Sep 2019 14:14:49 +0800 Subject: iommu/vt-d: Check whether device requires bounce buffer This adds a helper to check whether a device needs to use bounce buffer. It also provides a boot time option to disable the bounce buffer. Users can use this to prevent the iommu driver from using the bounce buffer for performance gain. 
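
For illustration, a standalone sketch of the decision the new helper encodes: bounce buffering applies only to untrusted PCI devices, and the new "intel_iommu=nobounce" option overrides it globally. The struct and function below use hypothetical names and plain booleans purely to model the predicate; the driver itself operates on struct device / struct pci_dev, as the diff shows.

    #include <stdbool.h>
    #include <stdio.h>

    static bool intel_no_bounce;   /* set when intel_iommu=nobounce is given */

    struct fake_dev {
            bool is_pci;
            bool untrusted;        /* e.g. behind a Thunderbolt port */
    };

    static bool device_needs_bounce(const struct fake_dev *d)
    {
            return !intel_no_bounce && d->is_pci && d->untrusted;
    }

    int main(void)
    {
            struct fake_dev tbt_nic = { .is_pci = true, .untrusted = true };

            printf("bounce: %d\n", device_needs_bounce(&tbt_nic));   /* 1 */
            intel_no_bounce = true;
            printf("bounce: %d\n", device_needs_bounce(&tbt_nic));   /* 0 */
            return 0;
    }
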
Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Signed-off-by: Lu Baolu Tested-by: Xu Pengfei Tested-by: Mika Westerberg Signed-off-by: Joerg Roedel --- Documentation/admin-guide/kernel-parameters.txt | 5 +++++ drivers/iommu/intel-iommu.c | 7 +++++++ 2 files changed, 12 insertions(+) (limited to 'drivers') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 47d981a86e2f..aaca73080097 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1732,6 +1732,11 @@ Note that using this option lowers the security provided by tboot because it makes the system vulnerable to DMA attacks. + nobounce [Default off] + Disable bounce buffer for unstrusted devices such as + the Thunderbolt devices. This will treat the untrusted + devices as the trusted ones, hence might expose security + risks of DMA attacks. intel_idle.max_cstate= [KNL,HW,ACPI,X86] 0 disables intel_idle and fall back on acpi_idle. diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 12d094d08c0a..ce6baabc9dcc 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -360,6 +360,7 @@ static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; static int iommu_identity_mapping; +static int intel_no_bounce; #define IDENTMAP_ALL 1 #define IDENTMAP_GFX 2 @@ -373,6 +374,9 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); static DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); +#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) && \ + to_pci_dev(d)->untrusted) + /* * Iterate over elements in device_domain_list and call the specified * callback @fn against each element. @@ -455,6 +459,9 @@ static int __init intel_iommu_setup(char *str) printk(KERN_INFO "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); intel_iommu_tboot_noforce = 1; + } else if (!strncmp(str, "nobounce", 8)) { + pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n"); + intel_no_bounce = 1; } str += strcspn(str, ","); -- cgit v1.2.3 From c5a5dc4cbbf4540c1891cdb2b70cf469405ea61f Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 6 Sep 2019 14:14:50 +0800 Subject: iommu/vt-d: Don't switch off swiotlb if bounce page is used The bounce page implementation depends on swiotlb. Hence, don't switch off swiotlb if the system has untrusted devices or could potentially be hot-added with any untrusted devices. Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 1 + drivers/iommu/intel-iommu.c | 32 +++++++++++++++++--------------- 2 files changed, 18 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e15cdcd8cb3c..a4ddeade8ac4 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -182,6 +182,7 @@ config INTEL_IOMMU select IOMMU_IOVA select NEED_DMA_MAP_STATE select DMAR_TABLE + select SWIOTLB help DMA remapping (DMAR) devices support enables independent address translations for Direct Memory Access (DMA) from devices. 
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index ce6baabc9dcc..0af7b4669264 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4547,22 +4547,20 @@ const struct attribute_group *intel_iommu_groups[] = { NULL, }; -static int __init platform_optin_force_iommu(void) +static inline bool has_untrusted_dev(void) { struct pci_dev *pdev = NULL; - bool has_untrusted_dev = false; - if (!dmar_platform_optin() || no_platform_optin) - return 0; + for_each_pci_dev(pdev) + if (pdev->untrusted) + return true; - for_each_pci_dev(pdev) { - if (pdev->untrusted) { - has_untrusted_dev = true; - break; - } - } + return false; +} - if (!has_untrusted_dev) +static int __init platform_optin_force_iommu(void) +{ + if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev()) return 0; if (no_iommu || dmar_disabled) @@ -4576,9 +4574,6 @@ static int __init platform_optin_force_iommu(void) iommu_identity_mapping |= IDENTMAP_ALL; dmar_disabled = 0; -#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB) - swiotlb = 0; -#endif no_iommu = 0; return 1; @@ -4718,7 +4713,14 @@ int __init intel_iommu_init(void) up_write(&dmar_global_lock); #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB) - swiotlb = 0; + /* + * If the system has no untrusted device or the user has decided + * to disable the bounce page mechanisms, we don't need swiotlb. + * Mark this and the pre-allocated bounce pages will be released + * later. + */ + if (!has_untrusted_dev() || intel_no_bounce) + swiotlb = 0; #endif dma_ops = &intel_dma_ops; -- cgit v1.2.3 From 3b53034c268d550d9e8522e613a14ab53b8840d8 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 6 Sep 2019 14:14:51 +0800 Subject: iommu/vt-d: Add trace events for device dma map/unmap This adds trace support for the Intel IOMMU driver. It also declares some events which could be used to trace the events when an IOVA is being mapped or unmapped in a domain. 
Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Signed-off-by: Mika Westerberg Signed-off-by: Lu Baolu Reviewed-by: Steven Rostedt (VMware) Signed-off-by: Joerg Roedel --- drivers/iommu/Makefile | 1 + drivers/iommu/intel-iommu.c | 13 +++-- drivers/iommu/intel-trace.c | 14 +++++ include/trace/events/intel_iommu.h | 106 +++++++++++++++++++++++++++++++++++++ 4 files changed, 131 insertions(+), 3 deletions(-) create mode 100644 drivers/iommu/intel-trace.c create mode 100644 include/trace/events/intel_iommu.h (limited to 'drivers') diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index f13f36ae1af6..bfe27b2755bd 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_ARM_SMMU) += arm-smmu.o obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o +obj-$(CONFIG_INTEL_IOMMU) += intel-trace.o obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 0af7b4669264..12831beead02 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3512,6 +3512,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT; start_paddr += paddr & ~PAGE_MASK; + + trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT); + return start_paddr; error: @@ -3567,10 +3570,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) if (dev_is_pci(dev)) pdev = to_pci_dev(dev); - dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn); - freelist = domain_unmap(domain, start_pfn, last_pfn); - if (intel_iommu_strict || (pdev && pdev->untrusted) || !has_iova_flush_queue(&domain->iovad)) { iommu_flush_iotlb_psi(iommu, domain, start_pfn, @@ -3586,6 +3586,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) * cpu used up by the iotlb flush operation... 
*/ } + + trace_unmap_single(dev, dev_addr, size); } static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, @@ -3676,6 +3678,8 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, } intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT); + + trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT); } static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, @@ -3732,6 +3736,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele return 0; } + trace_map_sg(dev, iova_pfn << PAGE_SHIFT, + sg_phys(sglist), size << VTD_PAGE_SHIFT); + return nelems; } diff --git a/drivers/iommu/intel-trace.c b/drivers/iommu/intel-trace.c new file mode 100644 index 000000000000..bfb6a6e37a88 --- /dev/null +++ b/drivers/iommu/intel-trace.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel IOMMU trace support + * + * Copyright (C) 2019 Intel Corporation + * + * Author: Lu Baolu + */ + +#include +#include + +#define CREATE_TRACE_POINTS +#include diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h new file mode 100644 index 000000000000..54e61d456cdf --- /dev/null +++ b/include/trace/events/intel_iommu.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Intel IOMMU trace support + * + * Copyright (C) 2019 Intel Corporation + * + * Author: Lu Baolu + */ +#ifdef CONFIG_INTEL_IOMMU +#undef TRACE_SYSTEM +#define TRACE_SYSTEM intel_iommu + +#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_INTEL_IOMMU_H + +#include +#include + +DECLARE_EVENT_CLASS(dma_map, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, + size_t size), + + TP_ARGS(dev, dev_addr, phys_addr, size), + + TP_STRUCT__entry( + __string(dev_name, dev_name(dev)) + __field(dma_addr_t, dev_addr) + __field(phys_addr_t, phys_addr) + __field(size_t, size) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name(dev)); + __entry->dev_addr = dev_addr; + __entry->phys_addr = phys_addr; + __entry->size = size; + ), + + TP_printk("dev=%s dev_addr=0x%llx phys_addr=0x%llx size=%zu", + __get_str(dev_name), + (unsigned long long)__entry->dev_addr, + (unsigned long long)__entry->phys_addr, + __entry->size) +); + +DEFINE_EVENT(dma_map, map_single, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, + size_t size), + TP_ARGS(dev, dev_addr, phys_addr, size) +); + +DEFINE_EVENT(dma_map, map_sg, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, + size_t size), + TP_ARGS(dev, dev_addr, phys_addr, size) +); + +DEFINE_EVENT(dma_map, bounce_map_single, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, + size_t size), + TP_ARGS(dev, dev_addr, phys_addr, size) +); + +DECLARE_EVENT_CLASS(dma_unmap, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), + + TP_ARGS(dev, dev_addr, size), + + TP_STRUCT__entry( + __string(dev_name, dev_name(dev)) + __field(dma_addr_t, dev_addr) + __field(size_t, size) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name(dev)); + __entry->dev_addr = dev_addr; + __entry->size = size; + ), + + TP_printk("dev=%s dev_addr=0x%llx size=%zu", + __get_str(dev_name), + (unsigned long long)__entry->dev_addr, + __entry->size) +); + +DEFINE_EVENT(dma_unmap, unmap_single, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), + TP_ARGS(dev, dev_addr, size) +); + +DEFINE_EVENT(dma_unmap, unmap_sg, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, 
size_t size), + TP_ARGS(dev, dev_addr, size) +); + +DEFINE_EVENT(dma_unmap, bounce_unmap_single, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), + TP_ARGS(dev, dev_addr, size) +); + +#endif /* _TRACE_INTEL_IOMMU_H */ + +/* This part must be outside protection */ +#include +#endif /* CONFIG_INTEL_IOMMU */ -- cgit v1.2.3 From cfb94a372f2d4ee226247447c863f8709863d170 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 6 Sep 2019 14:14:52 +0800 Subject: iommu/vt-d: Use bounce buffer for untrusted devices The Intel VT-d hardware uses paging for DMA remapping. The minimum mapped window is a page size. The device drivers may map buffers not filling the whole IOMMU window. This allows the device to access to possibly unrelated memory and a malicious device could exploit this to perform DMA attacks. To address this, the Intel IOMMU driver will use bounce pages for those buffers which don't fill whole IOMMU pages. Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Signed-off-by: Lu Baolu Tested-by: Xu Pengfei Tested-by: Mika Westerberg Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 258 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 258 insertions(+) (limited to 'drivers') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 12831beead02..b034fe727ead 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -41,9 +41,11 @@ #include #include #include +#include #include #include #include +#include #include "irq_remapping.h" #include "intel-pasid.h" @@ -344,6 +346,8 @@ static int domain_detach_iommu(struct dmar_domain *domain, static bool device_is_rmrr_locked(struct device *dev); static int intel_iommu_attach_device(struct iommu_domain *domain, struct device *dev); +static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova); #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON int dmar_disabled = 0; @@ -3754,6 +3758,252 @@ static const struct dma_map_ops intel_dma_ops = { .dma_supported = dma_direct_supported, }; +static void +bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, enum dma_sync_target target) +{ + struct dmar_domain *domain; + phys_addr_t tlb_addr; + + domain = find_domain(dev); + if (WARN_ON(!domain)) + return; + + tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr); + if (is_swiotlb_buffer(tlb_addr)) + swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target); +} + +static dma_addr_t +bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size, + enum dma_data_direction dir, unsigned long attrs, + u64 dma_mask) +{ + size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE); + struct dmar_domain *domain; + struct intel_iommu *iommu; + unsigned long iova_pfn; + unsigned long nrpages; + phys_addr_t tlb_addr; + int prot = 0; + int ret; + + domain = find_domain(dev); + if (WARN_ON(dir == DMA_NONE || !domain)) + return DMA_MAPPING_ERROR; + + iommu = domain_get_iommu(domain); + if (WARN_ON(!iommu)) + return DMA_MAPPING_ERROR; + + nrpages = aligned_nrpages(0, size); + iova_pfn = intel_alloc_iova(dev, domain, + dma_to_mm_pfn(nrpages), dma_mask); + if (!iova_pfn) + return DMA_MAPPING_ERROR; + + /* + * Check if DMAR supports zero-length reads on write only + * mappings.. 
+ */ + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || + !cap_zlr(iommu->cap)) + prot |= DMA_PTE_READ; + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) + prot |= DMA_PTE_WRITE; + + /* + * If both the physical buffer start address and size are + * page aligned, we don't need to use a bounce page. + */ + if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) { + tlb_addr = swiotlb_tbl_map_single(dev, + __phys_to_dma(dev, io_tlb_start), + paddr, size, aligned_size, dir, attrs); + if (tlb_addr == DMA_MAPPING_ERROR) { + goto swiotlb_error; + } else { + /* Cleanup the padding area. */ + void *padding_start = phys_to_virt(tlb_addr); + size_t padding_size = aligned_size; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + (dir == DMA_TO_DEVICE || + dir == DMA_BIDIRECTIONAL)) { + padding_start += size; + padding_size -= size; + } + + memset(padding_start, 0, padding_size); + } + } else { + tlb_addr = paddr; + } + + ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), + tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot); + if (ret) + goto mapping_error; + + trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size); + + return (phys_addr_t)iova_pfn << PAGE_SHIFT; + +mapping_error: + if (is_swiotlb_buffer(tlb_addr)) + swiotlb_tbl_unmap_single(dev, tlb_addr, size, + aligned_size, dir, attrs); +swiotlb_error: + free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); + dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n", + size, (unsigned long long)paddr, dir); + + return DMA_MAPPING_ERROR; +} + +static void +bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE); + struct dmar_domain *domain; + phys_addr_t tlb_addr; + + domain = find_domain(dev); + if (WARN_ON(!domain)) + return; + + tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr); + if (WARN_ON(!tlb_addr)) + return; + + intel_unmap(dev, dev_addr, size); + if (is_swiotlb_buffer(tlb_addr)) + swiotlb_tbl_unmap_single(dev, tlb_addr, size, + aligned_size, dir, attrs); + + trace_bounce_unmap_single(dev, dev_addr, size); +} + +static dma_addr_t +bounce_map_page(struct device *dev, struct page *page, unsigned long offset, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + return bounce_map_single(dev, page_to_phys(page) + offset, + size, dir, attrs, *dev->dma_mask); +} + +static dma_addr_t +bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + return bounce_map_single(dev, phys_addr, size, + dir, attrs, *dev->dma_mask); +} + +static void +bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + bounce_unmap_single(dev, dev_addr, size, dir, attrs); +} + +static void +bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + bounce_unmap_single(dev, dev_addr, size, dir, attrs); +} + +static void +bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, + enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nelems, i) + bounce_unmap_page(dev, sg->dma_address, + sg_dma_len(sg), dir, attrs); +} + +static int +bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, + enum dma_data_direction dir, unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + 
for_each_sg(sglist, sg, nelems, i) { + sg->dma_address = bounce_map_page(dev, sg_page(sg), + sg->offset, sg->length, + dir, attrs); + if (sg->dma_address == DMA_MAPPING_ERROR) + goto out_unmap; + sg_dma_len(sg) = sg->length; + } + + return nelems; + +out_unmap: + bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + return 0; +} + +static void +bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU); +} + +static void +bounce_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE); +} + +static void +bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nelems, i) + bounce_sync_single(dev, sg_dma_address(sg), + sg_dma_len(sg), dir, SYNC_FOR_CPU); +} + +static void +bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nelems, i) + bounce_sync_single(dev, sg_dma_address(sg), + sg_dma_len(sg), dir, SYNC_FOR_DEVICE); +} + +static const struct dma_map_ops bounce_dma_ops = { + .alloc = intel_alloc_coherent, + .free = intel_free_coherent, + .map_sg = bounce_map_sg, + .unmap_sg = bounce_unmap_sg, + .map_page = bounce_map_page, + .unmap_page = bounce_unmap_page, + .sync_single_for_cpu = bounce_sync_single_for_cpu, + .sync_single_for_device = bounce_sync_single_for_device, + .sync_sg_for_cpu = bounce_sync_sg_for_cpu, + .sync_sg_for_device = bounce_sync_sg_for_device, + .map_resource = bounce_map_resource, + .unmap_resource = bounce_unmap_resource, + .dma_supported = dma_direct_supported, +}; + static inline int iommu_domain_cache_init(void) { int ret = 0; @@ -5325,6 +5575,11 @@ static int intel_iommu_add_device(struct device *dev) } } + if (device_needs_bounce(dev)) { + dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n"); + set_dma_ops(dev, &bounce_dma_ops); + } + return 0; } @@ -5342,6 +5597,9 @@ static void intel_iommu_remove_device(struct device *dev) iommu_group_remove_device(dev); iommu_device_unlink(&iommu->iommu, dev); + + if (device_needs_bounce(dev)) + set_dma_ops(dev, NULL); } static void intel_iommu_get_resv_regions(struct device *device, -- cgit v1.2.3 From fd730007a06e9b11664e3816fcebd3faa91761ea Mon Sep 17 00:00:00 2001 From: Kyung Min Park Date: Fri, 6 Sep 2019 11:14:02 -0700 Subject: iommu/vt-d: Add Scalable Mode fault information Intel VT-d specification revision 3 added support for Scalable Mode Translation for DMA remapping. Add the Scalable Mode fault reasons to show detailed fault reasons when the translation fault happens. 
Link: https://software.intel.com/sites/default/files/managed/c5/15/vt-directed-io-spec.pdf Reviewed-by: Sohil Mehta Signed-off-by: Kyung Min Park Signed-off-by: Joerg Roedel --- drivers/iommu/dmar.c | 77 ++++++++++++++++++++++++++++++++++++++++++--- include/linux/intel-iommu.h | 2 ++ 2 files changed, 75 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 5d0754ed5fa0..eecd6a421667 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1519,6 +1519,64 @@ static const char *dma_remap_fault_reasons[] = "PCE for translation request specifies blocking", }; +static const char * const dma_remap_sm_fault_reasons[] = { + "SM: Invalid Root Table Address", + "SM: TTM 0 for request with PASID", + "SM: TTM 0 for page group request", + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */ + "SM: Error attempting to access Root Entry", + "SM: Present bit in Root Entry is clear", + "SM: Non-zero reserved field set in Root Entry", + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */ + "SM: Error attempting to access Context Entry", + "SM: Present bit in Context Entry is clear", + "SM: Non-zero reserved field set in the Context Entry", + "SM: Invalid Context Entry", + "SM: DTE field in Context Entry is clear", + "SM: PASID Enable field in Context Entry is clear", + "SM: PASID is larger than the max in Context Entry", + "SM: PRE field in Context-Entry is clear", + "SM: RID_PASID field error in Context-Entry", + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */ + "SM: Error attempting to access the PASID Directory Entry", + "SM: Present bit in Directory Entry is clear", + "SM: Non-zero reserved field set in PASID Directory Entry", + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */ + "SM: Error attempting to access PASID Table Entry", + "SM: Present bit in PASID Table Entry is clear", + "SM: Non-zero reserved field set in PASID Table Entry", + "SM: Invalid Scalable-Mode PASID Table Entry", + "SM: ERE field is clear in PASID Table Entry", + "SM: SRE field is clear in PASID Table Entry", + "Unknown", "Unknown",/* 0x5E-0x5F */ + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */ + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */ + "SM: Error attempting to access first-level paging entry", + "SM: Present bit in first-level paging entry is clear", + "SM: Non-zero reserved field set in first-level paging entry", + "SM: Error attempting to access FL-PML4 entry", + "SM: First-level entry address beyond MGAW in Nested translation", + "SM: Read permission error in FL-PML4 entry in Nested translation", + "SM: Read permission error in first-level paging entry in Nested translation", + "SM: Write permission error in first-level paging entry in Nested translation", + "SM: Error attempting to access second-level paging entry", + "SM: Read/Write permission error in second-level paging entry", + "SM: Non-zero reserved field set in second-level paging entry", + "SM: Invalid second-level page table pointer", + "SM: A/D bit update needed in second-level entry when set up in no snoop", + "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */ + "SM: Address in first-level translation is not canonical", + "SM: U/S set 0 for first-level translation with user privilege", + "SM: No execute permission for request with PASID and ER=1", + "SM: Address beyond the DMA 
hardware max", + "SM: Second-level entry address beyond the max", + "SM: No write permission for Write/AtomicOp request", + "SM: No read permission for Read/AtomicOp request", + "SM: Invalid address-interrupt address", + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */ + "SM: A/D bit update needed in first-level entry when set up in no snoop", +}; + static const char *irq_remap_fault_reasons[] = { "Detected reserved fields in the decoded interrupt-remapped request", @@ -1536,6 +1594,10 @@ static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) ARRAY_SIZE(irq_remap_fault_reasons))) { *fault_type = INTR_REMAP; return irq_remap_fault_reasons[fault_reason - 0x20]; + } else if (fault_reason >= 0x30 && (fault_reason - 0x30 < + ARRAY_SIZE(dma_remap_sm_fault_reasons))) { + *fault_type = DMA_REMAP; + return dma_remap_sm_fault_reasons[fault_reason - 0x30]; } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) { *fault_type = DMA_REMAP; return dma_remap_fault_reasons[fault_reason]; @@ -1611,7 +1673,8 @@ void dmar_msi_read(int irq, struct msi_msg *msg) } static int dmar_fault_do_one(struct intel_iommu *iommu, int type, - u8 fault_reason, u16 source_id, unsigned long long addr) + u8 fault_reason, int pasid, u16 source_id, + unsigned long long addr) { const char *reason; int fault_type; @@ -1624,10 +1687,11 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type, PCI_FUNC(source_id & 0xFF), addr >> 48, fault_reason, reason); else - pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n", + pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n", type ? "DMA Read" : "DMA Write", source_id >> 8, PCI_SLOT(source_id & 0xFF), - PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason); + PCI_FUNC(source_id & 0xFF), pasid, addr, + fault_reason, reason); return 0; } @@ -1659,8 +1723,9 @@ irqreturn_t dmar_fault(int irq, void *dev_id) u8 fault_reason; u16 source_id; u64 guest_addr; - int type; + int type, pasid; u32 data; + bool pasid_present; /* highest 32 bits */ data = readl(iommu->reg + reg + @@ -1672,10 +1737,12 @@ irqreturn_t dmar_fault(int irq, void *dev_id) fault_reason = dma_frcd_fault_reason(data); type = dma_frcd_type(data); + pasid = dma_frcd_pasid_value(data); data = readl(iommu->reg + reg + fault_index * PRIMARY_FAULT_REG_LEN + 8); source_id = dma_frcd_source_id(data); + pasid_present = dma_frcd_pasid_present(data); guest_addr = dmar_readq(iommu->reg + reg + fault_index * PRIMARY_FAULT_REG_LEN); guest_addr = dma_frcd_page_addr(guest_addr); @@ -1688,7 +1755,9 @@ irqreturn_t dmar_fault(int irq, void *dev_id) raw_spin_unlock_irqrestore(&iommu->register_lock, flag); if (!ratelimited) + /* Using pasid -1 if pasid is not present */ dmar_fault_do_one(iommu, type, fault_reason, + pasid_present ? 
pasid : -1, source_id, guest_addr);
 
 		fault_index++;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index f2ae8a006ff8..10e79a49af9d 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -272,6 +272,8 @@
 #define dma_frcd_type(d) ((d >> 30) & 1)
 #define dma_frcd_fault_reason(c) (c & 0xff)
 #define dma_frcd_source_id(c) (c & 0xffff)
+#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
+#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
 /* low 64 bit */
 #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
--
cgit v1.2.3

From 1f76249cc3bebd6642cb641a22fc2f302707bfbb Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Mon, 9 Sep 2019 12:00:10 +0100
Subject: iommu/vt-d: Declare Broadwell igfx dmar support snafu

Despite the widespread and complete failure of Broadwell integrated
graphics when DMAR is enabled, known over the years, we have never been
able to root cause the issue. Instead, we let the failure undermine our
confidence in the iommu system itself when we should be pushing for it
to be always enabled. Quirk away Broadwell and remove the rotten apple.

References: https://bugs.freedesktop.org/show_bug.cgi?id=89360
Signed-off-by: Chris Wilson
Cc: Lu Baolu
Cc: Martin Peres
Cc: Joerg Roedel
Reviewed-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)

(limited to 'drivers')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b034fe727ead..518de728ae5c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5913,20 +5913,46 @@ const struct iommu_ops intel_iommu_ops = {
 	.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
 };
 
-static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+static void quirk_iommu_igfx(struct pci_dev *dev)
 {
-	/* G4x/GM45 integrated gfx dmar support is totally busted. */
 	pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
 	dmar_map_gfx = 0;
 }
 
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
+/* G4x/GM45 integrated gfx dmar support is totally busted. */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
+
+/* Broadwell igfx malfunctions with dmar */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
 
 static void quirk_iommu_rwbf(struct pci_dev *dev)
 {
--
cgit v1.2.3
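
Illustration of the bounce-page policy from the earlier "iommu/vt-d: Use bounce
buffer for untrusted devices" patch: a buffer is only redirected through
swiotlb when either its physical start address or its length is not VT-d page
aligned, since only then can the IOMMU mapping expose neighbouring data that
shares the same page. The sketch below is a standalone rewrite of that check,
not part of the patch; the helper name buffer_needs_bounce() and the
user-space layout are illustrative assumptions (the driver's VTD_PAGE_SIZE is
4 KiB).

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative constant: VT-d remaps DMA in 4 KiB pages. */
#define VTD_PAGE_SIZE 4096UL

/*
 * Hypothetical helper mirroring the decision in bounce_map_single(): bounce
 * only when the start address or the length is not page aligned, i.e. when
 * mapping the buffer in place would also expose the rest of its IOMMU page.
 */
static bool buffer_needs_bounce(uint64_t paddr, size_t size)
{
	return ((paddr | size) & (VTD_PAGE_SIZE - 1)) != 0;
}

int main(void)
{
	/* 512 bytes at a page-unaligned address: must go through a bounce page. */
	return buffer_needs_bounce(0x1000200ULL, 512) ? 0 : 1;
}

In the patch itself the equivalent test is !IS_ALIGNED(paddr | size,
VTD_PAGE_SIZE) in bounce_map_single(), which selects between
swiotlb_tbl_map_single() and mapping the original pages directly.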
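
Illustration of the scalable-mode fault decoding from the "iommu/vt-d: Add
Scalable Mode fault information" patch: the new dma_remap_sm_fault_reasons[]
strings start at fault reason 0x30, and dmar_get_fault_reason() subtracts that
base before indexing, falling back to the legacy tables for other ranges. The
snippet below is a self-contained sketch of that offset lookup; the truncated
table and the sm_fault_reason() helper are hypothetical, only the indexing
scheme comes from the patch.

#include <stdio.h>

#define SM_FAULT_BASE 0x30

/* Truncated, illustrative copy of the table added to dmar.c; the real array
 * is much longer and pads unused codes with "Unknown" entries. */
static const char * const sm_fault_reasons[] = {
	"SM: Invalid Root Table Address",	/* 0x30 */
	"SM: TTM 0 for request with PASID",	/* 0x31 */
	"SM: TTM 0 for page group request",	/* 0x32 */
};

static const char *sm_fault_reason(unsigned int reason)
{
	/* Reasons below the scalable-mode base, or past the table, are unknown. */
	if (reason < SM_FAULT_BASE ||
	    reason - SM_FAULT_BASE >=
		sizeof(sm_fault_reasons) / sizeof(sm_fault_reasons[0]))
		return "Unknown";

	return sm_fault_reasons[reason - SM_FAULT_BASE];
}

int main(void)
{
	printf("0x31 -> %s\n", sm_fault_reason(0x31));
	printf("0x2a -> %s\n", sm_fault_reason(0x2a));
	return 0;
}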