path: root/drivers/iommu/mtk_iommu_v1.c
author     Jason Gunthorpe <jgg@nvidia.com>	2023-09-13 16:43:57 +0300
committer  Joerg Roedel <jroedel@suse.de>	2023-09-25 12:41:06 +0300
commit     4efd98d41ea71835f4d291176ff1fd0a1803dfd5 (patch)
tree       d0833cf80ade91dd86d71a12f992da983ccf7746 /drivers/iommu/mtk_iommu_v1.c
parent     3529375e7777b0f9b77c53df9a7da122bd165ae7 (diff)
download   linux-4efd98d41ea71835f4d291176ff1fd0a1803dfd5.tar.xz
iommu: Convert remaining simple drivers to domain_alloc_paging()
These drivers don't support IOMMU_DOMAIN_DMA, so this commit effectively allows them to support that mode.

The prior work to require default_domains makes this safe because every one of these drivers is either compilation incompatible with dma-iommu.c, or already establishing a default_domain. In both cases alloc_domain() will never be called with IOMMU_DOMAIN_DMA for these drivers so it is safe to drop the test.

Removing these tests clarifies that the domain allocation path is only about the functionality of a paging domain and has nothing to do with policy of how the paging domain is used for UNMANAGED/DMA/DMA_FQ.

Tested-by: Niklas Schnelle <schnelle@linux.ibm.com>
Tested-by: Steven Price <steven.price@arm.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/24-v8-81230027b2fa+9d-iommu_all_defdom_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
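The conversion pattern is the same for each of the affected simple drivers. The following is an illustrative sketch only, not code from this series: "mydrv" and struct mydrv_domain are hypothetical names standing in for any such driver, and the pattern mirrors the mtk_iommu_v1 change in the diff below.

/* Illustrative sketch only -- "mydrv" is a hypothetical simple driver. */

struct mydrv_domain {
	struct iommu_domain domain;
	/* driver-private page-table state would live here */
};

/* Before: the driver filters on the requested domain type. */
static struct iommu_domain *mydrv_domain_alloc(unsigned int type)
{
	struct mydrv_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;
	return &dom->domain;
}

/*
 * After: domain_alloc_paging() is only ever called to create a paging
 * domain, so the type check disappears; how that domain is then used
 * (UNMANAGED/DMA/DMA_FQ) remains a core policy decision.
 */
static struct iommu_domain *mydrv_domain_alloc_paging(struct device *dev)
{
	struct mydrv_domain *dom;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;
	return &dom->domain;
}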
Diffstat (limited to 'drivers/iommu/mtk_iommu_v1.c')
 drivers/iommu/mtk_iommu_v1.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 7c0c1d50df5f..67e044c1a7d9 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -270,13 +270,10 @@ static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
 	return 0;
 }
 
-static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev)
 {
 	struct mtk_iommu_v1_domain *dom;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 	if (!dom)
 		return NULL;
@@ -585,7 +582,7 @@ static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
 
 static const struct iommu_ops mtk_iommu_v1_ops = {
 	.identity_domain = &mtk_iommu_v1_identity_domain,
-	.domain_alloc = mtk_iommu_v1_domain_alloc,
+	.domain_alloc_paging = mtk_iommu_v1_domain_alloc_paging,
 	.probe_device = mtk_iommu_v1_probe_device,
 	.probe_finalize = mtk_iommu_v1_probe_finalize,
 	.release_device = mtk_iommu_v1_release_device,