author    Vasant Hegde <vasant.hegde@amd.com>  2023-09-21 12:21:37 +0300
committer Joerg Roedel <jroedel@suse.de>  2023-09-25 13:39:01 +0300
commit    bac05772fa9912195b2029ab757fe86773c7ff00 (patch)
tree      7158b3cb89b5b34e5edd397f57c79ebe7650d905 /drivers/iommu/amd
parent    ba7d263b7749f8e7f227d7357963632711b5c6ea (diff)
iommu/amd: Refactor protection domain allocation code
Replace the if-else chain with a switch-case statement, since the number of domain types keeps growing. No functional changes intended.

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/20230921092147.5930-5-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/amd')
-rw-r--r--  drivers/iommu/amd/iommu.c | 45
1 file changed, 22 insertions(+), 23 deletions(-)
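
For readers who want the resulting control flow at a glance before the diff, here is a condensed, self-contained C sketch of the two-stage dispatch the patch introduces: one switch maps the domain type to a page-table flavour (returning early for identity/passthrough domains), and a second switch picks the init path. The enums, the struct, and the helpers below (domain_alloc, init_v1, init_v2, and friends) are simplified stand-ins invented for illustration, not the kernel's definitions.

/*
 * Simplified stand-ins for the kernel's domain and page-table types;
 * only the if-else -> switch-case shape of the patch is the point.
 */
#include <stdio.h>
#include <stdlib.h>

enum domain_type { DOMAIN_IDENTITY, DOMAIN_UNMANAGED, DOMAIN_DMA, DOMAIN_DMA_FQ };
enum pgtable_kind { PGTABLE_V1, PGTABLE_V2 };

struct protection_domain {
	enum pgtable_kind pgtable;
	int nid;
};

/* Stand-ins for protection_domain_init_v1()/_v2() in the driver. */
static int init_v1(struct protection_domain *d, int levels)
{
	(void)levels;			/* DEFAULT_PGTABLE_LEVEL stand-in, unused here */
	d->pgtable = PGTABLE_V1;
	return 0;
}

static int init_v2(struct protection_domain *d)
{
	d->pgtable = PGTABLE_V2;
	return 0;
}

static struct protection_domain *domain_alloc(unsigned int type,
					      enum pgtable_kind default_pgtable)
{
	struct protection_domain *domain;
	enum pgtable_kind pgtable;
	int ret;

	domain = calloc(1, sizeof(*domain));
	if (!domain)
		return NULL;
	domain->nid = -1;		/* NUMA_NO_NODE stand-in */

	/* First switch: domain type -> page-table flavour (or early return). */
	switch (type) {
	case DOMAIN_IDENTITY:
		return domain;		/* passthrough: no io-pgtable needed */
	case DOMAIN_DMA:
	case DOMAIN_DMA_FQ:
		pgtable = default_pgtable;	/* amd_iommu_pgtable in the driver */
		break;
	case DOMAIN_UNMANAGED:
		pgtable = PGTABLE_V1;	/* force the v1 page table */
		break;
	default:
		goto out_err;
	}

	/* Second switch: page-table flavour -> init path. */
	switch (pgtable) {
	case PGTABLE_V1:
		ret = init_v1(domain, 3);
		break;
	case PGTABLE_V2:
		ret = init_v2(domain);
		break;
	default:
		ret = -1;
		break;
	}
	if (ret)
		goto out_err;

	return domain;

out_err:
	free(domain);
	return NULL;
}

int main(void)
{
	/* An unmanaged domain lands on the v1 page table regardless of the default. */
	struct protection_domain *d = domain_alloc(DOMAIN_UNMANAGED, PGTABLE_V2);

	printf("pgtable selected: %s\n", d && d->pgtable == PGTABLE_V1 ? "v1" : "v2");
	free(d);
	return 0;
}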
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index c99611139ab5..bc1fa6e43794 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2078,24 +2078,8 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 	struct io_pgtable_ops *pgtbl_ops;
 	struct protection_domain *domain;
 	int pgtable;
-	int mode = DEFAULT_PGTABLE_LEVEL;
 	int ret;
 
-	/*
-	 * Force IOMMU v1 page table when iommu=pt and
-	 * when allocating domain for pass-through devices.
-	 */
-	if (type == IOMMU_DOMAIN_IDENTITY) {
-		pgtable = AMD_IOMMU_V1;
-		mode = PAGE_MODE_NONE;
-	} else if (type == IOMMU_DOMAIN_UNMANAGED) {
-		pgtable = AMD_IOMMU_V1;
-	} else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
-		pgtable = amd_iommu_pgtable;
-	} else {
-		return NULL;
-	}
-
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
@@ -2106,27 +2090,42 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 
 	spin_lock_init(&domain->lock);
 	INIT_LIST_HEAD(&domain->dev_list);
+	domain->nid = NUMA_NO_NODE;
+
+	switch (type) {
+	/* No need to allocate io pgtable ops in passthrough mode */
+	case IOMMU_DOMAIN_IDENTITY:
+		return domain;
+	case IOMMU_DOMAIN_DMA:
+	case IOMMU_DOMAIN_DMA_FQ:
+		pgtable = amd_iommu_pgtable;
+		break;
+	/*
+	 * Force IOMMU v1 page table when allocating
+	 * domain for pass-through devices.
+	 */
+	case IOMMU_DOMAIN_UNMANAGED:
+		pgtable = AMD_IOMMU_V1;
+		break;
+	default:
+		goto out_err;
+	}
 
 	switch (pgtable) {
 	case AMD_IOMMU_V1:
-		ret = protection_domain_init_v1(domain, mode);
+		ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
 		break;
 	case AMD_IOMMU_V2:
 		ret = protection_domain_init_v2(domain);
 		break;
 	default:
 		ret = -EINVAL;
+		break;
 	}
 
 	if (ret)
 		goto out_err;
 
-	/* No need to allocate io pgtable ops in passthrough mode */
-	if (type == IOMMU_DOMAIN_IDENTITY)
-		return domain;
-
-	domain->nid = NUMA_NO_NODE;
-
 	pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
 	if (!pgtbl_ops) {
 		domain_id_free(domain->id);