diff options
| author | Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> | 2026-01-15 09:08:10 +0300 |
|---|---|---|
| committer | Joerg Roedel <joerg.roedel@amd.com> | 2026-01-18 12:56:13 +0300 |
| commit | 774180a74abc89fd1389f51a6f93dbfcded365c2 (patch) | |
| tree | a89b0215c510a68b1efe143277b70bf19c184628 | |
| parent | e113a72576d6056aa91925beaa7256533a808750 (diff) | |
| download | linux-774180a74abc89fd1389f51a6f93dbfcded365c2.tar.xz | |
iommu/amd: Add support for nested domain allocation
The nested domain is allocated with the IOMMU_DOMAIN_NESTED type to store
the stage-1 translation (i.e. GVA->GPA). This includes the GCR3 root pointer
table along with the guest page tables. The struct iommu_hwpt_amd_guest
contains this information and is passed from user-space as a parameter
of struct iommu_ops.domain_alloc_nested().
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
| -rw-r--r-- | drivers/iommu/amd/Makefile | 2 | ||||
| -rw-r--r-- | drivers/iommu/amd/amd_iommu.h | 4 | ||||
| -rw-r--r-- | drivers/iommu/amd/amd_iommu_types.h | 14 | ||||
| -rw-r--r-- | drivers/iommu/amd/nested.c | 110 |
4 files changed, 129 insertions, 1 deletion
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile index 41f053b49dce..94b8ef2acb18 100644 --- a/drivers/iommu/amd/Makefile +++ b/drivers/iommu/amd/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y += iommu.o init.o quirks.o ppr.o pasid.o -obj-$(CONFIG_AMD_IOMMU_IOMMUFD) += iommufd.o +obj-$(CONFIG_AMD_IOMMU_IOMMUFD) += iommufd.o nested.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index d97b9b6d76d3..aa29afe96e90 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -202,4 +202,8 @@ amd_iommu_make_clear_dte(struct iommu_dev_data *dev_data, struct dev_table_entry new->data128[1] = 0; } +/* NESTED */ +struct iommu_domain * +amd_iommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags, + const struct iommu_user_data *user_data); #endif /* AMD_IOMMU_H */ diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index d5b3393ab3a9..487ee6123de5 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -21,6 +21,8 @@ #include <linux/irqreturn.h> #include <linux/generic_pt/iommu.h> +#include <uapi/linux/iommufd.h> + /* * Maximum number of IOMMUs supported */ @@ -353,6 +355,8 @@ #define DTE_FLAG_V BIT_ULL(0) #define DTE_FLAG_TV BIT_ULL(1) #define DTE_FLAG_HAD (3ULL << 7) +#define DTE_MODE_MASK GENMASK_ULL(11, 9) +#define DTE_HOST_TRP GENMASK_ULL(51, 12) #define DTE_FLAG_GIOV BIT_ULL(54) #define DTE_FLAG_GV BIT_ULL(55) #define DTE_GLX GENMASK_ULL(57, 56) @@ -502,6 +506,16 @@ struct amd_iommu_viommu { }; /* + * Nested domain is specifically used for nested translation + */ +struct nested_domain { + struct iommu_domain domain; /* generic domain handle used by iommu core code */ + u16 gdom_id; /* domain ID from gDTE */ + struct iommu_hwpt_amd_guest gdte; /* Guest vIOMMU DTE */ + struct amd_iommu_viommu *viommu; /* AMD hw-viommu this nested domain belong to */ +}; 
+ +/* * This structure contains generic data for IOMMU protection domains * independent of their use. */ diff --git a/drivers/iommu/amd/nested.c b/drivers/iommu/amd/nested.c new file mode 100644 index 000000000000..a8c0bb4dd733 --- /dev/null +++ b/drivers/iommu/amd/nested.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Advanced Micro Devices, Inc. + */ + +#define dev_fmt(fmt) "AMD-Vi: " fmt + +#include <linux/iommu.h> +#include <uapi/linux/iommufd.h> + +#include "amd_iommu.h" + +static const struct iommu_domain_ops nested_domain_ops; + +static inline struct nested_domain *to_ndomain(struct iommu_domain *dom) +{ + return container_of(dom, struct nested_domain, domain); +} + +/* + * Validate guest DTE to make sure that configuration for host (v1) + * and guest (v2) page tables are valid when allocating nested domain. + */ +static int validate_gdte_nested(struct iommu_hwpt_amd_guest *gdte) +{ + u32 gpt_level = FIELD_GET(DTE_GPT_LEVEL_MASK, gdte->dte[2]); + + /* Must be zero: Mode, Host-TPR */ + if (FIELD_GET(DTE_MODE_MASK, gdte->dte[0]) != 0 || + FIELD_GET(DTE_HOST_TRP, gdte->dte[0]) != 0) + return -EINVAL; + + /* GCR3 TRP must be non-zero if V, GV is set */ + if (FIELD_GET(DTE_FLAG_V, gdte->dte[0]) == 1 && + FIELD_GET(DTE_FLAG_GV, gdte->dte[0]) == 1 && + FIELD_GET(DTE_GCR3_14_12, gdte->dte[0]) == 0 && + FIELD_GET(DTE_GCR3_30_15, gdte->dte[1]) == 0 && + FIELD_GET(DTE_GCR3_51_31, gdte->dte[1]) == 0) + return -EINVAL; + + /* Valid Guest Paging Mode values are 0 and 1 */ + if (gpt_level != GUEST_PGTABLE_4_LEVEL && + gpt_level != GUEST_PGTABLE_5_LEVEL) + return -EINVAL; + + /* GLX = 3 is reserved */ + if (FIELD_GET(DTE_GLX, gdte->dte[0]) == 3) + return -EINVAL; + + /* + * We need to check host capability before setting + * the Guest Paging Mode + */ + if (gpt_level == GUEST_PGTABLE_5_LEVEL && + amd_iommu_gpt_level < PAGE_MODE_5_LEVEL) + return -EOPNOTSUPP; + + return 0; +} + +/* + * This function is assigned to struct 
iommufd_viommu_ops.alloc_domain_nested() + * during the call to struct iommu_ops.viommu_init(). + */ +struct iommu_domain * +amd_iommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags, + const struct iommu_user_data *user_data) +{ + int ret; + struct nested_domain *ndom; + struct amd_iommu_viommu *aviommu = container_of(viommu, struct amd_iommu_viommu, core); + + if (user_data->type != IOMMU_HWPT_DATA_AMD_GUEST) + return ERR_PTR(-EOPNOTSUPP); + + ndom = kzalloc(sizeof(*ndom), GFP_KERNEL); + if (!ndom) + return ERR_PTR(-ENOMEM); + + ret = iommu_copy_struct_from_user(&ndom->gdte, user_data, + IOMMU_HWPT_DATA_AMD_GUEST, + dte); + if (ret) + goto out_err; + + ret = validate_gdte_nested(&ndom->gdte); + if (ret) + goto out_err; + + ndom->gdom_id = FIELD_GET(DTE_DOMID_MASK, ndom->gdte.dte[1]); + ndom->domain.ops = &nested_domain_ops; + ndom->domain.type = IOMMU_DOMAIN_NESTED; + ndom->viommu = aviommu; + + return &ndom->domain; +out_err: + kfree(ndom); + return ERR_PTR(ret); +} + +static void nested_domain_free(struct iommu_domain *dom) +{ + struct nested_domain *ndom = to_ndomain(dom); + + kfree(ndom); +} + +static const struct iommu_domain_ops nested_domain_ops = { + .free = nested_domain_free, +}; |
