Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c               |   1
-rw-r--r--  drivers/iommu/iommufd/Kconfig                  |   4
-rw-r--r--  drivers/iommu/iommufd/device.c                 | 116
-rw-r--r--  drivers/iommu/iommufd/iommufd_private.h        |   2
-rw-r--r--  drivers/iommu/iommufd/main.c                   |  26
-rw-r--r--  drivers/iommu/iommufd/vfio_compat.c            |   2
-rw-r--r--  drivers/net/ethernet/amd/pds_core/auxbus.c     |  24
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c                |   1
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c              |   1
-rw-r--r--  drivers/vfio/Kconfig                           |  27
-rw-r--r--  drivers/vfio/Makefile                          |   3
-rw-r--r--  drivers/vfio/cdx/main.c                        |   1
-rw-r--r--  drivers/vfio/device_cdev.c                     | 228
-rw-r--r--  drivers/vfio/fsl-mc/vfio_fsl_mc.c              |  15
-rw-r--r--  drivers/vfio/group.c                           | 173
-rw-r--r--  drivers/vfio/iommufd.c                         | 138
-rw-r--r--  drivers/vfio/pci/Kconfig                       |   2
-rw-r--r--  drivers/vfio/pci/Makefile                      |   2
-rw-r--r--  drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c |   2
-rw-r--r--  drivers/vfio/pci/mlx5/cmd.c                    |  48
-rw-r--r--  drivers/vfio/pci/mlx5/main.c                   |   1
-rw-r--r--  drivers/vfio/pci/pds/Kconfig                   |  19
-rw-r--r--  drivers/vfio/pci/pds/Makefile                  |  11
-rw-r--r--  drivers/vfio/pci/pds/cmds.c                    | 510
-rw-r--r--  drivers/vfio/pci/pds/cmds.h                    |  25
-rw-r--r--  drivers/vfio/pci/pds/dirty.c                   | 564
-rw-r--r--  drivers/vfio/pci/pds/dirty.h                   |  39
-rw-r--r--  drivers/vfio/pci/pds/lm.c                      | 434
-rw-r--r--  drivers/vfio/pci/pds/lm.h                      |  41
-rw-r--r--  drivers/vfio/pci/pds/pci_drv.c                 | 209
-rw-r--r--  drivers/vfio/pci/pds/pci_drv.h                 |   9
-rw-r--r--  drivers/vfio/pci/pds/vfio_dev.c                | 227
-rw-r--r--  drivers/vfio/pci/pds/vfio_dev.h                |  39
-rw-r--r--  drivers/vfio/pci/vfio_pci.c                    |   1
-rw-r--r--  drivers/vfio/pci/vfio_pci_core.c               | 261
-rw-r--r--  drivers/vfio/platform/vfio_amba.c              |   1
-rw-r--r--  drivers/vfio/platform/vfio_platform.c          |   1
-rw-r--r--  drivers/vfio/vfio.h                            | 218
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c                |  13
-rw-r--r--  drivers/vfio/vfio_main.c                       | 307
40 files changed, 3403 insertions(+), 343 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index de675d799c7d..9cd9e9da60dd 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1474,6 +1474,7 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
+ .detach_ioas = vfio_iommufd_emulated_detach_ioas,
};
static int intel_vgpu_probe(struct mdev_device *mdev)
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index ada693ea51a7..99d4b075df49 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -14,8 +14,8 @@ config IOMMUFD
if IOMMUFD
config IOMMUFD_VFIO_CONTAINER
bool "IOMMUFD provides the VFIO container /dev/vfio/vfio"
- depends on VFIO && !VFIO_CONTAINER
- default VFIO && !VFIO_CONTAINER
+ depends on VFIO_GROUP && !VFIO_CONTAINER
+ default VFIO_GROUP && !VFIO_CONTAINER
help
IOMMUFD will provide /dev/vfio/vfio instead of VFIO. This relies on
IOMMUFD providing compatibility emulation to give the same ioctls.
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index ed2937a4e196..1d49aad48c67 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -99,6 +99,36 @@ out_group_put:
EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, IOMMUFD);
/**
+ * iommufd_ctx_has_group - True if any device within the group is bound
+ * to the ictx
+ * @ictx: iommufd file descriptor
+ * @group: Pointer to a physical iommu_group struct
+ *
+ * True if any device within the group has been bound to this ictx, ex. via
+ * iommufd_device_bind(), therefore implying ictx ownership of the group.
+ */
+bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group)
+{
+ struct iommufd_object *obj;
+ unsigned long index;
+
+ if (!ictx || !group)
+ return false;
+
+ xa_lock(&ictx->objects);
+ xa_for_each(&ictx->objects, index, obj) {
+ if (obj->type == IOMMUFD_OBJ_DEVICE &&
+ container_of(obj, struct iommufd_device, obj)->group == group) {
+ xa_unlock(&ictx->objects);
+ return true;
+ }
+ }
+ xa_unlock(&ictx->objects);
+ return false;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_has_group, IOMMUFD);
+
+/**
* iommufd_device_unbind - Undo iommufd_device_bind()
* @idev: Device returned by iommufd_device_bind()
*
@@ -113,6 +143,18 @@ void iommufd_device_unbind(struct iommufd_device *idev)
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD);
+struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev)
+{
+ return idev->ictx;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_device_to_ictx, IOMMUFD);
+
+u32 iommufd_device_to_id(struct iommufd_device *idev)
+{
+ return idev->obj.id;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_device_to_id, IOMMUFD);
+
static int iommufd_device_setup_msi(struct iommufd_device *idev,
struct iommufd_hw_pagetable *hwpt,
phys_addr_t sw_msi_start)
@@ -441,6 +483,7 @@ iommufd_access_create(struct iommufd_ctx *ictx,
iommufd_ctx_get(ictx);
iommufd_object_finalize(ictx, &access->obj);
*id = access->obj.id;
+ mutex_init(&access->ioas_lock);
return access;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD);
@@ -457,26 +500,60 @@ void iommufd_access_destroy(struct iommufd_access *access)
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);
+void iommufd_access_detach(struct iommufd_access *access)
+{
+ struct iommufd_ioas *cur_ioas = access->ioas;
+
+ mutex_lock(&access->ioas_lock);
+ if (WARN_ON(!access->ioas))
+ goto out;
+ /*
+ * Set ioas to NULL to block any further iommufd_access_pin_pages().
+ * iommufd_access_unpin_pages() can continue using access->ioas_unpin.
+ */
+ access->ioas = NULL;
+
+ if (access->ops->unmap) {
+ mutex_unlock(&access->ioas_lock);
+ access->ops->unmap(access->data, 0, ULONG_MAX);
+ mutex_lock(&access->ioas_lock);
+ }
+ iopt_remove_access(&cur_ioas->iopt, access);
+ refcount_dec(&cur_ioas->obj.users);
+out:
+ access->ioas_unpin = NULL;
+ mutex_unlock(&access->ioas_lock);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_detach, IOMMUFD);
+
int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
{
struct iommufd_ioas *new_ioas;
int rc = 0;
- if (access->ioas)
+ mutex_lock(&access->ioas_lock);
+ if (WARN_ON(access->ioas || access->ioas_unpin)) {
+ mutex_unlock(&access->ioas_lock);
return -EINVAL;
+ }
new_ioas = iommufd_get_ioas(access->ictx, ioas_id);
- if (IS_ERR(new_ioas))
+ if (IS_ERR(new_ioas)) {
+ mutex_unlock(&access->ioas_lock);
return PTR_ERR(new_ioas);
+ }
rc = iopt_add_access(&new_ioas->iopt, access);
if (rc) {
+ mutex_unlock(&access->ioas_lock);
iommufd_put_object(&new_ioas->obj);
return rc;
}
iommufd_ref_to_users(&new_ioas->obj);
access->ioas = new_ioas;
+ access->ioas_unpin = new_ioas;
+ mutex_unlock(&access->ioas_lock);
return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_attach, IOMMUFD);
@@ -531,8 +608,8 @@ void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
void iommufd_access_unpin_pages(struct iommufd_access *access,
unsigned long iova, unsigned long length)
{
- struct io_pagetable *iopt = &access->ioas->iopt;
struct iopt_area_contig_iter iter;
+ struct io_pagetable *iopt;
unsigned long last_iova;
struct iopt_area *area;
@@ -540,6 +617,17 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
return;
+ mutex_lock(&access->ioas_lock);
+ /*
+ * The driver must be doing something wrong if it calls this before an
+ * iommufd_access_attach() or after an iommufd_access_detach().
+ */
+ if (WARN_ON(!access->ioas_unpin)) {
+ mutex_unlock(&access->ioas_lock);
+ return;
+ }
+ iopt = &access->ioas_unpin->iopt;
+
down_read(&iopt->iova_rwsem);
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
iopt_area_remove_access(
@@ -549,6 +637,7 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
min(last_iova, iopt_area_last_iova(area))));
WARN_ON(!iopt_area_contig_done(&iter));
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);
@@ -594,8 +683,8 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
unsigned long length, struct page **out_pages,
unsigned int flags)
{
- struct io_pagetable *iopt = &access->ioas->iopt;
struct iopt_area_contig_iter iter;
+ struct io_pagetable *iopt;
unsigned long last_iova;
struct iopt_area *area;
int rc;
@@ -610,6 +699,13 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
if (check_add_overflow(iova, length - 1, &last_iova))
return -EOVERFLOW;
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return -ENOENT;
+ }
+ iopt = &access->ioas->iopt;
+
down_read(&iopt->iova_rwsem);
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
unsigned long last = min(last_iova, iopt_area_last_iova(area));
@@ -640,6 +736,7 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
}
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
return 0;
err_remove:
@@ -654,6 +751,7 @@ err_remove:
iopt_area_last_iova(area))));
}
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
@@ -673,8 +771,8 @@ EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
void *data, size_t length, unsigned int flags)
{
- struct io_pagetable *iopt = &access->ioas->iopt;
struct iopt_area_contig_iter iter;
+ struct io_pagetable *iopt;
struct iopt_area *area;
unsigned long last_iova;
int rc;
@@ -684,6 +782,13 @@ int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
if (check_add_overflow(iova, length - 1, &last_iova))
return -EOVERFLOW;
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return -ENOENT;
+ }
+ iopt = &access->ioas->iopt;
+
down_read(&iopt->iova_rwsem);
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
unsigned long last = min(last_iova, iopt_area_last_iova(area));
@@ -710,6 +815,7 @@ int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
rc = -ENOENT;
err_out:
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD);
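The locking comments in the hunks above describe the contract this change establishes: iommufd_access_pin_pages() and iommufd_access_rw() return -ENOENT once iommufd_access_detach() has cleared access->ioas, while iommufd_access_unpin_pages() keeps working against access->ioas_unpin so an in-flight unmap callback can finish. The following is a minimal sketch of the expected caller ordering, not part of the patch, using only the signatures visible in this diff; the access handle and ioas_id are assumed to come from the caller's own bind/setup path.

#include <linux/iommufd.h>

/* Illustrative sketch only: expected attach/pin/unpin/detach ordering. */
static int example_access_cycle(struct iommufd_access *access, u32 ioas_id,
				unsigned long iova, unsigned long length,
				struct page **pages)
{
	int rc;

	/* Attach once; attaching while already attached is rejected. */
	rc = iommufd_access_attach(access, ioas_id);
	if (rc)
		return rc;

	/* Fails with -ENOENT if a detach already cleared access->ioas. */
	rc = iommufd_access_pin_pages(access, iova, length, pages, 0);
	if (rc)
		goto out_detach;

	/* ... DMA against the pinned pages ... */

	/* Unpin remains legal during a concurrent detach via ioas_unpin. */
	iommufd_access_unpin_pages(access, iova, length);

out_detach:
	iommufd_access_detach(access);
	return rc;
}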
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index f9790983699c..0349d7aea59a 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -296,6 +296,8 @@ struct iommufd_access {
struct iommufd_object obj;
struct iommufd_ctx *ictx;
struct iommufd_ioas *ioas;
+ struct iommufd_ioas *ioas_unpin;
+ struct mutex ioas_lock;
const struct iommufd_access_ops *ops;
void *data;
unsigned long iova_alignment;
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 4cf5f73f2708..5f34be2a1f7f 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -50,7 +50,7 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
* before calling iommufd_object_finalize().
*/
rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY,
- xa_limit_32b, GFP_KERNEL_ACCOUNT);
+ xa_limit_31b, GFP_KERNEL_ACCOUNT);
if (rc)
goto out_free;
return obj;
@@ -418,6 +418,30 @@ struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_from_file, IOMMUFD);
/**
+ * iommufd_ctx_from_fd - Acquires a reference to the iommufd context
+ * @fd: File descriptor to obtain the reference from
+ *
+ * Returns a pointer to the iommufd_ctx, otherwise ERR_PTR. On success
+ * the caller is responsible to call iommufd_ctx_put().
+ */
+struct iommufd_ctx *iommufd_ctx_from_fd(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (!file)
+ return ERR_PTR(-EBADF);
+
+ if (file->f_op != &iommufd_fops) {
+ fput(file);
+ return ERR_PTR(-EBADFD);
+ }
+ /* fget is the same as iommufd_ctx_get() */
+ return file->private_data;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_from_fd, IOMMUFD);
+
+/**
* iommufd_ctx_put - Put back a reference
* @ictx: Context to put back
*/
diff --git a/drivers/iommu/iommufd/vfio_compat.c b/drivers/iommu/iommufd/vfio_compat.c
index fe02517c73cc..6c810bf80f99 100644
--- a/drivers/iommu/iommufd/vfio_compat.c
+++ b/drivers/iommu/iommufd/vfio_compat.c
@@ -483,6 +483,8 @@ static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
rc = cap_size;
goto out_put;
}
+ cap_size = ALIGN(cap_size, sizeof(u64));
+
if (last_cap && info.argsz >= total_cap_size &&
put_user(total_cap_size, &last_cap->next)) {
rc = -EFAULT;
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 6787a5fae908..11c23a7f3172 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -8,24 +8,19 @@
/**
* pds_client_register - Link the client to the firmware
- * @pf_pdev: ptr to the PF driver struct
+ * @pf: ptr to the PF driver's private data struct
* @devname: name that includes service into, e.g. pds_core.vDPA

*
* Return: positive client ID (ci) on success, or
* negative for error
*/
-int pds_client_register(struct pci_dev *pf_pdev, char *devname)
+int pds_client_register(struct pdsc *pf, char *devname)
{
union pds_core_adminq_comp comp = {};
union pds_core_adminq_cmd cmd = {};
- struct pdsc *pf;
int err;
u16 ci;
- pf = pci_get_drvdata(pf_pdev);
- if (pf->state)
- return -ENXIO;
-
cmd.client_reg.opcode = PDS_AQ_CMD_CLIENT_REG;
strscpy(cmd.client_reg.devname, devname,
sizeof(cmd.client_reg.devname));
@@ -53,23 +48,18 @@ EXPORT_SYMBOL_GPL(pds_client_register);
/**
* pds_client_unregister - Unlink the client from the firmware
- * @pf_pdev: ptr to the PF driver struct
+ * @pf: ptr to the PF driver's private data struct
* @client_id: id returned from pds_client_register()
*
* Return: 0 on success, or
* negative for error
*/
-int pds_client_unregister(struct pci_dev *pf_pdev, u16 client_id)
+int pds_client_unregister(struct pdsc *pf, u16 client_id)
{
union pds_core_adminq_comp comp = {};
union pds_core_adminq_cmd cmd = {};
- struct pdsc *pf;
int err;
- pf = pci_get_drvdata(pf_pdev);
- if (pf->state)
- return -ENXIO;
-
cmd.client_unreg.opcode = PDS_AQ_CMD_CLIENT_UNREG;
cmd.client_unreg.client_id = cpu_to_le16(client_id);
@@ -198,7 +188,7 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
padev = pf->vfs[cf->vf_id].padev;
if (padev) {
- pds_client_unregister(pf->pdev, padev->client_id);
+ pds_client_unregister(pf, padev->client_id);
auxiliary_device_delete(&padev->aux_dev);
auxiliary_device_uninit(&padev->aux_dev);
padev->client_id = 0;
@@ -243,7 +233,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
*/
snprintf(devname, sizeof(devname), "%s.%s.%d",
PDS_CORE_DRV_NAME, pf->viftype_status[vt].name, cf->uid);
- client_id = pds_client_register(pf->pdev, devname);
+ client_id = pds_client_register(pf, devname);
if (client_id < 0) {
err = client_id;
goto out_unlock;
@@ -252,7 +242,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
padev = pdsc_auxbus_dev_register(cf, pf, client_id,
pf->viftype_status[vt].name);
if (IS_ERR(padev)) {
- pds_client_unregister(pf->pdev, client_id);
+ pds_client_unregister(pf, client_id);
err = PTR_ERR(padev);
goto out_unlock;
}
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 5b53b94f13c7..cba4971618ff 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -632,6 +632,7 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = {
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
+ .detach_ioas = vfio_iommufd_emulated_detach_ioas,
};
struct mdev_driver vfio_ccw_mdev_driver = {
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 0509f80622cd..4db538a55192 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -2020,6 +2020,7 @@ static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
+ .detach_ioas = vfio_iommufd_emulated_detach_ioas,
.request = vfio_ap_mdev_request
};
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index aba36f5be4ec..6bda6dbb4878 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -4,6 +4,8 @@ menuconfig VFIO
select IOMMU_API
depends on IOMMUFD || !IOMMUFD
select INTERVAL_TREE
+ select VFIO_GROUP if SPAPR_TCE_IOMMU || IOMMUFD=n
+ select VFIO_DEVICE_CDEV if !VFIO_GROUP
select VFIO_CONTAINER if IOMMUFD=n
help
VFIO provides a framework for secure userspace device drivers.
@@ -12,9 +14,33 @@ menuconfig VFIO
If you don't know what to do here, say N.
if VFIO
+config VFIO_DEVICE_CDEV
+ bool "Support for the VFIO cdev /dev/vfio/devices/vfioX"
+ depends on IOMMUFD && !SPAPR_TCE_IOMMU
+ default !VFIO_GROUP
+ help
+ The VFIO device cdev is another way for userspace to get device
+ access. Userspace gets device fd by opening device cdev under
+ /dev/vfio/devices/vfioX, and then bind the device fd with an iommufd
+ to set up secure DMA context for device access. This interface does
+ not support noiommu.
+
+ If you don't know what to do here, say N.
+
+config VFIO_GROUP
+ bool "Support for the VFIO group /dev/vfio/$group_id"
+ default y
+ help
+ VFIO group support provides the traditional model for accessing
+ devices through VFIO and is used by the majority of userspace
+ applications and drivers making use of VFIO.
+
+ If you don't know what to do here, say Y.
+
config VFIO_CONTAINER
bool "Support for the VFIO container /dev/vfio/vfio"
select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64)
+ depends on VFIO_GROUP
default y
help
The VFIO container is the classic interface to VFIO for establishing
@@ -36,6 +62,7 @@ endif
config VFIO_NOIOMMU
bool "VFIO No-IOMMU support"
+ depends on VFIO_GROUP
help
VFIO is built on the ability to isolate devices using the IOMMU.
Only with an IOMMU can userspace access to DMA capable devices be
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index 66f418aef5a9..c82ea032d352 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -2,8 +2,9 @@
obj-$(CONFIG_VFIO) += vfio.o
vfio-y += vfio_main.o \
- group.o \
iova_bitmap.o
+vfio-$(CONFIG_VFIO_DEVICE_CDEV) += device_cdev.o
+vfio-$(CONFIG_VFIO_GROUP) += group.o
vfio-$(CONFIG_IOMMUFD) += iommufd.o
vfio-$(CONFIG_VFIO_CONTAINER) += container.o
vfio-$(CONFIG_VFIO_VIRQFD) += virqfd.o
diff --git a/drivers/vfio/cdx/main.c b/drivers/vfio/cdx/main.c
index c376a69d2db2..de56686581ae 100644
--- a/drivers/vfio/cdx/main.c
+++ b/drivers/vfio/cdx/main.c
@@ -223,7 +223,6 @@ static struct cdx_driver vfio_cdx_driver = {
.match_id_table = vfio_cdx_table,
.driver = {
.name = "vfio-cdx",
- .owner = THIS_MODULE,
},
.driver_managed_dma = true,
};
diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
new file mode 100644
index 000000000000..e75da0a70d1f
--- /dev/null
+++ b/drivers/vfio/device_cdev.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Intel Corporation.
+ */
+#include <linux/vfio.h>
+#include <linux/iommufd.h>
+
+#include "vfio.h"
+
+static dev_t device_devt;
+
+void vfio_init_device_cdev(struct vfio_device *device)
+{
+ device->device.devt = MKDEV(MAJOR(device_devt), device->index);
+ cdev_init(&device->cdev, &vfio_device_fops);
+ device->cdev.owner = THIS_MODULE;
+}
+
+/*
+ * device access via the fd opened by this function is blocked until
+ * .open_device() is called successfully during BIND_IOMMUFD.
+ */
+int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
+{
+ struct vfio_device *device = container_of(inode->i_cdev,
+ struct vfio_device, cdev);
+ struct vfio_device_file *df;
+ int ret;
+
+ /* Paired with the put in vfio_device_fops_release() */
+ if (!vfio_device_try_get_registration(device))
+ return -ENODEV;
+
+ df = vfio_allocate_device_file(device);
+ if (IS_ERR(df)) {
+ ret = PTR_ERR(df);
+ goto err_put_registration;
+ }
+
+ filep->private_data = df;
+
+ return 0;
+
+err_put_registration:
+ vfio_device_put_registration(device);
+ return ret;
+}
+
+static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
+{
+ spin_lock(&df->kvm_ref_lock);
+ vfio_device_get_kvm_safe(df->device, df->kvm);
+ spin_unlock(&df->kvm_ref_lock);
+}
+
+long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
+ struct vfio_device_bind_iommufd __user *arg)
+{
+ struct vfio_device *device = df->device;
+ struct vfio_device_bind_iommufd bind;
+ unsigned long minsz;
+ int ret;
+
+ static_assert(__same_type(arg->out_devid, df->devid));
+
+ minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);
+
+ if (copy_from_user(&bind, arg, minsz))
+ return -EFAULT;
+
+ if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
+ return -EINVAL;
+
+ /* BIND_IOMMUFD only allowed for cdev fds */
+ if (df->group)
+ return -EINVAL;
+
+ ret = vfio_device_block_group(device);
+ if (ret)
+ return ret;
+
+ mutex_lock(&device->dev_set->lock);
+ /* one device cannot be bound twice */
+ if (df->access_granted) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
+ if (IS_ERR(df->iommufd)) {
+ ret = PTR_ERR(df->iommufd);
+ df->iommufd = NULL;
+ goto out_unlock;
+ }
+
+ /*
+ * Before the device open, get the KVM pointer currently
+ * associated with the device file (if there is) and obtain
+ * a reference. This reference is held until device closed.
+ * Save the pointer in the device for use by drivers.
+ */
+ vfio_df_get_kvm_safe(df);
+
+ ret = vfio_df_open(df);
+ if (ret)
+ goto out_put_kvm;
+
+ ret = copy_to_user(&arg->out_devid, &df->devid,
+ sizeof(df->devid)) ? -EFAULT : 0;
+ if (ret)
+ goto out_close_device;
+
+ device->cdev_opened = true;
+ /*
+ * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
+ * read/write/mmap
+ */
+ smp_store_release(&df->access_granted, true);
+ mutex_unlock(&device->dev_set->lock);
+ return 0;
+
+out_close_device:
+ vfio_df_close(df);
+out_put_kvm:
+ vfio_device_put_kvm(device);
+ iommufd_ctx_put(df->iommufd);
+ df->iommufd = NULL;
+out_unlock:
+ mutex_unlock(&device->dev_set->lock);
+ vfio_device_unblock_group(device);
+ return ret;
+}
+
+void vfio_df_unbind_iommufd(struct vfio_device_file *df)
+{
+ struct vfio_device *device = df->device;
+
+ /*
+ * In the time of close, there is no contention with another one
+ * changing this flag. So read df->access_granted without lock
+ * and no smp_load_acquire() is ok.
+ */
+ if (!df->access_granted)
+ return;
+
+ mutex_lock(&device->dev_set->lock);
+ vfio_df_close(df);
+ vfio_device_put_kvm(device);
+ iommufd_ctx_put(df->iommufd);
+ device->cdev_opened = false;
+ mutex_unlock(&device->dev_set->lock);
+ vfio_device_unblock_group(device);
+}
+
+int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
+ struct vfio_device_attach_iommufd_pt __user *arg)
+{
+ struct vfio_device *device = df->device;
+ struct vfio_device_attach_iommufd_pt attach;
+ unsigned long minsz;
+ int ret;
+
+ minsz = offsetofend(struct vfio_device_attach_iommufd_pt, pt_id);
+
+ if (copy_from_user(&attach, arg, minsz))
+ return -EFAULT;
+
+ if (attach.argsz < minsz || attach.flags)
+ return -EINVAL;
+
+ mutex_lock(&device->dev_set->lock);
+ ret = device->ops->attach_ioas(device, &attach.pt_id);
+ if (ret)
+ goto out_unlock;
+
+ if (copy_to_user(&arg->pt_id, &attach.pt_id, sizeof(attach.pt_id))) {
+ ret = -EFAULT;
+ goto out_detach;
+ }
+ mutex_unlock(&device->dev_set->lock);
+
+ return 0;
+
+out_detach:
+ device->ops->detach_ioas(device);
+out_unlock:
+ mutex_unlock(&device->dev_set->lock);
+ return ret;
+}
+
+int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
+ struct vfio_device_detach_iommufd_pt __user *arg)
+{
+ struct vfio_device *device = df->device;
+ struct vfio_device_detach_iommufd_pt detach;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct vfio_device_detach_iommufd_pt, flags);
+
+ if (copy_from_user(&detach, arg, minsz))
+ return -EFAULT;
+
+ if (detach.argsz < minsz || detach.flags)
+ return -EINVAL;
+
+ mutex_lock(&device->dev_set->lock);
+ device->ops->detach_ioas(device);
+ mutex_unlock(&device->dev_set->lock);
+
+ return 0;
+}
+
+static char *vfio_device_devnode(const struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "vfio/devices/%s", dev_name(dev));
+}
+
+int vfio_cdev_init(struct class *device_class)
+{
+ device_class->devnode = vfio_device_devnode;
+ return alloc_chrdev_region(&device_devt, 0,
+ MINORMASK + 1, "vfio-dev");
+}
+
+void vfio_cdev_cleanup(void)
+{
+ unregister_chrdev_region(device_devt, MINORMASK + 1);
+}
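The VFIO_DEVICE_CDEV Kconfig help text and the ioctl handlers above (vfio_df_ioctl_bind_iommufd(), vfio_df_ioctl_attach_pt()) describe the cdev flow from the kernel side. Below is a rough userspace sketch of the same sequence, assuming the uapi that accompanies this series (VFIO_DEVICE_BIND_IOMMUFD and VFIO_DEVICE_ATTACH_IOMMUFD_PT in <linux/vfio.h>, IOMMU_IOAS_ALLOC in <linux/iommufd.h>); the device path and error handling are illustrative only.

/* Illustrative userspace sketch of the cdev bind flow, not part of the patch. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>
#include <linux/iommufd.h>

int bind_device_to_iommufd(void)
{
	struct vfio_device_bind_iommufd bind = { .argsz = sizeof(bind) };
	struct vfio_device_attach_iommufd_pt attach = { .argsz = sizeof(attach) };
	struct iommu_ioas_alloc alloc = { .size = sizeof(alloc) };
	int devfd, iommufd;

	/* Open the device cdev and the iommufd provider. */
	devfd = open("/dev/vfio/devices/vfio0", O_RDWR);	/* path is illustrative */
	iommufd = open("/dev/iommu", O_RDWR);
	if (devfd < 0 || iommufd < 0)
		return -1;

	/* Bind the device fd to the iommufd; full device access opens here. */
	bind.iommufd = iommufd;
	if (ioctl(devfd, VFIO_DEVICE_BIND_IOMMUFD, &bind))
		return -1;
	/* bind.out_devid now holds the device's iommufd ID. */

	/* Allocate an IOAS and attach the device to it for DMA. */
	if (ioctl(iommufd, IOMMU_IOAS_ALLOC, &alloc))
		return -1;
	attach.pt_id = alloc.out_ioas_id;
	if (ioctl(devfd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach))
		return -1;

	return devfd;
}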
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index f2140e94d41e..f65d91c01f2e 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -593,6 +593,7 @@ static const struct vfio_device_ops vfio_fsl_mc_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static struct fsl_mc_driver vfio_fsl_mc_driver = {
@@ -600,23 +601,11 @@ static struct fsl_mc_driver vfio_fsl_mc_driver = {
.remove = vfio_fsl_mc_remove,
.driver = {
.name = "vfio-fsl-mc",
- .owner = THIS_MODULE,
},
.driver_managed_dma = true,
};
-static int __init vfio_fsl_mc_driver_init(void)
-{
- return fsl_mc_driver_register(&vfio_fsl_mc_driver);
-}
-
-static void __exit vfio_fsl_mc_driver_exit(void)
-{
- fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
-}
-
-module_init(vfio_fsl_mc_driver_init);
-module_exit(vfio_fsl_mc_driver_exit);
+module_fsl_mc_driver(vfio_fsl_mc_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index fc75c1000d74..610a429c6191 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -160,17 +160,13 @@ out_unlock:
static void vfio_device_group_get_kvm_safe(struct vfio_device *device)
{
spin_lock(&device->group->kvm_ref_lock);
- if (!device->group->kvm)
- goto unlock;
-
- _vfio_device_get_kvm_safe(device, device->group->kvm);
-
-unlock:
+ vfio_device_get_kvm_safe(device, device->group->kvm);
spin_unlock(&device->group->kvm_ref_lock);
}
-static int vfio_device_group_open(struct vfio_device *device)
+static int vfio_df_group_open(struct vfio_device_file *df)
{
+ struct vfio_device *device = df->device;
int ret;
mutex_lock(&device->group->group_lock);
@@ -190,24 +186,62 @@ static int vfio_device_group_open(struct vfio_device *device)
if (device->open_count == 0)
vfio_device_group_get_kvm_safe(device);
- ret = vfio_device_open(device, device->group->iommufd);
+ df->iommufd = device->group->iommufd;
+ if (df->iommufd && vfio_device_is_noiommu(device) && device->open_count == 0) {
+ /*
+ * Require no compat ioas to be assigned to proceed. The basic
+ * statement is that the user cannot have done something that
+ * implies they expected translation to exist
+ */
+ if (!capable(CAP_SYS_RAWIO) ||
+ vfio_iommufd_device_has_compat_ioas(device, df->iommufd))
+ ret = -EPERM;
+ else
+ ret = 0;
+ goto out_put_kvm;
+ }
- if (device->open_count == 0)
- vfio_device_put_kvm(device);
+ ret = vfio_df_open(df);
+ if (ret)
+ goto out_put_kvm;
+
+ if (df->iommufd && device->open_count == 1) {
+ ret = vfio_iommufd_compat_attach_ioas(device, df->iommufd);
+ if (ret)
+ goto out_close_device;
+ }
+
+ /*
+ * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
+ * read/write/mmap and vfio_file_has_device_access()
+ */
+ smp_store_release(&df->access_granted, true);
mutex_unlock(&device->dev_set->lock);
+ mutex_unlock(&device->group->group_lock);
+ return 0;
+out_close_device:
+ vfio_df_close(df);
+out_put_kvm:
+ df->iommufd = NULL;
+ if (device->open_count == 0)
+ vfio_device_put_kvm(device);
+ mutex_unlock(&device->dev_set->lock);
out_unlock:
mutex_unlock(&device->group->group_lock);
return ret;
}
-void vfio_device_group_close(struct vfio_device *device)
+void vfio_df_group_close(struct vfio_device_file *df)
{
+ struct vfio_device *device = df->device;
+
mutex_lock(&device->group->group_lock);
mutex_lock(&device->dev_set->lock);
- vfio_device_close(device, device->group->iommufd);
+ vfio_df_close(df);
+ df->iommufd = NULL;
if (device->open_count == 0)
vfio_device_put_kvm(device);
@@ -218,19 +252,28 @@ void vfio_device_group_close(struct vfio_device *device)
static struct file *vfio_device_open_file(struct vfio_device *device)
{
+ struct vfio_device_file *df;
struct file *filep;
int ret;
- ret = vfio_device_group_open(device);
- if (ret)
+ df = vfio_allocate_device_file(device);
+ if (IS_ERR(df)) {
+ ret = PTR_ERR(df);
goto err_out;
+ }
+
+ df->group = device->group;
+
+ ret = vfio_df_group_open(df);
+ if (ret)
+ goto err_free;
/*
* We can't use anon_inode_getfd() because we need to modify
* the f_mode flags directly to allow more than just ioctls
*/
filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
- device, O_RDWR);
+ df, O_RDWR);
if (IS_ERR(filep)) {
ret = PTR_ERR(filep);
goto err_close_device;
@@ -253,7 +296,9 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
return filep;
err_close_device:
- vfio_device_group_close(device);
+ vfio_df_group_close(df);
+err_free:
+ kfree(df);
err_out:
return ERR_PTR(ret);
}
@@ -357,6 +402,33 @@ static long vfio_group_fops_unl_ioctl(struct file *filep,
}
}
+int vfio_device_block_group(struct vfio_device *device)
+{
+ struct vfio_group *group = device->group;
+ int ret = 0;
+
+ mutex_lock(&group->group_lock);
+ if (group->opened_file) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ group->cdev_device_open_cnt++;
+
+out_unlock:
+ mutex_unlock(&group->group_lock);
+ return ret;
+}
+
+void vfio_device_unblock_group(struct vfio_device *device)
+{
+ struct vfio_group *group = device->group;
+
+ mutex_lock(&group->group_lock);
+ group->cdev_device_open_cnt--;
+ mutex_unlock(&group->group_lock);
+}
+
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
struct vfio_group *group =
@@ -379,6 +451,11 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
goto out_unlock;
}
+ if (group->cdev_device_open_cnt) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
/*
* Do we need multiple instances of the group open? Seems not.
*/
@@ -453,6 +530,7 @@ static void vfio_group_release(struct device *dev)
mutex_destroy(&group->device_lock);
mutex_destroy(&group->group_lock);
WARN_ON(group->iommu_group);
+ WARN_ON(group->cdev_device_open_cnt);
ida_free(&vfio.group_ida, MINOR(group->dev.devt));
kfree(group);
}
@@ -604,16 +682,6 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
if (!iommu_group)
return ERR_PTR(-EINVAL);
- /*
- * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
- * restore cache coherency. It has to be checked here because it is only
- * valid for cases where we are using iommu groups.
- */
- if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
- iommu_group_put(iommu_group);
- return ERR_PTR(-EINVAL);
- }
-
mutex_lock(&vfio.group_lock);
group = vfio_group_find_from_iommu(iommu_group);
if (group) {
@@ -745,6 +813,15 @@ bool vfio_device_has_container(struct vfio_device *device)
return device->group->container;
}
+struct vfio_group *vfio_group_from_file(struct file *file)
+{
+ struct vfio_group *group = file->private_data;
+
+ if (file->f_op != &vfio_group_fops)
+ return NULL;
+ return group;
+}
+
/**
* vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
* @file: VFIO group file
@@ -755,13 +832,13 @@ bool vfio_device_has_container(struct vfio_device *device)
*/
struct iommu_group *vfio_file_iommu_group(struct file *file)
{
- struct vfio_group *group = file->private_data;
+ struct vfio_group *group = vfio_group_from_file(file);
struct iommu_group *iommu_group = NULL;
if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
return NULL;
- if (!vfio_file_is_group(file))
+ if (!group)
return NULL;
mutex_lock(&group->group_lock);
@@ -775,33 +852,20 @@ struct iommu_group *vfio_file_iommu_group(struct file *file)
EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
/**
- * vfio_file_is_group - True if the file is usable with VFIO aPIS
+ * vfio_file_is_group - True if the file is a vfio group file
* @file: VFIO group file
*/
bool vfio_file_is_group(struct file *file)
{
- return file->f_op == &vfio_group_fops;
+ return vfio_group_from_file(file);
}
EXPORT_SYMBOL_GPL(vfio_file_is_group);
-/**
- * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
- * is always CPU cache coherent
- * @file: VFIO group file
- *
- * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
- * bit in DMA transactions. A return of false indicates that the user has
- * rights to access additional instructions such as wbinvd on x86.
- */
-bool vfio_file_enforced_coherent(struct file *file)
+bool vfio_group_enforced_coherent(struct vfio_group *group)
{
- struct vfio_group *group = file->private_data;
struct vfio_device *device;
bool ret = true;
- if (!vfio_file_is_group(file))
- return true;
-
/*
* If the device does not have IOMMU_CAP_ENFORCE_CACHE_COHERENCY then
* any domain later attached to it will also not support it. If the cap
@@ -819,28 +883,13 @@ bool vfio_file_enforced_coherent(struct file *file)
mutex_unlock(&group->device_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
-/**
- * vfio_file_set_kvm - Link a kvm with VFIO drivers
- * @file: VFIO group file
- * @kvm: KVM to link
- *
- * When a VFIO device is first opened the KVM will be available in
- * device->kvm if one was associated with the group.
- */
-void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
+void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
- struct vfio_group *group = file->private_data;
-
- if (!vfio_file_is_group(file))
- return;
-
spin_lock(&group->kvm_ref_lock);
group->kvm = kvm;
spin_unlock(&group->kvm_ref_lock);
}
-EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
/**
* vfio_file_has_dev - True if the VFIO file is a handle for device
@@ -851,9 +900,9 @@ EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
*/
bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
{
- struct vfio_group *group = file->private_data;
+ struct vfio_group *group = vfio_group_from_file(file);
- if (!vfio_file_is_group(file))
+ if (!group)
return false;
return group == device->group;
diff --git a/drivers/vfio/iommufd.c b/drivers/vfio/iommufd.c
index 88b00c501015..4d84904fd927 100644
--- a/drivers/vfio/iommufd.c
+++ b/drivers/vfio/iommufd.c
@@ -10,53 +10,48 @@
MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);
-int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
+bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
+ struct iommufd_ctx *ictx)
+{
+ u32 ioas_id;
+
+ return !iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
+}
+
+int vfio_df_iommufd_bind(struct vfio_device_file *df)
+{
+ struct vfio_device *vdev = df->device;
+ struct iommufd_ctx *ictx = df->iommufd;
+
+ lockdep_assert_held(&vdev->dev_set->lock);
+
+ return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
+}
+
+int vfio_iommufd_compat_attach_ioas(struct vfio_device *vdev,
+ struct iommufd_ctx *ictx)
{
u32 ioas_id;
- u32 device_id;
int ret;
lockdep_assert_held(&vdev->dev_set->lock);
- if (vfio_device_is_noiommu(vdev)) {
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- /*
- * Require no compat ioas to be assigned to proceed. The basic
- * statement is that the user cannot have done something that
- * implies they expected translation to exist
- */
- if (!iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id))
- return -EPERM;
+ /* compat noiommu does not need to do ioas attach */
+ if (vfio_device_is_noiommu(vdev))
return 0;
- }
-
- ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id);
- if (ret)
- return ret;
ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
if (ret)
- goto err_unbind;
- ret = vdev->ops->attach_ioas(vdev, &ioas_id);
- if (ret)
- goto err_unbind;
-
- /*
- * The legacy path has no way to return the device id or the selected
- * pt_id
- */
- return 0;
+ return ret;
-err_unbind:
- if (vdev->ops->unbind_iommufd)
- vdev->ops->unbind_iommufd(vdev);
- return ret;
+ /* The legacy path has no way to return the selected pt_id */
+ return vdev->ops->attach_ioas(vdev, &ioas_id);
}
-void vfio_iommufd_unbind(struct vfio_device *vdev)
+void vfio_df_iommufd_unbind(struct vfio_device_file *df)
{
+ struct vfio_device *vdev = df->device;
+
lockdep_assert_held(&vdev->dev_set->lock);
if (vfio_device_is_noiommu(vdev))
@@ -66,6 +61,50 @@ void vfio_iommufd_unbind(struct vfio_device *vdev)
vdev->ops->unbind_iommufd(vdev);
}
+struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev)
+{
+ if (vdev->iommufd_device)
+ return iommufd_device_to_ictx(vdev->iommufd_device);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_device_ictx);
+
+static int vfio_iommufd_device_id(struct vfio_device *vdev)
+{
+ if (vdev->iommufd_device)
+ return iommufd_device_to_id(vdev->iommufd_device);
+ return -EINVAL;
+}
+
+/*
+ * Return devid for a device.
+ * valid ID for the device that is owned by the ictx
+ * -ENOENT = device is owned but there is no ID
+ * -ENODEV or other error = device is not owned
+ */
+int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
+{
+ struct iommu_group *group;
+ int devid;
+
+ if (vfio_iommufd_device_ictx(vdev) == ictx)
+ return vfio_iommufd_device_id(vdev);
+
+ group = iommu_group_get(vdev->dev);
+ if (!group)
+ return -ENODEV;
+
+ if (iommufd_ctx_has_group(ictx, group))
+ devid = -ENOENT;
+ else
+ devid = -ENODEV;
+
+ iommu_group_put(group);
+
+ return devid;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_get_dev_id);
+
/*
* The physical standard ops mean that the iommufd_device is bound to the
* physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers
@@ -101,6 +140,14 @@ int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
int rc;
+ lockdep_assert_held(&vdev->dev_set->lock);
+
+ if (WARN_ON(!vdev->iommufd_device))
+ return -EINVAL;
+
+ if (vdev->iommufd_attached)
+ return -EBUSY;
+
rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
if (rc)
return rc;
@@ -109,6 +156,18 @@ int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
+void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev)
+{
+ lockdep_assert_held(&vdev->dev_set->lock);
+
+ if (WARN_ON(!vdev->iommufd_device) || !vdev->iommufd_attached)
+ return;
+
+ iommufd_device_detach(vdev->iommufd_device);
+ vdev->iommufd_attached = false;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_physical_detach_ioas);
+
/*
* The emulated standard ops mean that vfio_device is going to use the
* "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
@@ -172,3 +231,16 @@ int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
+
+void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev)
+{
+ lockdep_assert_held(&vdev->dev_set->lock);
+
+ if (WARN_ON(!vdev->iommufd_access) ||
+ !vdev->iommufd_attached)
+ return;
+
+ iommufd_access_detach(vdev->iommufd_access);
+ vdev->iommufd_attached = false;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_detach_ioas);
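The comment above vfio_iommufd_get_dev_id() defines a three-way contract: a valid devid when the ictx owns and has bound the device, -ENOENT when the ictx owns the group but the device itself has no devid, and -ENODEV (or another error) when it is not owned. A hedged sketch of a hypothetical in-kernel caller follows; report_devid() is illustrative and not an existing helper.

/* Illustrative sketch only: one way a consumer could act on the contract. */
static int report_devid(struct vfio_device *vdev, struct iommufd_ctx *ictx,
			u32 *out_devid)
{
	int devid = vfio_iommufd_get_dev_id(vdev, ictx);

	if (devid >= 0) {
		*out_devid = devid;	/* ictx owns the device and has bound it */
		return 0;
	}
	if (devid == -ENOENT) {
		*out_devid = 0;		/* owned via the group, but no devid yet */
		return 0;
	}
	return devid;			/* not owned by this ictx */
}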
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 86bb7835cf3c..8125e5f37832 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -63,4 +63,6 @@ source "drivers/vfio/pci/mlx5/Kconfig"
source "drivers/vfio/pci/hisilicon/Kconfig"
+source "drivers/vfio/pci/pds/Kconfig"
+
endmenu
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 24c524224da5..45167be462d8 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5/
obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/
+
+obj-$(CONFIG_PDS_VFIO_PCI) += pds/
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index a117eaf21c14..b2f9778c8366 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1373,6 +1373,7 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
@@ -1391,6 +1392,7 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index c82c1f4fc588..33574b04477d 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -732,52 +732,6 @@ void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf)
mlx5vf_cmd_dealloc_pd(migf);
}
-static void combine_ranges(struct rb_root_cached *root, u32 cur_nodes,
- u32 req_nodes)
-{
- struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
- unsigned long min_gap;
- unsigned long curr_gap;
-
- /* Special shortcut when a single range is required */
- if (req_nodes == 1) {
- unsigned long last;
-
- curr = comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);
- while (curr) {
- last = curr->last;
- prev = curr;
- curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
- if (prev != comb_start)
- interval_tree_remove(prev, root);
- }
- comb_start->last = last;
- return;
- }
-
- /* Combine ranges which have the smallest gap */
- while (cur_nodes > req_nodes) {
- prev = NULL;
- min_gap = ULONG_MAX;
- curr = interval_tree_iter_first(root, 0, ULONG_MAX);
- while (curr) {
- if (prev) {
- curr_gap = curr->start - prev->last;
- if (curr_gap < min_gap) {
- min_gap = curr_gap;
- comb_start = prev;
- comb_end = curr;
- }
- }
- prev = curr;
- curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
- }
- comb_start->last = comb_end->last;
- interval_tree_remove(comb_end, root);
- cur_nodes--;
- }
-}
-
static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev,
struct mlx5vf_pci_core_device *mvdev,
struct rb_root_cached *ranges, u32 nnodes)
@@ -800,7 +754,7 @@ static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev,
int i;
if (num_ranges > max_num_range) {
- combine_ranges(ranges, nnodes, max_num_range);
+ vfio_combine_iova_ranges(ranges, nnodes, max_num_range);
num_ranges = max_num_range;
}
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index d95fd382814c..42ec574a8622 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -1320,6 +1320,7 @@ static const struct vfio_device_ops mlx5vf_pci_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static int mlx5vf_pci_probe(struct pci_dev *pdev,
diff --git a/drivers/vfio/pci/pds/Kconfig b/drivers/vfio/pci/pds/Kconfig
new file mode 100644
index 000000000000..407b3fd32733
--- /dev/null
+++ b/drivers/vfio/pci/pds/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2023 Advanced Micro Devices, Inc.
+
+config PDS_VFIO_PCI
+ tristate "VFIO support for PDS PCI devices"
+ depends on PDS_CORE
+ select VFIO_PCI_CORE
+ help
+ This provides generic PCI support for PDS devices using the VFIO
+ framework.
+
+ More specific information on this driver can be
+ found in
+ <file:Documentation/networking/device_drivers/ethernet/amd/pds_vfio_pci.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pds-vfio-pci.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/pds/Makefile b/drivers/vfio/pci/pds/Makefile
new file mode 100644
index 000000000000..d5a06d81634f
--- /dev/null
+++ b/drivers/vfio/pci/pds/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2023 Advanced Micro Devices, Inc.
+
+obj-$(CONFIG_PDS_VFIO_PCI) += pds-vfio-pci.o
+
+pds-vfio-pci-y := \
+ cmds.o \
+ dirty.o \
+ lm.o \
+ pci_drv.o \
+ vfio_dev.o
diff --git a/drivers/vfio/pci/pds/cmds.c b/drivers/vfio/pci/pds/cmds.c
new file mode 100644
index 000000000000..36463ccc3df9
--- /dev/null
+++ b/drivers/vfio/pci/pds/cmds.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+
+#include "vfio_dev.h"
+#include "cmds.h"
+
+#define SUSPEND_TIMEOUT_S 5
+#define SUSPEND_CHECK_INTERVAL_MS 1
+
+static int pds_vfio_client_adminq_cmd(struct pds_vfio_pci_device *pds_vfio,
+ union pds_core_adminq_cmd *req,
+ union pds_core_adminq_comp *resp,
+ bool fast_poll)
+{
+ struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
+ union pds_core_adminq_cmd cmd = {};
+ struct pdsc *pdsc;
+ int err;
+
+ /* Wrap the client request */
+ cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
+ cmd.client_request.client_id = cpu_to_le16(pds_vfio->client_id);
+ memcpy(cmd.client_request.client_cmd, req,
+ sizeof(cmd.client_request.client_cmd));
+
+ pdsc = pdsc_get_pf_struct(pdev);
+ if (IS_ERR(pdsc))
+ return PTR_ERR(pdsc);
+
+ err = pdsc_adminq_post(pdsc, &cmd, resp, fast_poll);
+ if (err && err != -EAGAIN)
+ dev_err(pds_vfio_to_dev(pds_vfio),
+ "client admin cmd failed: %pe\n", ERR_PTR(err));
+
+ return err;
+}
+
+int pds_vfio_register_client_cmd(struct pds_vfio_pci_device *pds_vfio)
+{
+ struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
+ char devname[PDS_DEVNAME_LEN];
+ struct pdsc *pdsc;
+ int ci;
+
+ snprintf(devname, sizeof(devname), "%s.%d-%u", PDS_VFIO_LM_DEV_NAME,
+ pci_domain_nr(pdev->bus),
+ PCI_DEVID(pdev->bus->number, pdev->devfn));
+
+ pdsc = pdsc_get_pf_struct(pdev);
+ if (IS_ERR(pdsc))
+ return PTR_ERR(pdsc);
+
+ ci = pds_client_register(pdsc, devname);
+ if (ci < 0)
+ return ci;
+
+ pds_vfio->client_id = ci;
+
+ return 0;
+}
+
+void pds_vfio_unregister_client_cmd(struct pds_vfio_pci_device *pds_vfio)
+{
+ struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
+ struct pdsc *pdsc;
+ int err;
+
+ pdsc = pdsc_get_pf_struct(pdev);
+ if (IS_ERR(pdsc))
+ return;
+
+ err = pds_client_unregister(pdsc, pds_vfio->client_id);
+ if (err)
+ dev_err(&pdev->dev, "unregister from DSC failed: %pe\n",
+ ERR_PTR(err));
+
+ pds_vfio->client_id = 0;
+}
+
+static int
+pds_vfio_suspend_wait_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
+{
+ union pds_core_adminq_cmd cmd = {
+ .lm_suspend_status = {
+ .opcode = PDS_LM_CMD_SUSPEND_STATUS,
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ .type = type,
+ },
+ };
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ union pds_core_adminq_comp comp = {};
+ unsigned long time_limit;
+ unsigned long time_start;
+ unsigned long time_done;
+ int err;
+
+ time_start = jiffies;
+ time_limit = time_start + HZ * SUSPEND_TIMEOUT_S;
+ do {
+ err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
+ if (err != -EAGAIN)
+ break;
+
+ msleep(SUSPEND_CHECK_INTERVAL_MS);
+ } while (time_before(jiffies, time_limit));
+
+ time_done = jiffies;
+ dev_dbg(dev, "%s: vf%u: Suspend comp received in %d msecs\n", __func__,
+ pds_vfio->vf_id, jiffies_to_msecs(time_done - time_start));
+
+ /* Check the results */
+ if (time_after_eq(time_done, time_limit)) {
+ dev_err(dev, "%s: vf%u: Suspend comp timeout\n", __func__,
+ pds_vfio->vf_id);
+ err = -ETIMEDOUT;
+ }
+
+ return err;
+}
+
+int pds_vfio_suspend_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
+{
+ union pds_core_adminq_cmd cmd = {
+ .lm_suspend = {
+ .opcode = PDS_LM_CMD_SUSPEND,
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ .type = type,
+ },
+ };
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ dev_dbg(dev, "vf%u: Suspend device\n", pds_vfio->vf_id);
+
+ /*
+ * The initial suspend request to the firmware starts the device suspend
+ * operation and the firmware returns success if it's started
+ * successfully.
+ */
+ err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
+ if (err) {
+ dev_err(dev, "vf%u: Suspend failed: %pe\n", pds_vfio->vf_id,
+ ERR_PTR(err));
+ return err;
+ }
+
+ /*
+ * The subsequent suspend status request(s) check if the firmware has
+ * completed the device suspend process.
+ */
+ return pds_vfio_suspend_wait_device_cmd(pds_vfio, type);
+}
+
+int pds_vfio_resume_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
+{
+ union pds_core_adminq_cmd cmd = {
+ .lm_resume = {
+ .opcode = PDS_LM_CMD_RESUME,
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ .type = type,
+ },
+ };
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ union pds_core_adminq_comp comp = {};
+
+ dev_dbg(dev, "vf%u: Resume device\n", pds_vfio->vf_id);
+
+ return pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
+}
+
+int pds_vfio_get_lm_state_size_cmd(struct pds_vfio_pci_device *pds_vfio, u64 *size)
+{
+ union pds_core_adminq_cmd cmd = {
+ .lm_state_size = {
+ .opcode = PDS_LM_CMD_STATE_SIZE,
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ },
+ };
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ dev_dbg(dev, "vf%u: Get migration status\n", pds_vfio->vf_id);
+
+ err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
+ if (err)
+ return err;
+
+ *size = le64_to_cpu(comp.lm_state_size.size);
+ return 0;
+}
+
+static int pds_vfio_dma_map_lm_file(struct device *dev,
+ enum dma_data_direction dir,
+ struct pds_vfio_lm_file *lm_file)
+{
+ struct pds_lm_sg_elem *sgl, *sge;
+ struct scatterlist *sg;
+ dma_addr_t sgl_addr;
+ size_t sgl_size;
+ int err;
+ int i;
+
+ if (!lm_file)
+ return -EINVAL;
+
+ /* dma map file pages */
+ err = dma_map_sgtable(dev, &lm_file->sg_table, dir, 0);
+ if (err)
+ return err;
+
+ lm_file->num_sge = lm_file->sg_table.nents;
+
+ /* alloc sgl */
+ sgl_size = lm_file->num_sge * sizeof(struct pds_lm_sg_elem);
+ sgl = kzalloc(sgl_size, GFP_KERNEL);
+ if (!sgl) {
+ err = -ENOMEM;
+ goto out_unmap_sgtable;
+ }
+
+ /* fill sgl */
+ sge = sgl;
+ for_each_sgtable_dma_sg(&lm_file->sg_table, sg, i) {
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->len = cpu_to_le32(sg_dma_len(sg));
+ dev_dbg(dev, "addr = %llx, len = %u\n", sge->addr, sge->len);
+ sge++;
+ }
+
+ sgl_addr = dma_map_single(dev, sgl, sgl_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, sgl_addr)) {
+ err = -EIO;
+ goto out_free_sgl;
+ }
+
+ lm_file->sgl = sgl;
+ lm_file->sgl_addr = sgl_addr;
+
+ return 0;
+
+out_free_sgl:
+ kfree(sgl);
+out_unmap_sgtable:
+ lm_file->num_sge = 0;
+ dma_unmap_sgtable(dev, &lm_file->sg_table, dir, 0);
+ return err;
+}
+
+static void pds_vfio_dma_unmap_lm_file(struct device *dev,
+ enum dma_data_direction dir,
+ struct pds_vfio_lm_file *lm_file)
+{
+ if (!lm_file)
+ return;
+
+ /* free sgl */
+ if (lm_file->sgl) {
+ dma_unmap_single(dev, lm_file->sgl_addr,
+ lm_file->num_sge * sizeof(*lm_file->sgl),
+ DMA_TO_DEVICE);
+ kfree(lm_file->sgl);
+ lm_file->sgl = NULL;
+ lm_file->sgl_addr = DMA_MAPPING_ERROR;
+ lm_file->num_sge = 0;
+ }
+
+ /* dma unmap file pages */
+ dma_unmap_sgtable(dev, &lm_file->sg_table, dir, 0);
+}
+
+int pds_vfio_get_lm_state_cmd(struct pds_vfio_pci_device *pds_vfio)
+{
+ union pds_core_adminq_cmd cmd = {
+ .lm_save = {
+ .opcode = PDS_LM_CMD_SAVE,
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ },
+ };
+ struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
+ struct device *pdsc_dev = &pci_physfn(pdev)->dev;
+ union pds_core_adminq_comp comp = {};
+ struct pds_vfio_lm_file *lm_file;
+ int err;
+
+ dev_dbg(&pdev->dev, "vf%u: Get migration state\n", pds_vfio->vf_id);
+
+ lm_file = pds_vfio->save_file;
+
+ err = pds_vfio_dma_map_lm_file(pdsc_dev, DMA_FROM_DEVICE, lm_file);
+ if (err) {
+ dev_err(&pdev->dev, "failed to map save migration file: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ cmd.lm_save.sgl_addr = cpu_to_le64(lm_file->sgl_addr);
+ cmd.lm_save.num_sge = cpu_to_le32(lm_file->num_sge);
+
+ err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
+ if (err)
+ dev_err(&pdev->dev, "failed to get migration state: %pe\n",
+ ERR_PTR(err));
+
+ pds_vfio_dma_unmap_lm_file(pdsc_dev, DMA_FROM_DEVICE, lm_file);
+
+ return err;
+}
+
+int pds_vfio_set_lm_state_cmd(struct pds_vfio_pci_device *pds_vfio)
+{
+ union pds_core_adminq_cmd cmd = {
+ .lm_restore = {
+ .opcode = PDS_LM_CMD_RESTORE,
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ },
+ };
+ struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
+ struct device *pdsc_dev = &pci_physfn(pdev)->dev;
+ union pds_core_adminq_comp comp = {};
+ struct pds_vfio_lm_file *lm_file;
+ int err;
+
+ dev_dbg(&pdev->dev, "vf%u: Set migration state\n", pds_vfio->vf_id);
+
+ lm_file = pds_vfio->restore_file;
+
+ err = pds_vfio_dma_map_lm_file(pdsc_dev, DMA_TO_DEVICE, lm_file);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to map restore migration file: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ cmd.lm_restore.sgl_addr = cpu_to_le64(lm_file->sgl_addr);
+ cmd.lm_restore.num_sge = cpu_to_le32(lm_file->num_sge);
+
+ err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
+ if (err)
+ dev_err(&pdev->dev, "failed to set migration state: %pe\n",
+ ERR_PTR(err));
+
+ pds_vfio_dma_unmap_lm_file(pdsc_dev, DMA_TO_DEVICE, lm_file);
+
+ return err;
+}
+
+void pds_vfio_send_host_vf_lm_status_cmd(struct pds_vfio_pci_device *pds_vfio,
+ enum pds_lm_host_vf_status vf_status)
+{
+ union pds_core_adminq_cmd cmd = {
+ .lm_host_vf_status = {
+ .opcode = PDS_LM_CMD_HOST_VF_STATUS,
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ .status = vf_status,
+ },
+ };
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ dev_dbg(dev, "vf%u: Set host VF LM status: %u", pds_vfio->vf_id,
+ vf_status);
+ if (vf_status != PDS_LM_STA_IN_PROGRESS &&
+ vf_status != PDS_LM_STA_NONE) {
+ dev_warn(dev, "Invalid host VF migration status, %d\n",
+ vf_status);
+ return;
+ }
+
+ err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
+ if (err)
+ dev_warn(dev, "failed to send host VF migration status: %pe\n",
+ ERR_PTR(err));
+}
+
+int pds_vfio_dirty_status_cmd(struct pds_vfio_pci_device *pds_vfio,
+ u64 regions_dma, u8 *max_regions, u8 *num_regions)
+{
+ union pds_core_adminq_cmd cmd = {
+ .lm_dirty_status = {
+ .opcode = PDS_LM_CMD_DIRTY_STATUS,
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ },
+ };
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ dev_dbg(dev, "vf%u: Dirty status\n", pds_vfio->vf_id);
+
+ cmd.lm_dirty_status.regions_dma = cpu_to_le64(regions_dma);
+ cmd.lm_dirty_status.max_regions = *max_regions;
+
+ err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
+ if (err) {
+ dev_err(dev, "failed to get dirty status: %pe\n", ERR_PTR(err));
+ return err;
+ }
+
+ /* only support seq_ack approach for now */
+ if (!(le32_to_cpu(comp.lm_dirty_status.bmp_type_mask) &
+ BIT(PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK))) {
+ dev_err(dev, "Dirty bitmap tracking SEQ_ACK not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ *num_regions = comp.lm_dirty_status.num_regions;
+ *max_regions = comp.lm_dirty_status.max_regions;
+
+ dev_dbg(dev,
+ "Page Tracking Status command successful, max_regions: %d, num_regions: %d, bmp_type: %s\n",
+ *max_regions, *num_regions, "PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK");
+
+ return 0;
+}
+
+int pds_vfio_dirty_enable_cmd(struct pds_vfio_pci_device *pds_vfio,
+ u64 regions_dma, u8 num_regions)
+{
+ union pds_core_adminq_cmd cmd = {
+ .lm_dirty_enable = {
+ .opcode = PDS_LM_CMD_DIRTY_ENABLE,
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ .regions_dma = cpu_to_le64(regions_dma),
+ .bmp_type = PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK,
+ .num_regions = num_regions,
+ },
+ };
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
+ if (err) {
+ dev_err(dev, "failed dirty tracking enable: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ return 0;
+}
+
+int pds_vfio_dirty_disable_cmd(struct pds_vfio_pci_device *pds_vfio)
+{
+ union pds_core_adminq_cmd cmd = {
+ .lm_dirty_disable = {
+ .opcode = PDS_LM_CMD_DIRTY_DISABLE,
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ },
+ };
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
+ if (err || comp.lm_dirty_status.num_regions != 0) {
+ /* in case num_regions is still non-zero after disable */
+ err = err ? err : -EIO;
+ dev_err(dev,
+ "failed dirty tracking disable: %pe, num_regions %d\n",
+ ERR_PTR(err), comp.lm_dirty_status.num_regions);
+ return err;
+ }
+
+ return 0;
+}
+
+int pds_vfio_dirty_seq_ack_cmd(struct pds_vfio_pci_device *pds_vfio,
+ u64 sgl_dma, u16 num_sge, u32 offset,
+ u32 total_len, bool read_seq)
+{
+ const char *cmd_type_str = read_seq ? "read_seq" : "write_ack";
+ union pds_core_adminq_cmd cmd = {
+ .lm_dirty_seq_ack = {
+ .vf_id = cpu_to_le16(pds_vfio->vf_id),
+ .len_bytes = cpu_to_le32(total_len),
+ .off_bytes = cpu_to_le32(offset),
+ .sgl_addr = cpu_to_le64(sgl_dma),
+ .num_sge = cpu_to_le16(num_sge),
+ },
+ };
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ if (read_seq)
+ cmd.lm_dirty_seq_ack.opcode = PDS_LM_CMD_DIRTY_READ_SEQ;
+ else
+ cmd.lm_dirty_seq_ack.opcode = PDS_LM_CMD_DIRTY_WRITE_ACK;
+
+ err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
+ if (err) {
+ dev_err(dev, "failed cmd Page Tracking %s: %pe\n", cmd_type_str,
+ ERR_PTR(err));
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/vfio/pci/pds/cmds.h b/drivers/vfio/pci/pds/cmds.h
new file mode 100644
index 000000000000..95221100b954
--- /dev/null
+++ b/drivers/vfio/pci/pds/cmds.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _CMDS_H_
+#define _CMDS_H_
+
+int pds_vfio_register_client_cmd(struct pds_vfio_pci_device *pds_vfio);
+void pds_vfio_unregister_client_cmd(struct pds_vfio_pci_device *pds_vfio);
+int pds_vfio_suspend_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type);
+int pds_vfio_resume_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type);
+int pds_vfio_get_lm_state_size_cmd(struct pds_vfio_pci_device *pds_vfio, u64 *size);
+int pds_vfio_get_lm_state_cmd(struct pds_vfio_pci_device *pds_vfio);
+int pds_vfio_set_lm_state_cmd(struct pds_vfio_pci_device *pds_vfio);
+void pds_vfio_send_host_vf_lm_status_cmd(struct pds_vfio_pci_device *pds_vfio,
+ enum pds_lm_host_vf_status vf_status);
+int pds_vfio_dirty_status_cmd(struct pds_vfio_pci_device *pds_vfio,
+ u64 regions_dma, u8 *max_regions,
+ u8 *num_regions);
+int pds_vfio_dirty_enable_cmd(struct pds_vfio_pci_device *pds_vfio,
+ u64 regions_dma, u8 num_regions);
+int pds_vfio_dirty_disable_cmd(struct pds_vfio_pci_device *pds_vfio);
+int pds_vfio_dirty_seq_ack_cmd(struct pds_vfio_pci_device *pds_vfio,
+ u64 sgl_dma, u16 num_sge, u32 offset,
+ u32 total_len, bool read_seq);
+#endif /* _CMDS_H_ */
diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c
new file mode 100644
index 000000000000..c937aa6f3954
--- /dev/null
+++ b/drivers/vfio/pci/pds/dirty.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#include <linux/interval_tree.h>
+#include <linux/vfio.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+
+#include "vfio_dev.h"
+#include "cmds.h"
+#include "dirty.h"
+
+#define READ_SEQ true
+#define WRITE_ACK false
+
+bool pds_vfio_dirty_is_enabled(struct pds_vfio_pci_device *pds_vfio)
+{
+ return pds_vfio->dirty.is_enabled;
+}
+
+void pds_vfio_dirty_set_enabled(struct pds_vfio_pci_device *pds_vfio)
+{
+ pds_vfio->dirty.is_enabled = true;
+}
+
+void pds_vfio_dirty_set_disabled(struct pds_vfio_pci_device *pds_vfio)
+{
+ pds_vfio->dirty.is_enabled = false;
+}
+
+static void
+pds_vfio_print_guest_region_info(struct pds_vfio_pci_device *pds_vfio,
+ u8 max_regions)
+{
+ int len = max_regions * sizeof(struct pds_lm_dirty_region_info);
+ struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
+ struct device *pdsc_dev = &pci_physfn(pdev)->dev;
+ struct pds_lm_dirty_region_info *region_info;
+ dma_addr_t regions_dma;
+ u8 num_regions;
+ int err;
+
+ region_info = kcalloc(max_regions,
+ sizeof(struct pds_lm_dirty_region_info),
+ GFP_KERNEL);
+ if (!region_info)
+ return;
+
+ regions_dma =
+ dma_map_single(pdsc_dev, region_info, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(pdsc_dev, regions_dma))
+ goto out_free_region_info;
+
+ err = pds_vfio_dirty_status_cmd(pds_vfio, regions_dma, &max_regions,
+ &num_regions);
+ dma_unmap_single(pdsc_dev, regions_dma, len, DMA_FROM_DEVICE);
+ if (err)
+ goto out_free_region_info;
+
+ for (unsigned int i = 0; i < num_regions; i++)
+ dev_dbg(&pdev->dev,
+ "region_info[%d]: dma_base 0x%llx page_count %u page_size_log2 %u\n",
+ i, le64_to_cpu(region_info[i].dma_base),
+ le32_to_cpu(region_info[i].page_count),
+ region_info[i].page_size_log2);
+
+out_free_region_info:
+ kfree(region_info);
+}
+
+static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_dirty *dirty,
+ unsigned long bytes)
+{
+ unsigned long *host_seq_bmp, *host_ack_bmp;
+
+ host_seq_bmp = vzalloc(bytes);
+ if (!host_seq_bmp)
+ return -ENOMEM;
+
+ host_ack_bmp = vzalloc(bytes);
+ if (!host_ack_bmp) {
+		vfree(host_seq_bmp);
+ return -ENOMEM;
+ }
+
+ dirty->host_seq.bmp = host_seq_bmp;
+ dirty->host_ack.bmp = host_ack_bmp;
+
+ return 0;
+}
+
+static void pds_vfio_dirty_free_bitmaps(struct pds_vfio_dirty *dirty)
+{
+ vfree(dirty->host_seq.bmp);
+ vfree(dirty->host_ack.bmp);
+ dirty->host_seq.bmp = NULL;
+ dirty->host_ack.bmp = NULL;
+}
+
+static void __pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio,
+ struct pds_vfio_bmp_info *bmp_info)
+{
+ struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
+ struct device *pdsc_dev = &pci_physfn(pdev)->dev;
+
+ dma_unmap_single(pdsc_dev, bmp_info->sgl_addr,
+ bmp_info->num_sge * sizeof(struct pds_lm_sg_elem),
+ DMA_BIDIRECTIONAL);
+ kfree(bmp_info->sgl);
+
+ bmp_info->num_sge = 0;
+ bmp_info->sgl = NULL;
+ bmp_info->sgl_addr = 0;
+}
+
+static void pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio)
+{
+ if (pds_vfio->dirty.host_seq.sgl)
+ __pds_vfio_dirty_free_sgl(pds_vfio, &pds_vfio->dirty.host_seq);
+ if (pds_vfio->dirty.host_ack.sgl)
+ __pds_vfio_dirty_free_sgl(pds_vfio, &pds_vfio->dirty.host_ack);
+}
+
+static int __pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
+ struct pds_vfio_bmp_info *bmp_info,
+ u32 page_count)
+{
+ struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
+ struct device *pdsc_dev = &pci_physfn(pdev)->dev;
+ struct pds_lm_sg_elem *sgl;
+ dma_addr_t sgl_addr;
+ size_t sgl_size;
+ u32 max_sge;
+
+ max_sge = DIV_ROUND_UP(page_count, PAGE_SIZE * 8);
+ sgl_size = max_sge * sizeof(struct pds_lm_sg_elem);
+
+ sgl = kzalloc(sgl_size, GFP_KERNEL);
+ if (!sgl)
+ return -ENOMEM;
+
+ sgl_addr = dma_map_single(pdsc_dev, sgl, sgl_size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(pdsc_dev, sgl_addr)) {
+ kfree(sgl);
+ return -EIO;
+ }
+
+ bmp_info->sgl = sgl;
+ bmp_info->num_sge = max_sge;
+ bmp_info->sgl_addr = sgl_addr;
+
+ return 0;
+}
+
+static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
+ u32 page_count)
+{
+ struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+ int err;
+
+ err = __pds_vfio_dirty_alloc_sgl(pds_vfio, &dirty->host_seq,
+ page_count);
+ if (err)
+ return err;
+
+ err = __pds_vfio_dirty_alloc_sgl(pds_vfio, &dirty->host_ack,
+ page_count);
+ if (err) {
+ __pds_vfio_dirty_free_sgl(pds_vfio, &dirty->host_seq);
+ return err;
+ }
+
+ return 0;
+}
+
+static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
+ struct rb_root_cached *ranges, u32 nnodes,
+ u64 *page_size)
+{
+ struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
+ struct device *pdsc_dev = &pci_physfn(pdev)->dev;
+ struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+ u64 region_start, region_size, region_page_size;
+ struct pds_lm_dirty_region_info *region_info;
+ struct interval_tree_node *node = NULL;
+ u8 max_regions = 0, num_regions;
+ dma_addr_t regions_dma = 0;
+ u32 num_ranges = nnodes;
+ u32 page_count;
+ u16 len;
+ int err;
+
+ dev_dbg(&pdev->dev, "vf%u: Start dirty page tracking\n",
+ pds_vfio->vf_id);
+
+ if (pds_vfio_dirty_is_enabled(pds_vfio))
+ return -EINVAL;
+
+	/* Verify dirty tracking is not already enabled, i.e. num_regions == 0 */
+ err = pds_vfio_dirty_status_cmd(pds_vfio, 0, &max_regions,
+ &num_regions);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Failed to get dirty status, err %pe\n",
+ ERR_PTR(err));
+ return err;
+ } else if (num_regions) {
+ dev_err(&pdev->dev,
+ "Dirty tracking already enabled for %d regions\n",
+ num_regions);
+ return -EEXIST;
+ } else if (!max_regions) {
+ dev_err(&pdev->dev,
+ "Device doesn't support dirty tracking, max_regions %d\n",
+ max_regions);
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Only support 1 region for now. If there are any large gaps in the
+ * VM's address regions, then this would be a waste of memory as we are
+ * generating 2 bitmaps (ack/seq) from the min address to the max
+ * address of the VM's address regions. In the future, if we support
+ * more than one region in the device/driver we can split the bitmaps
+ * on the largest address region gaps. We can do this split up to the
+ * max_regions times returned from the dirty_status command.
+ */
+ max_regions = 1;
+ if (num_ranges > max_regions) {
+ vfio_combine_iova_ranges(ranges, nnodes, max_regions);
+ num_ranges = max_regions;
+ }
+
+ node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
+ if (!node)
+ return -EINVAL;
+
+ region_size = node->last - node->start + 1;
+ region_start = node->start;
+ region_page_size = *page_size;
+
+ len = sizeof(*region_info);
+ region_info = kzalloc(len, GFP_KERNEL);
+ if (!region_info)
+ return -ENOMEM;
+
+ page_count = DIV_ROUND_UP(region_size, region_page_size);
+
+ region_info->dma_base = cpu_to_le64(region_start);
+ region_info->page_count = cpu_to_le32(page_count);
+ region_info->page_size_log2 = ilog2(region_page_size);
+
+ regions_dma = dma_map_single(pdsc_dev, (void *)region_info, len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(pdsc_dev, regions_dma)) {
+ err = -ENOMEM;
+ goto out_free_region_info;
+ }
+
+ err = pds_vfio_dirty_enable_cmd(pds_vfio, regions_dma, max_regions);
+ dma_unmap_single(pdsc_dev, regions_dma, len, DMA_BIDIRECTIONAL);
+ if (err)
+ goto out_free_region_info;
+
+	/*
+	 * page_count might have been adjusted by the device, so re-read it
+	 * from region_info before freeing it
+	 */
+ page_count = le32_to_cpu(region_info->page_count);
+
+ dev_dbg(&pdev->dev,
+ "region_info: regions_dma 0x%llx dma_base 0x%llx page_count %u page_size_log2 %u\n",
+ regions_dma, region_start, page_count,
+ (u8)ilog2(region_page_size));
+
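+	/* One bit per region page, so the seq/ack bitmaps each need page_count / 8 bytes */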
+ err = pds_vfio_dirty_alloc_bitmaps(dirty, page_count / BITS_PER_BYTE);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc dirty bitmaps: %pe\n",
+ ERR_PTR(err));
+ goto out_free_region_info;
+ }
+
+ err = pds_vfio_dirty_alloc_sgl(pds_vfio, page_count);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc dirty sg lists: %pe\n",
+ ERR_PTR(err));
+ goto out_free_bitmaps;
+ }
+
+ dirty->region_start = region_start;
+ dirty->region_size = region_size;
+ dirty->region_page_size = region_page_size;
+ pds_vfio_dirty_set_enabled(pds_vfio);
+
+ pds_vfio_print_guest_region_info(pds_vfio, max_regions);
+
+ kfree(region_info);
+
+ return 0;
+
+out_free_bitmaps:
+ pds_vfio_dirty_free_bitmaps(dirty);
+out_free_region_info:
+ kfree(region_info);
+ return err;
+}
+
+void pds_vfio_dirty_disable(struct pds_vfio_pci_device *pds_vfio, bool send_cmd)
+{
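+	/* send_cmd == false skips the adminq commands, e.g. when recovering via a deferred reset */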
+ if (pds_vfio_dirty_is_enabled(pds_vfio)) {
+ pds_vfio_dirty_set_disabled(pds_vfio);
+ if (send_cmd)
+ pds_vfio_dirty_disable_cmd(pds_vfio);
+ pds_vfio_dirty_free_sgl(pds_vfio);
+ pds_vfio_dirty_free_bitmaps(&pds_vfio->dirty);
+ }
+
+ if (send_cmd)
+ pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_NONE);
+}
+
+static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
+ struct pds_vfio_bmp_info *bmp_info,
+ u32 offset, u32 bmp_bytes, bool read_seq)
+{
+ const char *bmp_type_str = read_seq ? "read_seq" : "write_ack";
+ u8 dma_dir = read_seq ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
+ struct device *pdsc_dev = &pci_physfn(pdev)->dev;
+ unsigned long long npages;
+ struct sg_table sg_table;
+ struct scatterlist *sg;
+ struct page **pages;
+ u32 page_offset;
+ const void *bmp;
+ size_t size;
+ u16 num_sge;
+ int err;
+ int i;
+
+ bmp = (void *)((u64)bmp_info->bmp + offset);
+ page_offset = offset_in_page(bmp);
+ bmp -= page_offset;
+
+	/*
+	 * The start and end of the bitmap section being seq'd/ack'd might not
+	 * be page aligned, so account for page_offset to ensure enough pages
+	 * are allocated to cover bmp_bytes
+	 */
+ npages = DIV_ROUND_UP_ULL(bmp_bytes + page_offset, PAGE_SIZE);
+ pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (unsigned long long i = 0; i < npages; i++) {
+ struct page *page = vmalloc_to_page(bmp);
+
+ if (!page) {
+ err = -EFAULT;
+ goto out_free_pages;
+ }
+
+ pages[i] = page;
+ bmp += PAGE_SIZE;
+ }
+
+ err = sg_alloc_table_from_pages(&sg_table, pages, npages, page_offset,
+ bmp_bytes, GFP_KERNEL);
+ if (err)
+ goto out_free_pages;
+
+ err = dma_map_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
+ if (err)
+ goto out_free_sg_table;
+
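+	/* Copy the DMA-mapped scatterlist into the device's SG element format */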
+ for_each_sgtable_dma_sg(&sg_table, sg, i) {
+ struct pds_lm_sg_elem *sg_elem = &bmp_info->sgl[i];
+
+ sg_elem->addr = cpu_to_le64(sg_dma_address(sg));
+ sg_elem->len = cpu_to_le32(sg_dma_len(sg));
+ }
+
+ num_sge = sg_table.nents;
+ size = num_sge * sizeof(struct pds_lm_sg_elem);
+ dma_sync_single_for_device(pdsc_dev, bmp_info->sgl_addr, size, dma_dir);
+ err = pds_vfio_dirty_seq_ack_cmd(pds_vfio, bmp_info->sgl_addr, num_sge,
+ offset, bmp_bytes, read_seq);
+ if (err)
+ dev_err(&pdev->dev,
+ "Dirty bitmap %s failed offset %u bmp_bytes %u num_sge %u DMA 0x%llx: %pe\n",
+ bmp_type_str, offset, bmp_bytes,
+ num_sge, bmp_info->sgl_addr, ERR_PTR(err));
+ dma_sync_single_for_cpu(pdsc_dev, bmp_info->sgl_addr, size, dma_dir);
+
+ dma_unmap_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
+out_free_sg_table:
+ sg_free_table(&sg_table);
+out_free_pages:
+ kfree(pages);
+
+ return err;
+}
+
+static int pds_vfio_dirty_write_ack(struct pds_vfio_pci_device *pds_vfio,
+ u32 offset, u32 len)
+{
+ return pds_vfio_dirty_seq_ack(pds_vfio, &pds_vfio->dirty.host_ack,
+ offset, len, WRITE_ACK);
+}
+
+static int pds_vfio_dirty_read_seq(struct pds_vfio_pci_device *pds_vfio,
+ u32 offset, u32 len)
+{
+ return pds_vfio_dirty_seq_ack(pds_vfio, &pds_vfio->dirty.host_seq,
+ offset, len, READ_SEQ);
+}
+
+static int pds_vfio_dirty_process_bitmaps(struct pds_vfio_pci_device *pds_vfio,
+ struct iova_bitmap *dirty_bitmap,
+ u32 bmp_offset, u32 len_bytes)
+{
+ u64 page_size = pds_vfio->dirty.region_page_size;
+ u64 region_start = pds_vfio->dirty.region_start;
+ u32 bmp_offset_bit;
+ __le64 *seq, *ack;
+ int dword_count;
+
+ dword_count = len_bytes / sizeof(u64);
+ seq = (__le64 *)((u64)pds_vfio->dirty.host_seq.bmp + bmp_offset);
+ ack = (__le64 *)((u64)pds_vfio->dirty.host_ack.bmp + bmp_offset);
+ bmp_offset_bit = bmp_offset * 8;
+
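+	/*
+	 * Bits set in the device-written seq bitmap but not yet mirrored in
+	 * the ack bitmap identify pages dirtied since the last sync; report
+	 * each such page to the IOVA bitmap.
+	 */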
+ for (int i = 0; i < dword_count; i++) {
+ u64 xor = le64_to_cpu(seq[i]) ^ le64_to_cpu(ack[i]);
+
+ /* prepare for next write_ack call */
+ ack[i] = seq[i];
+
+ for (u8 bit_i = 0; bit_i < BITS_PER_TYPE(u64); ++bit_i) {
+ if (xor & BIT(bit_i)) {
+ u64 abs_bit_i = bmp_offset_bit +
+ i * BITS_PER_TYPE(u64) + bit_i;
+ u64 addr = abs_bit_i * page_size + region_start;
+
+ iova_bitmap_set(dirty_bitmap, addr, page_size);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
+ struct iova_bitmap *dirty_bitmap,
+ unsigned long iova, unsigned long length)
+{
+ struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
+ struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+ u64 bmp_offset, bmp_bytes;
+ u64 bitmap_size, pages;
+ int err;
+
+ dev_dbg(dev, "vf%u: Get dirty page bitmap\n", pds_vfio->vf_id);
+
+ if (!pds_vfio_dirty_is_enabled(pds_vfio)) {
+ dev_err(dev, "vf%u: Sync failed, dirty tracking is disabled\n",
+ pds_vfio->vf_id);
+ return -EINVAL;
+ }
+
+ pages = DIV_ROUND_UP(length, pds_vfio->dirty.region_page_size);
+ bitmap_size =
+ round_up(pages, sizeof(u64) * BITS_PER_BYTE) / BITS_PER_BYTE;
+
+ dev_dbg(dev,
+ "vf%u: iova 0x%lx length %lu page_size %llu pages %llu bitmap_size %llu\n",
+ pds_vfio->vf_id, iova, length, pds_vfio->dirty.region_page_size,
+ pages, bitmap_size);
+
+ if (!length || ((dirty->region_start + iova + length) >
+ (dirty->region_start + dirty->region_size))) {
+ dev_err(dev, "Invalid iova 0x%lx and/or length 0x%lx to sync\n",
+ iova, length);
+ return -EINVAL;
+ }
+
+ /* bitmap is modified in 64 bit chunks */
+ bmp_bytes = ALIGN(DIV_ROUND_UP(length / dirty->region_page_size,
+ sizeof(u64)),
+ sizeof(u64));
+ if (bmp_bytes != bitmap_size) {
+ dev_err(dev,
+ "Calculated bitmap bytes %llu not equal to bitmap size %llu\n",
+ bmp_bytes, bitmap_size);
+ return -EINVAL;
+ }
+
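+	/* Select the slice of the seq/ack bitmaps that covers the requested IOVA range */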
+ bmp_offset = DIV_ROUND_UP(iova / dirty->region_page_size, sizeof(u64));
+
+ dev_dbg(dev,
+ "Syncing dirty bitmap, iova 0x%lx length 0x%lx, bmp_offset %llu bmp_bytes %llu\n",
+ iova, length, bmp_offset, bmp_bytes);
+
+ err = pds_vfio_dirty_read_seq(pds_vfio, bmp_offset, bmp_bytes);
+ if (err)
+ return err;
+
+ err = pds_vfio_dirty_process_bitmaps(pds_vfio, dirty_bitmap, bmp_offset,
+ bmp_bytes);
+ if (err)
+ return err;
+
+ err = pds_vfio_dirty_write_ack(pds_vfio, bmp_offset, bmp_bytes);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int pds_vfio_dma_logging_report(struct vfio_device *vdev, unsigned long iova,
+ unsigned long length, struct iova_bitmap *dirty)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(vdev, struct pds_vfio_pci_device,
+ vfio_coredev.vdev);
+ int err;
+
+ mutex_lock(&pds_vfio->state_mutex);
+ err = pds_vfio_dirty_sync(pds_vfio, dirty, iova, length);
+ pds_vfio_state_mutex_unlock(pds_vfio);
+
+ return err;
+}
+
+int pds_vfio_dma_logging_start(struct vfio_device *vdev,
+ struct rb_root_cached *ranges, u32 nnodes,
+ u64 *page_size)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(vdev, struct pds_vfio_pci_device,
+ vfio_coredev.vdev);
+ int err;
+
+ mutex_lock(&pds_vfio->state_mutex);
+ pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_IN_PROGRESS);
+ err = pds_vfio_dirty_enable(pds_vfio, ranges, nnodes, page_size);
+ pds_vfio_state_mutex_unlock(pds_vfio);
+
+ return err;
+}
+
+int pds_vfio_dma_logging_stop(struct vfio_device *vdev)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(vdev, struct pds_vfio_pci_device,
+ vfio_coredev.vdev);
+
+ mutex_lock(&pds_vfio->state_mutex);
+ pds_vfio_dirty_disable(pds_vfio, true);
+ pds_vfio_state_mutex_unlock(pds_vfio);
+
+ return 0;
+}
diff --git a/drivers/vfio/pci/pds/dirty.h b/drivers/vfio/pci/pds/dirty.h
new file mode 100644
index 000000000000..f78da25d75ca
--- /dev/null
+++ b/drivers/vfio/pci/pds/dirty.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _DIRTY_H_
+#define _DIRTY_H_
+
+struct pds_vfio_bmp_info {
+ unsigned long *bmp;
+ u32 bmp_bytes;
+ struct pds_lm_sg_elem *sgl;
+ dma_addr_t sgl_addr;
+ u16 num_sge;
+};
+
+struct pds_vfio_dirty {
+ struct pds_vfio_bmp_info host_seq;
+ struct pds_vfio_bmp_info host_ack;
+ u64 region_size;
+ u64 region_start;
+ u64 region_page_size;
+ bool is_enabled;
+};
+
+struct pds_vfio_pci_device;
+
+bool pds_vfio_dirty_is_enabled(struct pds_vfio_pci_device *pds_vfio);
+void pds_vfio_dirty_set_enabled(struct pds_vfio_pci_device *pds_vfio);
+void pds_vfio_dirty_set_disabled(struct pds_vfio_pci_device *pds_vfio);
+void pds_vfio_dirty_disable(struct pds_vfio_pci_device *pds_vfio,
+ bool send_cmd);
+
+int pds_vfio_dma_logging_report(struct vfio_device *vdev, unsigned long iova,
+ unsigned long length,
+ struct iova_bitmap *dirty);
+int pds_vfio_dma_logging_start(struct vfio_device *vdev,
+ struct rb_root_cached *ranges, u32 nnodes,
+ u64 *page_size);
+int pds_vfio_dma_logging_stop(struct vfio_device *vdev);
+#endif /* _DIRTY_H_ */
diff --git a/drivers/vfio/pci/pds/lm.c b/drivers/vfio/pci/pds/lm.c
new file mode 100644
index 000000000000..79fe2e66bb49
--- /dev/null
+++ b/drivers/vfio/pci/pds/lm.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/vfio.h>
+#include <linux/vfio_pci_core.h>
+
+#include "vfio_dev.h"
+#include "cmds.h"
+
+static struct pds_vfio_lm_file *
+pds_vfio_get_lm_file(const struct file_operations *fops, int flags, u64 size)
+{
+ struct pds_vfio_lm_file *lm_file = NULL;
+ unsigned long long npages;
+ struct page **pages;
+ void *page_mem;
+ const void *p;
+
+ if (!size)
+ return NULL;
+
+ /* Alloc file structure */
+ lm_file = kzalloc(sizeof(*lm_file), GFP_KERNEL);
+ if (!lm_file)
+ return NULL;
+
+ /* Create file */
+ lm_file->filep =
+ anon_inode_getfile("pds_vfio_lm", fops, lm_file, flags);
+ if (IS_ERR(lm_file->filep))
+ goto out_free_file;
+
+ stream_open(lm_file->filep->f_inode, lm_file->filep);
+ mutex_init(&lm_file->lock);
+
+ /* prevent file from being released before we are done with it */
+ get_file(lm_file->filep);
+
+ /* Allocate memory for file pages */
+ npages = DIV_ROUND_UP_ULL(size, PAGE_SIZE);
+ pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ goto out_put_file;
+
+ page_mem = kvzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+ if (!page_mem)
+ goto out_free_pages_array;
+
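+	/* Walk the allocation a page at a time from its page-aligned base */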
+ p = page_mem - offset_in_page(page_mem);
+ for (unsigned long long i = 0; i < npages; i++) {
+ if (is_vmalloc_addr(p))
+ pages[i] = vmalloc_to_page(p);
+ else
+ pages[i] = kmap_to_page((void *)p);
+ if (!pages[i])
+ goto out_free_page_mem;
+
+ p += PAGE_SIZE;
+ }
+
+ /* Create scatterlist of file pages to use for DMA mapping later */
+ if (sg_alloc_table_from_pages(&lm_file->sg_table, pages, npages, 0,
+ size, GFP_KERNEL))
+ goto out_free_page_mem;
+
+ lm_file->size = size;
+ lm_file->pages = pages;
+ lm_file->npages = npages;
+ lm_file->page_mem = page_mem;
+ lm_file->alloc_size = npages * PAGE_SIZE;
+
+ return lm_file;
+
+out_free_page_mem:
+ kvfree(page_mem);
+out_free_pages_array:
+ kfree(pages);
+out_put_file:
+ fput(lm_file->filep);
+ mutex_destroy(&lm_file->lock);
+out_free_file:
+ kfree(lm_file);
+
+ return NULL;
+}
+
+static void pds_vfio_put_lm_file(struct pds_vfio_lm_file *lm_file)
+{
+ mutex_lock(&lm_file->lock);
+
+ lm_file->size = 0;
+ lm_file->alloc_size = 0;
+
+ /* Free scatter list of file pages */
+ sg_free_table(&lm_file->sg_table);
+
+ kvfree(lm_file->page_mem);
+ lm_file->page_mem = NULL;
+ kfree(lm_file->pages);
+ lm_file->pages = NULL;
+
+ mutex_unlock(&lm_file->lock);
+
+ /* allow file to be released since we are done with it */
+ fput(lm_file->filep);
+}
+
+void pds_vfio_put_save_file(struct pds_vfio_pci_device *pds_vfio)
+{
+ if (!pds_vfio->save_file)
+ return;
+
+ pds_vfio_put_lm_file(pds_vfio->save_file);
+ pds_vfio->save_file = NULL;
+}
+
+void pds_vfio_put_restore_file(struct pds_vfio_pci_device *pds_vfio)
+{
+ if (!pds_vfio->restore_file)
+ return;
+
+ pds_vfio_put_lm_file(pds_vfio->restore_file);
+ pds_vfio->restore_file = NULL;
+}
+
+static struct page *pds_vfio_get_file_page(struct pds_vfio_lm_file *lm_file,
+ unsigned long offset)
+{
+ unsigned long cur_offset = 0;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ /* All accesses are sequential */
+ if (offset < lm_file->last_offset || !lm_file->last_offset_sg) {
+ lm_file->last_offset = 0;
+ lm_file->last_offset_sg = lm_file->sg_table.sgl;
+ lm_file->sg_last_entry = 0;
+ }
+
+ cur_offset = lm_file->last_offset;
+
+ for_each_sg(lm_file->last_offset_sg, sg,
+ lm_file->sg_table.orig_nents - lm_file->sg_last_entry, i) {
+ if (offset < sg->length + cur_offset) {
+ lm_file->last_offset_sg = sg;
+ lm_file->sg_last_entry += i;
+ lm_file->last_offset = cur_offset;
+ return nth_page(sg_page(sg),
+ (offset - cur_offset) / PAGE_SIZE);
+ }
+ cur_offset += sg->length;
+ }
+
+ return NULL;
+}
+
+static int pds_vfio_release_file(struct inode *inode, struct file *filp)
+{
+ struct pds_vfio_lm_file *lm_file = filp->private_data;
+
+ mutex_lock(&lm_file->lock);
+ lm_file->filep->f_pos = 0;
+ lm_file->size = 0;
+ mutex_unlock(&lm_file->lock);
+ mutex_destroy(&lm_file->lock);
+ kfree(lm_file);
+
+ return 0;
+}
+
+static ssize_t pds_vfio_save_read(struct file *filp, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct pds_vfio_lm_file *lm_file = filp->private_data;
+ ssize_t done = 0;
+
+ if (pos)
+ return -ESPIPE;
+ pos = &filp->f_pos;
+
+ mutex_lock(&lm_file->lock);
+ if (*pos > lm_file->size) {
+ done = -EINVAL;
+ goto out_unlock;
+ }
+
+ len = min_t(size_t, lm_file->size - *pos, len);
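+	/* Copy out to userspace a page at a time via the file's backing pages */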
+ while (len) {
+ size_t page_offset;
+ struct page *page;
+ size_t page_len;
+ u8 *from_buff;
+ int err;
+
+ page_offset = (*pos) % PAGE_SIZE;
+ page = pds_vfio_get_file_page(lm_file, *pos - page_offset);
+ if (!page) {
+ if (done == 0)
+ done = -EINVAL;
+ goto out_unlock;
+ }
+
+ page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
+ from_buff = kmap_local_page(page);
+ err = copy_to_user(buf, from_buff + page_offset, page_len);
+ kunmap_local(from_buff);
+ if (err) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *pos += page_len;
+ len -= page_len;
+ done += page_len;
+ buf += page_len;
+ }
+
+out_unlock:
+ mutex_unlock(&lm_file->lock);
+ return done;
+}
+
+static const struct file_operations pds_vfio_save_fops = {
+ .owner = THIS_MODULE,
+ .read = pds_vfio_save_read,
+ .release = pds_vfio_release_file,
+ .llseek = no_llseek,
+};
+
+static int pds_vfio_get_save_file(struct pds_vfio_pci_device *pds_vfio)
+{
+ struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
+ struct pds_vfio_lm_file *lm_file;
+ u64 size;
+ int err;
+
+ /* Get live migration state size in this state */
+ err = pds_vfio_get_lm_state_size_cmd(pds_vfio, &size);
+ if (err) {
+ dev_err(dev, "failed to get save status: %pe\n", ERR_PTR(err));
+ return err;
+ }
+
+ dev_dbg(dev, "save status, size = %lld\n", size);
+
+ if (!size) {
+ dev_err(dev, "invalid state size\n");
+ return -EIO;
+ }
+
+ lm_file = pds_vfio_get_lm_file(&pds_vfio_save_fops, O_RDONLY, size);
+ if (!lm_file) {
+ dev_err(dev, "failed to create save file\n");
+ return -ENOENT;
+ }
+
+ dev_dbg(dev, "size = %lld, alloc_size = %lld, npages = %lld\n",
+ lm_file->size, lm_file->alloc_size, lm_file->npages);
+
+ pds_vfio->save_file = lm_file;
+
+ return 0;
+}
+
+static ssize_t pds_vfio_restore_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct pds_vfio_lm_file *lm_file = filp->private_data;
+ loff_t requested_length;
+ ssize_t done = 0;
+
+ if (pos)
+ return -ESPIPE;
+
+ pos = &filp->f_pos;
+
+ if (*pos < 0 ||
+ check_add_overflow((loff_t)len, *pos, &requested_length))
+ return -EINVAL;
+
+ mutex_lock(&lm_file->lock);
+
+ while (len) {
+ size_t page_offset;
+ struct page *page;
+ size_t page_len;
+ u8 *to_buff;
+ int err;
+
+ page_offset = (*pos) % PAGE_SIZE;
+ page = pds_vfio_get_file_page(lm_file, *pos - page_offset);
+ if (!page) {
+ if (done == 0)
+ done = -EINVAL;
+ goto out_unlock;
+ }
+
+ page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
+ to_buff = kmap_local_page(page);
+ err = copy_from_user(to_buff + page_offset, buf, page_len);
+ kunmap_local(to_buff);
+ if (err) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *pos += page_len;
+ len -= page_len;
+ done += page_len;
+ buf += page_len;
+ lm_file->size += page_len;
+ }
+out_unlock:
+ mutex_unlock(&lm_file->lock);
+ return done;
+}
+
+static const struct file_operations pds_vfio_restore_fops = {
+ .owner = THIS_MODULE,
+ .write = pds_vfio_restore_write,
+ .release = pds_vfio_release_file,
+ .llseek = no_llseek,
+};
+
+static int pds_vfio_get_restore_file(struct pds_vfio_pci_device *pds_vfio)
+{
+ struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
+ struct pds_vfio_lm_file *lm_file;
+ u64 size;
+
+ size = sizeof(union pds_lm_dev_state);
+ dev_dbg(dev, "restore status, size = %lld\n", size);
+
+ if (!size) {
+		dev_err(dev, "invalid state size\n");
+ return -EIO;
+ }
+
+ lm_file = pds_vfio_get_lm_file(&pds_vfio_restore_fops, O_WRONLY, size);
+ if (!lm_file) {
+		dev_err(dev, "failed to create restore file\n");
+ return -ENOENT;
+ }
+ pds_vfio->restore_file = lm_file;
+
+ return 0;
+}
+
+struct file *
+pds_vfio_step_device_state_locked(struct pds_vfio_pci_device *pds_vfio,
+ enum vfio_device_mig_state next)
+{
+ enum vfio_device_mig_state cur = pds_vfio->state;
+ int err;
+
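+	/* Each branch below handles a single arc of the VFIO migration state machine */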
+ if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_STOP_COPY) {
+ err = pds_vfio_get_save_file(pds_vfio);
+ if (err)
+ return ERR_PTR(err);
+
+ err = pds_vfio_get_lm_state_cmd(pds_vfio);
+ if (err) {
+ pds_vfio_put_save_file(pds_vfio);
+ return ERR_PTR(err);
+ }
+
+ return pds_vfio->save_file->filep;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP_COPY && next == VFIO_DEVICE_STATE_STOP) {
+ pds_vfio_put_save_file(pds_vfio);
+ pds_vfio_dirty_disable(pds_vfio, true);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_RESUMING) {
+ err = pds_vfio_get_restore_file(pds_vfio);
+ if (err)
+ return ERR_PTR(err);
+
+ return pds_vfio->restore_file->filep;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RESUMING && next == VFIO_DEVICE_STATE_STOP) {
+ err = pds_vfio_set_lm_state_cmd(pds_vfio);
+ if (err)
+ return ERR_PTR(err);
+
+ pds_vfio_put_restore_file(pds_vfio);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING && next == VFIO_DEVICE_STATE_RUNNING_P2P) {
+ pds_vfio_send_host_vf_lm_status_cmd(pds_vfio,
+ PDS_LM_STA_IN_PROGRESS);
+ err = pds_vfio_suspend_device_cmd(pds_vfio,
+ PDS_LM_SUSPEND_RESUME_TYPE_P2P);
+ if (err)
+ return ERR_PTR(err);
+
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && next == VFIO_DEVICE_STATE_RUNNING) {
+ err = pds_vfio_resume_device_cmd(pds_vfio,
+ PDS_LM_SUSPEND_RESUME_TYPE_FULL);
+ if (err)
+ return ERR_PTR(err);
+
+ pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_NONE);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_RUNNING_P2P) {
+ err = pds_vfio_resume_device_cmd(pds_vfio,
+ PDS_LM_SUSPEND_RESUME_TYPE_P2P);
+ if (err)
+ return ERR_PTR(err);
+
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && next == VFIO_DEVICE_STATE_STOP) {
+ err = pds_vfio_suspend_device_cmd(pds_vfio,
+ PDS_LM_SUSPEND_RESUME_TYPE_FULL);
+ if (err)
+ return ERR_PTR(err);
+ return NULL;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
diff --git a/drivers/vfio/pci/pds/lm.h b/drivers/vfio/pci/pds/lm.h
new file mode 100644
index 000000000000..13be893198b7
--- /dev/null
+++ b/drivers/vfio/pci/pds/lm.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _LM_H_
+#define _LM_H_
+
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_adminq.h>
+
+struct pds_vfio_lm_file {
+ struct file *filep;
+ struct mutex lock; /* protect live migration data file */
+ u64 size; /* Size with valid data */
+	u64 alloc_size; /* Total allocated size. Always >= size */
+ void *page_mem; /* memory allocated for pages */
+ struct page **pages; /* Backing pages for file */
+ unsigned long long npages;
+ struct sg_table sg_table; /* SG table for backing pages */
+ struct pds_lm_sg_elem *sgl; /* DMA mapping */
+ dma_addr_t sgl_addr;
+ u16 num_sge;
+ struct scatterlist *last_offset_sg; /* Iterator */
+ unsigned int sg_last_entry;
+ unsigned long last_offset;
+};
+
+struct pds_vfio_pci_device;
+
+struct file *
+pds_vfio_step_device_state_locked(struct pds_vfio_pci_device *pds_vfio,
+ enum vfio_device_mig_state next);
+
+void pds_vfio_put_save_file(struct pds_vfio_pci_device *pds_vfio);
+void pds_vfio_put_restore_file(struct pds_vfio_pci_device *pds_vfio);
+
+#endif /* _LM_H_ */
diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c
new file mode 100644
index 000000000000..ab4b5958e413
--- /dev/null
+++ b/drivers/vfio/pci/pds/pci_drv.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/vfio.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+
+#include "vfio_dev.h"
+#include "pci_drv.h"
+#include "cmds.h"
+
+#define PDS_VFIO_DRV_DESCRIPTION "AMD/Pensando VFIO Device Driver"
+#define PCI_VENDOR_ID_PENSANDO 0x1dd8
+
+static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio)
+{
+ bool deferred_reset_needed = false;
+
+ /*
+ * Documentation states that the kernel migration driver must not
+ * generate asynchronous device state transitions outside of
+ * manipulation by the user or the VFIO_DEVICE_RESET ioctl.
+ *
+ * Since recovery is an asynchronous event received from the device,
+ * initiate a deferred reset. Issue a deferred reset in the following
+ * situations:
+ * 1. Migration is in progress, which will cause the next step of
+ * the migration to fail.
+ * 2. If the device is in a state that will be set to
+ * VFIO_DEVICE_STATE_RUNNING on the next action (i.e. VM is
+ * shutdown and device is in VFIO_DEVICE_STATE_STOP).
+ */
+ mutex_lock(&pds_vfio->state_mutex);
+ if ((pds_vfio->state != VFIO_DEVICE_STATE_RUNNING &&
+ pds_vfio->state != VFIO_DEVICE_STATE_ERROR) ||
+ (pds_vfio->state == VFIO_DEVICE_STATE_RUNNING &&
+ pds_vfio_dirty_is_enabled(pds_vfio)))
+ deferred_reset_needed = true;
+ mutex_unlock(&pds_vfio->state_mutex);
+
+	/*
+	 * On the next user-initiated state transition, the device will
+	 * transition to VFIO_DEVICE_STATE_ERROR. At that point it is the
+	 * user's responsibility to reset the device.
+	 *
+	 * If a VFIO_DEVICE_RESET is requested after recovery and before the
+	 * next state transition, then the deferred reset state will be set to
+	 * VFIO_DEVICE_STATE_RUNNING.
+	 */
+ if (deferred_reset_needed) {
+ spin_lock(&pds_vfio->reset_lock);
+ pds_vfio->deferred_reset = true;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_ERROR;
+ spin_unlock(&pds_vfio->reset_lock);
+ }
+}
+
+static int pds_vfio_pci_notify_handler(struct notifier_block *nb,
+ unsigned long ecode, void *data)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(nb, struct pds_vfio_pci_device, nb);
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ union pds_core_notifyq_comp *event = data;
+
+ dev_dbg(dev, "%s: event code %lu\n", __func__, ecode);
+
+	/*
+	 * Nothing needs to be done for RESET state==0 since there is no notify
+	 * or feedback mechanism available, and it is possible we won't even
+	 * see a state==0 event because the pds_core recovery may still be
+	 * pending.
+	 *
+	 * Any requests from VFIO while state==0 will fail; the resulting
+	 * errors may cause the migration to fail.
+	 */
+ if (ecode == PDS_EVENT_RESET) {
+ dev_info(dev, "%s: PDS_EVENT_RESET event received, state==%d\n",
+ __func__, event->reset.state);
+ /*
+ * pds_core device finished recovery and sent us the
+ * notification (state == 1) to allow us to recover
+ */
+ if (event->reset.state == 1)
+ pds_vfio_recovery(pds_vfio);
+ }
+
+ return 0;
+}
+
+static int
+pds_vfio_pci_register_event_handler(struct pds_vfio_pci_device *pds_vfio)
+{
+ struct device *dev = pds_vfio_to_dev(pds_vfio);
+ struct notifier_block *nb = &pds_vfio->nb;
+ int err;
+
+ if (!nb->notifier_call) {
+ nb->notifier_call = pds_vfio_pci_notify_handler;
+ err = pdsc_register_notify(nb);
+ if (err) {
+ nb->notifier_call = NULL;
+ dev_err(dev,
+ "failed to register pds event handler: %pe\n",
+ ERR_PTR(err));
+ return -EINVAL;
+ }
+ dev_dbg(dev, "pds event handler registered\n");
+ }
+
+ return 0;
+}
+
+static void
+pds_vfio_pci_unregister_event_handler(struct pds_vfio_pci_device *pds_vfio)
+{
+ if (pds_vfio->nb.notifier_call) {
+ pdsc_unregister_notify(&pds_vfio->nb);
+ pds_vfio->nb.notifier_call = NULL;
+ }
+}
+
+static int pds_vfio_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pds_vfio_pci_device *pds_vfio;
+ int err;
+
+ pds_vfio = vfio_alloc_device(pds_vfio_pci_device, vfio_coredev.vdev,
+ &pdev->dev, pds_vfio_ops_info());
+ if (IS_ERR(pds_vfio))
+ return PTR_ERR(pds_vfio);
+
+ dev_set_drvdata(&pdev->dev, &pds_vfio->vfio_coredev);
+
+ err = vfio_pci_core_register_device(&pds_vfio->vfio_coredev);
+ if (err)
+ goto out_put_vdev;
+
+ err = pds_vfio_register_client_cmd(pds_vfio);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register as client: %pe\n",
+ ERR_PTR(err));
+ goto out_unregister_coredev;
+ }
+
+ err = pds_vfio_pci_register_event_handler(pds_vfio);
+ if (err)
+ goto out_unregister_client;
+
+ return 0;
+
+out_unregister_client:
+ pds_vfio_unregister_client_cmd(pds_vfio);
+out_unregister_coredev:
+ vfio_pci_core_unregister_device(&pds_vfio->vfio_coredev);
+out_put_vdev:
+ vfio_put_device(&pds_vfio->vfio_coredev.vdev);
+ return err;
+}
+
+static void pds_vfio_pci_remove(struct pci_dev *pdev)
+{
+ struct pds_vfio_pci_device *pds_vfio = pds_vfio_pci_drvdata(pdev);
+
+ pds_vfio_pci_unregister_event_handler(pds_vfio);
+ pds_vfio_unregister_client_cmd(pds_vfio);
+ vfio_pci_core_unregister_device(&pds_vfio->vfio_coredev);
+ vfio_put_device(&pds_vfio->vfio_coredev.vdev);
+}
+
+static const struct pci_device_id pds_vfio_pci_table[] = {
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_PENSANDO, 0x1003) }, /* Ethernet VF */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pds_vfio_pci_table);
+
+static void pds_vfio_pci_aer_reset_done(struct pci_dev *pdev)
+{
+ struct pds_vfio_pci_device *pds_vfio = pds_vfio_pci_drvdata(pdev);
+
+ pds_vfio_reset(pds_vfio);
+}
+
+static const struct pci_error_handlers pds_vfio_pci_err_handlers = {
+ .reset_done = pds_vfio_pci_aer_reset_done,
+ .error_detected = vfio_pci_core_aer_err_detected,
+};
+
+static struct pci_driver pds_vfio_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pds_vfio_pci_table,
+ .probe = pds_vfio_pci_probe,
+ .remove = pds_vfio_pci_remove,
+ .err_handler = &pds_vfio_pci_err_handlers,
+ .driver_managed_dma = true,
+};
+
+module_pci_driver(pds_vfio_pci_driver);
+
+MODULE_DESCRIPTION(PDS_VFIO_DRV_DESCRIPTION);
+MODULE_AUTHOR("Brett Creeley <brett.creeley@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/vfio/pci/pds/pci_drv.h b/drivers/vfio/pci/pds/pci_drv.h
new file mode 100644
index 000000000000..e79bed12ed14
--- /dev/null
+++ b/drivers/vfio/pci/pds/pci_drv.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _PCI_DRV_H
+#define _PCI_DRV_H
+
+#include <linux/pci.h>
+
+#endif /* _PCI_DRV_H */
diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
new file mode 100644
index 000000000000..b46174f5eb09
--- /dev/null
+++ b/drivers/vfio/pci/pds/vfio_dev.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#include <linux/vfio.h>
+#include <linux/vfio_pci_core.h>
+
+#include "lm.h"
+#include "dirty.h"
+#include "vfio_dev.h"
+
+struct pci_dev *pds_vfio_to_pci_dev(struct pds_vfio_pci_device *pds_vfio)
+{
+ return pds_vfio->vfio_coredev.pdev;
+}
+
+struct device *pds_vfio_to_dev(struct pds_vfio_pci_device *pds_vfio)
+{
+ return &pds_vfio_to_pci_dev(pds_vfio)->dev;
+}
+
+struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev)
+{
+ struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
+
+ return container_of(core_device, struct pds_vfio_pci_device,
+ vfio_coredev);
+}
+
+void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
+{
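+	/* Apply any pending deferred reset before dropping the state_mutex */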
+again:
+ spin_lock(&pds_vfio->reset_lock);
+ if (pds_vfio->deferred_reset) {
+ pds_vfio->deferred_reset = false;
+ if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
+ pds_vfio_put_restore_file(pds_vfio);
+ pds_vfio_put_save_file(pds_vfio);
+ pds_vfio_dirty_disable(pds_vfio, false);
+ }
+ pds_vfio->state = pds_vfio->deferred_reset_state;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
+ spin_unlock(&pds_vfio->reset_lock);
+ goto again;
+ }
+ mutex_unlock(&pds_vfio->state_mutex);
+ spin_unlock(&pds_vfio->reset_lock);
+}
+
+void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio)
+{
+ spin_lock(&pds_vfio->reset_lock);
+ pds_vfio->deferred_reset = true;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
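+	/*
+	 * If the state_mutex is already held, the current holder will apply
+	 * this deferred reset when it unlocks.
+	 */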
+ if (!mutex_trylock(&pds_vfio->state_mutex)) {
+ spin_unlock(&pds_vfio->reset_lock);
+ return;
+ }
+ spin_unlock(&pds_vfio->reset_lock);
+ pds_vfio_state_mutex_unlock(pds_vfio);
+}
+
+static struct file *
+pds_vfio_set_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state new_state)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(vdev, struct pds_vfio_pci_device,
+ vfio_coredev.vdev);
+ struct file *res = NULL;
+
+ mutex_lock(&pds_vfio->state_mutex);
+ /*
+ * only way to transition out of VFIO_DEVICE_STATE_ERROR is via
+ * VFIO_DEVICE_RESET, so prevent the state machine from running since
+ * vfio_mig_get_next_state() will throw a WARN_ON() when transitioning
+ * from VFIO_DEVICE_STATE_ERROR to any other state
+ */
+ while (pds_vfio->state != VFIO_DEVICE_STATE_ERROR &&
+ new_state != pds_vfio->state) {
+ enum vfio_device_mig_state next_state;
+
+ int err = vfio_mig_get_next_state(vdev, pds_vfio->state,
+ new_state, &next_state);
+ if (err) {
+ res = ERR_PTR(err);
+ break;
+ }
+
+ res = pds_vfio_step_device_state_locked(pds_vfio, next_state);
+ if (IS_ERR(res))
+ break;
+
+ pds_vfio->state = next_state;
+
+ if (WARN_ON(res && new_state != pds_vfio->state)) {
+ res = ERR_PTR(-EINVAL);
+ break;
+ }
+ }
+ pds_vfio_state_mutex_unlock(pds_vfio);
+ /* still waiting on a deferred_reset */
+ if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR)
+ res = ERR_PTR(-EIO);
+
+ return res;
+}
+
+static int pds_vfio_get_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state *current_state)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(vdev, struct pds_vfio_pci_device,
+ vfio_coredev.vdev);
+
+ mutex_lock(&pds_vfio->state_mutex);
+ *current_state = pds_vfio->state;
+ pds_vfio_state_mutex_unlock(pds_vfio);
+ return 0;
+}
+
+static int pds_vfio_get_device_state_size(struct vfio_device *vdev,
+ unsigned long *stop_copy_length)
+{
+ *stop_copy_length = PDS_LM_DEVICE_STATE_LENGTH;
+ return 0;
+}
+
+static const struct vfio_migration_ops pds_vfio_lm_ops = {
+ .migration_set_state = pds_vfio_set_device_state,
+ .migration_get_state = pds_vfio_get_device_state,
+ .migration_get_data_size = pds_vfio_get_device_state_size
+};
+
+static const struct vfio_log_ops pds_vfio_log_ops = {
+ .log_start = pds_vfio_dma_logging_start,
+ .log_stop = pds_vfio_dma_logging_stop,
+ .log_read_and_clear = pds_vfio_dma_logging_report,
+};
+
+static int pds_vfio_init_device(struct vfio_device *vdev)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(vdev, struct pds_vfio_pci_device,
+ vfio_coredev.vdev);
+ struct pci_dev *pdev = to_pci_dev(vdev->dev);
+ int err, vf_id, pci_id;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ err = vfio_pci_core_init_dev(vdev);
+ if (err)
+ return err;
+
+ pds_vfio->vf_id = vf_id;
+
+ vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
+ vdev->mig_ops = &pds_vfio_lm_ops;
+ vdev->log_ops = &pds_vfio_log_ops;
+
+ pci_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ dev_dbg(&pdev->dev,
+ "%s: PF %#04x VF %#04x vf_id %d domain %d pds_vfio %p\n",
+ __func__, pci_dev_id(pdev->physfn), pci_id, vf_id,
+ pci_domain_nr(pdev->bus), pds_vfio);
+
+ return 0;
+}
+
+static int pds_vfio_open_device(struct vfio_device *vdev)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(vdev, struct pds_vfio_pci_device,
+ vfio_coredev.vdev);
+ int err;
+
+ err = vfio_pci_core_enable(&pds_vfio->vfio_coredev);
+ if (err)
+ return err;
+
+ mutex_init(&pds_vfio->state_mutex);
+ pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
+
+ vfio_pci_core_finish_enable(&pds_vfio->vfio_coredev);
+
+ return 0;
+}
+
+static void pds_vfio_close_device(struct vfio_device *vdev)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(vdev, struct pds_vfio_pci_device,
+ vfio_coredev.vdev);
+
+ mutex_lock(&pds_vfio->state_mutex);
+ pds_vfio_put_restore_file(pds_vfio);
+ pds_vfio_put_save_file(pds_vfio);
+ pds_vfio_dirty_disable(pds_vfio, true);
+ mutex_unlock(&pds_vfio->state_mutex);
+ mutex_destroy(&pds_vfio->state_mutex);
+ vfio_pci_core_close_device(vdev);
+}
+
+static const struct vfio_device_ops pds_vfio_ops = {
+ .name = "pds-vfio",
+ .init = pds_vfio_init_device,
+ .release = vfio_pci_core_release_dev,
+ .open_device = pds_vfio_open_device,
+ .close_device = pds_vfio_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = vfio_pci_core_read,
+ .write = vfio_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+ .bind_iommufd = vfio_iommufd_physical_bind,
+ .unbind_iommufd = vfio_iommufd_physical_unbind,
+ .attach_ioas = vfio_iommufd_physical_attach_ioas,
+};
+
+const struct vfio_device_ops *pds_vfio_ops_info(void)
+{
+ return &pds_vfio_ops;
+}
diff --git a/drivers/vfio/pci/pds/vfio_dev.h b/drivers/vfio/pci/pds/vfio_dev.h
new file mode 100644
index 000000000000..b8f2d667608f
--- /dev/null
+++ b/drivers/vfio/pci/pds/vfio_dev.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _VFIO_DEV_H_
+#define _VFIO_DEV_H_
+
+#include <linux/pci.h>
+#include <linux/vfio_pci_core.h>
+
+#include "dirty.h"
+#include "lm.h"
+
+struct pds_vfio_pci_device {
+ struct vfio_pci_core_device vfio_coredev;
+
+ struct pds_vfio_lm_file *save_file;
+ struct pds_vfio_lm_file *restore_file;
+ struct pds_vfio_dirty dirty;
+ struct mutex state_mutex; /* protect migration state */
+ enum vfio_device_mig_state state;
+ spinlock_t reset_lock; /* protect reset_done flow */
+ u8 deferred_reset;
+ enum vfio_device_mig_state deferred_reset_state;
+ struct notifier_block nb;
+
+ int vf_id;
+ u16 client_id;
+};
+
+void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio);
+
+const struct vfio_device_ops *pds_vfio_ops_info(void);
+struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev);
+void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio);
+
+struct pci_dev *pds_vfio_to_pci_dev(struct pds_vfio_pci_device *pds_vfio);
+struct device *pds_vfio_to_dev(struct pds_vfio_pci_device *pds_vfio);
+
+#endif /* _VFIO_DEV_H_ */
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 29091ee2e984..cb5b7f865d58 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -141,6 +141,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 20d7b69ea6ff..1929103ee59a 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -27,6 +27,7 @@
#include <linux/vgaarb.h>
#include <linux/nospec.h>
#include <linux/sched/mm.h>
+#include <linux/iommufd.h>
#if IS_ENABLED(CONFIG_EEH)
#include <asm/eeh.h>
#endif
@@ -180,7 +181,8 @@ no_mmap:
struct vfio_pci_group_info;
static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
- struct vfio_pci_group_info *groups);
+ struct vfio_pci_group_info *groups,
+ struct iommufd_ctx *iommufd_ctx);
/*
* INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
@@ -776,29 +778,65 @@ static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
}
struct vfio_pci_fill_info {
- int max;
- int cur;
- struct vfio_pci_dependent_device *devices;
+ struct vfio_pci_dependent_device __user *devices;
+ struct vfio_pci_dependent_device __user *devices_end;
+ struct vfio_device *vdev;
+ u32 count;
+ u32 flags;
};
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
+ struct vfio_pci_dependent_device info = {
+ .segment = pci_domain_nr(pdev->bus),
+ .bus = pdev->bus->number,
+ .devfn = pdev->devfn,
+ };
struct vfio_pci_fill_info *fill = data;
- struct iommu_group *iommu_group;
- if (fill->cur == fill->max)
- return -EAGAIN; /* Something changed, try again */
+ fill->count++;
+ if (fill->devices >= fill->devices_end)
+ return 0;
+
+ if (fill->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID) {
+ struct iommufd_ctx *iommufd = vfio_iommufd_device_ictx(fill->vdev);
+ struct vfio_device_set *dev_set = fill->vdev->dev_set;
+ struct vfio_device *vdev;
+
+ /*
+ * hot-reset requires all affected devices be represented in
+ * the dev_set.
+ */
+ vdev = vfio_find_device_in_devset(dev_set, &pdev->dev);
+ if (!vdev) {
+ info.devid = VFIO_PCI_DEVID_NOT_OWNED;
+ } else {
+ int id = vfio_iommufd_get_dev_id(vdev, iommufd);
+
+ if (id > 0)
+ info.devid = id;
+ else if (id == -ENOENT)
+ info.devid = VFIO_PCI_DEVID_OWNED;
+ else
+ info.devid = VFIO_PCI_DEVID_NOT_OWNED;
+ }
+ /* If devid is VFIO_PCI_DEVID_NOT_OWNED, clear owned flag. */
+ if (info.devid == VFIO_PCI_DEVID_NOT_OWNED)
+ fill->flags &= ~VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;
+ } else {
+ struct iommu_group *iommu_group;
+
+ iommu_group = iommu_group_get(&pdev->dev);
+ if (!iommu_group)
+ return -EPERM; /* Cannot reset non-isolated devices */
- iommu_group = iommu_group_get(&pdev->dev);
- if (!iommu_group)
- return -EPERM; /* Cannot reset non-isolated devices */
+ info.group_id = iommu_group_id(iommu_group);
+ iommu_group_put(iommu_group);
+ }
- fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
- fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
- fill->devices[fill->cur].bus = pdev->bus->number;
- fill->devices[fill->cur].devfn = pdev->devfn;
- fill->cur++;
- iommu_group_put(iommu_group);
+ if (copy_to_user(fill->devices, &info, sizeof(info)))
+ return -EFAULT;
+ fill->devices++;
return 0;
}
@@ -920,24 +958,17 @@ static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
struct vfio_device_info __user *arg)
{
unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
- struct vfio_device_info info;
+ struct vfio_device_info info = {};
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- unsigned long capsz;
int ret;
- /* For backward compatibility, cannot require this */
- capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
-
if (copy_from_user(&info, arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
- if (info.argsz >= capsz) {
- minsz = capsz;
- info.cap_offset = 0;
- }
+ minsz = min_t(size_t, info.argsz, sizeof(info));
info.flags = VFIO_DEVICE_FLAGS_PCI;
@@ -1228,8 +1259,7 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
unsigned long minsz =
offsetofend(struct vfio_pci_hot_reset_info, count);
struct vfio_pci_hot_reset_info hdr;
- struct vfio_pci_fill_info fill = { 0 };
- struct vfio_pci_dependent_device *devices = NULL;
+ struct vfio_pci_fill_info fill = {};
bool slot = false;
int ret = 0;
@@ -1247,78 +1277,42 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
else if (pci_probe_reset_bus(vdev->pdev->bus))
return -ENODEV;
- /* How many devices are affected? */
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
- &fill.max, slot);
- if (ret)
- return ret;
+ fill.devices = arg->devices;
+ fill.devices_end = arg->devices +
+ (hdr.argsz - sizeof(hdr)) / sizeof(arg->devices[0]);
+ fill.vdev = &vdev->vdev;
- WARN_ON(!fill.max); /* Should always be at least one */
-
- /*
- * If there's enough space, fill it now, otherwise return -ENOSPC and
- * the number of devices affected.
- */
- if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
- ret = -ENOSPC;
- hdr.count = fill.max;
- goto reset_info_exit;
- }
-
- devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
- if (!devices)
- return -ENOMEM;
-
- fill.devices = devices;
+ if (vfio_device_cdev_opened(&vdev->vdev))
+ fill.flags |= VFIO_PCI_HOT_RESET_FLAG_DEV_ID |
+ VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;
+ mutex_lock(&vdev->vdev.dev_set->lock);
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs,
&fill, slot);
+ mutex_unlock(&vdev->vdev.dev_set->lock);
+ if (ret)
+ return ret;
- /*
- * If a device was removed between counting and filling, we may come up
- * short of fill.max. If a device was added, we'll have a return of
- * -EAGAIN above.
- */
- if (!ret)
- hdr.count = fill.cur;
-
-reset_info_exit:
+ hdr.count = fill.count;
+ hdr.flags = fill.flags;
if (copy_to_user(arg, &hdr, minsz))
- ret = -EFAULT;
-
- if (!ret) {
- if (copy_to_user(&arg->devices, devices,
- hdr.count * sizeof(*devices)))
- ret = -EFAULT;
- }
+ return -EFAULT;
- kfree(devices);
- return ret;
+ if (fill.count > fill.devices - arg->devices)
+ return -ENOSPC;
+ return 0;
}
-static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
- struct vfio_pci_hot_reset __user *arg)
+static int
+vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
+ int array_count, bool slot,
+ struct vfio_pci_hot_reset __user *arg)
{
- unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
- struct vfio_pci_hot_reset hdr;
int32_t *group_fds;
struct file **files;
struct vfio_pci_group_info info;
- bool slot = false;
int file_idx, count = 0, ret = 0;
- if (copy_from_user(&hdr, arg, minsz))
- return -EFAULT;
-
- if (hdr.argsz < minsz || hdr.flags)
- return -EINVAL;
-
- /* Can we do a slot or bus reset or neither? */
- if (!pci_probe_reset_slot(vdev->pdev->slot))
- slot = true;
- else if (pci_probe_reset_bus(vdev->pdev->bus))
- return -ENODEV;
-
/*
* We can't let userspace give us an arbitrarily large buffer to copy,
* so verify how many we think there could be. Note groups can have
@@ -1329,12 +1323,11 @@ static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
if (ret)
return ret;
- /* Somewhere between 1 and count is OK */
- if (!hdr.count || hdr.count > count)
+ if (array_count > count)
return -EINVAL;
- group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
- files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
+ group_fds = kcalloc(array_count, sizeof(*group_fds), GFP_KERNEL);
+ files = kcalloc(array_count, sizeof(*files), GFP_KERNEL);
if (!group_fds || !files) {
kfree(group_fds);
kfree(files);
@@ -1342,18 +1335,17 @@ static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
}
if (copy_from_user(group_fds, arg->group_fds,
- hdr.count * sizeof(*group_fds))) {
+ array_count * sizeof(*group_fds))) {
kfree(group_fds);
kfree(files);
return -EFAULT;
}
/*
- * For each group_fd, get the group through the vfio external user
- * interface and store the group and iommu ID. This ensures the group
- * is held across the reset.
+ * Get the group file for each fd to ensure the group is held across
+ * the reset
*/
- for (file_idx = 0; file_idx < hdr.count; file_idx++) {
+ for (file_idx = 0; file_idx < array_count; file_idx++) {
struct file *file = fget(group_fds[file_idx]);
if (!file) {
@@ -1377,10 +1369,10 @@ static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
if (ret)
goto hot_reset_release;
- info.count = hdr.count;
+ info.count = array_count;
info.files = files;
- ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
+ ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info, NULL);
hot_reset_release:
for (file_idx--; file_idx >= 0; file_idx--)
@@ -1390,6 +1382,36 @@ hot_reset_release:
return ret;
}
+static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_hot_reset __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
+ struct vfio_pci_hot_reset hdr;
+ bool slot = false;
+
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz || hdr.flags)
+ return -EINVAL;
+
+ /* zero-length array is only for cdev opened devices */
+ if (!!hdr.count == vfio_device_cdev_opened(&vdev->vdev))
+ return -EINVAL;
+
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+ if (hdr.count)
+ return vfio_pci_ioctl_pci_hot_reset_groups(vdev, hdr.count, slot, arg);
+
+ return vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, NULL,
+ vfio_iommufd_device_ictx(&vdev->vdev));
+}
+
static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
struct vfio_device_ioeventfd __user *arg)
{
@@ -2355,13 +2377,16 @@ const struct pci_error_handlers vfio_pci_core_err_handlers = {
};
EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);
-static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
+static bool vfio_dev_in_groups(struct vfio_device *vdev,
struct vfio_pci_group_info *groups)
{
unsigned int i;
+ if (!groups)
+ return false;
+
for (i = 0; i < groups->count; i++)
- if (vfio_file_has_dev(groups->files[i], &vdev->vdev))
+ if (vfio_file_has_dev(groups->files[i], vdev))
return true;
return false;
}
@@ -2369,12 +2394,8 @@ static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
static int vfio_pci_is_device_in_set(struct pci_dev *pdev, void *data)
{
struct vfio_device_set *dev_set = data;
- struct vfio_device *cur;
- list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
- if (cur->dev == &pdev->dev)
- return 0;
- return -EBUSY;
+ return vfio_find_device_in_devset(dev_set, &pdev->dev) ? 0 : -ENODEV;
}
/*
@@ -2441,7 +2462,8 @@ unwind:
* get each memory_lock.
*/
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
- struct vfio_pci_group_info *groups)
+ struct vfio_pci_group_info *groups,
+ struct iommufd_ctx *iommufd_ctx)
{
struct vfio_pci_core_device *cur_mem;
struct vfio_pci_core_device *cur_vma;
@@ -2471,11 +2493,38 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
goto err_unlock;
list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
+ bool owned;
+
/*
- * Test whether all the affected devices are contained by the
- * set of groups provided by the user.
+ * Test whether all the affected devices can be reset by the
+ * user.
+ *
+ * If called from a group opened device and the user provides
+ * a set of groups, all the devices in the dev_set should be
+ * contained by the set of groups provided by the user.
+ *
+ * If called from a cdev opened device and the user provides
+ * a zero-length array, all the devices in the dev_set must
+ * be bound to the same iommufd_ctx as the input iommufd_ctx.
+ * If there is any device that has not been bound to any
+ * iommufd_ctx yet, check if its iommu_group has any device
+ * bound to the input iommufd_ctx. Such devices can be
+ * considered owned by the input iommufd_ctx as the device
+ * cannot be owned by another iommufd_ctx when its iommu_group
+ * is owned.
+ *
+ * Otherwise, reset is not allowed.
*/
- if (!vfio_dev_in_groups(cur_vma, groups)) {
+ if (iommufd_ctx) {
+ int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev,
+ iommufd_ctx);
+
+ owned = (devid > 0 || devid == -ENOENT);
+ } else {
+ owned = vfio_dev_in_groups(&cur_vma->vdev, groups);
+ }
+
+ if (!owned) {
ret = -EINVAL;
goto err_undo;
}
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index 83fe54015595..6464b3939ebc 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -119,6 +119,7 @@ static const struct vfio_device_ops vfio_amba_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static const struct amba_id pl330_ids[] = {
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index 22a1efca32a8..8cf22fa65baa 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -108,6 +108,7 @@ static const struct vfio_device_ops vfio_platform_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static struct platform_driver vfio_platform_driver = {
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index 7b19c621e0e6..307e3f29b527 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -16,14 +16,32 @@ struct iommufd_ctx;
struct iommu_group;
struct vfio_container;
+struct vfio_device_file {
+ struct vfio_device *device;
+ struct vfio_group *group;
+
+ u8 access_granted;
+ u32 devid; /* only valid when iommufd is valid */
+ spinlock_t kvm_ref_lock; /* protect kvm field */
+ struct kvm *kvm;
+ struct iommufd_ctx *iommufd; /* protected by struct vfio_device_set::lock */
+};
+
void vfio_device_put_registration(struct vfio_device *device);
bool vfio_device_try_get_registration(struct vfio_device *device);
-int vfio_device_open(struct vfio_device *device, struct iommufd_ctx *iommufd);
-void vfio_device_close(struct vfio_device *device,
- struct iommufd_ctx *iommufd);
+int vfio_df_open(struct vfio_device_file *df);
+void vfio_df_close(struct vfio_device_file *df);
+struct vfio_device_file *
+vfio_allocate_device_file(struct vfio_device *device);
extern const struct file_operations vfio_device_fops;
+#ifdef CONFIG_VFIO_NOIOMMU
+extern bool vfio_noiommu __read_mostly;
+#else
+enum { vfio_noiommu = false };
+#endif
+
enum vfio_group_type {
/*
* Physical device with IOMMU backing.
@@ -48,6 +66,7 @@ enum vfio_group_type {
VFIO_NO_IOMMU,
};
+#if IS_ENABLED(CONFIG_VFIO_GROUP)
struct vfio_group {
struct device dev;
struct cdev cdev;
@@ -74,8 +93,11 @@ struct vfio_group {
struct blocking_notifier_head notifier;
struct iommufd_ctx *iommufd;
spinlock_t kvm_ref_lock;
+ unsigned int cdev_device_open_cnt;
};
+int vfio_device_block_group(struct vfio_device *device);
+void vfio_device_unblock_group(struct vfio_device *device);
int vfio_device_set_group(struct vfio_device *device,
enum vfio_group_type type);
void vfio_device_remove_group(struct vfio_device *device);
@@ -83,7 +105,10 @@ void vfio_device_group_register(struct vfio_device *device);
void vfio_device_group_unregister(struct vfio_device *device);
int vfio_device_group_use_iommu(struct vfio_device *device);
void vfio_device_group_unuse_iommu(struct vfio_device *device);
-void vfio_device_group_close(struct vfio_device *device);
+void vfio_df_group_close(struct vfio_device_file *df);
+struct vfio_group *vfio_group_from_file(struct file *file);
+bool vfio_group_enforced_coherent(struct vfio_group *group);
+void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);
@@ -93,6 +118,82 @@ static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
vdev->group->type == VFIO_NO_IOMMU;
}
+#else
+struct vfio_group;
+
+static inline int vfio_device_block_group(struct vfio_device *device)
+{
+ return 0;
+}
+
+static inline void vfio_device_unblock_group(struct vfio_device *device)
+{
+}
+
+static inline int vfio_device_set_group(struct vfio_device *device,
+ enum vfio_group_type type)
+{
+ return 0;
+}
+
+static inline void vfio_device_remove_group(struct vfio_device *device)
+{
+}
+
+static inline void vfio_device_group_register(struct vfio_device *device)
+{
+}
+
+static inline void vfio_device_group_unregister(struct vfio_device *device)
+{
+}
+
+static inline int vfio_device_group_use_iommu(struct vfio_device *device)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void vfio_device_group_unuse_iommu(struct vfio_device *device)
+{
+}
+
+static inline void vfio_df_group_close(struct vfio_device_file *df)
+{
+}
+
+static inline struct vfio_group *vfio_group_from_file(struct file *file)
+{
+ return NULL;
+}
+
+static inline bool vfio_group_enforced_coherent(struct vfio_group *group)
+{
+ return true;
+}
+
+static inline void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
+{
+}
+
+static inline bool vfio_device_has_container(struct vfio_device *device)
+{
+ return false;
+}
+
+static inline int __init vfio_group_init(void)
+{
+ return 0;
+}
+
+static inline void vfio_group_cleanup(void)
+{
+}
+
+static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
+{
+ return false;
+}
+#endif /* CONFIG_VFIO_GROUP */
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/**
@@ -217,20 +318,109 @@ static inline void vfio_container_cleanup(void)
#endif
#if IS_ENABLED(CONFIG_IOMMUFD)
-int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx);
-void vfio_iommufd_unbind(struct vfio_device *device);
+bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
+ struct iommufd_ctx *ictx);
+int vfio_df_iommufd_bind(struct vfio_device_file *df);
+void vfio_df_iommufd_unbind(struct vfio_device_file *df);
+int vfio_iommufd_compat_attach_ioas(struct vfio_device *device,
+ struct iommufd_ctx *ictx);
#else
-static inline int vfio_iommufd_bind(struct vfio_device *device,
+static inline bool
+vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
struct iommufd_ctx *ictx)
{
+ return false;
+}
+
+static inline int vfio_df_iommufd_bind(struct vfio_device_file *fd)
+{
return -EOPNOTSUPP;
}
-static inline void vfio_iommufd_unbind(struct vfio_device *device)
+static inline void vfio_df_iommufd_unbind(struct vfio_device_file *df)
{
}
+
+static inline int
+vfio_iommufd_compat_attach_ioas(struct vfio_device *device,
+ struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
#endif
+int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
+ struct vfio_device_attach_iommufd_pt __user *arg);
+int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
+ struct vfio_device_detach_iommufd_pt __user *arg);
+
+#if IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV)
+void vfio_init_device_cdev(struct vfio_device *device);
+
+static inline int vfio_device_add(struct vfio_device *device)
+{
+ /* cdev does not support noiommu devices */
+ if (vfio_device_is_noiommu(device))
+ return device_add(&device->device);
+ vfio_init_device_cdev(device);
+ return cdev_device_add(&device->cdev, &device->device);
+}
+
+static inline void vfio_device_del(struct vfio_device *device)
+{
+ if (vfio_device_is_noiommu(device))
+ device_del(&device->device);
+ else
+ cdev_device_del(&device->cdev, &device->device);
+}
+
+int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep);
+long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
+ struct vfio_device_bind_iommufd __user *arg);
+void vfio_df_unbind_iommufd(struct vfio_device_file *df);
+int vfio_cdev_init(struct class *device_class);
+void vfio_cdev_cleanup(void);
+#else
+static inline void vfio_init_device_cdev(struct vfio_device *device)
+{
+}
+
+static inline int vfio_device_add(struct vfio_device *device)
+{
+ return device_add(&device->device);
+}
+
+static inline void vfio_device_del(struct vfio_device *device)
+{
+ device_del(&device->device);
+}
+
+static inline int vfio_device_fops_cdev_open(struct inode *inode,
+ struct file *filep)
+{
+ return 0;
+}
+
+static inline long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
+ struct vfio_device_bind_iommufd __user *arg)
+{
+ return -ENOTTY;
+}
+
+static inline void vfio_df_unbind_iommufd(struct vfio_device_file *df)
+{
+}
+
+static inline int vfio_cdev_init(struct class *device_class)
+{
+ return 0;
+}
+
+static inline void vfio_cdev_cleanup(void)
+{
+}
+#endif /* CONFIG_VFIO_DEVICE_CDEV */
+
#if IS_ENABLED(CONFIG_VFIO_VIRQFD)
int __init vfio_virqfd_init(void);
void vfio_virqfd_exit(void);
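
Aside (illustrative only, not part of the patch): the cdev helpers declared above back the new character-device flow. A hedged userspace sketch, assuming the VFIO_DEVICE_BIND_IOMMUFD / VFIO_DEVICE_ATTACH_IOMMUFD_PT uapi introduced by this series; the device node path and ioas_id are placeholders and error handling is trimmed.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int open_and_attach(const char *devnode, unsigned int ioas_id)
{
	int iommufd = open("/dev/iommu", O_RDWR);
	int devfd = open(devnode, O_RDWR);	/* e.g. /dev/vfio/devices/vfio0 */
	struct vfio_device_bind_iommufd bind = {
		.argsz = sizeof(bind),
		.iommufd = iommufd,
	};
	struct vfio_device_attach_iommufd_pt attach = {
		.argsz = sizeof(attach),
		.pt_id = ioas_id,	/* IOAS previously allocated on the iommufd */
	};

	if (iommufd < 0 || devfd < 0)
		return -1;

	/* Grants full device access; bind.out_devid reports the iommufd devid. */
	if (ioctl(devfd, VFIO_DEVICE_BIND_IOMMUFD, &bind))
		return -1;

	if (ioctl(devfd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach))
		return -1;

	return devfd;
}
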
@@ -244,18 +434,12 @@ static inline void vfio_virqfd_exit(void)
}
#endif
-#ifdef CONFIG_VFIO_NOIOMMU
-extern bool vfio_noiommu __read_mostly;
-#else
-enum { vfio_noiommu = false };
-#endif
-
#ifdef CONFIG_HAVE_KVM
-void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
+void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
void vfio_device_put_kvm(struct vfio_device *device);
#else
-static inline void _vfio_device_get_kvm_safe(struct vfio_device *device,
- struct kvm *kvm)
+static inline void vfio_device_get_kvm_safe(struct vfio_device *device,
+ struct kvm *kvm)
{
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index ebe0ad31d0b0..eacd6ec04de5 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -2732,7 +2732,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
struct vfio_info_cap *caps)
{
- struct vfio_iommu_type1_info_cap_migration cap_mig;
+ struct vfio_iommu_type1_info_cap_migration cap_mig = {};
cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
cap_mig.header.version = 1;
@@ -2762,27 +2762,20 @@ static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu,
static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
unsigned long arg)
{
- struct vfio_iommu_type1_info info;
+ struct vfio_iommu_type1_info info = {};
unsigned long minsz;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- unsigned long capsz;
int ret;
minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
- /* For backward compatibility, cannot require this */
- capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
-
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
- if (info.argsz >= capsz) {
- minsz = capsz;
- info.cap_offset = 0; /* output, no-recopy necessary */
- }
+ minsz = min_t(size_t, info.argsz, sizeof(info));
mutex_lock(&iommu->lock);
info.flags = VFIO_IOMMU_INFO_PGSIZES;
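
Aside (illustrative only, not part of the patch): the argsz handling above keeps the long-standing two-call pattern working. A hedged userspace sketch of that pattern; container_fd is a placeholder and error handling is trimmed.

#include <linux/vfio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct vfio_iommu_type1_info *get_iommu_info(int container_fd)
{
	struct vfio_iommu_type1_info hdr = { .argsz = sizeof(hdr) };
	struct vfio_iommu_type1_info *info;

	/* First call: the kernel reports how much space the caps chain needs. */
	if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, &hdr))
		return NULL;

	info = calloc(1, hdr.argsz);
	if (!info)
		return NULL;
	info->argsz = hdr.argsz;

	/* Second call: capabilities, if any, start at info->cap_offset. */
	if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, info)) {
		free(info);
		return NULL;
	}
	return info;
}
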
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index f0ca33b2e1df..cfad824d9aa2 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -141,6 +141,21 @@ unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set)
}
EXPORT_SYMBOL_GPL(vfio_device_set_open_count);
+struct vfio_device *
+vfio_find_device_in_devset(struct vfio_device_set *dev_set,
+ struct device *dev)
+{
+ struct vfio_device *cur;
+
+ lockdep_assert_held(&dev_set->lock);
+
+ list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
+ if (cur->dev == dev)
+ return cur;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vfio_find_device_in_devset);
+
/*
* Device objects - create, release, get, put, search
*/
@@ -258,7 +273,8 @@ static int __vfio_register_dev(struct vfio_device *device,
if (WARN_ON(IS_ENABLED(CONFIG_IOMMUFD) &&
(!device->ops->bind_iommufd ||
!device->ops->unbind_iommufd ||
- !device->ops->attach_ioas)))
+ !device->ops->attach_ioas ||
+ !device->ops->detach_ioas)))
return -EINVAL;
/*
@@ -276,7 +292,18 @@ static int __vfio_register_dev(struct vfio_device *device,
if (ret)
return ret;
- ret = device_add(&device->device);
+ /*
+ * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
+ * restore cache coherency. It has to be checked here because it is only
+ * valid for cases where we are using iommu groups.
+ */
+ if (type == VFIO_IOMMU && !vfio_device_is_noiommu(device) &&
+ !device_iommu_capable(device->dev, IOMMU_CAP_CACHE_COHERENCY)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = vfio_device_add(device);
if (ret)
goto err_out;
@@ -316,6 +343,18 @@ void vfio_unregister_group_dev(struct vfio_device *device)
bool interrupted = false;
long rc;
+ /*
+ * Prevent the device from being newly opened by userspace via
+ * VFIO_GROUP_GET_DEVICE_FD in the group path.
+ */
+ vfio_device_group_unregister(device);
+
+ /*
+ * Balances vfio_device_add() in the register path, and also prevents
+ * the device from being newly opened by userspace in the cdev path.
+ */
+ vfio_device_del(device);
+
vfio_device_put_registration(device);
rc = try_wait_for_completion(&device->comp);
while (rc <= 0) {
@@ -339,18 +378,13 @@ void vfio_unregister_group_dev(struct vfio_device *device)
}
}
- vfio_device_group_unregister(device);
-
- /* Balances device_add in register path */
- device_del(&device->device);
-
/* Balances vfio_device_set_group in register path */
vfio_device_remove_group(device);
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
#ifdef CONFIG_HAVE_KVM
-void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
+void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
{
void (*pfn)(struct kvm *kvm);
bool (*fn)(struct kvm *kvm);
@@ -358,6 +392,9 @@ void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
lockdep_assert_held(&device->dev_set->lock);
+ if (!kvm)
+ return;
+
pfn = symbol_get(kvm_put_kvm);
if (WARN_ON(!pfn))
return;
@@ -404,9 +441,25 @@ static bool vfio_assert_device_open(struct vfio_device *device)
return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
}
-static int vfio_device_first_open(struct vfio_device *device,
- struct iommufd_ctx *iommufd)
+struct vfio_device_file *
+vfio_allocate_device_file(struct vfio_device *device)
{
+ struct vfio_device_file *df;
+
+ df = kzalloc(sizeof(*df), GFP_KERNEL_ACCOUNT);
+ if (!df)
+ return ERR_PTR(-ENOMEM);
+
+ df->device = device;
+ spin_lock_init(&df->kvm_ref_lock);
+
+ return df;
+}
+
+static int vfio_df_device_first_open(struct vfio_device_file *df)
+{
+ struct vfio_device *device = df->device;
+ struct iommufd_ctx *iommufd = df->iommufd;
int ret;
lockdep_assert_held(&device->dev_set->lock);
@@ -415,7 +468,7 @@ static int vfio_device_first_open(struct vfio_device *device,
return -ENODEV;
if (iommufd)
- ret = vfio_iommufd_bind(device, iommufd);
+ ret = vfio_df_iommufd_bind(df);
else
ret = vfio_device_group_use_iommu(device);
if (ret)
@@ -430,7 +483,7 @@ static int vfio_device_first_open(struct vfio_device *device,
err_unuse_iommu:
if (iommufd)
- vfio_iommufd_unbind(device);
+ vfio_df_iommufd_unbind(df);
else
vfio_device_group_unuse_iommu(device);
err_module_put:
@@ -438,29 +491,39 @@ err_module_put:
return ret;
}
-static void vfio_device_last_close(struct vfio_device *device,
- struct iommufd_ctx *iommufd)
+static void vfio_df_device_last_close(struct vfio_device_file *df)
{
+ struct vfio_device *device = df->device;
+ struct iommufd_ctx *iommufd = df->iommufd;
+
lockdep_assert_held(&device->dev_set->lock);
if (device->ops->close_device)
device->ops->close_device(device);
if (iommufd)
- vfio_iommufd_unbind(device);
+ vfio_df_iommufd_unbind(df);
else
vfio_device_group_unuse_iommu(device);
module_put(device->dev->driver->owner);
}
-int vfio_device_open(struct vfio_device *device, struct iommufd_ctx *iommufd)
+int vfio_df_open(struct vfio_device_file *df)
{
+ struct vfio_device *device = df->device;
int ret = 0;
lockdep_assert_held(&device->dev_set->lock);
+ /*
+ * Only the group path allows the device to be opened multiple
+ * times; the device cdev path has no secure way to support that.
+ */
+ if (device->open_count != 0 && !df->group)
+ return -EINVAL;
+
device->open_count++;
if (device->open_count == 1) {
- ret = vfio_device_first_open(device, iommufd);
+ ret = vfio_df_device_first_open(df);
if (ret)
device->open_count--;
}
@@ -468,14 +531,15 @@ int vfio_device_open(struct vfio_device *device, struct iommufd_ctx *iommufd)
return ret;
}
-void vfio_device_close(struct vfio_device *device,
- struct iommufd_ctx *iommufd)
+void vfio_df_close(struct vfio_device_file *df)
{
+ struct vfio_device *device = df->device;
+
lockdep_assert_held(&device->dev_set->lock);
vfio_assert_device_open(device);
if (device->open_count == 1)
- vfio_device_last_close(device, iommufd);
+ vfio_df_device_last_close(df);
device->open_count--;
}
@@ -517,12 +581,18 @@ static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
*/
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
- struct vfio_device *device = filep->private_data;
+ struct vfio_device_file *df = filep->private_data;
+ struct vfio_device *device = df->device;
- vfio_device_group_close(device);
+ if (df->group)
+ vfio_df_group_close(df);
+ else
+ vfio_df_unbind_iommufd(df);
vfio_device_put_registration(device);
+ kfree(df);
+
return 0;
}
@@ -865,6 +935,53 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
return 0;
}
+void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
+ u32 req_nodes)
+{
+ struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
+ unsigned long min_gap, curr_gap;
+
+ /* Special shortcut when a single range is required */
+ if (req_nodes == 1) {
+ unsigned long last;
+
+ comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);
+ curr = comb_start;
+ while (curr) {
+ last = curr->last;
+ prev = curr;
+ curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+ if (prev != comb_start)
+ interval_tree_remove(prev, root);
+ }
+ comb_start->last = last;
+ return;
+ }
+
+ /* Combine ranges which have the smallest gap */
+ while (cur_nodes > req_nodes) {
+ prev = NULL;
+ min_gap = ULONG_MAX;
+ curr = interval_tree_iter_first(root, 0, ULONG_MAX);
+ while (curr) {
+ if (prev) {
+ curr_gap = curr->start - prev->last;
+ if (curr_gap < min_gap) {
+ min_gap = curr_gap;
+ comb_start = prev;
+ comb_end = curr;
+ }
+ }
+ prev = curr;
+ curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+ }
+ comb_start->last = comb_end->last;
+ interval_tree_remove(comb_end, root);
+ cur_nodes--;
+ }
+}
+EXPORT_SYMBOL_GPL(vfio_combine_iova_ranges);
+
/* Ranges should fit into a single kernel page */
#define LOG_MAX_RANGES \
(PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))
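
Aside (illustrative only, not part of the patch): vfio_combine_iova_ranges() repeatedly merges the two interval-tree nodes separated by the smallest gap until only req_nodes ranges remain. A hedged sketch of how a migration driver with a limited number of hardware dirty-tracking ranges might use it; clamp_tracking_ranges() and max_hw_ranges are hypothetical.

/*
 * 'ranges' is an interval tree of user-requested dirty-tracking ranges,
 * '*nnodes' the number of nodes it currently holds.
 */
static void clamp_tracking_ranges(struct rb_root_cached *ranges,
				  u32 *nnodes, u32 max_hw_ranges)
{
	if (*nnodes > max_hw_ranges) {
		vfio_combine_iova_ranges(ranges, *nnodes, max_hw_ranges);
		*nnodes = max_hw_ranges;
	}
}
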
@@ -1087,16 +1204,38 @@ static int vfio_ioctl_device_feature(struct vfio_device *device,
static long vfio_device_fops_unl_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg)
{
- struct vfio_device *device = filep->private_data;
+ struct vfio_device_file *df = filep->private_data;
+ struct vfio_device *device = df->device;
+ void __user *uptr = (void __user *)arg;
int ret;
+ if (cmd == VFIO_DEVICE_BIND_IOMMUFD)
+ return vfio_df_ioctl_bind_iommufd(df, uptr);
+
+ /* Paired with smp_store_release() following vfio_df_open() */
+ if (!smp_load_acquire(&df->access_granted))
+ return -EINVAL;
+
ret = vfio_device_pm_runtime_get(device);
if (ret)
return ret;
+ /* cdev only ioctls */
+ if (IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV) && !df->group) {
+ switch (cmd) {
+ case VFIO_DEVICE_ATTACH_IOMMUFD_PT:
+ ret = vfio_df_ioctl_attach_pt(df, uptr);
+ goto out;
+
+ case VFIO_DEVICE_DETACH_IOMMUFD_PT:
+ ret = vfio_df_ioctl_detach_pt(df, uptr);
+ goto out;
+ }
+ }
+
switch (cmd) {
case VFIO_DEVICE_FEATURE:
- ret = vfio_ioctl_device_feature(device, (void __user *)arg);
+ ret = vfio_ioctl_device_feature(device, uptr);
break;
default:
@@ -1106,7 +1245,7 @@ static long vfio_device_fops_unl_ioctl(struct file *filep,
ret = device->ops->ioctl(device, cmd, arg);
break;
}
-
+out:
vfio_device_pm_runtime_put(device);
return ret;
}
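
Aside (illustrative only, not part of the patch): the smp_load_acquire() checks added above pair with a release store made once vfio_df_open() has succeeded. A minimal sketch of that publisher side, assuming it runs under dev_set->lock as in the group and cdev open paths; example_grant_access() is hypothetical.

static int example_grant_access(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	int ret;

	mutex_lock(&device->dev_set->lock);
	ret = vfio_df_open(df);
	if (!ret)
		/*
		 * Pairs with smp_load_acquire(&df->access_granted) in the
		 * fops handlers: a handler that observes the flag also
		 * observes the fully opened device state.
		 */
		smp_store_release(&df->access_granted, true);
	mutex_unlock(&device->dev_set->lock);

	return ret;
}
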
@@ -1114,7 +1253,12 @@ static long vfio_device_fops_unl_ioctl(struct file *filep,
static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
size_t count, loff_t *ppos)
{
- struct vfio_device *device = filep->private_data;
+ struct vfio_device_file *df = filep->private_data;
+ struct vfio_device *device = df->device;
+
+ /* Paired with smp_store_release() following vfio_df_open() */
+ if (!smp_load_acquire(&df->access_granted))
+ return -EINVAL;
if (unlikely(!device->ops->read))
return -EINVAL;
@@ -1126,7 +1270,12 @@ static ssize_t vfio_device_fops_write(struct file *filep,
const char __user *buf,
size_t count, loff_t *ppos)
{
- struct vfio_device *device = filep->private_data;
+ struct vfio_device_file *df = filep->private_data;
+ struct vfio_device *device = df->device;
+
+ /* Paired with smp_store_release() following vfio_df_open() */
+ if (!smp_load_acquire(&df->access_granted))
+ return -EINVAL;
if (unlikely(!device->ops->write))
return -EINVAL;
@@ -1136,7 +1285,12 @@ static ssize_t vfio_device_fops_write(struct file *filep,
static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
- struct vfio_device *device = filep->private_data;
+ struct vfio_device_file *df = filep->private_data;
+ struct vfio_device *device = df->device;
+
+ /* Paired with smp_store_release() following vfio_df_open() */
+ if (!smp_load_acquire(&df->access_granted))
+ return -EINVAL;
if (unlikely(!device->ops->mmap))
return -EINVAL;
@@ -1146,6 +1300,7 @@ static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
const struct file_operations vfio_device_fops = {
.owner = THIS_MODULE,
+ .open = vfio_device_fops_cdev_open,
.release = vfio_device_fops_release,
.read = vfio_device_fops_read,
.write = vfio_device_fops_write,
@@ -1154,6 +1309,88 @@ const struct file_operations vfio_device_fops = {
.mmap = vfio_device_fops_mmap,
};
+static struct vfio_device *vfio_device_from_file(struct file *file)
+{
+ struct vfio_device_file *df = file->private_data;
+
+ if (file->f_op != &vfio_device_fops)
+ return NULL;
+ return df->device;
+}
+
+/**
+ * vfio_file_is_valid - True if the file is valid vfio file
+ * @file: VFIO group file or VFIO device file
+ */
+bool vfio_file_is_valid(struct file *file)
+{
+ return vfio_group_from_file(file) ||
+ vfio_device_from_file(file);
+}
+EXPORT_SYMBOL_GPL(vfio_file_is_valid);
+
+/**
+ * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
+ * is always CPU cache coherent
+ * @file: VFIO group file or VFIO device file
+ *
+ * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
+ * bit in DMA transactions. A return of false indicates that the user has
+ * rights to access additional instructions such as wbinvd on x86.
+ */
+bool vfio_file_enforced_coherent(struct file *file)
+{
+ struct vfio_device *device;
+ struct vfio_group *group;
+
+ group = vfio_group_from_file(file);
+ if (group)
+ return vfio_group_enforced_coherent(group);
+
+ device = vfio_device_from_file(file);
+ if (device)
+ return device_iommu_capable(device->dev,
+ IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
+
+static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
+{
+ struct vfio_device_file *df = file->private_data;
+
+ /*
+ * The kvm is first recorded in the vfio_device_file, and will
+ * be propagated to vfio_device::kvm when the file is bound to
+ * iommufd successfully in the vfio device cdev path.
+ */
+ spin_lock(&df->kvm_ref_lock);
+ df->kvm = kvm;
+ spin_unlock(&df->kvm_ref_lock);
+}
+
+/**
+ * vfio_file_set_kvm - Link a kvm with VFIO drivers
+ * @file: VFIO group file or VFIO device file
+ * @kvm: KVM to link
+ *
+ * When a VFIO device is first opened the KVM will be available in
+ * device->kvm if one was associated with the file.
+ */
+void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
+{
+ struct vfio_group *group;
+
+ group = vfio_group_from_file(file);
+ if (group)
+ vfio_group_set_kvm(group, kvm);
+
+ if (vfio_device_from_file(file))
+ vfio_device_file_set_kvm(file, kvm);
+}
+EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
+
/*
* Sub-module support
*/
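
Aside (illustrative only, not part of the patch): with the device-file aware variants above, a consumer such as the kvm-vfio bridge can treat group files and cdev device files uniformly. A hedged sketch of such a consumer; example_register_vfio_file() and example_allow_noncoherent_dma() are hypothetical.

static int example_register_vfio_file(struct kvm *kvm, struct file *filp)
{
	/* Accepts either a VFIO group file or a VFIO device (cdev) file. */
	if (!vfio_file_is_valid(filp))
		return -EINVAL;

	/*
	 * If DMA is not enforced coherent, the guest may legitimately need
	 * wbinvd-style cache management; a real consumer relaxes its policy.
	 */
	if (!vfio_file_enforced_coherent(filp))
		example_allow_noncoherent_dma(kvm);

	/* Make the kvm visible to the device once it is opened/bound. */
	vfio_file_set_kvm(filp, kvm);
	return 0;
}
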
@@ -1172,6 +1409,9 @@ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
void *buf;
struct vfio_info_cap_header *header, *tmp;
+ /* Ensure that the next capability struct will be aligned */
+ size = ALIGN(size, sizeof(u64));
+
buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
if (!buf) {
kfree(caps->buf);
@@ -1205,6 +1445,9 @@ void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
struct vfio_info_cap_header *tmp;
void *buf = (void *)caps->buf;
+ /* Capability structs should start with proper alignment */
+ WARN_ON(!IS_ALIGNED(offset, sizeof(u64)));
+
for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
tmp->next += offset;
}
@@ -1415,9 +1658,16 @@ static int __init vfio_init(void)
goto err_dev_class;
}
+ ret = vfio_cdev_init(vfio.device_class);
+ if (ret)
+ goto err_alloc_dev_chrdev;
+
pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return 0;
+err_alloc_dev_chrdev:
+ class_destroy(vfio.device_class);
+ vfio.device_class = NULL;
err_dev_class:
vfio_virqfd_exit();
err_virqfd:
@@ -1428,6 +1678,7 @@ err_virqfd:
static void __exit vfio_cleanup(void)
{
ida_destroy(&vfio.device_ida);
+ vfio_cdev_cleanup();
class_destroy(vfio.device_class);
vfio.device_class = NULL;
vfio_virqfd_exit();