author		Yishai Hadas <yishaih@nvidia.com>	2023-09-11 12:38:51 +0300
committer	Alex Williamson <alex.williamson@redhat.com>	2023-09-28 22:07:29 +0300
commit		543640af84802e84f1e38ee102882d6ae1354701 (patch)
tree		59d84b7a10abb0668f23200dbd13224c002218d3 /drivers/vfio/pci/mlx5/cmd.h
parent		34a64c8eac2380a28eeab191dc5bf4e0dbadf7c6 (diff)
download	linux-543640af84802e84f1e38ee102882d6ae1354701.tar.xz
vfio/mlx5: Enable querying state size which is > 4GB
Once the device supports 'chunk mode', the driver can support a state size larger than 4GB. In that case the device is capable of splitting a single image into multiple chunks, as long as the software provides a buffer of at least the minimum size reported by the device.

The driver queries the minimum required buffer size by issuing the QUERY_VHCA_MIGRATION_STATE command with the 'chunk' bit set in its input. In that case, the output includes both the minimum buffer size (i.e. required_umem_size) and the remaining total state size, where applicable.

At this point in the series the 'chunk' bit is left off; the last patch will activate the feature once all the pieces are in place.

Note: Before this change we were limited to a 4GB state size, as the device specification allows only a 4-byte size field for the query/save/load commands.

Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20230911093856.81910-5-yishaih@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
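For context, a minimal sketch of what the extended query could look like on the driver side. Only required_umem_size is named in the commit message; the 'chunk' input bit and the remaining_total_size output field used below are assumptions for illustration, not the literal patch body:

/*
 * Illustrative sketch only. Field names other than required_umem_size
 * (taken from the commit message above) are assumed.
 */
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
					  size_t *state_size, u64 *total_size,
					  u8 query_flags)
{
	u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {};
	int ret;

	MLX5_SET(query_vhca_migration_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE);
	MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id);
	/* Ask for chunk-sized reporting once chunk_mode is enabled. */
	MLX5_SET(query_vhca_migration_state_in, in, chunk, mvdev->chunk_mode);

	ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state,
				  in, out);
	if (ret)
		return ret;

	/* Minimum buffer the device requires for the next chunk/image. */
	*state_size = MLX5_GET(query_vhca_migration_state_out, out,
			       required_umem_size);
	if (total_size)
		/* Remaining full state size; may exceed 4GB in chunk mode. */
		*total_size = mvdev->chunk_mode ?
			MLX5_GET64(query_vhca_migration_state_out, out,
				   remaining_total_size) : *state_size;

	return 0;
}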
Diffstat (limited to 'drivers/vfio/pci/mlx5/cmd.h')
-rw-r--r--	drivers/vfio/pci/mlx5/cmd.h	4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index aec4c69dd6c1..4fb37598c8e5 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -164,6 +164,7 @@ struct mlx5vf_pci_core_device {
u8 deferred_reset:1;
u8 mdev_detach:1;
u8 log_active:1;
+ u8 chunk_mode:1;
struct completion tracker_comp;
/* protect migration state */
struct mutex state_mutex;
@@ -186,7 +187,8 @@ enum {
int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
- size_t *state_size, u8 query_flags);
+ size_t *state_size, u64 *total_size,
+ u8 query_flags);
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
const struct vfio_migration_ops *mig_ops,
const struct vfio_log_ops *log_ops);
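A hypothetical caller, to illustrate the new out-parameter; the surrounding save-path code is assumed here and is not part of this patch:

	size_t state_size;
	u64 total_size;
	int err;

	err = mlx5vf_cmd_query_vhca_migration_state(mvdev, &state_size,
						    &total_size, 0);
	if (err)
		return err;
	/*
	 * While the 'chunk' bit stays off, total_size matches state_size.
	 * Once a later patch in the series enables chunk_mode, state_size
	 * becomes the minimum per-chunk buffer and total_size the
	 * remaining state, which may exceed 4GB.
	 */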