path: root/drivers/vhost/vhost.h
author    Mike Christie <michael.christie@oracle.com>  2023-06-27 02:23:05 +0300
committer Michael S. Tsirkin <mst@redhat.com>          2023-07-03 19:15:14 +0300
commit    c1ecd8e9500797748ae4f79657971955d452d69d (patch)
tree      4c84f7251cf4f7fb058865082bf4ee1310976e65 /drivers/vhost/vhost.h
parent    1cdaafa1b8b4ef6052869c86ba2b41c0cff05957 (diff)
vhost: allow userspace to create workers
For vhost-scsi with 3 vqs or more and a workload that tries to use
them in parallel like:

    fio --filename=/dev/sdb --direct=1 --rw=randrw --bs=4k \
        --ioengine=libaio --iodepth=128 --numjobs=3

the single vhost worker thread will become a bottleneck and we are
stuck at around 500K IOPS no matter how many jobs, virtqueues, and
CPUs are used.

To better utilize virtqueues and available CPUs, this patch allows
userspace to create workers and bind them to vqs. You can have N
workers per dev and also share N workers with M vqs on that dev.

This patch adds the interface-related code, and the next patch will
hook vhost-scsi into it. The patches do not try to hook net and vsock
into the interface because:

1. Multiple workers don't seem to help vsock. The problem is that with
   only 2 virtqueues we never fully use the existing worker when doing
   bidirectional tests. This matches vhost-scsi, where we don't see the
   worker become a bottleneck until 3 virtqueues are used.

2. net already has a way to use multiple workers.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Message-Id: <20230626232307.97930-16-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
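A minimal userspace sketch of the flow the commit message describes,
assuming the VHOST_NEW_WORKER and VHOST_ATTACH_VRING_WORKER ioctls and
the vhost_worker_state / vhost_vring_worker uapi structures added
earlier in this series, and a vhost device fd that is already opened
and set up:

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    static int bind_vq_to_new_worker(int vhost_fd, unsigned int vq_index)
    {
            struct vhost_worker_state state = {};
            struct vhost_vring_worker vring_worker = {};

            /* Ask the kernel to create a new worker for this dev; the
             * new worker's id is returned in state.worker_id. */
            if (ioctl(vhost_fd, VHOST_NEW_WORKER, &state) < 0)
                    return -1;

            /* Bind the vq to the worker. Several vqs may pass the same
             * worker_id here to share one worker. */
            vring_worker.index = vq_index;
            vring_worker.worker_id = state.worker_id;
            return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &vring_worker);
    }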
Diffstat (limited to 'drivers/vhost/vhost.h')
-rw-r--r--  drivers/vhost/vhost.h | 3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 31937e98c01b..4920ca63b8de 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -31,6 +31,7 @@ struct vhost_worker {
struct llist_head work_list;
u64 kcov_handle;
u32 id;
+ int attachment_cnt;
};
/* Poll a file (eventfd or socket) */
@@ -190,6 +191,8 @@ void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
+long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
+ void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
void vhost_clear_msg(struct vhost_dev *dev);
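A sketch of how a device driver might wire the newly exported
vhost_worker_ioctl() into its ioctl path, roughly what the next patch
in the series does for vhost-scsi; the function name and the assumption
that unhandled worker ioctls return -ENOIOCTLCMD are illustrative here,
not taken from this diff:

    /* Illustrative dispatch helper for a vhost device driver. */
    static long example_dev_ioctl(struct vhost_dev *dev, unsigned int ioctl,
                                  void __user *argp)
    {
            long r;

            mutex_lock(&dev->mutex);
            /* Worker ioctls (e.g. creating a worker or attaching one
             * to a vring) are handled here... */
            r = vhost_worker_ioctl(dev, ioctl, argp);
            /* ...and anything it does not recognize falls through to
             * the generic device ioctl handler. */
            if (r == -ENOIOCTLCMD)
                    r = vhost_dev_ioctl(dev, ioctl, argp);
            mutex_unlock(&dev->mutex);
            return r;
    }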