Diffstat (limited to 'drivers/vhost'):

 -rw-r--r--  drivers/vhost/iotlb.c |  2
 -rw-r--r--  drivers/vhost/net.c   |  6
 -rw-r--r--  drivers/vhost/scsi.c  | 21
 -rw-r--r--  drivers/vhost/vdpa.c  |  4
 -rw-r--r--  drivers/vhost/vhost.c |  8
 -rw-r--r--  drivers/vhost/vhost.h | 21
 -rw-r--r--  drivers/vhost/vsock.c | 60

 7 files changed, 80 insertions(+), 42 deletions(-)
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
index 0fd3f87e913c..0582079e4bcc 100644
--- a/drivers/vhost/iotlb.c
+++ b/drivers/vhost/iotlb.c
@@ -83,7 +83,7 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
/**
- * vring_iotlb_del_range - delete overlapped ranges from vhost IOTLB
+ * vhost_iotlb_del_range - delete overlapped ranges from vhost IOTLB
* @iotlb: the IOTLB
* @start: start of the IOVA range
* @last: last of IOVA range
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df82b124170e..6414bd5741b8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -744,11 +744,9 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
if (copied != len)
return -EFAULT;
- xdp->data_hard_start = buf;
- xdp->data = buf + pad;
- xdp->data_end = xdp->data + len;
+ xdp_init_buff(xdp, buflen, NULL);
+ xdp_prepare_buff(xdp, buf, pad, len, true);
hdr->buflen = buflen;
- xdp->frame_sz = buflen;
--net->refcnt_bias;
alloc_frag->offset += buflen;
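[Editor's note: the two helpers that replace the open-coded field assignments above live in include/net/xdp.h. A minimal sketch of what they do at the time of this change (later kernels add more fields, e.g. flags for frags), with comments mapping back to the removed lines:

/* Sketch of the include/net/xdp.h helpers; details may differ
 * slightly between kernel versions. */
static inline void xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz,
				 struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;	/* was: xdp->frame_sz = buflen */
	xdp->rxq = rxq;			/* NULL here: no rx queue info */
}

static inline void xdp_prepare_buff(struct xdp_buff *xdp,
				    unsigned char *hard_start,
				    int headroom, int data_len,
				    const bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;	/* was: buf */
	xdp->data = data;			/* was: buf + pad */
	xdp->data_end = data + data_len;	/* was: xdp->data + len */
	/* meta_valid == true places data_meta at data itself */
	xdp->data_meta = meta_valid ? data : data + 1;
}
]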
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index d16c04dcc144..46f897e41217 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1430,11 +1430,6 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
vhost_scsi_handle_vq(vs, vq);
}
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
- vhost_poll_flush(&vs->vqs[index].vq.poll);
-}
-
/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
@@ -1453,10 +1448,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- vhost_scsi_flush_vq(vs, i);
- vhost_work_flush(&vs->dev, &vs->vs_completion_work);
- vhost_work_flush(&vs->dev, &vs->vs_event_work);
+ vhost_work_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
@@ -1740,11 +1732,12 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
- /*
- * Make sure cmds are not running before tearing them
- * down.
- */
- vhost_scsi_flush(vs);
+ }
+ /* Make sure cmds are not running before tearing them down. */
+ vhost_scsi_flush(vs);
+
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ vq = &vs->vqs[i].vq;
vhost_scsi_destroy_vq_cmds(vq);
}
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index fb41db3da611..210ab35a7ebf 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -383,7 +383,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
if (r)
return r;
- vq->last_avail_idx = vq_state.avail_index;
+ vq->last_avail_idx = vq_state.split.avail_index;
break;
}
@@ -401,7 +401,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
break;
case VHOST_SET_VRING_BASE:
- vq_state.avail_index = vq->last_avail_idx;
+ vq_state.split.avail_index = vq->last_avail_idx;
if (ops->set_vq_state(vdpa, idx, &vq_state))
r = -EINVAL;
break;
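[Editor's note: the switch from vq_state.avail_index to vq_state.split.avail_index follows the vdpa core change that turned struct vdpa_vq_state into a union, so packed virtqueues can carry their own state. Roughly, from include/linux/vdpa.h (a sketch, field layout per that series):

struct vdpa_vq_state_split {
	u16 avail_index;		/* split ring: next available index */
};

struct vdpa_vq_state_packed {
	u16 last_avail_counter:1;	/* packed ring: wrap counters */
	u16 last_avail_idx:15;		/* and ring indices */
	u16 last_used_counter:1;
	u16 last_used_idx:15;
};

struct vdpa_vq_state {
	union {
		struct vdpa_vq_state_split split;
		struct vdpa_vq_state_packed packed;
	};
};
]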
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5ccb0705beae..b9e853e6094d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -231,7 +231,7 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
+void vhost_work_dev_flush(struct vhost_dev *dev)
{
struct vhost_flush_struct flush;
@@ -243,13 +243,13 @@ void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
wait_for_completion(&flush.wait_event);
}
}
-EXPORT_SYMBOL_GPL(vhost_work_flush);
+EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
/* Flush any work that has been scheduled. When calling this, don't hold any
* locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
- vhost_work_flush(poll->dev, &poll->work);
+ vhost_work_dev_flush(poll->dev);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);
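[Editor's note: the hunk above elides the body of the renamed function. Reconstructed from the pre-patch vhost_work_flush() (the rename only drops the now-unused work argument; the mechanism is unchanged), it reads approximately:

void vhost_work_dev_flush(struct vhost_dev *dev)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		/* The worker runs queued items in order, so once this
		 * flush work completes, everything queued before it has
		 * run as well. That ordering is why a single device-wide
		 * flush can replace the per-work and per-vq flushes
		 * removed in scsi.c and vsock.c. */
		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
]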
@@ -538,7 +538,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
attach.owner = current;
vhost_work_init(&attach.work, vhost_attach_cgroups_work);
vhost_work_queue(dev, &attach.work);
- vhost_work_flush(dev, &attach.work);
+ vhost_work_dev_flush(dev);
return attach.ret;
}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b063324c7669..638bb640d6b4 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -20,20 +20,20 @@ typedef void (*vhost_work_fn_t)(struct vhost_work *work);
#define VHOST_WORK_QUEUED 1
struct vhost_work {
- struct llist_node node;
- vhost_work_fn_t fn;
- unsigned long flags;
+ struct llist_node node;
+ vhost_work_fn_t fn;
+ unsigned long flags;
};
/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
- poll_table table;
- wait_queue_head_t *wqh;
- wait_queue_entry_t wait;
- struct vhost_work work;
- __poll_t mask;
- struct vhost_dev *dev;
+ poll_table table;
+ wait_queue_head_t *wqh;
+ wait_queue_entry_t wait;
+ struct vhost_work work;
+ __poll_t mask;
+ struct vhost_dev *dev;
};
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
@@ -46,8 +46,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
-void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
-long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
+void vhost_work_dev_flush(struct vhost_dev *dev);
struct vhost_log {
u64 addr;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 5e78fb719602..f249622ef11b 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -31,7 +31,8 @@
enum {
VHOST_VSOCK_FEATURES = VHOST_FEATURES |
- (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
+ (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};
enum {
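[Editor's note: VIRTIO_VSOCK_F_SEQPACKET is the feature bit introduced by the SEQPACKET series in include/uapi/linux/virtio_vsock.h; as a sketch:

/* include/uapi/linux/virtio_vsock.h (added by the SEQPACKET series) */
#define VIRTIO_VSOCK_F_SEQPACKET	1	/* SOCK_SEQPACKET supported */
]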
@@ -56,6 +57,7 @@ struct vhost_vsock {
atomic_t queued_replies;
u32 guest_cid;
+ bool seqpacket_allow;
};
static u32 vhost_transport_get_local_cid(void)
@@ -112,6 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
size_t nbytes;
size_t iov_len, payload_len;
int head;
+ bool restore_flag = false;
spin_lock_bh(&vsock->send_pkt_list_lock);
if (list_empty(&vsock->send_pkt_list)) {
@@ -168,9 +171,26 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
/* If the packet is greater than the space available in the
* buffer, we split it using multiple buffers.
*/
- if (payload_len > iov_len - sizeof(pkt->hdr))
+ if (payload_len > iov_len - sizeof(pkt->hdr)) {
payload_len = iov_len - sizeof(pkt->hdr);
+ /* As we are copying pieces of a large packet's buffer
+ * into small rx buffers, the headers of the packets in
+ * the rx queue are created dynamically and initialized
+ * from the header of the current packet (except for the
+ * length). But in the SOCK_SEQPACKET case we must also
+ * clear the record delimiter bit (VIRTIO_VSOCK_SEQ_EOR).
+ * Otherwise, instead of one packet with the delimiter
+ * (which marks the end of the record), there would be a
+ * sequence of packets with the delimiter bit set. After
+ * the initialized header has been copied to the rx
+ * buffer, the bit is restored.
+ */
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ restore_flag = true;
+ }
+ }
+
/* Set the correct length in the header */
pkt->hdr.len = cpu_to_le32(payload_len);
@@ -204,6 +224,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* to send it with the next available buffer.
*/
if (pkt->off < pkt->len) {
+ if (restore_flag)
+ pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+
/* We are queueing the same virtio_vsock_pkt to handle
* the remaining bytes, and we want to deliver it
* to monitoring devices in the next iteration.
@@ -354,8 +377,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
- if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
- pkt->len = le32_to_cpu(pkt->hdr.len);
+ pkt->len = le32_to_cpu(pkt->hdr.len);
/* No payload */
if (!pkt->len)
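[Editor's note: with SEQPACKET there are now two valid packet types, so the length is taken from the header unconditionally instead of only for VIRTIO_VSOCK_TYPE_STREAM. The types, from include/uapi/linux/virtio_vsock.h:

enum virtio_vsock_type {
	VIRTIO_VSOCK_TYPE_STREAM	= 1,	/* connection-oriented byte stream */
	VIRTIO_VSOCK_TYPE_SEQPACKET	= 2,	/* message-oriented, keeps record boundaries */
};
]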
@@ -398,6 +420,8 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
return val < vq->num;
}
+static bool vhost_transport_seqpacket_allow(u32 remote_cid);
+
static struct virtio_transport vhost_transport = {
.transport = {
.module = THIS_MODULE,
@@ -424,6 +448,11 @@ static struct virtio_transport vhost_transport = {
.stream_is_active = virtio_transport_stream_is_active,
.stream_allow = virtio_transport_stream_allow,
+ .seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
+ .seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
+ .seqpacket_allow = vhost_transport_seqpacket_allow,
+ .seqpacket_has_data = virtio_transport_seqpacket_has_data,
+
.notify_poll_in = virtio_transport_notify_poll_in,
.notify_poll_out = virtio_transport_notify_poll_out,
.notify_recv_init = virtio_transport_notify_recv_init,
@@ -441,6 +470,22 @@ static struct virtio_transport vhost_transport = {
.send_pkt = vhost_transport_send_pkt,
};
+static bool vhost_transport_seqpacket_allow(u32 remote_cid)
+{
+ struct vhost_vsock *vsock;
+ bool seqpacket_allow = false;
+
+ rcu_read_lock();
+ vsock = vhost_vsock_get(remote_cid);
+
+ if (vsock)
+ seqpacket_allow = vsock->seqpacket_allow;
+
+ rcu_read_unlock();
+
+ return seqpacket_allow;
+}
+
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -663,7 +708,7 @@ static void vhost_vsock_flush(struct vhost_vsock *vsock)
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
if (vsock->vqs[i].handle_kick)
vhost_poll_flush(&vsock->vqs[i].poll);
- vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
+ vhost_work_dev_flush(&vsock->dev);
}
static void vhost_vsock_reset_orphans(struct sock *sk)
@@ -689,7 +734,7 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
vsk->peer_shutdown = SHUTDOWN_MASK;
sk->sk_state = SS_UNCONNECTED;
sk->sk_err = ECONNRESET;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
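[Editor's note: sk_error_report() is the core-net wrapper that replaced direct invocations of the sk_error_report callback so a tracepoint can fire at every error-report site. Sketched from net/core/sock.c; the exact tracepoint handling may differ:

void sk_error_report(struct sock *sk)
{
	sk->sk_error_report(sk);	/* invoke the protocol callback */

	switch (sk->sk_family) {
	case AF_INET:
		fallthrough;
	case AF_INET6:
		trace_inet_sk_error_report(sk);	/* tracing hook */
		break;
	default:
		break;
	}
}
]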
@@ -785,6 +830,9 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
goto err;
}
+ if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
+ vsock->seqpacket_allow = true;
+
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);