summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJason Wang <jasowang@redhat.com>2025-12-30 09:46:44 +0300
committerMichael S. Tsirkin <mst@redhat.com>2025-12-31 13:39:18 +0300
commit03f05c4eeb7bc5019deb25f7415a7af8dc3fdd3f (patch)
treeaaa28eff7cd7902a68497966a6a31b5369701701
parent1208473f9b5eb273e787bb1b07a4b2a323692a10 (diff)
downloadlinux-03f05c4eeb7bc5019deb25f7415a7af8dc3fdd3f.tar.xz
virtio_ring: determine descriptor flags at one time
Let's determine the last descriptor by counting the number of sgs. This would be consistent with the packed virtqueue implementation and ease the future in-order implementation. Acked-by: Eugenio Pérez <eperezma@redhat.com> Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com> Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Message-Id: <20251230064649.55597-15-jasowang@redhat.com>
-rw-r--r--drivers/virtio/virtio_ring.c28
1 file changed, 13 insertions, 15 deletions
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index d0904ac0aa93..e55b26a03037 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -574,7 +574,7 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
struct vring_desc_extra *extra;
struct scatterlist *sg;
struct vring_desc *desc;
- unsigned int i, n, avail, descs_used, prev, err_idx;
+ unsigned int i, n, avail, descs_used, err_idx, sg_count = 0;
int head;
bool indirect;
@@ -634,42 +634,40 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
dma_addr_t addr;
u32 len;
+ u16 flags = 0;
+
+ if (++sg_count != total_sg)
+ flags |= VRING_DESC_F_NEXT;
if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
goto unmap_release;
- prev = i;
/* Note that we trust indirect descriptor
* table since it use stream DMA mapping.
*/
- i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
- VRING_DESC_F_NEXT,
- premapped);
+ i = virtqueue_add_desc_split(vq, desc, extra, i, addr,
+ len, flags, premapped);
}
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
dma_addr_t addr;
u32 len;
+ u16 flags = VRING_DESC_F_WRITE;
+
+ if (++sg_count != total_sg)
+ flags |= VRING_DESC_F_NEXT;
if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
goto unmap_release;
- prev = i;
/* Note that we trust indirect descriptor
* table since it use stream DMA mapping.
*/
- i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
- VRING_DESC_F_NEXT |
- VRING_DESC_F_WRITE,
- premapped);
+ i = virtqueue_add_desc_split(vq, desc, extra, i, addr,
+ len, flags, premapped);
}
}
- /* Last one doesn't continue. */
- desc[prev].flags &= cpu_to_virtio16(vq->vq.vdev, ~VRING_DESC_F_NEXT);
- if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
- vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
- ~VRING_DESC_F_NEXT;
if (indirect) {
/* Now that the indirect table is filled in, map it. */