Diffstat (limited to 'drivers/nvme/host/ioctl.c')
 -rw-r--r--  drivers/nvme/host/ioctl.c | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 61af1583356c..64ae8af01d9a 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -120,19 +120,31 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	struct nvme_ns *ns = q->queuedata;
 	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
 	bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
+	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 	bool has_metadata = meta_buffer && meta_len;
 	struct bio *bio = NULL;
 	int ret;
 
-	if (has_metadata && !supports_metadata)
-		return -EINVAL;
+	if (!nvme_ctrl_sgl_supported(ctrl))
+		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
+	if (has_metadata) {
+		if (!supports_metadata) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (!nvme_ctrl_meta_sgl_supported(ctrl))
+			dev_warn_once(ctrl->device,
+				      "using unchecked metadata buffer\n");
+	}
 
 	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
 		struct iov_iter iter;
 
 		/* fixedbufs is only for non-vectored io */
-		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
-			return -EINVAL;
+		if (flags & NVME_IOCTL_VEC) {
+			ret = -EINVAL;
+			goto out;
+		}
 		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
 				rq_data_dir(req), &iter, ioucmd);
 		if (ret < 0)
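
For context between the hunks: the two support checks added above are small inline helpers from drivers/nvme/host/nvme.h, keyed off the SGLS field of the Identify Controller data that the driver caches in ctrl->sgls. A rough sketch of them follows; the bit positions and the NVME_CTRL_SGLS_MSDS constant name are from memory and may differ by kernel version, so treat this as illustrative rather than authoritative:

/* Sketch of the helpers the hunk above calls (not part of this diff).
 * SGLS bits 1:0 report whether the controller supports SGLs for data
 * transfers at all; a separate bit (MSDS) reports whether metadata can
 * be described by a single SGL data block.
 */
static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	/* SGLS bits 1:0 == 0 means no SGL support */
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}

static inline bool nvme_ctrl_meta_sgl_supported(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_sgl_supported(ctrl))
		return false;
	return ctrl->sgls & NVME_CTRL_SGLS_MSDS;	/* assumed constant name */
}

The point of the warnings, as the "unchecked" wording suggests, is that an SGL bounds the device-side transfer to the described buffer length, whereas a PRP-based transfer is bounded only by the length encoded in the passthrough command itself, which the driver cannot validate against the user buffer.
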
@@ -430,21 +442,14 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 	pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
 
 	/*
-	 * For iopoll, complete it directly. Note that using the uring_cmd
-	 * helper for this is safe only because we check blk_rq_is_poll().
-	 * As that returns false if we're NOT on a polled queue, then it's
-	 * safe to use the polled completion helper.
-	 *
-	 * Otherwise, move the completion to task work.
+	 * IOPOLL could potentially complete this request directly, but
+	 * if multiple rings are polling on the same queue, then it's possible
+	 * for one ring to find completions for another ring. Punting the
+	 * completion via task_work will always direct it to the right
+	 * location, rather than potentially complete requests for ringA
+	 * under iopoll invocations from ringB.
	 */
-	if (blk_rq_is_poll(req)) {
-		if (pdu->bio)
-			blk_rq_unmap_user(pdu->bio);
-		io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
-	} else {
-		io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
-	}
-
+	io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
 	return RQ_END_IO_FREE;
 }
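
With this hunk applied, polled completions no longer short-circuit through io_uring_cmd_iopoll_done(); every uring_cmd completion is punted to task_work, which runs in the context of the task that owns the submitting ring, so a request from ring A can never be completed under ring B's iopoll loop. The callback it punts to, nvme_uring_task_cb(), also performs the blk_rq_unmap_user() that the removed branch did inline. For reference, that callback earlier in the same file reads roughly as follows (a sketch from surrounding context, not part of this diff):

/* Sketch of the task_work callback the hunk above punts to; the real
 * definition lives earlier in drivers/nvme/host/ioctl.c.
 */
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
			       unsigned issue_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	/* Unmap the user buffer; the deleted blk_rq_is_poll() branch
	 * used to do this inline for polled requests. */
	if (pdu->bio)
		blk_rq_unmap_user(pdu->bio);
	io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}
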