Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/apple.c        |  26
-rw-r--r--  drivers/nvme/host/auth.c         |  16
-rw-r--r--  drivers/nvme/host/core.c         |  76
-rw-r--r--  drivers/nvme/host/fc.c           |  18
-rw-r--r--  drivers/nvme/host/ioctl.c        | 138
-rw-r--r--  drivers/nvme/host/multipath.c    |   2
-rw-r--r--  drivers/nvme/host/nvme.h         |   2
-rw-r--r--  drivers/nvme/host/pci.c          |  61
-rw-r--r--  drivers/nvme/host/tcp.c          |   1
-rw-r--r--  drivers/nvme/target/admin-cmd.c  |  37
-rw-r--r--  drivers/nvme/target/fc.c         |   4
-rw-r--r--  drivers/nvme/target/passthru.c   |  11
12 files changed, 261 insertions, 131 deletions
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index e36aeb50b4ed..b317ce6c4ec3 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -829,7 +829,23 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
 			apple_nvme_remove_cq(anv);
 		}
 
-		nvme_disable_ctrl(&anv->ctrl, shutdown);
+		/*
+		 * Always disable the NVMe controller after shutdown.
+		 * We need to do this to bring it back up later anyway, and we
+		 * can't do it while the firmware is not running (e.g. in the
+		 * resume reset path before RTKit is initialized), so for Apple
+		 * controllers it makes sense to unconditionally do it here.
+		 * Additionally, this sequence of events is reliable, while
+		 * others (like disabling after bringing back the firmware on
+		 * resume) seem to run into trouble under some circumstances.
+		 *
+		 * Both U-Boot and m1n1 also use this convention (i.e. an ANS
+		 * NVMe controller is handed off with firmware shut down, in an
+		 * NVMe disabled state, after a clean shutdown).
+		 */
+		if (shutdown)
+			nvme_disable_ctrl(&anv->ctrl, shutdown);
+		nvme_disable_ctrl(&anv->ctrl, false);
 	}
 
 	WRITE_ONCE(anv->ioq.enabled, false);
@@ -985,11 +1001,11 @@ static void apple_nvme_reset_work(struct work_struct *work)
 		goto out;
 	}
 
-	if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
-		apple_nvme_disable(anv, false);
-
 	/* RTKit must be shut down cleanly for the (soft)-reset to work */
 	if (apple_rtkit_is_running(anv->rtk)) {
+		/* reset the controller if it is enabled */
+		if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+			apple_nvme_disable(anv, false);
 		dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
 		ret = apple_rtkit_shutdown(anv->rtk);
 		if (ret)
@@ -1493,7 +1509,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
 	}
 
 	ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
-			     NVME_QUIRK_SKIP_CID_GEN);
+			     NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
 	if (ret) {
 		dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
 		goto put_dev;
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index bb0abbe4491c..b57630d1d3b8 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -45,6 +45,8 @@ struct nvme_dhchap_queue_context {
 	int sess_key_len;
 };
 
+struct workqueue_struct *nvme_auth_wq;
+
 #define nvme_auth_flags_from_qid(qid) \
 	(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
 #define nvme_auth_queue_from_qid(ctrl, qid) \
@@ -866,7 +868,7 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
 
 	chap = &ctrl->dhchap_ctxs[qid];
 	cancel_work_sync(&chap->auth_work);
-	queue_work(nvme_wq, &chap->auth_work);
+	queue_work(nvme_auth_wq, &chap->auth_work);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
@@ -953,7 +955,7 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
 		goto err_free_dhchap_secret;
 
 	if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
-		return ret;
+		return 0;
 
 	ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
 				sizeof(*chap), GFP_KERNEL);
@@ -1008,10 +1010,15 @@ EXPORT_SYMBOL_GPL(nvme_auth_free);
 
 int __init nvme_init_auth(void)
 {
+	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
+			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	if (!nvme_auth_wq)
+		return -ENOMEM;
+
 	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
 				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
 	if (!nvme_chap_buf_cache)
-		return -ENOMEM;
+		goto err_destroy_workqueue;
 
 	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
 			mempool_free_slab, nvme_chap_buf_cache);
@@ -1021,6 +1028,8 @@ int __init nvme_init_auth(void)
 	return 0;
 err_destroy_chap_buf_cache:
 	kmem_cache_destroy(nvme_chap_buf_cache);
+err_destroy_workqueue:
+	destroy_workqueue(nvme_auth_wq);
 	return -ENOMEM;
 }
 
@@ -1028,4 +1037,5 @@ void __exit nvme_exit_auth(void)
 {
 	mempool_destroy(nvme_chap_buf_pool);
 	kmem_cache_destroy(nvme_chap_buf_cache);
+	destroy_workqueue(nvme_auth_wq);
 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index e26b085a007a..8b6421141162 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1074,6 +1074,18 @@ static u32 nvme_known_admin_effects(u8 opcode)
 	return 0;
 }
 
+static u32 nvme_known_nvm_effects(u8 opcode)
+{
+	switch (opcode) {
+	case nvme_cmd_write:
+	case nvme_cmd_write_zeroes:
+	case nvme_cmd_write_uncor:
+		return NVME_CMD_EFFECTS_LBCC;
+	default:
+		return 0;
+	}
+}
+
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 {
 	u32 effects = 0;
@@ -1081,16 +1093,24 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 	if (ns) {
 		if (ns->head->effects)
 			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+		if (ns->head->ids.csi == NVME_CSI_NVM)
+			effects |= nvme_known_nvm_effects(opcode);
 		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
 			dev_warn_once(ctrl->device,
-				"IO command:%02x has unhandled effects:%08x\n",
+				"IO command:%02x has unusual effects:%08x\n",
 				opcode, effects);
-		return 0;
-	}
 
-	if (ctrl->effects)
-		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
-	effects |= nvme_known_admin_effects(opcode);
+		/*
+		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze all I/O queues,
+		 * which would deadlock when done on an I/O command.  Note that
+		 * We already warn about an unusual effect above.
+		 */
+		effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
+	} else {
+		if (ctrl->effects)
+			effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+		effects |= nvme_known_admin_effects(opcode);
+	}
 
 	return effects;
 }
@@ -2117,11 +2137,34 @@ static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
 	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
 }
 
+static int nvme_sc_to_pr_err(int nvme_sc)
+{
+	if (nvme_is_path_error(nvme_sc))
+		return PR_STS_PATH_FAILED;
+
+	switch (nvme_sc) {
+	case NVME_SC_SUCCESS:
+		return PR_STS_SUCCESS;
+	case NVME_SC_RESERVATION_CONFLICT:
+		return PR_STS_RESERVATION_CONFLICT;
+	case NVME_SC_ONCS_NOT_SUPPORTED:
+		return -EOPNOTSUPP;
+	case NVME_SC_BAD_ATTRIBUTES:
+	case NVME_SC_INVALID_OPCODE:
+	case NVME_SC_INVALID_FIELD:
+	case NVME_SC_INVALID_NS:
+		return -EINVAL;
+	default:
+		return PR_STS_IOERR;
+	}
+}
+
 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
 				u64 key, u64 sa_key, u8 op)
 {
 	struct nvme_command c = { };
 	u8 data[16] = { 0, };
+	int ret;
 
 	put_unaligned_le64(key, &data[0]);
 	put_unaligned_le64(sa_key, &data[8]);
@@ -2131,8 +2174,14 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
 
 	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
 	    bdev->bd_disk->fops == &nvme_ns_head_ops)
-		return nvme_send_ns_head_pr_command(bdev, &c, data);
-	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
+		ret = nvme_send_ns_head_pr_command(bdev, &c, data);
+	else
+		ret = nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c,
+					      data);
+	if (ret < 0)
+		return ret;
+
+	return nvme_sc_to_pr_err(ret);
 }
 
 static int nvme_pr_register(struct block_device *bdev, u64 old,
@@ -4599,9 +4648,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
 
-static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env)
 {
-	struct nvme_ctrl *ctrl =
+	const struct nvme_ctrl *ctrl =
 		container_of(dev, struct nvme_ctrl, ctrl_device);
 	struct nvmf_ctrl_options *opts = ctrl->opts;
 	int ret;
@@ -4872,7 +4921,9 @@ out_cleanup_admin_q:
 	blk_mq_destroy_queue(ctrl->admin_q);
 	blk_put_queue(ctrl->admin_q);
 out_free_tagset:
-	blk_mq_free_tag_set(ctrl->admin_tagset);
+	blk_mq_free_tag_set(set);
+	ctrl->admin_q = NULL;
+	ctrl->fabrics_q = NULL;
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
@@ -4897,7 +4948,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 
 	memset(set, 0, sizeof(*set));
 	set->ops = ops;
-	set->queue_depth = ctrl->sqsize + 1;
+	set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
 	/*
 	 * Some Apple controllers requires tags to be unique across admin and
 	 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
@@ -4934,6 +4985,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 
 out_free_tag_set:
 	blk_mq_free_tag_set(set);
+	ctrl->connect_q = NULL;
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 4564f16a0b20..456ee42a6133 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3521,13 +3521,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	nvme_fc_init_queue(ctrl, 0);
 
-	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
-			&nvme_fc_admin_mq_ops,
-			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
-				    ctrl->lport->ops->fcprqst_priv_sz));
-	if (ret)
-		goto out_free_queues;
-
 	/*
 	 * Would have been nice to init io queues tag set as well.
 	 * However, we require interaction from the controller
@@ -3537,10 +3530,17 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
 	if (ret)
-		goto out_cleanup_tagset;
+		goto out_free_queues;
 
 	/* at this point, teardown path changes to ref counting on nvme ctrl */
 
+	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+			&nvme_fc_admin_mq_ops,
+			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+				    ctrl->lport->ops->fcprqst_priv_sz));
+	if (ret)
+		goto fail_ctrl;
+
 	spin_lock_irqsave(&rport->lock, flags);
 	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
 	spin_unlock_irqrestore(&rport->lock, flags);
@@ -3592,8 +3592,6 @@ fail_ctrl:
 
 	return ERR_PTR(-EIO);
 
-out_cleanup_tagset:
-	nvme_remove_admin_tag_set(&ctrl->ctrl);
 out_free_queues:
 	kfree(ctrl->queues);
 out_free_ida:
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 9ddda571f046..06f52db34be9 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -8,13 +8,27 @@
 #include <linux/io_uring.h>
 #include "nvme.h"
 
+enum {
+	NVME_IOCTL_VEC		= (1 << 0),
+	NVME_IOCTL_PARTITION	= (1 << 1),
+};
+
 static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
-		fmode_t mode)
+		unsigned int flags, fmode_t mode)
 {
+	u32 effects;
+
 	if (capable(CAP_SYS_ADMIN))
 		return true;
 
 	/*
+	 * Do not allow unprivileged passthrough on partitions, as that allows an
+	 * escape from the containment of the partition.
+	 */
+	if (flags & NVME_IOCTL_PARTITION)
+		return false;
+
+	/*
 	 * Do not allow unprivileged processes to send vendor specific or fabrics
 	 * commands as we can't be sure about their effects.
 	 */
@@ -43,11 +57,29 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
 	}
 
 	/*
-	 * Only allow I/O commands that transfer data to the controller if the
-	 * special file is open for writing, but always allow I/O commands that
-	 * transfer data from the controller.
+	 * Check if the controller provides a Commands Supported and Effects log
+	 * and marks this command as supported.  If not reject unprivileged
+	 * passthrough.
+	 */
+	effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
+	if (!(effects & NVME_CMD_EFFECTS_CSUPP))
+		return false;
+
+	/*
+	 * Don't allow passthrough for command that have intrusive (or unknown)
+	 * effects.
+	 */
+	if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+			NVME_CMD_EFFECTS_UUID_SEL |
+			NVME_CMD_EFFECTS_SCOPE_MASK))
+		return false;
+
+	/*
+	 * Only allow I/O commands that transfer data to the controller or that
+	 * change the logical block contents if the file descriptor is open for
+	 * writing.
 	 */
-	if (nvme_is_write(c))
+	if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
 		return mode & FMODE_WRITE;
 	return true;
 }
@@ -130,7 +162,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
-		bool vec)
+		unsigned int flags)
 {
 	struct request_queue *q = req->q;
 	struct nvme_ns *ns = q->queuedata;
@@ -143,7 +175,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		struct iov_iter iter;
 
 		/* fixedbufs is only for non-vectored io */
-		if (WARN_ON_ONCE(vec))
+		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
 			return -EINVAL;
 		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
 				rq_data_dir(req), &iter, ioucmd);
@@ -152,8 +184,8 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
 	} else {
 		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
-				bufflen, GFP_KERNEL, vec, 0, 0,
-				rq_data_dir(req));
+				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
+				0, rq_data_dir(req));
 	}
 
 	if (ret)
@@ -183,9 +215,9 @@ out:
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
-		struct nvme_command *cmd, u64 ubuffer,
-		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
+		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+		u64 *result, unsigned timeout, unsigned int flags)
 {
 	struct nvme_ctrl *ctrl;
 	struct request *req;
@@ -201,7 +233,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	req->timeout = timeout;
 	if (ubuffer && bufflen) {
 		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
-				meta_len, meta_seed, &meta, NULL, vec);
+				meta_len, meta_seed, &meta, NULL, flags);
 		if (ret)
 			return ret;
 	}
@@ -284,10 +316,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.apptag = cpu_to_le16(io.apptag);
 	c.rw.appmask = cpu_to_le16(io.appmask);
 
-	return nvme_submit_user_cmd(ns->queue, &c,
-			io.addr, length,
-			metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
-			false);
+	return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
+			meta_len, lower_32_bits(io.slba), NULL, 0, 0);
 }
 
 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -305,7 +335,8 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
 }
 
 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-			struct nvme_passthru_cmd __user *ucmd, fmode_t mode)
+		struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
+		fmode_t mode)
 {
 	struct nvme_passthru_cmd cmd;
 	struct nvme_command c;
@@ -333,16 +364,15 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
 
-	if (!nvme_cmd_allowed(ns, &c, mode))
+	if (!nvme_cmd_allowed(ns, &c, 0, mode))
 		return -EACCES;
 
 	if (cmd.timeout_ms)
 		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			cmd.addr, cmd.data_len,
-			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
-			0, &result, timeout, false);
+			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+			cmd.metadata_len, 0, &result, timeout, 0);
 
 	if (status >= 0) {
 		if (put_user(result, &ucmd->result))
@@ -353,8 +383,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 }
 
 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-			struct nvme_passthru_cmd64 __user *ucmd, bool vec,
-			fmode_t mode)
+		struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
+		fmode_t mode)
 {
 	struct nvme_passthru_cmd64 cmd;
 	struct nvme_command c;
@@ -381,16 +411,15 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
 
-	if (!nvme_cmd_allowed(ns, &c, mode))
+	if (!nvme_cmd_allowed(ns, &c, flags, mode))
 		return -EACCES;
 
 	if (cmd.timeout_ms)
 		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			cmd.addr, cmd.data_len,
-			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
-			0, &cmd.result, timeout, vec);
+			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+			cmd.metadata_len, 0, &cmd.result, timeout, flags);
 
 	if (status >= 0) {
 		if (put_user(cmd.result, &ucmd->result))
@@ -551,7 +580,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
 	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
 
-	if (!nvme_cmd_allowed(ns, &c, ioucmd->file->f_mode))
+	if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode))
 		return -EACCES;
 
 	d.metadata = READ_ONCE(cmd->metadata);
@@ -621,9 +650,9 @@ static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
 {
 	switch (cmd) {
 	case NVME_IOCTL_ADMIN_CMD:
-		return nvme_user_cmd(ctrl, NULL, argp, mode);
+		return nvme_user_cmd(ctrl, NULL, argp, 0, mode);
 	case NVME_IOCTL_ADMIN64_CMD:
-		return nvme_user_cmd64(ctrl, NULL, argp, false, mode);
+		return nvme_user_cmd64(ctrl, NULL, argp, 0, mode);
 	default:
 		return sed_ioctl(ctrl->opal_dev, cmd, argp);
 	}
@@ -648,14 +677,14 @@ struct nvme_user_io32 {
 #endif /* COMPAT_FOR_U64_ALIGNMENT */
 
 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
-		void __user *argp, fmode_t mode)
+		void __user *argp, unsigned int flags, fmode_t mode)
 {
 	switch (cmd) {
 	case NVME_IOCTL_ID:
 		force_successful_syscall_return();
 		return ns->head->ns_id;
 	case NVME_IOCTL_IO_CMD:
-		return nvme_user_cmd(ns->ctrl, ns, argp, mode);
+		return nvme_user_cmd(ns->ctrl, ns, argp, flags, mode);
 	/*
 	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
 	 * Just accept the compat version as all fields that are used are the
@@ -666,37 +695,40 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
 #endif
 	case NVME_IOCTL_SUBMIT_IO:
 		return nvme_submit_io(ns, argp);
-	case NVME_IOCTL_IO64_CMD:
-		return nvme_user_cmd64(ns->ctrl, ns, argp, false, mode);
 	case NVME_IOCTL_IO64_CMD_VEC:
-		return nvme_user_cmd64(ns->ctrl, ns, argp, true, mode);
+		flags |= NVME_IOCTL_VEC;
+		fallthrough;
+	case NVME_IOCTL_IO64_CMD:
+		return nvme_user_cmd64(ns->ctrl, ns, argp, flags, mode);
 	default:
 		return -ENOTTY;
 	}
 }
 
-static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg,
-			fmode_t mode)
-{
-	if (is_ctrl_ioctl(cmd))
-		return nvme_ctrl_ioctl(ns->ctrl, cmd, arg, mode);
-	return nvme_ns_ioctl(ns, cmd, arg, mode);
-}
-
 int nvme_ioctl(struct block_device *bdev, fmode_t mode,
 		unsigned int cmd, unsigned long arg)
 {
 	struct nvme_ns *ns = bdev->bd_disk->private_data;
+	void __user *argp = (void __user *)arg;
+	unsigned int flags = 0;
+
+	if (bdev_is_partition(bdev))
+		flags |= NVME_IOCTL_PARTITION;
 
-	return __nvme_ioctl(ns, cmd, (void __user *)arg, mode);
+	if (is_ctrl_ioctl(cmd))
+		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
+	return nvme_ns_ioctl(ns, cmd, argp, flags, mode);
 }
 
 long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct nvme_ns *ns =
 		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
+	void __user *argp = (void __user *)arg;
 
-	return __nvme_ioctl(ns, cmd, (void __user *)arg, file->f_mode);
+	if (is_ctrl_ioctl(cmd))
+		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, file->f_mode);
+	return nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
 }
 
 static int nvme_uring_cmd_checks(unsigned int issue_flags)
@@ -786,6 +818,10 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
 	void __user *argp = (void __user *)arg;
 	struct nvme_ns *ns;
 	int srcu_idx, ret = -EWOULDBLOCK;
+	unsigned int flags = 0;
+
+	if (bdev_is_partition(bdev))
+		flags |= NVME_IOCTL_PARTITION;
 
 	srcu_idx = srcu_read_lock(&head->srcu);
 	ns = nvme_find_path(head);
@@ -801,7 +837,7 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
 		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
 					       mode);
 
-	ret = nvme_ns_ioctl(ns, cmd, argp, mode);
+	ret = nvme_ns_ioctl(ns, cmd, argp, flags, mode);
 out_unlock:
 	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
@@ -826,7 +862,7 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
 		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
 				file->f_mode);
 
-	ret = nvme_ns_ioctl(ns, cmd, argp, file->f_mode);
+	ret = nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
 out_unlock:
 	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
@@ -925,7 +961,7 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
 	kref_get(&ns->kref);
 	up_read(&ctrl->namespaces_rwsem);
 
-	ret = nvme_user_cmd(ctrl, ns, argp, mode);
+	ret = nvme_user_cmd(ctrl, ns, argp, 0, mode);
 	nvme_put_ns(ns);
 	return ret;
 
@@ -942,9 +978,9 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 
 	switch (cmd) {
 	case NVME_IOCTL_ADMIN_CMD:
-		return nvme_user_cmd(ctrl, NULL, argp, file->f_mode);
+		return nvme_user_cmd(ctrl, NULL, argp, 0, file->f_mode);
 	case NVME_IOCTL_ADMIN64_CMD:
-		return nvme_user_cmd64(ctrl, NULL, argp, false, file->f_mode);
+		return nvme_user_cmd64(ctrl, NULL, argp, 0, file->f_mode);
 	case NVME_IOCTL_IO_CMD:
 		return nvme_dev_user_cmd(ctrl, argp, file->f_mode);
 	case NVME_IOCTL_RESET:
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index c03093b6813c..fc39d01e7b63 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -376,6 +376,8 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
 	 * pool from the original queue to allocate the bvecs from.
 	 */
 	bio = bio_split_to_limits(bio);
+	if (!bio)
+		return;
 
 	srcu_idx = srcu_read_lock(&head->srcu);
 	ns = nvme_find_path(head);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 6bbb73ef8b25..424c8a467a0c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -893,7 +893,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
 {
 	struct nvme_ns *ns = req->q->queuedata;
 
-	if (req->cmd_flags & REQ_NVME_MPATH)
+	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
 		trace_block_bio_complete(ns->head->disk->queue, req->bio);
 }
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f0f8027644bb..c734934c407c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -36,7 +36,7 @@
 #define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
 #define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
 
-#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+#define SGES_PER_PAGE	(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
 /*
  * These can be higher, but we need to ensure that any command doesn't
@@ -144,9 +144,9 @@ struct nvme_dev {
 	mempool_t *iod_mempool;
 
 	/* shadow doorbell buffer support: */
-	u32 *dbbuf_dbs;
+	__le32 *dbbuf_dbs;
 	dma_addr_t dbbuf_dbs_dma_addr;
-	u32 *dbbuf_eis;
+	__le32 *dbbuf_eis;
 	dma_addr_t dbbuf_eis_dma_addr;
 
 	/* host memory buffer support: */
@@ -208,10 +208,10 @@ struct nvme_queue {
 #define NVMEQ_SQ_CMB		1
 #define NVMEQ_DELETE_ERROR	2
 #define NVMEQ_POLLED		3
-	u32 *dbbuf_sq_db;
-	u32 *dbbuf_cq_db;
-	u32 *dbbuf_sq_ei;
-	u32 *dbbuf_cq_ei;
+	__le32 *dbbuf_sq_db;
+	__le32 *dbbuf_cq_db;
+	__le32 *dbbuf_sq_ei;
+	__le32 *dbbuf_cq_ei;
 	struct completion delete_done;
 };
 
@@ -343,11 +343,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
 }
 
 /* Update dbbuf and return true if an MMIO is required */
-static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
-					      volatile u32 *dbbuf_ei)
+static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
+					      volatile __le32 *dbbuf_ei)
 {
 	if (dbbuf_db) {
-		u16 old_value;
+		u16 old_value, event_idx;
 
 		/*
 		 * Ensure that the queue is written before updating
@@ -355,8 +355,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
 		 */
 		wmb();
 
-		old_value = *dbbuf_db;
-		*dbbuf_db = value;
+		old_value = le32_to_cpu(*dbbuf_db);
+		*dbbuf_db = cpu_to_le32(value);
 
 		/*
 		 * Ensure that the doorbell is updated before reading the event
@@ -366,7 +366,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
 		 */
 		mb();
 
-		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
+		event_idx = le32_to_cpu(*dbbuf_ei);
+		if (!nvme_dbbuf_need_event(event_idx, value, old_value))
 			return false;
 	}
 
@@ -380,9 +381,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
  */
static int nvme_pci_npages_prp(void)
 {
-	unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
-				      NVME_CTRL_PAGE_SIZE);
-	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+	unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
+	unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
+	return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
 }
 
 /*
@@ -392,7 +393,7 @@ static int nvme_pci_npages_prp(void)
 static int nvme_pci_npages_sgl(void)
 {
 	return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
-			PAGE_SIZE);
+			NVME_CTRL_PAGE_SIZE);
 }
 
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -708,7 +709,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
 		sge->length = cpu_to_le32(entries * sizeof(*sge));
 		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
 	} else {
-		sge->length = cpu_to_le32(PAGE_SIZE);
+		sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
 		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
 	}
 }
@@ -1361,7 +1362,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 	else
 		nvme_poll_irqdisable(nvmeq);
 
-	if (blk_mq_request_completed(req)) {
+	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
 		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, completion polled\n",
 			 req->tag, nvmeq->qid);
@@ -2332,10 +2333,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (dev->cmb_use_sqes) {
 		result = nvme_cmb_qdepth(dev, nr_io_queues,
 				sizeof(struct nvme_command));
-		if (result > 0)
+		if (result > 0) {
 			dev->q_depth = result;
-		else
+			dev->ctrl.sqsize = result - 1;
+		} else {
 			dev->cmb_use_sqes = false;
+		}
 	}
 
 	do {
@@ -2530,13 +2533,12 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	 */
 	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
 	if (result < 0)
-		return result;
+		goto disable;
 
 	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 
 	dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
 				io_queue_depth);
-	dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
 	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
 	dev->dbs = dev->bar + 4096;
 
@@ -2577,15 +2579,20 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 		dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
 			 dev->q_depth);
 	}
-
+	dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
 
 	nvme_map_cmb(dev);
 
 	pci_enable_pcie_error_reporting(pdev);
 	pci_save_state(pdev);
 
-	return nvme_pci_configure_admin_queue(dev);
+	result = nvme_pci_configure_admin_queue(dev);
+	if (result)
+		goto free_irq;
+	return result;
 
+ free_irq:
+	pci_free_irq_vectors(pdev);
 disable:
 	pci_disable_device(pdev);
 	return result;
@@ -3095,6 +3102,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	nvme_start_ctrl(&dev->ctrl);
 	nvme_put_ctrl(&dev->ctrl);
+	flush_work(&dev->ctrl.scan_work);
 	return 0;
 
 out_disable:
@@ -3493,7 +3501,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
 				NVME_QUIRK_128_BYTES_SQES |
 				NVME_QUIRK_SHARED_TAGS |
-				NVME_QUIRK_SKIP_CID_GEN },
+				NVME_QUIRK_SKIP_CID_GEN |
+				NVME_QUIRK_IDENTIFY_CNS },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }
 };
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b69b89166b6b..8cedc1ef496c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1537,6 +1537,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 	queue->sock->sk->sk_rcvtimeo = 10 * HZ;
 
 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
+	queue->sock->sk->sk_use_task_frag = false;
 	nvme_tcp_set_queue_io_cpu(queue);
 	queue->request = NULL;
 	queue->data_remaining = 0;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 53a004ea320c..6a54ed6fb121 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -164,26 +164,31 @@ out:
 
 static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
 {
-	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
-	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
-	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
-	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
-	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
-	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
-	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);
-
-	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
-	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
-	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
-	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
-	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
+	log->acs[nvme_admin_get_log_page] =
+	log->acs[nvme_admin_identify] =
+	log->acs[nvme_admin_abort_cmd] =
+	log->acs[nvme_admin_set_features] =
+	log->acs[nvme_admin_get_features] =
+	log->acs[nvme_admin_async_event] =
+	log->acs[nvme_admin_keep_alive] =
+		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+
+	log->iocs[nvme_cmd_read] =
+	log->iocs[nvme_cmd_flush] =
+	log->iocs[nvme_cmd_dsm] =
+		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+	log->iocs[nvme_cmd_write] =
+	log->iocs[nvme_cmd_write_zeroes] =
+		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
 }
 
 static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
 {
-	log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
-	log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
-	log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
+	log->iocs[nvme_cmd_zone_append] =
+	log->iocs[nvme_cmd_zone_mgmt_send] =
+		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
+	log->iocs[nvme_cmd_zone_mgmt_recv] =
+		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
 }
 
 static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ab2627e17bb9..1ab6601fdd5c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 		else {
 			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
 					be16_to_cpu(rqst->assoc_cmd.sqsize));
-			if (!queue)
+			if (!queue) {
 				ret = VERR_QUEUE_ALLOC_FAIL;
+				nvmet_fc_tgt_a_put(iod->assoc);
+			}
 		}
 	}
 
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 79af5140af8b..adc0958755d6 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -334,14 +334,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 	}
 
 	/*
-	 * If there are effects for the command we are about to execute, or
-	 * an end_req function we need to use nvme_execute_passthru_rq()
-	 * synchronously in a work item seeing the end_req function and
-	 * nvme_passthru_end() can't be called in the request done callback
-	 * which is typically in interrupt context.
+	 * If a command needs post-execution fixups, or there are any
+	 * non-trivial effects, make sure to execute the command synchronously
+	 * in a workqueue so that nvme_passthru_end gets called.
 	 */
 	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
-	if (req->p.use_workqueue || effects) {
+	if (req->p.use_workqueue ||
+	    (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
 		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
 		req->p.rq = rq;
 		queue_work(nvmet_wq, &req->p.work);