Diffstat (limited to 'drivers/nvme')
-rw-r--r-- | drivers/nvme/host/core.c | 132
-rw-r--r-- | drivers/nvme/host/fabrics.c | 6
-rw-r--r-- | drivers/nvme/host/fc.c | 17
-rw-r--r-- | drivers/nvme/host/hwmon.c | 31
-rw-r--r-- | drivers/nvme/host/lightnvm.c | 7
-rw-r--r-- | drivers/nvme/host/multipath.c | 12
-rw-r--r-- | drivers/nvme/host/nvme.h | 26
-rw-r--r-- | drivers/nvme/host/pci.c | 156
-rw-r--r-- | drivers/nvme/host/rdma.c | 51
-rw-r--r-- | drivers/nvme/host/tcp.c | 85
-rw-r--r-- | drivers/nvme/host/trace.c | 53
-rw-r--r-- | drivers/nvme/host/zns.c | 11
-rw-r--r-- | drivers/nvme/target/admin-cmd.c | 118
-rw-r--r-- | drivers/nvme/target/configfs.c | 6
-rw-r--r-- | drivers/nvme/target/core.c | 37
-rw-r--r-- | drivers/nvme/target/fc.c | 83
-rw-r--r-- | drivers/nvme/target/fcloop.c | 9
-rw-r--r-- | drivers/nvme/target/io-cmd-bdev.c | 15
-rw-r--r-- | drivers/nvme/target/io-cmd-file.c | 5
-rw-r--r-- | drivers/nvme/target/nvmet.h | 20
-rw-r--r-- | drivers/nvme/target/passthru.c | 8
-rw-r--r-- | drivers/nvme/target/rdma.c | 26
-rw-r--r-- | drivers/nvme/target/tcp.c | 62
-rw-r--r-- | drivers/nvme/target/trace.h | 9 |
24 files changed, 630 insertions, 355 deletions
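As context for the patch text that follows, here is a short illustrative sketch (not part of the series) of how a fabrics transport might use the new host-core helpers introduced in drivers/nvme/host/core.c below: nvme_host_path_error(), nvme_cancel_tagset() and nvme_cancel_admin_tagset(). The foo_* names are hypothetical placeholders; the error mapping mirrors the rdma.c hunk further down.

/*
 * Illustrative sketch only, not part of the patch.  Assumes the helper
 * signatures added in drivers/nvme/host/core.c in this series; the foo_*
 * names are hypothetical transport-specific functions.
 */
#include <linux/blkdev.h>
#include "nvme.h"

/* Map a transport submission error in ->queue_rq(), as rdma.c does below. */
static blk_status_t foo_map_submit_error(struct request *rq, int err)
{
	/*
	 * -EIO means the path is dead: complete the request with
	 * NVME_SC_HOST_PATH_ERROR so nvme_complete_rq() can fail over to
	 * another path instead of erroring the I/O back to the caller.
	 */
	if (err == -EIO)
		return nvme_host_path_error(rq);
	if (err == -ENOMEM || err == -EAGAIN)
		return BLK_STS_RESOURCE;
	return BLK_STS_IOERR;
}

/* Tear down I/O queues, replacing the open-coded busy_iter pattern. */
static void foo_teardown_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_stop_queues(ctrl);
	nvme_sync_io_queues(ctrl);
	/* transport-specific queue stop would go here */
	nvme_cancel_tagset(ctrl);
	if (remove)
		nvme_start_queues(ctrl);
}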
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ce1b61519441..e68a8c4ac5a6 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_reset_ctrl); -int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) +static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) { int ret; @@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) return ret; } -EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync); static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) { @@ -280,14 +279,13 @@ static blk_status_t nvme_error_status(u16 status) static void nvme_retry_req(struct request *req) { - struct nvme_ns *ns = req->q->queuedata; unsigned long delay = 0; u16 crd; /* The mask and shift result must be <= 3 */ crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; - if (ns && crd) - delay = ns->ctrl->crdt[crd - 1] * 100; + if (crd) + delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; nvme_req(req)->retries++; blk_mq_requeue_request(req, false); @@ -331,7 +329,7 @@ static inline void nvme_end_req(struct request *req) req->__sector = nvme_lba_to_sect(req->q->queuedata, le64_to_cpu(nvme_req(req)->result.u64)); - nvme_trace_bio_complete(req, status); + nvme_trace_bio_complete(req); blk_mq_end_request(req, status); } @@ -357,6 +355,21 @@ void nvme_complete_rq(struct request *req) } EXPORT_SYMBOL_GPL(nvme_complete_rq); +/* + * Called to unwind from ->queue_rq on a failed command submission so that the + * multipathing code gets called to potentially failover to another path. + * The caller needs to unwind all transport specific resource allocations and + * must return propagate the return value. + */ +blk_status_t nvme_host_path_error(struct request *req) +{ + nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; + blk_mq_set_request_complete(req); + nvme_complete_rq(req); + return BLK_STS_OK; +} +EXPORT_SYMBOL_GPL(nvme_host_path_error); + bool nvme_cancel_request(struct request *req, void *data, bool reserved) { dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, @@ -372,6 +385,26 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) } EXPORT_SYMBOL_GPL(nvme_cancel_request); +void nvme_cancel_tagset(struct nvme_ctrl *ctrl) +{ + if (ctrl->tagset) { + blk_mq_tagset_busy_iter(ctrl->tagset, + nvme_cancel_request, ctrl); + blk_mq_tagset_wait_completed_request(ctrl->tagset); + } +} +EXPORT_SYMBOL_GPL(nvme_cancel_tagset); + +void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) +{ + if (ctrl->admin_tagset) { + blk_mq_tagset_busy_iter(ctrl->admin_tagset, + nvme_cancel_request, ctrl); + blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); + } +} +EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset); + bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, enum nvme_ctrl_state new_state) { @@ -578,7 +611,7 @@ struct request *nvme_alloc_request(struct request_queue *q, } EXPORT_SYMBOL_GPL(nvme_alloc_request); -struct request *nvme_alloc_request_qid(struct request_queue *q, +static struct request *nvme_alloc_request_qid(struct request_queue *q, struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid) { struct request *req; @@ -589,7 +622,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q, nvme_init_request(req, cmd); return req; } -EXPORT_SYMBOL_GPL(nvme_alloc_request_qid); static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) { @@ -844,11 +876,11 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, void nvme_cleanup_cmd(struct request *req) { if 
(req->rq_flags & RQF_SPECIAL_PAYLOAD) { - struct nvme_ns *ns = req->rq_disk->private_data; + struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; struct page *page = req->special_vec.bv_page; - if (page == ns->ctrl->discard_page) - clear_bit_unlock(0, &ns->ctrl->discard_page_busy); + if (page == ctrl->discard_page) + clear_bit_unlock(0, &ctrl->discard_page_busy); else kfree(page_address(page) + req->special_vec.bv_offset); } @@ -927,7 +959,7 @@ static void nvme_execute_rq_polled(struct request_queue *q, rq->cmd_flags |= REQ_HIPRI; rq->end_io_data = &wait; - blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq); + blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq); while (!completion_done(&wait)) { blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true); @@ -966,7 +998,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, if (poll) nvme_execute_rq_polled(req->q, NULL, req, at_head); else - blk_execute_rq(req->q, NULL, req, at_head); + blk_execute_rq(NULL, req, at_head); if (result) *result = nvme_req(req)->result; if (nvme_req(req)->flags & NVME_REQ_CANCELLED) @@ -1103,7 +1135,7 @@ void nvme_execute_passthru_rq(struct request *rq) u32 effects; effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode); - blk_execute_rq(rq->q, disk, rq, 0); + blk_execute_rq(disk, rq, 0); nvme_passthru_end(ctrl, effects); } EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU); @@ -1115,7 +1147,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, { bool write = nvme_is_write(cmd); struct nvme_ns *ns = q->queuedata; - struct gendisk *disk = ns ? ns->disk : NULL; + struct block_device *bdev = ns ? ns->disk->part0 : NULL; struct request *req; struct bio *bio = NULL; void *meta = NULL; @@ -1135,8 +1167,9 @@ static int nvme_submit_user_cmd(struct request_queue *q, if (ret) goto out; bio = req->bio; - bio->bi_disk = disk; - if (disk && meta_buffer && meta_len) { + if (bdev) + bio_set_dev(bio, bdev); + if (bdev && meta_buffer && meta_len) { meta = nvme_add_user_metadata(bio, meta_buffer, meta_len, meta_seed, write); if (IS_ERR(meta)) { @@ -1204,7 +1237,7 @@ static int nvme_keep_alive(struct nvme_ctrl *ctrl) rq->timeout = ctrl->kato * HZ; rq->end_io_data = ctrl; - blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io); + blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io); return 0; } @@ -1545,8 +1578,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) } length = (io.nblocks + 1) << ns->lba_shift; - meta_len = (io.nblocks + 1) * ns->ms; - metadata = nvme_to_user_ptr(io.metadata); + + if ((io.control & NVME_RW_PRINFO_PRACT) && + ns->ms == sizeof(struct t10_pi_tuple)) { + /* + * Protection information is stripped/inserted by the + * controller. 
+ */ + if (nvme_to_user_ptr(io.metadata)) + return -EINVAL; + meta_len = 0; + metadata = NULL; + } else { + meta_len = (io.nblocks + 1) * ns->ms; + metadata = nvme_to_user_ptr(io.metadata); + } if (ns->features & NVME_NS_EXT_LBAS) { length += meta_len; @@ -2114,9 +2160,8 @@ static void nvme_update_disk_info(struct gendisk *disk, nvme_config_discard(disk, ns); nvme_config_write_zeroes(disk, ns); - if ((id->nsattr & NVME_NS_ATTR_RO) || - test_bit(NVME_NS_FORCE_RO, &ns->flags)) - set_disk_ro(disk, true); + set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) || + test_bit(NVME_NS_FORCE_RO, &ns->flags)); } static inline bool nvme_first_scan(struct gendisk *disk) @@ -2165,17 +2210,18 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) ns->lba_shift = id->lbaf[lbaf].ds; nvme_set_queue_limits(ns->ctrl, ns->queue); + ret = nvme_configure_metadata(ns, id); + if (ret) + goto out_unfreeze; + nvme_set_chunk_sectors(ns, id); + nvme_update_disk_info(ns->disk, ns, id); + if (ns->head->ids.csi == NVME_CSI_ZNS) { ret = nvme_update_zone_info(ns, lbaf); if (ret) goto out_unfreeze; } - ret = nvme_configure_metadata(ns, id); - if (ret) - goto out_unfreeze; - nvme_set_chunk_sectors(ns, id); - nvme_update_disk_info(ns->disk, ns, id); blk_mq_unfreeze_queue(ns->disk->queue); if (blk_queue_is_zoned(ns->queue)) { @@ -2819,7 +2865,7 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev, struct nvme_subsystem *subsys = container_of(dev, struct nvme_subsystem, dev); - return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn); + return sysfs_emit(buf, "%s\n", subsys->subnqn); } static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); @@ -2849,7 +2895,7 @@ static struct attribute *nvme_subsys_attrs[] = { NULL, }; -static struct attribute_group nvme_subsys_attrs_group = { +static const struct attribute_group nvme_subsys_attrs_group = { .attrs = nvme_subsys_attrs, }; @@ -2858,6 +2904,11 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = { NULL, }; +static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) +{ + return ctrl->opts && ctrl->opts->discovery_nqn; +} + static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { @@ -2877,7 +2928,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, } if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || - (ctrl->opts && ctrl->opts->discovery_nqn)) + nvme_discovery_ctrl(ctrl)) continue; dev_err(ctrl->device, @@ -3146,7 +3197,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) goto out_free; } - if (!ctrl->opts->discovery_nqn && !ctrl->kas) { + if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { dev_err(ctrl->device, "keep-alive support is mandatory for fabrics\n"); ret = -EINVAL; @@ -3186,7 +3237,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) if (ret < 0) return ret; - if (!ctrl->identified) { + if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { ret = nvme_hwmon_init(ctrl); if (ret < 0) return ret; @@ -3507,7 +3558,7 @@ static ssize_t nvme_sysfs_show_transport(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); + return sysfs_emit(buf, "%s\n", ctrl->ops->name); } static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); @@ -3541,7 +3592,7 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); + return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn); } 
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); @@ -3551,7 +3602,7 @@ static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn); + return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn); } static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); @@ -3561,7 +3612,7 @@ static ssize_t nvme_sysfs_show_hostid(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id); + return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id); } static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); @@ -3679,7 +3730,7 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, return a->mode; } -static struct attribute_group nvme_dev_attrs_group = { +static const struct attribute_group nvme_dev_attrs_group = { .attrs = nvme_dev_attrs, .is_visible = nvme_dev_attrs_are_visible, }; @@ -3813,7 +3864,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, } } - list_add_tail(&ns->siblings, &head->list); + list_add_tail_rcu(&ns->siblings, &head->list); ns->head = head; mutex_unlock(&ctrl->subsys->lock); return 0; @@ -4422,6 +4473,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) { + nvme_hwmon_exit(ctrl); nvme_fault_inject_fini(&ctrl->fault_inject); dev_pm_qos_hide_latency_tolerance(ctrl->device); cdev_device_del(&ctrl->cdev, ctrl->device); @@ -4434,7 +4486,7 @@ static void nvme_free_cels(struct nvme_ctrl *ctrl) struct nvme_effects_log *cel; unsigned long i; - xa_for_each (&ctrl->cels, i, cel) { + xa_for_each(&ctrl->cels, i, cel) { xa_erase(&ctrl->cels, i); kfree(cel); } diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 72ac00173500..5dfd806fc2d2 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -552,11 +552,7 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl, !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) return BLK_STS_RESOURCE; - - nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR; - blk_mq_start_request(rq); - nvme_complete_rq(rq); - return BLK_STS_OK; + return nvme_host_path_error(rq); } EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 38373a0e86ef..20dadd86e981 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -166,6 +166,7 @@ struct nvme_fc_ctrl { struct blk_mq_tag_set admin_tag_set; struct blk_mq_tag_set tag_set; + struct work_struct ioerr_work; struct delayed_work connect_work; struct kref ref; @@ -1889,6 +1890,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, } static void +nvme_fc_ctrl_ioerr_work(struct work_struct *work) +{ + struct nvme_fc_ctrl *ctrl = + container_of(work, struct nvme_fc_ctrl, ioerr_work); + + nvme_fc_error_recovery(ctrl, "transport detected io error"); +} + +static void nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) { struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); @@ -2046,7 +2056,7 @@ done: check_error: if (terminate_assoc) - nvme_fc_error_recovery(ctrl, "transport detected io error"); + queue_work(nvme_reset_wq, &ctrl->ioerr_work); } static int @@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) { struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); + cancel_work_sync(&ctrl->ioerr_work); cancel_delayed_work_sync(&ctrl->connect_work); /* * kill the 
association on the link side. this will block @@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); + INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); spin_lock_init(&ctrl->lock); /* io queue count */ @@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, fail_ctrl: nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); + cancel_work_sync(&ctrl->ioerr_work); cancel_work_sync(&ctrl->ctrl.reset_work); cancel_delayed_work_sync(&ctrl->connect_work); @@ -3776,7 +3789,7 @@ static struct attribute *nvme_fc_attrs[] = { NULL }; -static struct attribute_group nvme_fc_attr_group = { +static const struct attribute_group nvme_fc_attr_group = { .attrs = nvme_fc_attrs, }; diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 552dbc04567b..8f9e96986780 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -223,12 +223,12 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = { int nvme_hwmon_init(struct nvme_ctrl *ctrl) { - struct device *dev = ctrl->dev; + struct device *dev = ctrl->device; struct nvme_hwmon_data *data; struct device *hwmon; int err; - data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return 0; @@ -237,19 +237,30 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) err = nvme_hwmon_get_smart_log(data); if (err) { - dev_warn(ctrl->device, - "Failed to read smart log (error %d)\n", err); - devm_kfree(dev, data); + dev_warn(dev, "Failed to read smart log (error %d)\n", err); + kfree(data); return err; } - hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data, - &nvme_hwmon_chip_info, - NULL); + hwmon = hwmon_device_register_with_info(dev, "nvme", + data, &nvme_hwmon_chip_info, + NULL); if (IS_ERR(hwmon)) { dev_warn(dev, "Failed to instantiate hwmon device\n"); - devm_kfree(dev, data); + kfree(data); } - + ctrl->hwmon_device = hwmon; return 0; } + +void nvme_hwmon_exit(struct nvme_ctrl *ctrl) +{ + if (ctrl->hwmon_device) { + struct nvme_hwmon_data *data = + dev_get_drvdata(ctrl->hwmon_device); + + hwmon_device_unregister(ctrl->hwmon_device); + ctrl->hwmon_device = NULL; + kfree(data); + } +} diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 470cef3abec3..b705988629f2 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -695,7 +695,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd, rq->end_io_data = rqd; - blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io); + blk_execute_rq_nowait(NULL, rq, 0, nvme_nvm_end_io); return 0; @@ -757,7 +757,6 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q, { bool write = nvme_is_write((struct nvme_command *)vcmd); struct nvm_dev *dev = ns->ndev; - struct gendisk *disk = ns->disk; struct request *rq; struct bio *bio = NULL; __le64 *ppa_list = NULL; @@ -817,10 +816,10 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q, vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma); } - bio->bi_disk = disk; + bio_set_dev(bio, ns->disk->part0); } - blk_execute_rq(q, NULL, rq, 0); + blk_execute_rq(NULL, rq, 0); if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) ret = -EINTR; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 9ac762b28811..a1d476e1ac02 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ 
-221,7 +221,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, } for (ns = nvme_next_ns(head, old); - ns != old; + ns && ns != old; ns = nvme_next_ns(head, ns)) { if (nvme_path_is_disabled(ns)) continue; @@ -296,7 +296,7 @@ static bool nvme_available_path(struct nvme_ns_head *head) blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) { - struct nvme_ns_head *head = bio->bi_disk->private_data; + struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data; struct device *dev = disk_to_dev(head->disk); struct nvme_ns *ns; blk_qc_t ret = BLK_QC_T_NONE; @@ -312,7 +312,7 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); if (likely(ns)) { - bio->bi_disk = ns->disk; + bio_set_dev(bio, ns->disk->part0); bio->bi_opf |= REQ_NVME_MPATH; trace_block_bio_remap(bio, disk_devt(ns->head->disk), bio->bi_iter.bi_sector); @@ -352,7 +352,7 @@ static void nvme_requeue_work(struct work_struct *work) * Reset disk to the mpath node and resubmit to select a new * path. */ - bio->bi_disk = head->disk; + bio_set_dev(bio, head->disk->part0); submit_bio_noacct(bio); } } @@ -677,6 +677,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) if (blk_queue_stable_writes(ns->queue) && ns->head->disk) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->head->disk->queue); +#ifdef CONFIG_BLK_DEV_ZONED + if (blk_queue_is_zoned(ns->queue) && ns->head->disk) + ns->head->disk->queue->nr_zones = ns->queue->nr_zones; +#endif } void nvme_mpath_remove_disk(struct nvme_ns_head *head) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 7e49f61f81df..07b34175c6ce 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -144,6 +144,12 @@ enum nvme_quirks { * NVMe 1.3 compliance. */ NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15), + + /* + * The controller does not properly handle DMA addresses over + * 48 bits. 
+ */ + NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16), }; /* @@ -246,6 +252,9 @@ struct nvme_ctrl { struct rw_semaphore namespaces_rwsem; struct device ctrl_device; struct device *device; /* char device */ +#ifdef CONFIG_NVME_HWMON + struct device *hwmon_device; +#endif struct cdev cdev; struct work_struct reset_work; struct work_struct delete_work; @@ -575,7 +584,10 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id) } void nvme_complete_rq(struct request *req); +blk_status_t nvme_host_path_error(struct request *req); bool nvme_cancel_request(struct request *req, void *data, bool reserved); +void nvme_cancel_tagset(struct nvme_ctrl *ctrl); +void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl); bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, enum nvme_ctrl_state new_state); bool nvme_wait_reset(struct nvme_ctrl *ctrl); @@ -610,8 +622,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl); #define NVME_QID_ANY -1 struct request *nvme_alloc_request(struct request_queue *q, struct nvme_command *cmd, blk_mq_req_flags_t flags); -struct request *nvme_alloc_request_qid(struct request_queue *q, - struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid); void nvme_cleanup_cmd(struct request *req); blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, struct nvme_command *cmd); @@ -630,7 +640,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl); -int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl); int nvme_try_sched_reset(struct nvme_ctrl *ctrl); int nvme_delete_ctrl(struct nvme_ctrl *ctrl); @@ -675,8 +684,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) kblockd_schedule_work(&head->requeue_work); } -static inline void nvme_trace_bio_complete(struct request *req, - blk_status_t status) +static inline void nvme_trace_bio_complete(struct request *req) { struct nvme_ns *ns = req->q->queuedata; @@ -731,8 +739,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) { } -static inline void nvme_trace_bio_complete(struct request *req, - blk_status_t status) +static inline void nvme_trace_bio_complete(struct request *req) { } static inline int nvme_mpath_init(struct nvme_ctrl *ctrl, @@ -814,11 +821,16 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) #ifdef CONFIG_NVME_HWMON int nvme_hwmon_init(struct nvme_ctrl *ctrl); +void nvme_hwmon_exit(struct nvme_ctrl *ctrl); #else static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl) { return 0; } + +static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl) +{ +} #endif u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b4385cb0ff60..7b6632c00ffd 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -23,6 +23,7 @@ #include <linux/t10-pi.h> #include <linux/types.h> #include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/sed-opal.h> #include <linux/pci-p2pdma.h> @@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) return true; } -static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) +static void nvme_free_prps(struct nvme_dev *dev, struct request *req) { - struct nvme_iod *iod = blk_mq_rq_to_pdu(req); const int last_prp = 
NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; - dma_addr_t dma_addr = iod->first_dma, next_dma_addr; + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + dma_addr_t dma_addr = iod->first_dma; int i; - if (iod->dma_len) { - dma_unmap_page(dev->dev, dma_addr, iod->dma_len, - rq_dma_dir(req)); - return; + for (i = 0; i < iod->npages; i++) { + __le64 *prp_list = nvme_pci_iod_list(req)[i]; + dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); + + dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); + dma_addr = next_dma_addr; } - WARN_ON_ONCE(!iod->nents); +} - if (is_pci_p2pdma_page(sg_page(iod->sg))) - pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents, - rq_dma_dir(req)); - else - dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); +static void nvme_free_sgls(struct nvme_dev *dev, struct request *req) +{ + const int last_sg = SGES_PER_PAGE - 1; + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + dma_addr_t dma_addr = iod->first_dma; + int i; + for (i = 0; i < iod->npages; i++) { + struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i]; + dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr); - if (iod->npages == 0) - dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], - dma_addr); + dma_pool_free(dev->prp_page_pool, sg_list, dma_addr); + dma_addr = next_dma_addr; + } - for (i = 0; i < iod->npages; i++) { - void *addr = nvme_pci_iod_list(req)[i]; +} - if (iod->use_sgl) { - struct nvme_sgl_desc *sg_list = addr; +static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - next_dma_addr = - le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr); - } else { - __le64 *prp_list = addr; + if (is_pci_p2pdma_page(sg_page(iod->sg))) + pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents, + rq_dma_dir(req)); + else + dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); +} - next_dma_addr = le64_to_cpu(prp_list[last_prp]); - } +static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - dma_pool_free(dev->prp_page_pool, addr, dma_addr); - dma_addr = next_dma_addr; + if (iod->dma_len) { + dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, + rq_dma_dir(req)); + return; } + WARN_ON_ONCE(!iod->nents); + + nvme_unmap_sg(dev, req); + if (iod->npages == 0) + dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], + iod->first_dma); + else if (iod->use_sgl) + nvme_free_sgls(dev, req); + else + nvme_free_prps(dev, req); mempool_free(iod->sg, dev->iod_mempool); } @@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, __le64 *old_prp_list = prp_list; prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); if (!prp_list) - return BLK_STS_RESOURCE; + goto free_prps; list[iod->npages++] = prp_list; prp_list[0] = old_prp_list[i - 1]; old_prp_list[i - 1] = cpu_to_le64(prp_dma); @@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, dma_addr = sg_dma_address(sg); dma_len = sg_dma_len(sg); } - done: cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); - return BLK_STS_OK; - - bad_sgl: +free_prps: + nvme_free_prps(dev, req); + return BLK_STS_RESOURCE; +bad_sgl: WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents), "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req), iod->nents); @@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); if (!sg_list) - return 
BLK_STS_RESOURCE; + goto free_sgls; i = 0; nvme_pci_iod_list(req)[iod->npages++] = sg_list; @@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, } while (--entries > 0); return BLK_STS_OK; +free_sgls: + nvme_free_sgls(dev, req); + return BLK_STS_RESOURCE; } static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, @@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); iod->nents = blk_rq_map_sg(req->q, req, iod->sg); if (!iod->nents) - goto out; + goto out_free_sg; if (is_pci_p2pdma_page(sg_page(iod->sg))) nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg, @@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN); if (!nr_mapped) - goto out; + goto out_free_sg; iod->use_sgl = nvme_pci_use_sgls(dev, req); if (iod->use_sgl) ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped); else ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); -out: if (ret != BLK_STS_OK) - nvme_unmap_data(dev, req); + goto out_unmap_sg; + return BLK_STS_OK; + +out_unmap_sg: + nvme_unmap_sg(dev, req); +out_free_sg: + mempool_free(iod->sg, dev->iod_mempool); return ret; } @@ -967,6 +997,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) { struct nvme_completion *cqe = &nvmeq->cqes[idx]; + __u16 command_id = READ_ONCE(cqe->command_id); struct request *req; /* @@ -975,17 +1006,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) * aborts. We don't even bother to allocate a struct request * for them but rather special case them here. */ - if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) { + if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { nvme_complete_async_event(&nvmeq->dev->ctrl, cqe->status, &cqe->result); return; } - req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); + req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id); if (unlikely(!req)) { dev_warn(nvmeq->dev->ctrl.device, "invalid id %d completed on queue %d\n", - cqe->command_id, le16_to_cpu(cqe->sq_id)); + command_id, le16_to_cpu(cqe->sq_id)); return; } @@ -1326,7 +1357,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) } abort_req->end_io_data = NULL; - blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio); + blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio); /* * The aborted req will be completed on receiving the abort req. @@ -1794,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev) if (dev->cmb_size) return; + if (NVME_CAP_CMBS(dev->ctrl.cap)) + writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); + dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); if (!dev->cmbsz) return; @@ -1808,6 +1842,16 @@ static void nvme_map_cmb(struct nvme_dev *dev) return; /* + * Tell the controller about the host side address mapping the CMB, + * and enable CMB decoding for the NVMe 1.4+ scheme: + */ + if (NVME_CAP_CMBS(dev->ctrl.cap)) { + hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | + (pci_bus_address(pdev, bar) + offset), + dev->bar + NVME_REG_CMBMSC); + } + + /* * Controllers may support a CMB size larger than their BAR, * for example, due to being behind a bridge. 
Reduce the CMB to * the reported size of the BAR @@ -2237,7 +2281,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) req->end_io_data = nvmeq; init_completion(&nvmeq->delete_done); - blk_execute_rq_nowait(q, NULL, req, false, + blk_execute_rq_nowait(NULL, req, false, opcode == nvme_admin_delete_cq ? nvme_del_cq_end : nvme_del_queue_end); return 0; @@ -2318,13 +2362,16 @@ static int nvme_pci_enable(struct nvme_dev *dev) { int result = -ENOMEM; struct pci_dev *pdev = to_pci_dev(dev->dev); + int dma_address_bits = 64; if (pci_enable_device_mem(pdev)) return result; pci_set_master(pdev); - if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64))) + if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) + dma_address_bits = 48; + if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits))) goto disable; if (readl(dev->bar + NVME_REG_CSTS) == -1) { @@ -3196,7 +3243,10 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ - .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */ .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ @@ -3212,6 +3262,22 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), .driver_data = NVME_QUIRK_SINGLE_VECTOR }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index cf6c49d09c82..53ac4d7442ba 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -97,6 +97,7 @@ struct nvme_rdma_queue { struct completion cm_done; bool pi_support; int cq_size; + struct mutex queue_lock; }; struct nvme_rdma_ctrl { @@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, int ret; queue = &ctrl->queues[idx]; + mutex_init(&queue->queue_lock); queue->ctrl = ctrl; if (idx && ctrl->ctrl.max_integrity_segments) queue->pi_support = true; @@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, if (IS_ERR(queue->cm_id)) { dev_info(ctrl->ctrl.device, "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); - return PTR_ERR(queue->cm_id); + ret = PTR_ERR(queue->cm_id); + goto out_destroy_mutex; } if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) @@ 
-628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, out_destroy_cm_id: rdma_destroy_id(queue->cm_id); nvme_rdma_destroy_queue_ib(queue); +out_destroy_mutex: + mutex_destroy(&queue->queue_lock); return ret; } @@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) { - if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) - return; - __nvme_rdma_stop_queue(queue); + mutex_lock(&queue->queue_lock); + if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) + __nvme_rdma_stop_queue(queue); + mutex_unlock(&queue->queue_lock); } static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) @@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) nvme_rdma_destroy_queue_ib(queue); rdma_destroy_id(queue->cm_id); + mutex_destroy(&queue->queue_lock); } static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) @@ -912,12 +919,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, error = nvme_init_identify(&ctrl->ctrl); if (error) - goto out_stop_queue; + goto out_quiesce_queue; return 0; +out_quiesce_queue: + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + blk_sync_queue(ctrl->ctrl.admin_q); out_stop_queue: nvme_rdma_stop_queue(&ctrl->queues[0]); + nvme_cancel_admin_tagset(&ctrl->ctrl); out_cleanup_queue: if (new) blk_cleanup_queue(ctrl->ctrl.admin_q); @@ -994,8 +1005,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) out_wait_freeze_timed_out: nvme_stop_queues(&ctrl->ctrl); + nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); out_cleanup_connect_q: + nvme_cancel_tagset(&ctrl->ctrl); if (new) blk_cleanup_queue(ctrl->ctrl.connect_q); out_free_tag_set: @@ -1012,11 +1025,7 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, blk_mq_quiesce_queue(ctrl->ctrl.admin_q); blk_sync_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); - if (ctrl->ctrl.admin_tagset) { - blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset, - nvme_cancel_request, &ctrl->ctrl); - blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset); - } + nvme_cancel_admin_tagset(&ctrl->ctrl); if (remove) blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_destroy_admin_queue(ctrl, remove); @@ -1030,11 +1039,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, nvme_stop_queues(&ctrl->ctrl); nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); - if (ctrl->ctrl.tagset) { - blk_mq_tagset_busy_iter(ctrl->ctrl.tagset, - nvme_cancel_request, &ctrl->ctrl); - blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset); - } + nvme_cancel_tagset(&ctrl->ctrl); if (remove) nvme_start_queues(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, remove); @@ -1137,10 +1142,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) return 0; destroy_io: - if (ctrl->ctrl.queue_count > 1) + if (ctrl->ctrl.queue_count > 1) { + nvme_stop_queues(&ctrl->ctrl); + nvme_sync_io_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); + nvme_cancel_tagset(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, new); + } destroy_admin: + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + blk_sync_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); + nvme_cancel_admin_tagset(&ctrl->ctrl); nvme_rdma_destroy_admin_queue(ctrl, new); return ret; } @@ -1461,7 +1474,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, if (unlikely(nr)) goto mr_put; - 
nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c, + nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c, req->mr->sig_attrs, ns->pi_type); nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask); @@ -2085,7 +2098,9 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, err_unmap: nvme_rdma_unmap_data(queue, rq); err: - if (err == -ENOMEM || err == -EAGAIN) + if (err == -EIO) + ret = nvme_host_path_error(rq); + else if (err == -ENOMEM || err == -EAGAIN) ret = BLK_STS_RESOURCE; else ret = BLK_STS_IOERR; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 1ba659927442..69f59d2c5799 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -76,6 +76,7 @@ struct nvme_tcp_queue { struct work_struct io_work; int io_cpu; + struct mutex queue_lock; struct mutex send_mutex; struct llist_head req_list; struct list_head send_list; @@ -201,15 +202,10 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req) static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req) { - return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset, + return min_t(size_t, iov_iter_single_seg_count(&req->iter), req->pdu_len - req->pdu_sent); } -static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req) -{ - return req->iter.iov_offset; -} - static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req) { return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ? @@ -228,24 +224,29 @@ static void nvme_tcp_init_iter(struct nvme_tcp_request *req, struct request *rq = blk_mq_rq_from_pdu(req); struct bio_vec *vec; unsigned int size; - int nsegs; + int nr_bvec; size_t offset; if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { vec = &rq->special_vec; - nsegs = 1; + nr_bvec = 1; size = blk_rq_payload_bytes(rq); offset = 0; } else { struct bio *bio = req->curr_bio; + struct bvec_iter bi; + struct bio_vec bv; vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); - nsegs = bio_segments(bio); + nr_bvec = 0; + bio_for_each_bvec(bv, bio, bi) { + nr_bvec++; + } size = bio->bi_iter.bi_size; offset = bio->bi_iter.bi_bvec_done; } - iov_iter_bvec(&req->iter, dir, vec, nsegs, size); + iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size); req->iter.iov_offset = offset; } @@ -262,6 +263,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req, } } +static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) +{ + int ret; + + /* drain the send queue as much as we can... */ + do { + ret = nvme_tcp_try_send(queue); + } while (ret > 0); +} + static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, bool sync, bool last) { @@ -276,10 +287,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, * directly, otherwise queue io_work. Also, only do that if we * are on the same cpu, so we don't introduce contention. 
*/ - if (queue->io_cpu == smp_processor_id() && + if (queue->io_cpu == __smp_processor_id() && sync && empty && mutex_trylock(&queue->send_mutex)) { queue->more_requests = !last; - nvme_tcp_try_send(queue); + nvme_tcp_send_all(queue); queue->more_requests = false; mutex_unlock(&queue->send_mutex); } else if (last) { @@ -972,7 +983,6 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) req->state = NVME_TCP_SEND_DATA; if (queue->data_digest) crypto_ahash_init(queue->snd_hash); - nvme_tcp_init_iter(req, WRITE); } else { nvme_tcp_done_send_req(queue); } @@ -1005,8 +1015,6 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) req->state = NVME_TCP_SEND_DATA; if (queue->data_digest) crypto_ahash_init(queue->snd_hash); - if (!req->data_sent) - nvme_tcp_init_iter(req, WRITE); return 1; } req->offset += ret; @@ -1209,6 +1217,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) sock_release(queue->sock); kfree(queue->pdu); + mutex_destroy(&queue->queue_lock); } static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) @@ -1370,6 +1379,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, struct nvme_tcp_queue *queue = &ctrl->queues[qid]; int ret, rcv_pdu_size; + mutex_init(&queue->queue_lock); queue->ctrl = ctrl; init_llist_head(&queue->req_list); INIT_LIST_HEAD(&queue->send_list); @@ -1388,7 +1398,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, if (ret) { dev_err(nctrl->device, "failed to create socket: %d\n", ret); - return ret; + goto err_destroy_mutex; } /* Single syn retry */ @@ -1497,6 +1507,8 @@ err_crypto: err_sock: sock_release(queue->sock); queue->sock = NULL; +err_destroy_mutex: + mutex_destroy(&queue->queue_lock); return ret; } @@ -1524,9 +1536,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; - if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) - return; - __nvme_tcp_stop_queue(queue); + mutex_lock(&queue->queue_lock); + if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) + __nvme_tcp_stop_queue(queue); + mutex_unlock(&queue->queue_lock); } static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx) @@ -1799,8 +1812,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) out_wait_freeze_timed_out: nvme_stop_queues(ctrl); + nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); out_cleanup_connect_q: + nvme_cancel_tagset(ctrl); if (new) blk_cleanup_queue(ctrl->connect_q); out_free_tag_set: @@ -1862,12 +1877,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) error = nvme_init_identify(ctrl); if (error) - goto out_stop_queue; + goto out_quiesce_queue; return 0; +out_quiesce_queue: + blk_mq_quiesce_queue(ctrl->admin_q); + blk_sync_queue(ctrl->admin_q); out_stop_queue: nvme_tcp_stop_queue(ctrl, 0); + nvme_cancel_admin_tagset(ctrl); out_cleanup_queue: if (new) blk_cleanup_queue(ctrl->admin_q); @@ -1888,11 +1907,7 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, blk_mq_quiesce_queue(ctrl->admin_q); blk_sync_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); - if (ctrl->admin_tagset) { - blk_mq_tagset_busy_iter(ctrl->admin_tagset, - nvme_cancel_request, ctrl); - blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); - } + nvme_cancel_admin_tagset(ctrl); if (remove) blk_mq_unquiesce_queue(ctrl->admin_q); nvme_tcp_destroy_admin_queue(ctrl, remove); @@ -1908,11 +1923,7 @@ static void 
nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, nvme_stop_queues(ctrl); nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); - if (ctrl->tagset) { - blk_mq_tagset_busy_iter(ctrl->tagset, - nvme_cancel_request, ctrl); - blk_mq_tagset_wait_completed_request(ctrl->tagset); - } + nvme_cancel_tagset(ctrl); if (remove) nvme_start_queues(ctrl); nvme_tcp_destroy_io_queues(ctrl, remove); @@ -1987,10 +1998,18 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) return 0; destroy_io: - if (ctrl->queue_count > 1) + if (ctrl->queue_count > 1) { + nvme_stop_queues(ctrl); + nvme_sync_io_queues(ctrl); + nvme_tcp_stop_io_queues(ctrl); + nvme_cancel_tagset(ctrl); nvme_tcp_destroy_io_queues(ctrl, new); + } destroy_admin: + blk_mq_quiesce_queue(ctrl->admin_q); + blk_sync_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); + nvme_cancel_admin_tagset(ctrl); nvme_tcp_destroy_admin_queue(ctrl, new); return ret; } @@ -2252,12 +2271,12 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, req->data_len = blk_rq_nr_phys_segments(rq) ? blk_rq_payload_bytes(rq) : 0; req->curr_bio = rq->bio; + if (req->curr_bio && req->data_len) + nvme_tcp_init_iter(req, rq_data_dir(rq)); if (rq_data_dir(rq) == WRITE && req->data_len <= nvme_tcp_inline_data_size(queue)) req->pdu_len = req->data_len; - else if (req->curr_bio) - nvme_tcp_init_iter(req, READ); pdu->hdr.type = nvme_tcp_cmd; pdu->hdr.flags = 0; diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c index 5c3cb6928f3c..6543015b6121 100644 --- a/drivers/nvme/host/trace.c +++ b/drivers/nvme/host/trace.c @@ -102,6 +102,23 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p, return ret; } +static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 lbaf = cdw10[0] & 0xF; + u8 mset = (cdw10[0] >> 4) & 0x1; + u8 pi = (cdw10[0] >> 5) & 0x7; + u8 pil = cdw10[1] & 0x1; + u8 ses = (cdw10[1] >> 1) & 0x7; + + trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u", + lbaf, mset, pi, pil, ses); + + trace_seq_putc(p, 0); + + return ret; +} + static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); @@ -131,6 +148,35 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10) return ret; } +static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u64 slba = get_unaligned_le64(cdw10); + u8 zsa = cdw10[12]; + u8 all = cdw10[13]; + + trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u64 slba = get_unaligned_le64(cdw10); + u32 numd = get_unaligned_le32(cdw10 + 8); + u8 zra = cdw10[12]; + u8 zrasf = cdw10[13]; + u8 pr = cdw10[14]; + + trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u", + slba, numd, zra, zrasf, pr); + trace_seq_putc(p, 0); + + return ret; +} + static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); @@ -159,6 +205,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, return nvme_trace_admin_get_features(p, cdw10); case nvme_admin_get_lba_status: return nvme_trace_get_lba_status(p, cdw10); + case nvme_admin_format_nvm: + return nvme_trace_admin_format_nvm(p, cdw10); default: return nvme_trace_common(p, cdw10); } @@ -171,9 +219,14 @@ 
const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, case nvme_cmd_read: case nvme_cmd_write: case nvme_cmd_write_zeroes: + case nvme_cmd_zone_append: return nvme_trace_read_write(p, cdw10); case nvme_cmd_dsm: return nvme_trace_dsm(p, cdw10); + case nvme_cmd_zone_mgmt_send: + return nvme_trace_zone_mgmt_send(p, cdw10); + case nvme_cmd_zone_mgmt_recv: + return nvme_trace_zone_mgmt_recv(p, cdw10); default: return nvme_trace_common(p, cdw10); } diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 1dfe9a3500e3..c7e3ec561ba0 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -9,13 +9,7 @@ int nvme_revalidate_zones(struct nvme_ns *ns) { - struct request_queue *q = ns->queue; - int ret; - - ret = blk_revalidate_disk_zones(ns->disk, NULL); - if (!ret) - blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append); - return ret; + return blk_revalidate_disk_zones(ns->disk, NULL); } static int nvme_set_max_append(struct nvme_ctrl *ctrl) @@ -109,10 +103,11 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) goto free_data; } - q->limits.zoned = BLK_ZONED_HM; + blk_queue_set_zoned(ns->disk, BLK_ZONED_HM); blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1); blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1); + blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append); free_data: kfree(id); return status; diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 8d90235e4fcc..bc6a774f2124 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -74,34 +74,28 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req) static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, struct nvme_smart_log *slog) { - struct nvmet_ns *ns; u64 host_reads, host_writes, data_units_read, data_units_written; + u16 status; - ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid); - if (!ns) { - pr_err("Could not find namespace id : %d\n", - le32_to_cpu(req->cmd->get_log_page.nsid)); - req->error_loc = offsetof(struct nvme_rw_command, nsid); - return NVME_SC_INVALID_NS; - } + status = nvmet_req_find_ns(req); + if (status) + return status; /* we don't have the right data for file backed ns */ - if (!ns->bdev) - goto out; + if (!req->ns->bdev) + return NVME_SC_SUCCESS; - host_reads = part_stat_read(ns->bdev, ios[READ]); + host_reads = part_stat_read(req->ns->bdev, ios[READ]); data_units_read = - DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000); - host_writes = part_stat_read(ns->bdev, ios[WRITE]); + DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000); + host_writes = part_stat_read(req->ns->bdev, ios[WRITE]); data_units_written = - DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000); + DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000); put_unaligned_le64(host_reads, &slog->host_reads[0]); put_unaligned_le64(data_units_read, &slog->data_units_read[0]); put_unaligned_le64(host_writes, &slog->host_writes[0]); put_unaligned_le64(data_units_written, &slog->data_units_written[0]); -out: - nvmet_put_namespace(ns); return NVME_SC_SUCCESS; } @@ -468,10 +462,8 @@ out: static void nvmet_execute_identify_ns(struct nvmet_req *req) { - struct nvmet_ctrl *ctrl = req->sq->ctrl; - struct nvmet_ns *ns; struct nvme_id_ns *id; - u16 status = 0; + u16 status; if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { req->error_loc = offsetof(struct nvme_identify, nsid); @@ -486,18 +478,21 @@ 
static void nvmet_execute_identify_ns(struct nvmet_req *req) } /* return an all zeroed buffer if we can't find an active namespace */ - ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid); - if (!ns) + status = nvmet_req_find_ns(req); + if (status) { + status = 0; goto done; + } - nvmet_ns_revalidate(ns); + nvmet_ns_revalidate(req->ns); /* * nuse = ncap = nsze isn't always true, but we have no way to find * that out from the underlying device. */ - id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift); - switch (req->port->ana_state[ns->anagrpid]) { + id->ncap = id->nsze = + cpu_to_le64(req->ns->size >> req->ns->blksize_shift); + switch (req->port->ana_state[req->ns->anagrpid]) { case NVME_ANA_INACCESSIBLE: case NVME_ANA_PERSISTENT_LOSS: break; @@ -506,8 +501,8 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) break; } - if (ns->bdev) - nvmet_bdev_set_limits(ns->bdev, id); + if (req->ns->bdev) + nvmet_bdev_set_limits(req->ns->bdev, id); /* * We just provide a single LBA format that matches what the @@ -521,27 +516,28 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) * controllers, but also with any other user of the block device. */ id->nmic = (1 << 0); - id->anagrpid = cpu_to_le32(ns->anagrpid); + id->anagrpid = cpu_to_le32(req->ns->anagrpid); - memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid)); + memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid)); - id->lbaf[0].ds = ns->blksize_shift; + id->lbaf[0].ds = req->ns->blksize_shift; - if (ctrl->pi_support && nvmet_ns_has_pi(ns)) { + if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) { id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST | NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 | NVME_NS_DPC_PI_TYPE3; id->mc = NVME_MC_EXTENDED_LBA; - id->dps = ns->pi_type; + id->dps = req->ns->pi_type; id->flbas = NVME_NS_FLBAS_META_EXT; - id->lbaf[0].ms = cpu_to_le16(ns->metadata_size); + id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size); } - if (ns->readonly) + if (req->ns->readonly) id->nsattr |= (1 << 0); - nvmet_put_namespace(ns); done: - status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); + if (!status) + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); + kfree(id); out: nvmet_req_complete(req, status); @@ -603,37 +599,32 @@ static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len, static void nvmet_execute_identify_desclist(struct nvmet_req *req) { - struct nvmet_ns *ns; - u16 status = 0; off_t off = 0; + u16 status; - ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid); - if (!ns) { - req->error_loc = offsetof(struct nvme_identify, nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = nvmet_req_find_ns(req); + if (status) goto out; - } - if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) { + if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) { status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID, NVME_NIDT_UUID_LEN, - &ns->uuid, &off); + &req->ns->uuid, &off); if (status) - goto out_put_ns; + goto out; } - if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) { + if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) { status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID, NVME_NIDT_NGUID_LEN, - &ns->nguid, &off); + &req->ns->nguid, &off); if (status) - goto out_put_ns; + goto out; } if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, off) != NVME_IDENTIFY_DATA_SIZE - off) status = NVME_SC_INTERNAL | NVME_SC_DNR; -out_put_ns: - nvmet_put_namespace(ns); + out: nvmet_req_complete(req, status); } @@ -692,14 +683,12 @@ 
static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req) static u16 nvmet_set_feat_write_protect(struct nvmet_req *req) { u32 write_protect = le32_to_cpu(req->cmd->common.cdw11); - struct nvmet_subsys *subsys = req->sq->ctrl->subsys; - u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE; + struct nvmet_subsys *subsys = nvmet_req_subsys(req); + u16 status; - req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid); - if (unlikely(!req->ns)) { - req->error_loc = offsetof(struct nvme_common_command, nsid); + status = nvmet_req_find_ns(req); + if (status) return status; - } mutex_lock(&subsys->lock); switch (write_protect) { @@ -753,7 +742,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) void nvmet_execute_set_features(struct nvmet_req *req) { - struct nvmet_subsys *subsys = req->sq->ctrl->subsys; + struct nvmet_subsys *subsys = nvmet_req_subsys(req); u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); u16 status = 0; @@ -797,14 +786,13 @@ void nvmet_execute_set_features(struct nvmet_req *req) static u16 nvmet_get_feat_write_protect(struct nvmet_req *req) { - struct nvmet_subsys *subsys = req->sq->ctrl->subsys; + struct nvmet_subsys *subsys = nvmet_req_subsys(req); u32 result; - req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid); - if (!req->ns) { - req->error_loc = offsetof(struct nvme_common_command, nsid); - return NVME_SC_INVALID_NS | NVME_SC_DNR; - } + result = nvmet_req_find_ns(req); + if (result) + return result; + mutex_lock(&subsys->lock); if (req->ns->readonly == true) result = NVME_NS_WRITE_PROTECT; @@ -828,7 +816,7 @@ void nvmet_get_feat_async_event(struct nvmet_req *req) void nvmet_execute_get_features(struct nvmet_req *req) { - struct nvmet_subsys *subsys = req->sq->ctrl->subsys; + struct nvmet_subsys *subsys = nvmet_req_subsys(req); u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 status = 0; @@ -935,7 +923,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req) if (nvme_is_fabrics(cmd)) return nvmet_parse_fabrics_cmd(req); - if (req->sq->ctrl->subsys->type == NVME_NQN_DISC) + if (nvmet_req_subsys(req)->type == NVME_NQN_DISC) return nvmet_parse_discovery_cmd(req); ret = nvmet_check_ctrl_status(req, cmd); diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index c61ffd767062..635a7cb45d0b 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -45,7 +45,7 @@ static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller) { if (p->enabled) pr_err("Disable port '%u' before changing attribute in %s\n", - le16_to_cpu(p->disc_addr.portid), caller); + le16_to_cpu(p->disc_addr.portid), caller); return p->enabled; } @@ -266,10 +266,8 @@ static ssize_t nvmet_param_pi_enable_store(struct config_item *item, if (strtobool(page, &val)) return -EINVAL; - if (port->enabled) { - pr_err("Disable port before setting pi_enable value.\n"); + if (nvmet_is_port_enabled(port, __func__)) return -EACCES; - } port->pi_enable = val; return count; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 8ce4d59cc9e7..67bbf0e3b507 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -82,6 +82,15 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) return status; } +u16 nvmet_report_invalid_opcode(struct nvmet_req *req) +{ + pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode, + req->sq->qid); + + req->error_loc = offsetof(struct nvme_common_command, opcode); + return 
NVME_SC_INVALID_OPCODE | NVME_SC_DNR; +} + static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, const char *subsysnqn); @@ -417,15 +426,18 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) cancel_delayed_work_sync(&ctrl->ka_work); } -struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid) +u16 nvmet_req_find_ns(struct nvmet_req *req) { - struct nvmet_ns *ns; + u32 nsid = le32_to_cpu(req->cmd->common.nsid); - ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid)); - if (ns) - percpu_ref_get(&ns->ref); + req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid); + if (unlikely(!req->ns)) { + req->error_loc = offsetof(struct nvme_common_command, nsid); + return NVME_SC_INVALID_NS | NVME_SC_DNR; + } - return ns; + percpu_ref_get(&req->ns->ref); + return NVME_SC_SUCCESS; } static void nvmet_destroy_namespace(struct percpu_ref *ref) @@ -862,11 +874,10 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) if (nvmet_req_passthru_ctrl(req)) return nvmet_parse_passthru_io_cmd(req); - req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid); - if (unlikely(!req->ns)) { - req->error_loc = offsetof(struct nvme_common_command, nsid); - return NVME_SC_INVALID_NS | NVME_SC_DNR; - } + ret = nvmet_req_find_ns(req); + if (unlikely(ret)) + return ret; + ret = nvmet_check_ana_state(req->port, req->ns); if (unlikely(ret)) { req->error_loc = offsetof(struct nvme_common_command, nsid); @@ -880,8 +891,8 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) if (req->ns->file) return nvmet_file_parse_io_cmd(req); - else - return nvmet_bdev_parse_io_cmd(req); + + return nvmet_bdev_parse_io_cmd(req); } bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index cd4e73aa9807..d375745fc4ed 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -145,6 +145,7 @@ struct nvmet_fc_tgt_queue { struct list_head avail_defer_list; struct workqueue_struct *work_q; struct kref ref; + struct rcu_head rcu; struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ } __aligned(sizeof(unsigned long long)); @@ -164,9 +165,10 @@ struct nvmet_fc_tgt_assoc { struct nvmet_fc_hostport *hostport; struct nvmet_fc_ls_iod *rcv_disconn; struct list_head a_list; - struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; + struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1]; struct kref ref; struct work_struct del_work; + struct rcu_head rcu; }; @@ -790,7 +792,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, u16 qid, u16 sqsize) { struct nvmet_fc_tgt_queue *queue; - unsigned long flags; int ret; if (qid > NVMET_NR_QUEUES) @@ -829,9 +830,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, goto out_fail_iodlist; WARN_ON(assoc->queues[qid]); - spin_lock_irqsave(&assoc->tgtport->lock, flags); - assoc->queues[qid] = queue; - spin_unlock_irqrestore(&assoc->tgtport->lock, flags); + rcu_assign_pointer(assoc->queues[qid], queue); return queue; @@ -851,11 +850,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref) { struct nvmet_fc_tgt_queue *queue = container_of(ref, struct nvmet_fc_tgt_queue, ref); - unsigned long flags; - spin_lock_irqsave(&queue->assoc->tgtport->lock, flags); - queue->assoc->queues[queue->qid] = NULL; - spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags); + rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL); nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); @@ -863,7 +859,7 @@ nvmet_fc_tgt_queue_free(struct kref *ref) 
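/*
 * Editor's sketch (illustrative names only): the fc.c hunks here replace the
 * tgtport lock around the per-association queue slots with RCU.  Writers
 * publish with rcu_assign_pointer(), readers look up under rcu_read_lock()
 * with rcu_dereference(), and retired objects are freed with kfree_rcu() so
 * they outlive any reader still inside a grace period.
 */
struct example_obj {
	int payload;
	struct rcu_head rcu;
};

static struct example_obj __rcu *example_slot;
static DEFINE_SPINLOCK(example_slot_lock);	/* writers still serialize */

static void example_publish(struct example_obj *obj)
{
	spin_lock(&example_slot_lock);
	rcu_assign_pointer(example_slot, obj);
	spin_unlock(&example_slot_lock);
}

static int example_read_payload(void)
{
	struct example_obj *obj;
	int val = -1;

	rcu_read_lock();
	obj = rcu_dereference(example_slot);
	if (obj)
		val = obj->payload;		/* valid until rcu_read_unlock() */
	rcu_read_unlock();
	return val;
}

static void example_retire(void)
{
	struct example_obj *obj;

	spin_lock(&example_slot_lock);
	obj = rcu_dereference_protected(example_slot,
					lockdep_is_held(&example_slot_lock));
	rcu_assign_pointer(example_slot, NULL);
	spin_unlock(&example_slot_lock);
	if (obj)
		kfree_rcu(obj, rcu);		/* freed after a grace period */
}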
destroy_workqueue(queue->work_q); - kfree(queue); + kfree_rcu(queue, rcu); } static void @@ -965,24 +961,23 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_tgt_queue *queue; u64 association_id = nvmet_fc_getassociationid(connection_id); u16 qid = nvmet_fc_getqueueid(connection_id); - unsigned long flags; if (qid > NVMET_NR_QUEUES) return NULL; - spin_lock_irqsave(&tgtport->lock, flags); - list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { + rcu_read_lock(); + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { - queue = assoc->queues[qid]; + queue = rcu_dereference(assoc->queues[qid]); if (queue && (!atomic_read(&queue->connected) || !nvmet_fc_tgt_q_get(queue))) queue = NULL; - spin_unlock_irqrestore(&tgtport->lock, flags); + rcu_read_unlock(); return queue; } } - spin_unlock_irqrestore(&tgtport->lock, flags); + rcu_read_unlock(); return NULL; } @@ -1137,7 +1132,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) } if (!needrandom) { assoc->association_id = ran; - list_add_tail(&assoc->a_list, &tgtport->assoc_list); + list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); } spin_unlock_irqrestore(&tgtport->lock, flags); } @@ -1167,7 +1162,7 @@ nvmet_fc_target_assoc_free(struct kref *ref) nvmet_fc_free_hostport(assoc->hostport); spin_lock_irqsave(&tgtport->lock, flags); - list_del(&assoc->a_list); + list_del_rcu(&assoc->a_list); oldls = assoc->rcv_disconn; spin_unlock_irqrestore(&tgtport->lock, flags); /* if pending Rcv Disconnect Association LS, send rsp now */ @@ -1177,7 +1172,7 @@ nvmet_fc_target_assoc_free(struct kref *ref) dev_info(tgtport->dev, "{%d:%d} Association freed\n", tgtport->fc_target_port.port_num, assoc->a_id); - kfree(assoc); + kfree_rcu(assoc, rcu); nvmet_fc_tgtport_put(tgtport); } @@ -1198,7 +1193,6 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) { struct nvmet_fc_tgtport *tgtport = assoc->tgtport; struct nvmet_fc_tgt_queue *queue; - unsigned long flags; int i, terminating; terminating = atomic_xchg(&assoc->terminating, 1); @@ -1207,19 +1201,23 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) if (terminating) return; - spin_lock_irqsave(&tgtport->lock, flags); + for (i = NVMET_NR_QUEUES; i >= 0; i--) { - queue = assoc->queues[i]; - if (queue) { - if (!nvmet_fc_tgt_q_get(queue)) - continue; - spin_unlock_irqrestore(&tgtport->lock, flags); - nvmet_fc_delete_target_queue(queue); - nvmet_fc_tgt_q_put(queue); - spin_lock_irqsave(&tgtport->lock, flags); + rcu_read_lock(); + queue = rcu_dereference(assoc->queues[i]); + if (!queue) { + rcu_read_unlock(); + continue; + } + + if (!nvmet_fc_tgt_q_get(queue)) { + rcu_read_unlock(); + continue; } + rcu_read_unlock(); + nvmet_fc_delete_target_queue(queue); + nvmet_fc_tgt_q_put(queue); } - spin_unlock_irqrestore(&tgtport->lock, flags); dev_info(tgtport->dev, "{%d:%d} Association deleted\n", @@ -1234,10 +1232,9 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, { struct nvmet_fc_tgt_assoc *assoc; struct nvmet_fc_tgt_assoc *ret = NULL; - unsigned long flags; - spin_lock_irqsave(&tgtport->lock, flags); - list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { + rcu_read_lock(); + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { ret = assoc; if (!nvmet_fc_tgt_a_get(assoc)) @@ -1245,7 +1242,7 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, break; } } - 
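/*
 * Editor's sketch (made-up names): the assoc_list conversion above is the
 * standard RCU list recipe.  Writers keep a lock for list_add_tail_rcu() and
 * list_del_rcu(); lookups run under rcu_read_lock() and must take their
 * reference before leaving the critical section, which is what the
 * nvmet_fc_tgt_a_get() calls above do.
 */
struct example_entry {
	u64 key;
	struct kref ref;
	struct list_head node;
	struct rcu_head rcu;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_list_lock);	/* serializes writers only */

static void example_add(struct example_entry *e)
{
	spin_lock(&example_list_lock);
	list_add_tail_rcu(&e->node, &example_list);
	spin_unlock(&example_list_lock);
}

static struct example_entry *example_find_get(u64 key)
{
	struct example_entry *e, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &example_list, node) {
		if (e->key == key && kref_get_unless_zero(&e->ref)) {
			found = e;		/* reference taken under RCU */
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void example_free(struct kref *ref)
{
	struct example_entry *e = container_of(ref, struct example_entry, ref);

	spin_lock(&example_list_lock);
	list_del_rcu(&e->node);
	spin_unlock(&example_list_lock);
	kfree_rcu(e, rcu);
}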
spin_unlock_irqrestore(&tgtport->lock, flags); + rcu_read_unlock(); return ret; } @@ -1473,19 +1470,17 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) static void __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) { - struct nvmet_fc_tgt_assoc *assoc, *next; - unsigned long flags; + struct nvmet_fc_tgt_assoc *assoc; - spin_lock_irqsave(&tgtport->lock, flags); - list_for_each_entry_safe(assoc, next, - &tgtport->assoc_list, a_list) { + rcu_read_lock(); + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (!nvmet_fc_tgt_a_get(assoc)) continue; if (!schedule_work(&assoc->del_work)) /* already deleting - release local reference */ nvmet_fc_tgt_a_put(assoc); } - spin_unlock_irqrestore(&tgtport->lock, flags); + rcu_read_unlock(); } /** @@ -1568,16 +1563,16 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) continue; spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); - spin_lock_irqsave(&tgtport->lock, flags); - list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { - queue = assoc->queues[0]; + rcu_read_lock(); + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { + queue = rcu_dereference(assoc->queues[0]); if (queue && queue->nvme_sq.ctrl == ctrl) { if (nvmet_fc_tgt_a_get(assoc)) found_ctrl = true; break; } } - spin_unlock_irqrestore(&tgtport->lock, flags); + rcu_read_unlock(); nvmet_fc_tgtport_put(tgtport); diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 733d9363900e..54606f1872b4 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -1501,7 +1501,8 @@ static ssize_t fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int opcode, starting, amount; + unsigned int opcode; + int starting, amount; if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3) return -EBADRQC; @@ -1544,7 +1545,7 @@ static struct attribute *fcloop_dev_attrs[] = { NULL }; -static struct attribute_group fclopp_dev_attrs_group = { +static const struct attribute_group fclopp_dev_attrs_group = { .attrs = fcloop_dev_attrs, }; @@ -1588,8 +1589,8 @@ out_destroy_class: static void __exit fcloop_exit(void) { - struct fcloop_lport *lport; - struct fcloop_nport *nport; + struct fcloop_lport *lport = NULL; + struct fcloop_nport *nport = NULL; struct fcloop_tport *tport; struct fcloop_rport *rport; unsigned long flags; diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 125dde3f410e..3d9a5d3ed9cd 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -256,8 +256,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) if (is_pci_p2pdma_page(sg_page(req->sg))) op |= REQ_NOMERGE; - sector = le64_to_cpu(req->cmd->rw.slba); - sector <<= (req->ns->blksize_shift - 9); + sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) { bio = &req->b.inline_bio; @@ -333,7 +332,7 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req) u16 nvmet_bdev_flush(struct nvmet_req *req) { - if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL)) + if (blkdev_issue_flush(req->ns->bdev)) return NVME_SC_INTERNAL | NVME_SC_DNR; return 0; } @@ -345,7 +344,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req, int ret; ret = __blkdev_issue_discard(ns->bdev, - le64_to_cpu(range->slba) << (ns->blksize_shift - 9), + nvmet_lba_to_sect(ns, range->slba), le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), GFP_KERNEL, 0, bio); if (ret && ret != -EOPNOTSUPP) { @@ -414,8 +413,7 @@ 
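/*
 * Editor's note: the open-coded "le64_to_cpu(slba) << (blksize_shift - 9)"
 * shifts above are folded into the nvmet_lba_to_sect()/nvmet_sect_to_lba()
 * helpers added to nvmet.h further down.  A worked example of the arithmetic
 * (the wrapper below is hypothetical; SECTOR_SHIFT is 9, i.e. 512-byte
 * block-layer sectors):
 */
static sector_t example_lba_to_sect(unsigned int blksize_shift, u64 lba)
{
	/* 4096-byte blocks: blksize_shift == 12, one LBA == 1 << 3 == 8 sectors */
	return (sector_t)lba << (blksize_shift - SECTOR_SHIFT);
}
/* example_lba_to_sect(12, 10) == 80: LBA 10 starts at 512-byte sector 80 */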
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req) if (!nvmet_check_transfer_len(req, 0)) return; - sector = le64_to_cpu(write_zeroes->slba) << - (req->ns->blksize_shift - 9); + sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba); nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) << (req->ns->blksize_shift - 9)); @@ -451,9 +449,6 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req) req->execute = nvmet_bdev_execute_write_zeroes; return 0; default: - pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, - req->sq->qid); - req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return nvmet_report_invalid_opcode(req); } } diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 0abbefd9925e..715d4376c997 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -400,9 +400,6 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req) req->execute = nvmet_file_execute_write_zeroes; return 0; default: - pr_err("unhandled cmd for file ns %d on qid %d\n", - cmd->common.opcode, req->sq->qid); - req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return nvmet_report_invalid_opcode(req); } } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 592763732065..cdfa537b1c0a 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -443,7 +443,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, void nvmet_subsys_put(struct nvmet_subsys *subsys); void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys); -struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid); +u16 nvmet_req_find_ns(struct nvmet_req *req); void nvmet_put_namespace(struct nvmet_ns *ns); int nvmet_ns_enable(struct nvmet_ns *ns); void nvmet_ns_disable(struct nvmet_ns *ns); @@ -551,6 +551,11 @@ static inline u32 nvmet_dsm_len(struct nvmet_req *req) sizeof(struct nvme_dsm_range); } +static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req) +{ + return req->sq->ctrl->subsys; +} + #ifdef CONFIG_NVME_TARGET_PASSTHRU void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys); int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys); @@ -585,10 +590,11 @@ static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys) static inline struct nvme_ctrl * nvmet_req_passthru_ctrl(struct nvmet_req *req) { - return nvmet_passthru_ctrl(req->sq->ctrl->subsys); + return nvmet_passthru_ctrl(nvmet_req_subsys(req)); } u16 errno_to_nvme_status(struct nvmet_req *req, int errno); +u16 nvmet_report_invalid_opcode(struct nvmet_req *req); /* Convert a 32-bit number to a 16-bit 0's based number */ static inline __le16 to0based(u32 a) @@ -603,4 +609,14 @@ static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns) return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple); } +static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect) +{ + return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT)); +} + +static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba) +{ + return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT); +} + #endif /* _NVMET_H */ diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index b9776fc8f08f..f50c7b2bf21c 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -239,9 +239,9 @@ static void 
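/*
 * Editor's sketch: the bdev and file parsers above (and the passthru parser
 * below) now route unknown opcodes through nvmet_report_invalid_opcode().
 * A minimal parse switch using that helper; the function name and opcode list
 * are illustrative, not the driver's actual table.
 */
static u16 example_parse_io_cmd(struct nvmet_req *req)
{
	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
	case nvme_cmd_flush:
		/* a real parser would also set req->execute here */
		return NVME_SC_SUCCESS;
	default:
		/* logs opcode/qid, sets req->error_loc and returns
		 * NVME_SC_INVALID_OPCODE | NVME_SC_DNR */
		return nvmet_report_invalid_opcode(req);
	}
}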
nvmet_passthru_execute_cmd(struct nvmet_req *req) } q = ns->queue; - timeout = req->sq->ctrl->subsys->io_timeout; + timeout = nvmet_req_subsys(req)->io_timeout; } else { - timeout = req->sq->ctrl->subsys->admin_timeout; + timeout = nvmet_req_subsys(req)->admin_timeout; } rq = nvme_alloc_request(q, req->cmd, 0); @@ -275,7 +275,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req) schedule_work(&req->p.work); } else { rq->end_io_data = req; - blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0, + blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0, nvmet_passthru_req_done); } @@ -494,7 +494,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) return nvmet_setup_passthru_command(req); default: /* Reject commands not in the allowlist above */ - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return nvmet_report_invalid_opcode(req); } } diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 5c1e7cb7fe0d..06b6b742bb21 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1220,6 +1220,14 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id) } ndev->inline_data_size = nport->inline_data_size; ndev->inline_page_count = inline_page_count; + + if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags & + IB_DEVICE_INTEGRITY_HANDOVER)) { + pr_warn("T10-PI is not supported by device %s. Disabling it\n", + cm_id->device->name); + nport->pi_enable = false; + } + ndev->device = cm_id->device; kref_init(&ndev->ref); @@ -1641,6 +1649,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) spin_lock_irqsave(&queue->state_lock, flags); switch (queue->state) { case NVMET_RDMA_Q_CONNECTING: + while (!list_empty(&queue->rsp_wait_list)) { + struct nvmet_rdma_rsp *rsp; + + rsp = list_first_entry(&queue->rsp_wait_list, + struct nvmet_rdma_rsp, + wait_list); + list_del(&rsp->wait_list); + nvmet_rdma_put_rsp(rsp); + } + fallthrough; case NVMET_RDMA_Q_LIVE: queue->state = NVMET_RDMA_Q_DISCONNECTING; disconnect = true; @@ -1845,14 +1863,6 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port) goto out_destroy_id; } - if (port->nport->pi_enable && - !(cm_id->device->attrs.device_cap_flags & - IB_DEVICE_INTEGRITY_HANDOVER)) { - pr_err("T10-PI is not supported for %pISpcs\n", addr); - ret = -EINVAL; - goto out_destroy_id; - } - port->cm_id = cm_id; return 0; diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index dc1f0f647189..8b0485ada315 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -305,7 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) length = cmd->pdu_len; cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); offset = cmd->rbytes_done; - cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE); + cmd->sg_idx = offset / PAGE_SIZE; sg_offset = offset % PAGE_SIZE; sg = &cmd->req.sg[cmd->sg_idx]; @@ -318,6 +318,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) length -= iov_len; sg = sg_next(sg); iov++; + sg_offset = 0; } iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, @@ -378,7 +379,7 @@ err: return NVME_SC_INTERNAL; } -static void nvmet_tcp_ddgst(struct ahash_request *hash, +static void nvmet_tcp_send_ddgst(struct ahash_request *hash, struct nvmet_tcp_cmd *cmd) { ahash_request_set_crypt(hash, cmd->req.sg, @@ -386,6 +387,23 @@ static void nvmet_tcp_ddgst(struct ahash_request *hash, crypto_ahash_digest(hash); } +static void nvmet_tcp_recv_ddgst(struct ahash_request *hash, + struct nvmet_tcp_cmd *cmd) +{ + struct scatterlist sg; + 
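/*
 * Editor's note on the nvmet_tcp_map_pdu_iovec() fix above, with illustrative
 * numbers: assume PAGE_SIZE == 4096 and a transfer resuming at
 * rbytes_done == 5000.  The data continues 904 bytes into page index 1:
 *   5000 / 4096 == 1      correct starting sg index
 *   5000 % 4096 == 904    in-page offset, valid for the first segment only
 * The old DIV_ROUND_UP(5000, 4096) == 2 pointed one page too far, and keeping
 * the 904-byte offset for later segments mapped each of them from the wrong
 * place, hence the new "sg_offset = 0" after the first iteration.
 */
static void example_resume_math(void)
{
	unsigned long resume_offset = 5000;			/* bytes already received */
	unsigned int first_idx = resume_offset / PAGE_SIZE;	/* == 1 */
	unsigned int first_off = resume_offset % PAGE_SIZE;	/* == 904 */

	(void)first_idx;
	(void)first_off;
}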
struct kvec *iov; + int i; + + crypto_ahash_init(hash); + for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) { + sg_init_one(&sg, iov->iov_base, iov->iov_len); + ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len); + crypto_ahash_update(hash); + } + ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0); + crypto_ahash_final(hash); +} + static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; @@ -410,7 +428,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) if (queue->data_digest) { pdu->hdr.flags |= NVME_TCP_F_DDGST; - nvmet_tcp_ddgst(queue->snd_hash, cmd); + nvmet_tcp_send_ddgst(queue->snd_hash, cmd); } if (cmd->queue->hdr_digest) { @@ -1059,7 +1077,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) { struct nvmet_tcp_queue *queue = cmd->queue; - nvmet_tcp_ddgst(queue->rcv_hash, cmd); + nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd); queue->offset = 0; queue->left = NVME_TCP_DIGEST_LENGTH; queue->rcv_state = NVMET_TCP_RECV_DDGST; @@ -1080,14 +1098,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) cmd->rbytes_done += ret; } + if (queue->data_digest) { + nvmet_tcp_prep_recv_ddgst(cmd); + return 0; + } nvmet_tcp_unmap_pdu_iovec(cmd); if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) && cmd->rbytes_done == cmd->req.transfer_len) { - if (queue->data_digest) { - nvmet_tcp_prep_recv_ddgst(cmd); - return 0; - } cmd->req.execute(&cmd->req); } @@ -1467,17 +1485,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) if (inet->rcv_tos > 0) ip_sock_set_tos(sock->sk, inet->rcv_tos); + ret = 0; write_lock_bh(&sock->sk->sk_callback_lock); - sock->sk->sk_user_data = queue; - queue->data_ready = sock->sk->sk_data_ready; - sock->sk->sk_data_ready = nvmet_tcp_data_ready; - queue->state_change = sock->sk->sk_state_change; - sock->sk->sk_state_change = nvmet_tcp_state_change; - queue->write_space = sock->sk->sk_write_space; - sock->sk->sk_write_space = nvmet_tcp_write_space; + if (sock->sk->sk_state != TCP_ESTABLISHED) { + /* + * If the socket is already closing, don't even start + * consuming it + */ + ret = -ENOTCONN; + } else { + sock->sk->sk_user_data = queue; + queue->data_ready = sock->sk->sk_data_ready; + sock->sk->sk_data_ready = nvmet_tcp_data_ready; + queue->state_change = sock->sk->sk_state_change; + sock->sk->sk_state_change = nvmet_tcp_state_change; + queue->write_space = sock->sk->sk_write_space; + sock->sk->sk_write_space = nvmet_tcp_write_space; + queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); + } write_unlock_bh(&sock->sk->sk_callback_lock); - return 0; + return ret; } static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, @@ -1525,8 +1553,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, if (ret) goto out_destroy_sq; - queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); - return 0; out_destroy_sq: mutex_lock(&nvmet_tcp_queue_mutex); diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h index c14e3249a14d..6109b3806b12 100644 --- a/drivers/nvme/target/trace.h +++ b/drivers/nvme/target/trace.h @@ -48,10 +48,13 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req) static inline void __assign_req_name(char *name, struct nvmet_req *req) { - if (req->ns) - strncpy(name, req->ns->device_path, DISK_NAME_LEN); - else + if (!req->ns) { memset(name, 0, DISK_NAME_LEN); + return; + } + + strncpy(name, req->ns->device_path, + min_t(size_t, DISK_NAME_LEN, 
strlen(req->ns->device_path))); } #endif
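Finally, an editor's sketch of the guarded socket setup the tcp.c hunk above moves to: callbacks are installed and the first io_work is kicked under sk_callback_lock, and a socket that has already left ESTABLISHED is rejected outright. Function and parameter names here are illustrative, not the driver's.

static int example_install_sk_callbacks(struct socket *sock, void *priv,
		void (*data_ready)(struct sock *sk))
{
	int ret = 0;

	write_lock_bh(&sock->sk->sk_callback_lock);
	if (sock->sk->sk_state != TCP_ESTABLISHED) {
		/* peer already closing: never start consuming the socket */
		ret = -ENOTCONN;
	} else {
		sock->sk->sk_user_data = priv;
		sock->sk->sk_data_ready = data_ready;
		/* schedule the first read here, still under the lock, so the
		 * queue is never kicked for a socket that was rejected */
	}
	write_unlock_bh(&sock->sk->sk_callback_lock);
	return ret;
}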