author     Linus Torvalds <torvalds@linux-foundation.org>  2019-11-25 22:15:41 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-11-25 22:15:41 +0300
commit     2d53943090c336c9d298638bad292be349e1b9c4 (patch)
tree       18fc9bbc1ff13fe031a922cf2efc22a903257caf /drivers/nvme
parent     ff6814b078e33a4d26fee9ea80779c81a6744cd8 (diff)
parent     00b89892c869f34528deca957b10d1468c4e8b38 (diff)
download   linux-2d53943090c336c9d298638bad292be349e1b9c4.tar.xz
Merge tag 'for-5.5/drivers-20191121' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
"Here are the main block driver updates for 5.5. Nothing major in here,
mostly just fixes. This contains:
- a set of bcache changes via Coly
- MD changes from Song
- loop unmap write-zeroes fix (Darrick)
- spelling fixes (Geert)
- zoned additions/cleanups to null_blk/dm (Ajay)
- allow null_blk online submit queue changes (Bart)
- NVMe changes via Keith, nothing major here either"
* tag 'for-5.5/drivers-20191121' of git://git.kernel.dk/linux-block: (56 commits)
Revert "bcache: fix fifo index swapping condition in journal_pin_cmp()"
drivers/md/raid5-ppl.c: use the new spelling of RWH_WRITE_LIFE_NOT_SET
drivers/md/raid5.c: use the new spelling of RWH_WRITE_LIFE_NOT_SET
bcache: don't export symbols
bcache: remove the extra cflags for request.o
bcache: at least try to shrink 1 node in bch_mca_scan()
bcache: add idle_max_writeback_rate sysfs interface
bcache: add code comments in bch_btree_leaf_dirty()
bcache: fix deadlock in bcache_allocator
bcache: add code comment bch_keylist_pop() and bch_keylist_pop_front()
bcache: deleted code comments for dead code in bch_data_insert_keys()
bcache: add more accurate error messages in read_super()
bcache: fix static checker warning in bcache_device_free()
bcache: fix a lost wake-up problem caused by mca_cannibalize_lock
bcache: fix fifo index swapping condition in journal_pin_cmp()
md/raid10: prevent access of uninitialized resync_pages offset
md: avoid invalid memory access for array sb->dev_roles
md/raid1: avoid soft lockup under high load
null_blk: add zone open, close, and finish support
dm: add zone open, close and finish support
...
Diffstat (limited to 'drivers/nvme')
 drivers/nvme/host/core.c          |  24
 drivers/nvme/host/fc.c            |  49
 drivers/nvme/host/multipath.c     |  13
 drivers/nvme/host/nvme.h          |  20
 drivers/nvme/host/pci.c           |   6
 drivers/nvme/host/rdma.c          |  16
 drivers/nvme/host/tcp.c           |   4
 drivers/nvme/target/admin-cmd.c   | 133
 drivers/nvme/target/core.c        |  20
 drivers/nvme/target/discovery.c   |  70
 drivers/nvme/target/fabrics-cmd.c |  15
 drivers/nvme/target/fc.c          |  31
 drivers/nvme/target/io-cmd-bdev.c |  43
 drivers/nvme/target/io-cmd-file.c |  20
 drivers/nvme/target/loop.c        |   7
 drivers/nvme/target/nvmet.h       |  10
 drivers/nvme/target/rdma.c        |   8
 drivers/nvme/target/tcp.c         |  14
 18 files changed, 289 insertions(+), 214 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fa7ba09dca77..c5e434c9af62 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -283,6 +283,8 @@ void nvme_complete_rq(struct request *req)

     trace_nvme_complete_rq(req);

+    nvme_cleanup_cmd(req);
+
     if (nvme_req(req)->ctrl->kas)
         nvme_req(req)->ctrl->comp_seen = true;

@@ -313,7 +315,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
     if (blk_mq_request_completed(req))
         return true;

-    nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+    nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
     blk_mq_complete_request(req);
     return true;
 }
@@ -626,7 +628,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
     }

     __rq_for_each_bio(bio, req) {
-        u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
+        u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
         u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

         if (n < segments) {
@@ -667,7 +669,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
     cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
     cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
     cmnd->write_zeroes.slba =
-        cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+        cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
     cmnd->write_zeroes.length =
         cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
     cmnd->write_zeroes.control = 0;
@@ -691,7 +693,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,

     cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
     cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
-    cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+    cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
     cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

     if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
@@ -1647,7 +1649,7 @@ static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)

 static void nvme_set_chunk_size(struct nvme_ns *ns)
 {
-    u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
+    u32 chunk_size = nvme_lba_to_sect(ns, ns->noiob);

     blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
 }
@@ -1684,8 +1686,7 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)

 static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
 {
-    u32 max_sectors;
-    unsigned short bs = 1 << ns->lba_shift;
+    u64 max_blocks;

     if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
         (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
@@ -1701,11 +1702,12 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
      * nvme_init_identify() if available.
      */
     if (ns->ctrl->max_hw_sectors == UINT_MAX)
-        max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9;
+        max_blocks = (u64)USHRT_MAX + 1;
     else
-        max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
+        max_blocks = ns->ctrl->max_hw_sectors + 1;

-    blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
+    blk_queue_max_write_zeroes_sectors(disk->queue,
+                                       nvme_lba_to_sect(ns, max_blocks));
 }

 static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
@@ -1748,7 +1750,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
 static void nvme_update_disk_info(struct gendisk *disk,
         struct nvme_ns *ns, struct nvme_id_ns *id)
 {
-    sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
+    sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
     unsigned short bs = 1 << ns->lba_shift;
     u32 atomic_bs, phys_bs, io_opt;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 265f89e11d8b..679a721ae229 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1224,7 +1224,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
     lsreq->rqstlen = sizeof(*assoc_rqst);
     lsreq->rspaddr = assoc_acc;
     lsreq->rsplen = sizeof(*assoc_acc);
-    lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+    lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

     ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
     if (ret)
@@ -1264,7 +1264,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
     if (fcret) {
         ret = -EBADF;
         dev_err(ctrl->dev,
-            "q %d connect failed: %s\n",
+            "q %d Create Association LS failed: %s\n",
             queue->qnum, validation_errors[fcret]);
     } else {
         ctrl->association_id =
@@ -1332,7 +1332,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
     lsreq->rqstlen = sizeof(*conn_rqst);
     lsreq->rspaddr = conn_acc;
     lsreq->rsplen = sizeof(*conn_acc);
-    lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+    lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

     ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
     if (ret)
@@ -1363,7 +1363,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
     if (fcret) {
         ret = -EBADF;
         dev_err(ctrl->dev,
-            "q %d connect failed: %s\n",
+            "q %d Create I/O Connection LS failed: %s\n",
             queue->qnum, validation_errors[fcret]);
     } else {
         queue->connection_id =
@@ -1376,7 +1376,7 @@ out_free_buffer:
 out_no_memory:
     if (ret)
         dev_err(ctrl->dev,
-            "queue %d connect command failed (%d).\n",
+            "queue %d connect I/O queue failed (%d).\n",
             queue->qnum, ret);
     return ret;
 }
@@ -1413,8 +1413,8 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
 static void
 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 {
-    struct fcnvme_ls_disconnect_rqst *discon_rqst;
-    struct fcnvme_ls_disconnect_acc *discon_acc;
+    struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
+    struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
     struct nvmefc_ls_req_op *lsop;
     struct nvmefc_ls_req *lsreq;
     int ret;
@@ -1430,11 +1430,11 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
     lsreq = &lsop->ls_req;

     lsreq->private = (void *)&lsop[1];
-    discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
-            (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+    discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)
+            (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
-    discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
+    discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];

-    discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
+    discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
     discon_rqst->desc_list_len = cpu_to_be32(
                 sizeof(struct fcnvme_lsdesc_assoc_id) +
                 sizeof(struct fcnvme_lsdesc_disconn_cmd));
@@ -1451,22 +1451,17 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
     discon_rqst->discon_cmd.desc_len =
             fcnvme_lsdesc_len(
                 sizeof(struct fcnvme_lsdesc_disconn_cmd));
-    discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
-    discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

     lsreq->rqstaddr = discon_rqst;
     lsreq->rqstlen = sizeof(*discon_rqst);
     lsreq->rspaddr = discon_acc;
     lsreq->rsplen = sizeof(*discon_acc);
-    lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+    lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

     ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
                 nvme_fc_disconnect_assoc_done);
     if (ret)
         kfree(lsop);
-
-    /* only meaningful part to terminating the association */
-    ctrl->association_id = 0;
 }

@@ -1662,7 +1657,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
                  (freq->rcv_rsplen / 4) ||
                  be32_to_cpu(op->rsp_iu.xfrd_len) !=
                     freq->transferred_length ||
-                 op->rsp_iu.status_code ||
+                 op->rsp_iu.ersp_result ||
                  sqe->common.command_id != cqe->command_id)) {
             status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
             dev_info(ctrl->ctrl.device,
@@ -1672,7 +1667,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
                 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
                 be32_to_cpu(op->rsp_iu.xfrd_len),
                 freq->transferred_length,
-                op->rsp_iu.status_code,
+                op->rsp_iu.ersp_result,
                 sqe->common.command_id,
                 cqe->command_id);
             goto done;
@@ -1731,9 +1726,14 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
     op->rq = rq;
     op->rqno = rqno;

-    cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+    cmdiu->format_id = NVME_CMD_FORMAT_ID;
     cmdiu->fc_id = NVME_CMD_FC_ID;
     cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+    if (queue->qnum)
+        cmdiu->rsv_cat = fccmnd_set_cat_css(0,
+                    (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
+    else
+        cmdiu->rsv_cat = fccmnd_set_cat_admin(0);

     op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
                 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
@@ -2173,8 +2173,6 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
     fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
             rq_dma_dir(rq));

-    nvme_cleanup_cmd(rq);
-
     sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);

     freq->sg_cnt = 0;
@@ -2305,6 +2303,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
         if (!(op->flags & FCOP_FLAGS_AEN))
             nvme_fc_unmap_data(ctrl, op->rq, op);

+        nvme_cleanup_cmd(op->rq);
         nvme_fc_ctrl_put(ctrl);

         if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
@@ -2695,7 +2694,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
         /* warn if maxcmd is lower than queue_size */
         dev_warn(ctrl->ctrl.device,
             "queue_size %zu > ctrl maxcmd %u, reducing "
-            "to queue_size\n",
+            "to maxcmd\n",
             opts->queue_size, ctrl->ctrl.maxcmd);
         opts->queue_size = ctrl->ctrl.maxcmd;
     }
@@ -2703,7 +2702,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
     if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
         /* warn if sqsize is lower than queue_size */
         dev_warn(ctrl->ctrl.device,
-            "queue_size %zu > ctrl sqsize %u, clamping down\n",
+            "queue_size %zu > ctrl sqsize %u, reducing "
+            "to sqsize\n",
             opts->queue_size, ctrl->ctrl.sqsize + 1);
         opts->queue_size = ctrl->ctrl.sqsize + 1;
     }
@@ -2739,6 +2739,7 @@ out_term_aen_ops:
 out_disconnect_admin_queue:
     /* send a Disconnect(association) LS to fc-nvme target */
     nvme_fc_xmt_disconnect_assoc(ctrl);
+    ctrl->association_id = 0;
 out_delete_hw_queue:
     __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
 out_free_queue:
@@ -2830,6 +2831,8 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
     if (ctrl->association_id)
         nvme_fc_xmt_disconnect_assoc(ctrl);

+    ctrl->association_id = 0;
+
     if (ctrl->ctrl.tagset) {
         nvme_fc_delete_hw_io_queues(ctrl);
         nvme_fc_free_io_queues(ctrl);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index e0f064dcbd02..797c18337d96 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -95,6 +95,7 @@ void nvme_failover_req(struct request *req)
         }
         break;
     case NVME_SC_HOST_PATH_ERROR:
+    case NVME_SC_HOST_ABORTED_CMD:
         /*
          * Temporary transport disruption in talking to the controller.
          * Try to send on a new path.
@@ -446,8 +447,14 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
     for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
         struct nvme_ana_group_desc *desc = base + offset;
-        u32 nr_nsids = le32_to_cpu(desc->nnsids);
-        size_t nsid_buf_size = nr_nsids * sizeof(__le32);
+        u32 nr_nsids;
+        size_t nsid_buf_size;
+
+        if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
+            return -EINVAL;
+
+        nr_nsids = le32_to_cpu(desc->nnsids);
+        nsid_buf_size = nr_nsids * sizeof(__le32);

         if (WARN_ON_ONCE(desc->grpid == 0))
             return -EINVAL;
@@ -467,8 +474,6 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
             return error;

         offset += nsid_buf_size;
-        if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
-            return -EINVAL;
     }

     return 0;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 22e8401352c2..fdda273cd2f6 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -419,9 +419,20 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
     return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
 }

-static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
+/*
+ * Convert a 512B sector number to a device logical block number.
+ */
+static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
+{
+    return sector >> (ns->lba_shift - SECTOR_SHIFT);
+}
+
+/*
+ * Convert a device logical block number to a 512B sector number.
+ */
+static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
 {
-    return (sector >> (ns->lba_shift - 9));
+    return lba << (ns->lba_shift - SECTOR_SHIFT);
 }

 static inline void nvme_end_request(struct request *req, __le16 status,
@@ -446,6 +457,11 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
     put_device(ctrl->device);
 }

+static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+{
+    return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+}
+
 void nvme_complete_rq(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
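The three inline helpers added to nvme.h above carry most of this series' host-side cleanup: callers stop open-coding the (ns->lba_shift - 9) shifts and the admin-queue AEN test. Below is a minimal standalone sketch of the same arithmetic, with the kernel types reduced to stdint; the queue-depth value is an assumption here (NVME_AQ_BLK_MQ_DEPTH is the admin queue depth minus the slot reserved for async-event commands).

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9        /* 512-byte Linux sectors */
    #define AQ_BLK_MQ_DEPTH 31    /* assumption: admin queue depth minus one reserved AEN slot */

    /* 512B sector number -> device logical block number */
    static uint64_t sect_to_lba(unsigned int lba_shift, uint64_t sector)
    {
        return sector >> (lba_shift - SECTOR_SHIFT);
    }

    /* device logical block number -> 512B sector number */
    static uint64_t lba_to_sect(unsigned int lba_shift, uint64_t lba)
    {
        return lba << (lba_shift - SECTOR_SHIFT);
    }

    /* a completion on qid 0 whose command id is past the tagset depth is an AEN */
    static bool is_aen_req(uint16_t qid, uint16_t command_id)
    {
        return !qid && command_id >= AQ_BLK_MQ_DEPTH;
    }

    int main(void)
    {
        assert(sect_to_lba(12, 8) == 1);   /* 4K-LBA namespace: 8 sectors = 1 block */
        assert(lba_to_sect(12, 1) == 8);
        assert(is_aen_req(0, 31) && !is_aen_req(1, 31));
        return 0;
    }

The pci.c, rdma.c, and tcp.c hunks that follow simply replace three copies of the open-coded test with the shared helper.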
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 869f462e6b6e..3aab92bd2263 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -925,7 +925,6 @@ static void nvme_pci_complete_rq(struct request *req)
     struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
     struct nvme_dev *dev = iod->nvmeq->dev;

-    nvme_cleanup_cmd(req);
     if (blk_integrity_rq(req))
         dma_unmap_page(dev->dev, iod->meta_dma,
                    rq_integrity_vec(req)->bv_len, rq_data_dir(req));
@@ -968,8 +967,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
      * aborts. We don't even bother to allocate a struct request
      * for them but rather special case them here.
      */
-    if (unlikely(nvmeq->qid == 0 &&
-            cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+    if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
         nvme_complete_async_event(&nvmeq->dev->ctrl,
                 cqe->status, &cqe->result);
         return;
@@ -2982,7 +2980,7 @@ static int nvme_suspend(struct device *dev)

     /*
      * Clearing npss forces a controller reset on resume. The
-     * correct value will be resdicovered then.
+     * correct value will be rediscovered then.
      */
     ret = nvme_disable_prepare_reset(ndev, true);
     ctrl->npss = 0;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index cb4c3000a57e..dce59459ed41 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1160,8 +1160,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
     }

     ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-
-    nvme_cleanup_cmd(rq);
     sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
 }

@@ -1501,8 +1499,8 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
      * aborts. We don't even bother to allocate a struct request
      * for them but rather special case them here.
      */
-    if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
-            cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+    if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
+                                 cqe->command_id)))
         nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                 &cqe->result);
     else
@@ -1768,7 +1766,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
     if (unlikely(err < 0)) {
         dev_err(queue->ctrl->ctrl.device,
                  "Failed to map data (%d)\n", err);
-        nvme_cleanup_cmd(rq);
         goto err;
     }

@@ -1779,18 +1776,19 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,

     err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
             req->mr ? &req->reg_wr.wr : NULL);
-    if (unlikely(err)) {
-        nvme_rdma_unmap_data(queue, rq);
-        goto err;
-    }
+    if (unlikely(err))
+        goto err_unmap;

     return BLK_STS_OK;

+err_unmap:
+    nvme_rdma_unmap_data(queue, rq);
 err:
     if (err == -ENOMEM || err == -EAGAIN)
         ret = BLK_STS_RESOURCE;
     else
         ret = BLK_STS_IOERR;
+    nvme_cleanup_cmd(rq);
 unmap_qe:
     ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
                 DMA_TO_DEVICE);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7544be84ab35..6d43b23a0fc8 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -491,8 +491,8 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
      * aborts. We don't even bother to allocate a struct request
      * for them but rather special case them here.
      */
-    if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
-        cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+    if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
+                                 cqe->command_id)))
         nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                 &cqe->result);
     else
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 831a062d27cb..56c21b501185 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -31,7 +31,7 @@ u64 nvmet_get_log_page_offset(struct nvme_command *cmd)

 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 {
-    nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
+    nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
 }

 static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
@@ -134,7 +134,7 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
     u16 status = NVME_SC_INTERNAL;
     unsigned long flags;

-    if (req->data_len != sizeof(*log))
+    if (req->transfer_len != sizeof(*log))
         goto out;

     log = kzalloc(sizeof(*log), GFP_KERNEL);
@@ -196,7 +196,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
     u16 status = NVME_SC_INTERNAL;
     size_t len;

-    if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
+    if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
         goto out;

     mutex_lock(&ctrl->lock);
@@ -206,7 +206,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
     len = ctrl->nr_changed_ns * sizeof(__le32);
     status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
     if (!status)
-        status = nvmet_zero_sgl(req, len, req->data_len - len);
+        status = nvmet_zero_sgl(req, len, req->transfer_len - len);
     ctrl->nr_changed_ns = 0;
     nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
     mutex_unlock(&ctrl->lock);
@@ -282,6 +282,36 @@ out:
     nvmet_req_complete(req, status);
 }

+static void nvmet_execute_get_log_page(struct nvmet_req *req)
+{
+    if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
+        return;
+
+    switch (req->cmd->get_log_page.lid) {
+    case NVME_LOG_ERROR:
+        return nvmet_execute_get_log_page_error(req);
+    case NVME_LOG_SMART:
+        return nvmet_execute_get_log_page_smart(req);
+    case NVME_LOG_FW_SLOT:
+        /*
+         * We only support a single firmware slot which always is
+         * active, so we can zero out the whole firmware slot log and
+         * still claim to fully implement this mandatory log page.
+         */
+        return nvmet_execute_get_log_page_noop(req);
+    case NVME_LOG_CHANGED_NS:
+        return nvmet_execute_get_log_changed_ns(req);
+    case NVME_LOG_CMD_EFFECTS:
+        return nvmet_execute_get_log_cmd_effects_ns(req);
+    case NVME_LOG_ANA:
+        return nvmet_execute_get_log_page_ana(req);
+    }
+    pr_err("unhandled lid %d on qid %d\n",
+           req->cmd->get_log_page.lid, req->sq->qid);
+    req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
+    nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
     struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -565,6 +595,28 @@ out:
     nvmet_req_complete(req, status);
 }

+static void nvmet_execute_identify(struct nvmet_req *req)
+{
+    if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+        return;
+
+    switch (req->cmd->identify.cns) {
+    case NVME_ID_CNS_NS:
+        return nvmet_execute_identify_ns(req);
+    case NVME_ID_CNS_CTRL:
+        return nvmet_execute_identify_ctrl(req);
+    case NVME_ID_CNS_NS_ACTIVE_LIST:
+        return nvmet_execute_identify_nslist(req);
+    case NVME_ID_CNS_NS_DESC_LIST:
+        return nvmet_execute_identify_desclist(req);
+    }
+
+    pr_err("unhandled identify cns %d on qid %d\n",
+           req->cmd->identify.cns, req->sq->qid);
+    req->error_loc = offsetof(struct nvme_identify, cns);
+    nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
 /*
  * A "minimum viable" abort implementation: the command is mandatory in the
  * spec, but we are not required to do any useful work. We couldn't really
@@ -574,6 +626,8 @@ out:
  */
 static void nvmet_execute_abort(struct nvmet_req *req)
 {
+    if (!nvmet_check_data_len(req, 0))
+        return;
     nvmet_set_result(req, 1);
     nvmet_req_complete(req, 0);
 }
@@ -658,6 +712,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
     u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
     u16 status = 0;

+    if (!nvmet_check_data_len(req, 0))
+        return;
+
     switch (cdw10 & 0xff) {
     case NVME_FEAT_NUM_QUEUES:
         nvmet_set_result(req,
@@ -721,6 +778,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
     u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
     u16 status = 0;

+    if (!nvmet_check_data_len(req, 0))
+        return;
+
     switch (cdw10 & 0xff) {
     /*
      * These features are mandatory in the spec, but we don't
@@ -785,6 +845,9 @@ void nvmet_execute_async_event(struct nvmet_req *req)
 {
     struct nvmet_ctrl *ctrl = req->sq->ctrl;

+    if (!nvmet_check_data_len(req, 0))
+        return;
+
     mutex_lock(&ctrl->lock);
     if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
         mutex_unlock(&ctrl->lock);
@@ -801,6 +864,9 @@ void nvmet_execute_keep_alive(struct nvmet_req *req)
 {
     struct nvmet_ctrl *ctrl = req->sq->ctrl;

+    if (!nvmet_check_data_len(req, 0))
+        return;
+
     pr_debug("ctrl %d update keep-alive timer for %d secs\n",
         ctrl->cntlid, ctrl->kato);

@@ -813,77 +879,36 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
     struct nvme_command *cmd = req->cmd;
     u16 ret;

+    if (nvme_is_fabrics(cmd))
+        return nvmet_parse_fabrics_cmd(req);
+    if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+        return nvmet_parse_discovery_cmd(req);
+
     ret = nvmet_check_ctrl_status(req, cmd);
     if (unlikely(ret))
         return ret;

     switch (cmd->common.opcode) {
     case nvme_admin_get_log_page:
-        req->data_len = nvmet_get_log_page_len(cmd);
-
-        switch (cmd->get_log_page.lid) {
-        case NVME_LOG_ERROR:
-            req->execute = nvmet_execute_get_log_page_error;
-            return 0;
-        case NVME_LOG_SMART:
-            req->execute = nvmet_execute_get_log_page_smart;
-            return 0;
-        case NVME_LOG_FW_SLOT:
-            /*
-             * We only support a single firmware slot which always
-             * is active, so we can zero out the whole firmware slot
-             * log and still claim to fully implement this mandatory
-             * log page.
-             */
-            req->execute = nvmet_execute_get_log_page_noop;
-            return 0;
-        case NVME_LOG_CHANGED_NS:
-            req->execute = nvmet_execute_get_log_changed_ns;
-            return 0;
-        case NVME_LOG_CMD_EFFECTS:
-            req->execute = nvmet_execute_get_log_cmd_effects_ns;
-            return 0;
-        case NVME_LOG_ANA:
-            req->execute = nvmet_execute_get_log_page_ana;
-            return 0;
-        }
-        break;
+        req->execute = nvmet_execute_get_log_page;
+        return 0;
     case nvme_admin_identify:
-        req->data_len = NVME_IDENTIFY_DATA_SIZE;
-        switch (cmd->identify.cns) {
-        case NVME_ID_CNS_NS:
-            req->execute = nvmet_execute_identify_ns;
-            return 0;
-        case NVME_ID_CNS_CTRL:
-            req->execute = nvmet_execute_identify_ctrl;
-            return 0;
-        case NVME_ID_CNS_NS_ACTIVE_LIST:
-            req->execute = nvmet_execute_identify_nslist;
-            return 0;
-        case NVME_ID_CNS_NS_DESC_LIST:
-            req->execute = nvmet_execute_identify_desclist;
-            return 0;
-        }
-        break;
+        req->execute = nvmet_execute_identify;
+        return 0;
     case nvme_admin_abort_cmd:
         req->execute = nvmet_execute_abort;
-        req->data_len = 0;
         return 0;
     case nvme_admin_set_features:
         req->execute = nvmet_execute_set_features;
-        req->data_len = 0;
         return 0;
     case nvme_admin_get_features:
         req->execute = nvmet_execute_get_features;
-        req->data_len = 0;
         return 0;
     case nvme_admin_async_event:
         req->execute = nvmet_execute_async_event;
-        req->data_len = 0;
         return 0;
     case nvme_admin_keep_alive:
         req->execute = nvmet_execute_keep_alive;
-        req->data_len = 0;
         return 0;
     }
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 3a67e244e568..28438b833c1b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -892,14 +892,10 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
     }

     if (unlikely(!req->sq->ctrl))
-        /* will return an error for any Non-connect command: */
+        /* will return an error for any non-connect command: */
         status = nvmet_parse_connect_cmd(req);
     else if (likely(req->sq->qid != 0))
         status = nvmet_parse_io_cmd(req);
-    else if (nvme_is_fabrics(req->cmd))
-        status = nvmet_parse_fabrics_cmd(req);
-    else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
-        status = nvmet_parse_discovery_cmd(req);
     else
         status = nvmet_parse_admin_cmd(req);

@@ -930,15 +926,17 @@ void nvmet_req_uninit(struct nvmet_req *req)
 }
 EXPORT_SYMBOL_GPL(nvmet_req_uninit);

-void nvmet_req_execute(struct nvmet_req *req)
+bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 {
-    if (unlikely(req->data_len != req->transfer_len)) {
+    if (unlikely(data_len != req->transfer_len)) {
         req->error_loc = offsetof(struct nvme_common_command, dptr);
         nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
-    } else
-        req->execute(req);
+        return false;
+    }
+
+    return true;
 }
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
+EXPORT_SYMBOL_GPL(nvmet_check_data_len);

 int nvmet_req_alloc_sgl(struct nvmet_req *req)
 {
@@ -966,7 +964,7 @@ int nvmet_req_alloc_sgl(struct nvmet_req *req)
     }

     req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
-    if (!req->sg)
+    if (unlikely(!req->sg))
         return -ENOMEM;

     return 0;
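The nvmet_check_data_len()/req->execute() split visible in the two files above changes the target's dispatch contract: parse handlers now only select an executor, and every executor validates the SGL-derived transfer length itself before touching data, while transports invoke req->execute() directly instead of going through the removed nvmet_req_execute(). A minimal userspace mock of that flow, with all names and the stub status value illustrative rather than the kernel's:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct req {
        size_t transfer_len;            /* length parsed from the SGL descriptor */
        void (*execute)(struct req *);
    };

    static void complete(struct req *r, unsigned int status)
    {
        printf("completed, status %u\n", status);
    }

    /* mirrors nvmet_check_data_len(): fail the command when the wire
     * length disagrees with what this command is defined to carry */
    static bool check_data_len(struct req *r, size_t data_len)
    {
        if (data_len != r->transfer_len) {
            complete(r, 1 /* stands in for NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR */);
            return false;
        }
        return true;
    }

    static void execute_keep_alive(struct req *r)
    {
        if (!check_data_len(r, 0))      /* keep-alive carries no data */
            return;
        complete(r, 0);
    }

    int main(void)
    {
        struct req r = { .transfer_len = 0, .execute = execute_keep_alive };
        r.execute(&r);                  /* transports now call ->execute directly */
        return 0;
    }

The same two-line guard recurs at the top of nearly every executor in the remaining target hunks below.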
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 3764a8900850..0c2274b21e15 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -157,7 +157,7 @@ static size_t discovery_log_entries(struct nvmet_req *req)
     return entries;
 }

-static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
+static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
 {
     const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
     struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -171,6 +171,16 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
     u16 status = 0;
     void *buffer;

+    if (!nvmet_check_data_len(req, data_len))
+        return;
+
+    if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
+        req->error_loc =
+            offsetof(struct nvme_get_log_page_command, lid);
+        status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+        goto out;
+    }
+
     /* Spec requires dword aligned offsets */
     if (offset & 0x3) {
         status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
@@ -227,20 +237,35 @@ out:
     nvmet_req_complete(req, status);
 }

-static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
+static void nvmet_execute_disc_identify(struct nvmet_req *req)
 {
     struct nvmet_ctrl *ctrl = req->sq->ctrl;
     struct nvme_id_ctrl *id;
+    const char model[] = "Linux";
     u16 status = 0;

+    if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+        return;
+
+    if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
+        req->error_loc = offsetof(struct nvme_identify, cns);
+        status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+        goto out;
+    }
+
     id = kzalloc(sizeof(*id), GFP_KERNEL);
     if (!id) {
         status = NVME_SC_INTERNAL;
         goto out;
     }

+    memset(id->sn, ' ', sizeof(id->sn));
+    bin2hex(id->sn, &ctrl->subsys->serial,
+        min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
     memset(id->fr, ' ', sizeof(id->fr));
-    strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+    memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+    memcpy_and_pad(id->fr, sizeof(id->fr),
+               UTS_RELEASE, strlen(UTS_RELEASE), ' ');

     /* no limit on data transfer sizes for now */
     id->mdts = 0;
@@ -273,6 +298,9 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
     u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
     u16 stat;

+    if (!nvmet_check_data_len(req, 0))
+        return;
+
     switch (cdw10 & 0xff) {
     case NVME_FEAT_KATO:
         stat = nvmet_set_feat_kato(req);
@@ -296,6 +324,9 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
     u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
     u16 stat = 0;

+    if (!nvmet_check_data_len(req, 0))
+        return;
+
     switch (cdw10 & 0xff) {
     case NVME_FEAT_KATO:
         nvmet_get_feat_kato(req);
@@ -328,47 +359,22 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
     switch (cmd->common.opcode) {
     case nvme_admin_set_features:
         req->execute = nvmet_execute_disc_set_features;
-        req->data_len = 0;
         return 0;
     case nvme_admin_get_features:
         req->execute = nvmet_execute_disc_get_features;
-        req->data_len = 0;
         return 0;
     case nvme_admin_async_event:
         req->execute = nvmet_execute_async_event;
-        req->data_len = 0;
         return 0;
     case nvme_admin_keep_alive:
         req->execute = nvmet_execute_keep_alive;
-        req->data_len = 0;
         return 0;
     case nvme_admin_get_log_page:
-        req->data_len = nvmet_get_log_page_len(cmd);
-
-        switch (cmd->get_log_page.lid) {
-        case NVME_LOG_DISC:
-            req->execute = nvmet_execute_get_disc_log_page;
-            return 0;
-        default:
-            pr_err("unsupported get_log_page lid %d\n",
-                   cmd->get_log_page.lid);
-            req->error_loc =
-                offsetof(struct nvme_get_log_page_command, lid);
-            return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
-        }
+        req->execute = nvmet_execute_disc_get_log_page;
+        return 0;
     case nvme_admin_identify:
-        req->data_len = NVME_IDENTIFY_DATA_SIZE;
-        switch (cmd->identify.cns) {
-        case NVME_ID_CNS_CTRL:
-            req->execute =
-                nvmet_execute_identify_disc_ctrl;
-            return 0;
-        default:
-            pr_err("unsupported identify cns %d\n",
-                   cmd->identify.cns);
-            req->error_loc = offsetof(struct nvme_identify, cns);
-            return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
-        }
+        req->execute = nvmet_execute_disc_identify;
+        return 0;
     default:
         pr_err("unhandled cmd %d\n", cmd->common.opcode);
         req->error_loc = offsetof(struct nvme_common_command, opcode);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index d16b55ffe79f..f7297473d9eb 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -12,6 +12,9 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
     u64 val = le64_to_cpu(req->cmd->prop_set.value);
     u16 status = 0;

+    if (!nvmet_check_data_len(req, 0))
+        return;
+
     if (req->cmd->prop_set.attrib & 1) {
         req->error_loc =
             offsetof(struct nvmf_property_set_command, attrib);
@@ -38,6 +41,9 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
     u16 status = 0;
     u64 val = 0;

+    if (!nvmet_check_data_len(req, 0))
+        return;
+
     if (req->cmd->prop_get.attrib & 1) {
         switch (le32_to_cpu(req->cmd->prop_get.offset)) {
         case NVME_REG_CAP:
@@ -82,11 +88,9 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)

     switch (cmd->fabrics.fctype) {
     case nvme_fabrics_type_property_set:
-        req->data_len = 0;
         req->execute = nvmet_execute_prop_set;
         break;
     case nvme_fabrics_type_property_get:
-        req->data_len = 0;
         req->execute = nvmet_execute_prop_get;
         break;
     default:
@@ -147,6 +151,9 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
     struct nvmet_ctrl *ctrl = NULL;
     u16 status = 0;

+    if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+        return;
+
     d = kmalloc(sizeof(*d), GFP_KERNEL);
     if (!d) {
         status = NVME_SC_INTERNAL;
@@ -211,6 +218,9 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
     u16 qid = le16_to_cpu(c->qid);
     u16 status = 0;

+    if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+        return;
+
     d = kmalloc(sizeof(*d), GFP_KERNEL);
     if (!d) {
         status = NVME_SC_INTERNAL;
@@ -281,7 +291,6 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
         return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
     }

-    req->data_len = sizeof(struct nvmf_connect_data);
     if (cmd->connect.qid == 0)
         req->execute = nvmet_execute_admin_connect;
     else
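The discovery identify hunk above also switches from strncpy() to memcpy_and_pad() for the fixed-width, space-padded identify string fields. Below is a userspace re-implementation of that helper's semantics for illustration (the real one lives in include/linux/string.h); the 40-byte width matches the nvme_id_ctrl model-number field:

    #include <stdio.h>
    #include <string.h>

    /* copy count bytes of src into dest, padding the remainder with pad */
    static void memcpy_and_pad(void *dest, size_t dest_len,
                               const void *src, size_t count, int pad)
    {
        if (dest_len > count) {
            memcpy(dest, src, count);
            memset((char *)dest + count, pad, dest_len - count);
        } else {
            memcpy(dest, src, dest_len);
        }
    }

    int main(void)
    {
        char mn[40];    /* nvme_id_ctrl.mn is a 40-byte, space-padded field */

        memcpy_and_pad(mn, sizeof(mn), "Linux", strlen("Linux"), ' ');
        printf("[%.*s]\n", (int)sizeof(mn), mn);    /* "Linux" plus 35 spaces */
        return 0;
    }

Unlike strncpy(), this never NUL-terminates and always fills the whole field, which is exactly what the space-padded NVMe identify strings require.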
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ce8d819f86cc..a0db6371b43e 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1495,20 +1495,20 @@ static void
 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
             struct nvmet_fc_ls_iod *iod)
 {
-    struct fcnvme_ls_disconnect_rqst *rqst =
-            (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
-    struct fcnvme_ls_disconnect_acc *acc =
-            (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
+    struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+            (struct fcnvme_ls_disconnect_assoc_rqst *)iod->rqstbuf;
+    struct fcnvme_ls_disconnect_assoc_acc *acc =
+            (struct fcnvme_ls_disconnect_assoc_acc *)iod->rspbuf;
     struct nvmet_fc_tgt_assoc *assoc;
     int ret = 0;

     memset(acc, 0, sizeof(*acc));

-    if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
+    if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
         ret = VERR_DISCONN_LEN;
     else if (rqst->desc_list_len !=
             fcnvme_lsdesc_len(
-                sizeof(struct fcnvme_ls_disconnect_rqst)))
+                sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
         ret = VERR_DISCONN_RQST_LEN;
     else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
         ret = VERR_ASSOC_ID;
@@ -1523,8 +1523,11 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
             fcnvme_lsdesc_len(
                 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
         ret = VERR_DISCONN_CMD_LEN;
-    else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
-            (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
+    /*
+     * As the standard changed on the LS, check if old format and scope
+     * something other than Association (e.g. 0).
+     */
+    else if (rqst->discon_cmd.rsvd8[0])
         ret = VERR_DISCONN_SCOPE;
     else {
         /* match an active association */
@@ -1556,8 +1559,8 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,

     nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
             fcnvme_lsdesc_len(
-                sizeof(struct fcnvme_ls_disconnect_acc)),
-            FCNVME_LS_DISCONNECT);
+                sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+            FCNVME_LS_DISCONNECT_ASSOC);

     /* release get taken in nvmet_fc_find_target_assoc */
     nvmet_fc_tgt_a_put(iod->assoc);
@@ -1632,7 +1635,7 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
         /* Creates an IO Queue/Connection */
         nvmet_fc_ls_create_connection(tgtport, iod);
         break;
-    case FCNVME_LS_DISCONNECT:
+    case FCNVME_LS_DISCONNECT_ASSOC:
         /* Terminate a Queue/Connection or the Association */
         nvmet_fc_ls_disconnect(tgtport, iod);
         break;
@@ -2015,7 +2018,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
         }

         /* data transfer complete, resume with nvmet layer */
-        nvmet_req_execute(&fod->req);
+        fod->req.execute(&fod->req);
         break;

     case NVMET_FCOP_READDATA:
@@ -2231,7 +2234,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
      * can invoke the nvmet_layer now. If read data, cmd completion will
      * push the data
      */
-    nvmet_req_execute(&fod->req);
+    fod->req.execute(&fod->req);
     return;

 transport_error:
@@ -2299,7 +2302,7 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,

     /* validate iu, so the connection id can be used to find the queue */
     if ((cmdiubuf_len != sizeof(*cmdiu)) ||
-            (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
+            (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
             (cmdiu->fc_id != NVME_CMD_FC_ID) ||
             (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
         return -EIO;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 32008d85172b..b6fca0e421ef 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -147,8 +147,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
     int sg_cnt = req->sg_cnt;
     struct bio *bio;
     struct scatterlist *sg;
+    struct blk_plug plug;
     sector_t sector;
-    int op, op_flags = 0, i;
+    int op, i;
+
+    if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+        return;

     if (!req->sg_cnt) {
         nvmet_req_complete(req, 0);
@@ -156,21 +160,20 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
     }

     if (req->cmd->rw.opcode == nvme_cmd_write) {
-        op = REQ_OP_WRITE;
-        op_flags = REQ_SYNC | REQ_IDLE;
+        op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
         if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
-            op_flags |= REQ_FUA;
+            op |= REQ_FUA;
     } else {
         op = REQ_OP_READ;
     }

     if (is_pci_p2pdma_page(sg_page(req->sg)))
-        op_flags |= REQ_NOMERGE;
+        op |= REQ_NOMERGE;

     sector = le64_to_cpu(req->cmd->rw.slba);
     sector <<= (req->ns->blksize_shift - 9);

-    if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+    if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
         bio = &req->b.inline_bio;
         bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
     } else {
@@ -180,8 +183,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
     bio->bi_iter.bi_sector = sector;
     bio->bi_private = req;
     bio->bi_end_io = nvmet_bio_done;
-    bio_set_op_attrs(bio, op, op_flags);
+    bio->bi_opf = op;

+    blk_start_plug(&plug);
     for_each_sg(req->sg, sg, req->sg_cnt, i) {
         while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                 != sg->length) {
@@ -190,7 +194,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
             bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
             bio_set_dev(bio, req->ns->bdev);
             bio->bi_iter.bi_sector = sector;
-            bio_set_op_attrs(bio, op, op_flags);
+            bio->bi_opf = op;

             bio_chain(bio, prev);
             submit_bio(prev);
@@ -201,12 +205,16 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
     }

     submit_bio(bio);
+    blk_finish_plug(&plug);
 }

 static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 {
     struct bio *bio = &req->b.inline_bio;

+    if (!nvmet_check_data_len(req, 0))
+        return;
+
     bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
     bio_set_dev(bio, req->ns->bdev);
     bio->bi_private = req;
@@ -261,12 +269,10 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
     if (bio) {
         bio->bi_private = req;
         bio->bi_end_io = nvmet_bio_done;
-        if (status) {
-            bio->bi_status = BLK_STS_IOERR;
-            bio_endio(bio);
-        } else {
+        if (status)
+            bio_io_error(bio);
+        else
             submit_bio(bio);
-        }
     } else {
         nvmet_req_complete(req, status);
     }
@@ -274,6 +280,9 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)

 static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
 {
+    if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+        return;
+
     switch (le32_to_cpu(req->cmd->dsm.attributes)) {
     case NVME_DSMGMT_AD:
         nvmet_bdev_execute_discard(req);
@@ -295,6 +304,9 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
     sector_t nr_sector;
     int ret;

+    if (!nvmet_check_data_len(req, 0))
+        return;
+
     sector = le64_to_cpu(write_zeroes->slba) <<
         (req->ns->blksize_shift - 9);
     nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
@@ -319,20 +331,15 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
     case nvme_cmd_read:
     case nvme_cmd_write:
         req->execute = nvmet_bdev_execute_rw;
-        req->data_len = nvmet_rw_len(req);
         return 0;
     case nvme_cmd_flush:
         req->execute = nvmet_bdev_execute_flush;
-        req->data_len = 0;
         return 0;
     case nvme_cmd_dsm:
         req->execute = nvmet_bdev_execute_dsm;
-        req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
-            sizeof(struct nvme_dsm_range);
         return 0;
     case nvme_cmd_write_zeroes:
         req->execute = nvmet_bdev_execute_write_zeroes;
-        req->data_len = 0;
         return 0;
     default:
         pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
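Besides the nvmet_check_data_len() conversion, the io-cmd-bdev.c hunks above fold the op flags directly into bio->bi_opf and wrap the bio build-and-submit loop in a block plug. A kernel-context sketch of that plugging pattern (not standalone-runnable; the batch-submission helper name is hypothetical and it assumes a caller that has already built the bios):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* hypothetical helper: submit a prepared batch of bios under one plug
     * so the block layer can merge and dispatch them together */
    static void submit_bio_batch(struct bio **bios, int nr)
    {
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);        /* start batching this task's I/O */
        for (i = 0; i < nr; i++)
            submit_bio(bios[i]);      /* queued behind the plug */
        blk_finish_plug(&plug);       /* flush the batch to the driver */
    }

Plugging matters most for multi-bio requests like the scatterlist loop above, where per-bio dispatch would otherwise hit the driver once per segment.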
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 05453f5d1448..caebfce06605 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -126,7 +126,7 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
         mempool_free(req->f.bvec, req->ns->bvec_pool);
     }

-    if (unlikely(ret != req->data_len))
+    if (unlikely(ret != req->transfer_len))
         status = errno_to_nvme_status(req, ret);
     nvmet_req_complete(req, status);
 }
@@ -146,7 +146,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
         is_sync = true;

     pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
-    if (unlikely(pos + req->data_len > req->ns->size)) {
+    if (unlikely(pos + req->transfer_len > req->ns->size)) {
         nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
         return true;
     }
@@ -173,7 +173,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
         nr_bvec--;
     }

-    if (WARN_ON_ONCE(total_len != req->data_len)) {
+    if (WARN_ON_ONCE(total_len != req->transfer_len)) {
         ret = -EIO;
         goto complete;
     }
@@ -232,6 +232,9 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
 {
     ssize_t nr_bvec = req->sg_cnt;

+    if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+        return;
+
     if (!req->sg_cnt || !nr_bvec) {
         nvmet_req_complete(req, 0);
         return;
@@ -273,6 +276,8 @@ static void nvmet_file_flush_work(struct work_struct *w)

 static void nvmet_file_execute_flush(struct nvmet_req *req)
 {
+    if (!nvmet_check_data_len(req, 0))
+        return;
     INIT_WORK(&req->f.work, nvmet_file_flush_work);
     schedule_work(&req->f.work);
 }
@@ -331,6 +336,8 @@ static void nvmet_file_dsm_work(struct work_struct *w)

 static void nvmet_file_execute_dsm(struct nvmet_req *req)
 {
+    if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+        return;
     INIT_WORK(&req->f.work, nvmet_file_dsm_work);
     schedule_work(&req->f.work);
 }
@@ -359,6 +366,8 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)

 static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
 {
+    if (!nvmet_check_data_len(req, 0))
+        return;
     INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
     schedule_work(&req->f.work);
 }
@@ -371,20 +380,15 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
     case nvme_cmd_read:
     case nvme_cmd_write:
         req->execute = nvmet_file_execute_rw;
-        req->data_len = nvmet_rw_len(req);
         return 0;
     case nvme_cmd_flush:
         req->execute = nvmet_file_execute_flush;
-        req->data_len = 0;
         return 0;
     case nvme_cmd_dsm:
         req->execute = nvmet_file_execute_dsm;
-        req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
-            sizeof(struct nvme_dsm_range);
         return 0;
     case nvme_cmd_write_zeroes:
         req->execute = nvmet_file_execute_write_zeroes;
-        req->data_len = 0;
         return 0;
     default:
         pr_err("unhandled cmd for file ns %d on qid %d\n",
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 11f5aea97d1b..a758bb3d5dd4 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -76,7 +76,6 @@ static void nvme_loop_complete_rq(struct request *req)
 {
     struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

-    nvme_cleanup_cmd(req);
     sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
     nvme_complete_rq(req);
 }
@@ -102,8 +101,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
      * aborts. We don't even bother to allocate a struct request
      * for them but rather special case them here.
      */
-    if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
-            cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+    if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
+                                 cqe->command_id))) {
         nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                 &cqe->result);
     } else {
@@ -126,7 +125,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
     struct nvme_loop_iod *iod =
         container_of(work, struct nvme_loop_iod, work);

-    nvmet_req_execute(&iod->req);
+    iod->req.execute(&iod->req);
 }

 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c51f8dd01dc4..46df45e837c9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -304,8 +304,6 @@ struct nvmet_req {
         } f;
     };
     int sg_cnt;
-    /* data length as parsed from the command: */
-    size_t data_len;
     /* data length as parsed from the SGL descriptor: */
     size_t transfer_len;

@@ -375,7 +373,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
         struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
-void nvmet_req_execute(struct nvmet_req *req);
+bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
 void nvmet_req_free_sgl(struct nvmet_req *req);
@@ -495,6 +493,12 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
             req->ns->blksize_shift;
 }

+static inline u32 nvmet_dsm_len(struct nvmet_req *req)
+{
+    return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
+        sizeof(struct nvme_dsm_range);
+}
+
 u16 errno_to_nvme_status(struct nvmet_req *req, int errno);

 /* Convert a 32-bit number to a 16-bit 0's based number */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 36d906a7f70d..37d262a65877 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -603,7 +603,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
         return;
     }

-    nvmet_req_execute(&rsp->req);
+    rsp->req.execute(&rsp->req);
 }

 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -672,13 +672,13 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
         return 0;

     ret = nvmet_req_alloc_sgl(&rsp->req);
-    if (ret < 0)
+    if (unlikely(ret < 0))
         goto error_out;

     ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
             rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
             nvmet_data_dir(&rsp->req));
-    if (ret < 0)
+    if (unlikely(ret < 0))
         goto error_out;
     rsp->n_rdma += ret;

@@ -746,7 +746,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
                 queue->cm_id->port_num, &rsp->read_cqe, NULL))
             nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
     } else {
-        nvmet_req_execute(&rsp->req);
+        rsp->req.execute(&rsp->req);
     }

     return true;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index d535080b781f..af674fc0bb1e 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -320,7 +320,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
     struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
     u32 len = le32_to_cpu(sgl->length);

-    if (!cmd->req.data_len)
+    if (!len)
         return 0;

     if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
@@ -813,13 +813,11 @@ free_crypto:
 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
         struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
 {
+    size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
     int ret;

-    /* recover the expected data transfer length */
-    req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
-
     if (!nvme_is_write(cmd->req.cmd) ||
-        req->data_len > cmd->req.port->inline_data_size) {
+        data_len > cmd->req.port->inline_data_size) {
         nvmet_prepare_receive_pdu(queue);
         return;
     }
@@ -932,7 +930,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
         goto out;
     }

-    nvmet_req_execute(&queue->cmd->req);
+    queue->cmd->req.execute(&queue->cmd->req);
 out:
     nvmet_prepare_receive_pdu(queue);
     return ret;
@@ -1052,7 +1050,7 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
             nvmet_tcp_prep_recv_ddgst(cmd);
             return 0;
         }
-        nvmet_req_execute(&cmd->req);
+        cmd->req.execute(&cmd->req);
     }

     nvmet_prepare_receive_pdu(queue);
@@ -1092,7 +1090,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)

     if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
         cmd->rbytes_done == cmd->req.transfer_len)
-        nvmet_req_execute(&cmd->req);
+        cmd->req.execute(&cmd->req);
     ret = 0;
 out:
     nvmet_prepare_receive_pdu(queue);