From e346ab343f4f58c12a96725c7b13df9cc2ad56f6 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 19 May 2014 17:44:22 +0300 Subject: Target/iser: Bail from accept_np if np_thread is trying to close In case np_thread state is in RESET/SHUTDOWN/EXIT states, no point for isert to stall there as we may get a hang in case no one will wake it up later. Signed-off-by: Sagi Grimberg Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index a1710465faaf..7676bcb7105a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -3154,9 +3154,14 @@ accept_wait: return -ENODEV; spin_lock_bh(&np->np_thread_lock); - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { + if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { spin_unlock_bh(&np->np_thread_lock); - pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); + pr_debug("np_thread_state %d for isert_accept_np\n", + np->np_thread_state); + /** + * No point in stalling here when np_thread + * is in state RESET/SHUTDOWN/EXIT - bail + **/ return -ENODEV; } spin_unlock_bh(&np->np_thread_lock); -- cgit v1.2.3 From 9d49f5e284e700576f3b65f1e28dea8539da6661 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 19 May 2014 17:44:23 +0300 Subject: Target/iser: Fix hangs in connection teardown In ungraceful teardowns isert close flows seem racy such that isert_wait_conn hangs as RDMA_CM_EVENT_DISCONNECTED never gets invoked (no one called rdma_disconnect). Both graceful and ungraceful teardowns will have rx flush errors (isert posts a batch once connection is established). Once all flush errors are consumed we invoke isert_wait_conn and it will be responsible for calling rdma_disconnect. This way it can be sure that rdma_disconnect was called and it won't wait forever. This patch also removes the logout_posted indicator. either the logout completion was consumed and no problem decrementing the post_send_buf_count, or it was consumed as a flush error. no point of keeping it for isert_wait_conn as there is no danger that isert_conn will be accidentally removed while it is running. 
(Drop unnecessary sleep_on_conn_wait_comp check in isert_cq_rx_comp_err - nab) Signed-off-by: Sagi Grimberg Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 31 ++++++++++--------------------- drivers/infiniband/ulp/isert/ib_isert.h | 1 - 2 files changed, 10 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 7676bcb7105a..ef73a38bcb6b 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -787,14 +787,10 @@ isert_disconnect_work(struct work_struct *work) isert_put_conn(isert_conn); return; } - if (!isert_conn->logout_posted) { - pr_debug("Calling rdma_disconnect for !logout_posted from" - " isert_disconnect_work\n"); - rdma_disconnect(isert_conn->conn_cm_id); - mutex_unlock(&isert_conn->conn_mutex); - iscsit_cause_connection_reinstatement(isert_conn->conn, 0); - goto wake_up; - } + + /* Send DREQ/DREP towards our initiator */ + rdma_disconnect(isert_conn->conn_cm_id); + mutex_unlock(&isert_conn->conn_mutex); wake_up: @@ -1822,11 +1818,8 @@ isert_do_control_comp(struct work_struct *work) break; case ISTATE_SEND_LOGOUTRSP: pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); - /* - * Call atomic_dec(&isert_conn->post_send_buf_count) - * from isert_wait_conn() - */ - isert_conn->logout_posted = true; + + atomic_dec(&isert_conn->post_send_buf_count); iscsit_logout_post_handler(cmd, cmd->conn); break; case ISTATE_SEND_TEXTRSP: @@ -2032,6 +2025,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn) isert_conn->state = ISER_CONN_DOWN; mutex_unlock(&isert_conn->conn_mutex); + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); + complete(&isert_conn->conn_wait_comp_err); } @@ -3211,15 +3206,9 @@ static void isert_wait_conn(struct iscsi_conn *conn) struct isert_conn *isert_conn = conn->context; pr_debug("isert_wait_conn: Starting \n"); - /* - * Decrement post_send_buf_count for special case when called - * from isert_do_control_comp() -> iscsit_logout_post_handler() - */ - mutex_lock(&isert_conn->conn_mutex); - if (isert_conn->logout_posted) - atomic_dec(&isert_conn->post_send_buf_count); - if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { + mutex_lock(&isert_conn->conn_mutex); + if (isert_conn->conn_cm_id) { pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); rdma_disconnect(isert_conn->conn_cm_id); } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index da6612e68000..a2e926452f76 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -116,7 +116,6 @@ struct isert_device; struct isert_conn { enum iser_conn_state state; - bool logout_posted; int post_recv_buf_count; atomic_t post_send_buf_count; u32 responder_resources; -- cgit v1.2.3 From 88c4015fda6d014392f76d3b1688347950d7a12d Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 19 May 2014 17:44:24 +0300 Subject: Target/iser: Improve cm events handling There are 4 RDMA_CM events that all basically mean that the user should teardown the IB connection: - DISCONNECTED - ADDR_CHANGE - DEVICE_REMOVAL - TIMEWAIT_EXIT Only in DISCONNECTED/ADDR_CHANGE it makes sense to call rdma_disconnect (send DREQ/DREP to our initiator). So we keep the same teardown handler for all of them but only indicate calling rdma_disconnect for the relevant events. 
This patch also removes redundant debug prints for each single event. v2 changes: - Call isert_disconnected_handler() for DEVICE_REMOVAL (Or + Sag) Signed-off-by: Sagi Grimberg Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 26 ++++++++++++++------------ drivers/infiniband/ulp/isert/ib_isert.h | 1 + 2 files changed, 15 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index ef73a38bcb6b..ef47ff66f8df 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -788,8 +788,10 @@ isert_disconnect_work(struct work_struct *work) return; } - /* Send DREQ/DREP towards our initiator */ - rdma_disconnect(isert_conn->conn_cm_id); + if (isert_conn->disconnect) { + /* Send DREQ/DREP towards our initiator */ + rdma_disconnect(isert_conn->conn_cm_id); + } mutex_unlock(&isert_conn->conn_mutex); @@ -799,10 +801,11 @@ wake_up: } static void -isert_disconnected_handler(struct rdma_cm_id *cma_id) +isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) { struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; + isert_conn->disconnect = disconnect; INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); schedule_work(&isert_conn->conn_logout_work); } @@ -811,29 +814,28 @@ static int isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { int ret = 0; + bool disconnect = false; pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n", event->event, event->status, cma_id->context, cma_id); switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: - pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n"); ret = isert_connect_request(cma_id, event); break; case RDMA_CM_EVENT_ESTABLISHED: - pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n"); isert_connected_handler(cma_id); break; - case RDMA_CM_EVENT_DISCONNECTED: - pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n"); - isert_disconnected_handler(cma_id); - break; - case RDMA_CM_EVENT_DEVICE_REMOVAL: - case RDMA_CM_EVENT_ADDR_CHANGE: + case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ + case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ + case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ + disconnect = true; + case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ + isert_disconnected_handler(cma_id, disconnect); break; case RDMA_CM_EVENT_CONNECT_ERROR: default: - pr_err("Unknown RDMA CMA event: %d\n", event->event); + pr_err("Unhandled RDMA CMA event: %d\n", event->event); break; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index a2e926452f76..04f51f7bf614 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -150,6 +150,7 @@ struct isert_conn { #define ISERT_COMP_BATCH_COUNT 8 int conn_comp_batch; struct llist_head conn_comp_llist; + bool disconnect; }; #define ISERT_MAX_CQ 64 -- cgit v1.2.3 From f5ebec9629cf78eeeea4b8258882a9f439ab2404 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 19 May 2014 17:44:25 +0300 Subject: Target/iser: Wait for proper cleanup before unloading disconnected_handler works are scheduled on system_wq. When attempting to unload, first make sure all works have cleaned up. 
Signed-off-by: Sagi Grimberg Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index ef47ff66f8df..c68795194e25 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -3287,6 +3287,7 @@ destroy_rx_wq: static void __exit isert_exit(void) { + flush_scheduled_work(); destroy_workqueue(isert_comp_wq); destroy_workqueue(isert_rx_wq); iscsit_unregister_transport(&iser_target_transport); -- cgit v1.2.3 From 5f80ff8eccba50832dcc640ac89add4c7fced963 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 20 May 2014 13:28:11 +0300 Subject: Target/iser: Gracefully reject T10-PI enabled connect request if not supported In case user chose to set T10-PI enable on the target while the IB device does not support it, gracefully reject the request. Reported-by: Slava Shwartsman Signed-off-by: Sagi Grimberg Cc: stable@vger.kernel.org # 3.15+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index c68795194e25..b622783ab198 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -663,8 +663,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; if (pi_support && !device->pi_capable) { - pr_err("Protection information requested but not supported\n"); - ret = -EINVAL; + pr_err("Protection information requested but not supported, " + "rejecting connect request\n"); + ret = rdma_reject(cma_id, NULL, 0); goto out_mr; } -- cgit v1.2.3 From 5a01d08217e39f3d36d3ca160361c7b019ff1598 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 22 Feb 2014 18:34:08 -0800 Subject: vhost/scsi: Move sanity check into vhost_scsi_map_iov_to_sgl Move the overflow check for sgl_count > TCM_VHOST_PREALLOC_SGLS into vhost_scsi_map_iov_to_sgl() so that it's based on the total number of SGLs for all IOVs, instead of single IOVs. Also, rename TCM_VHOST_PREALLOC_PAGES -> TCM_VHOST_PREALLOC_UPAGES to better describe pointers to user-space pages. Cc: Michael S. Tsirkin Cc: Paolo Bonzini Cc: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Acked-by: Michael S. 
Tsirkin Signed-off-by: Nicholas Bellinger --- drivers/vhost/scsi.c | 59 ++++++++++++++++++++++------------------------------ 1 file changed, 25 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index cf50ce93975b..ae434db2d384 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -57,7 +57,7 @@ #define TCM_VHOST_MAX_CDB_SIZE 32 #define TCM_VHOST_DEFAULT_TAGS 256 #define TCM_VHOST_PREALLOC_SGLS 2048 -#define TCM_VHOST_PREALLOC_PAGES 2048 +#define TCM_VHOST_PREALLOC_UPAGES 2048 struct vhost_scsi_inflight { /* Wait for the flush operation to finish */ @@ -767,35 +767,28 @@ vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd, struct scatterlist *sgl, unsigned int sgl_count, struct iovec *iov, - int write) + struct page **pages, + bool write) { unsigned int npages = 0, pages_nr, offset, nbytes; struct scatterlist *sg = sgl; void __user *ptr = iov->iov_base; size_t len = iov->iov_len; - struct page **pages; int ret, i; - if (sgl_count > TCM_VHOST_PREALLOC_SGLS) { - pr_err("vhost_scsi_map_to_sgl() psgl_count: %u greater than" - " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n", - sgl_count, TCM_VHOST_PREALLOC_SGLS); - return -ENOBUFS; - } - pages_nr = iov_num_pages(iov); - if (pages_nr > sgl_count) + if (pages_nr > sgl_count) { + pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" + " sgl_count: %u\n", pages_nr, sgl_count); return -ENOBUFS; - - if (pages_nr > TCM_VHOST_PREALLOC_PAGES) { + } + if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) { pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" - " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n", - pages_nr, TCM_VHOST_PREALLOC_PAGES); + " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n", + pages_nr, TCM_VHOST_PREALLOC_UPAGES); return -ENOBUFS; } - pages = tv_cmd->tvc_upages; - ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages); /* No pages were pinned */ if (ret < 0) @@ -825,33 +818,32 @@ out: static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, struct iovec *iov, - unsigned int niov, - int write) + int niov, + bool write) { - int ret; - unsigned int i; - u32 sgl_count; - struct scatterlist *sg; + struct scatterlist *sg = cmd->tvc_sgl; + unsigned int sgl_count = 0; + int ret, i; - /* - * Find out how long sglist needs to be - */ - sgl_count = 0; for (i = 0; i < niov; i++) sgl_count += iov_num_pages(&iov[i]); - /* TODO overflow checking */ + if (sgl_count > TCM_VHOST_PREALLOC_SGLS) { + pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than" + " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n", + sgl_count, TCM_VHOST_PREALLOC_SGLS); + return -ENOBUFS; + } - sg = cmd->tvc_sgl; pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count); sg_init_table(sg, sgl_count); - cmd->tvc_sgl_count = sgl_count; - pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count); + pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count); + for (i = 0; i < niov; i++) { ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i], - write); + cmd->tvc_upages, write); if (ret < 0) { for (i = 0; i < cmd->tvc_sgl_count; i++) put_page(sg_page(&cmd->tvc_sgl[i])); @@ -859,7 +851,6 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, cmd->tvc_sgl_count = 0; return ret; } - sg += ret; sgl_count -= ret; } @@ -1765,7 +1756,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, } tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * - TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL); + TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL); if (!tv_cmd->tvc_upages) { 
mutex_unlock(&tpg->tv_tpg_mutex); pr_err("Unable to allocate tv_cmd->tvc_upages\n"); -- cgit v1.2.3 From b1935f687bb93b207ef690f7debc0e9921fc484f Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 22 Feb 2014 18:08:24 -0800 Subject: vhost/scsi: Add preallocation of protection SGLs This patch updates tcm_vhost_make_nexus() to pre-allocate per descriptor tcm_vhost_cmd->tvc_prot_sgl[] used to expose protection SGLs from within virtio-scsi guest memory to vhost-scsi. Cc: Michael S. Tsirkin Cc: Paolo Bonzini Cc: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Acked-by: Michael S. Tsirkin Signed-off-by: Nicholas Bellinger --- drivers/vhost/scsi.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index ae434db2d384..30402e618078 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -58,6 +58,7 @@ #define TCM_VHOST_DEFAULT_TAGS 256 #define TCM_VHOST_PREALLOC_SGLS 2048 #define TCM_VHOST_PREALLOC_UPAGES 2048 +#define TCM_VHOST_PREALLOC_PROT_SGLS 512 struct vhost_scsi_inflight { /* Wait for the flush operation to finish */ @@ -83,6 +84,7 @@ struct tcm_vhost_cmd { u32 tvc_lun; /* Pointer to the SGL formatted memory from virtio-scsi */ struct scatterlist *tvc_sgl; + struct scatterlist *tvc_prot_sgl; struct page **tvc_upages; /* Pointer to response */ struct virtio_scsi_cmd_resp __user *tvc_resp; @@ -722,7 +724,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_cmd *cmd; struct tcm_vhost_nexus *tv_nexus; struct se_session *se_sess; - struct scatterlist *sg; + struct scatterlist *sg, *prot_sg; struct page **pages; int tag; @@ -741,10 +743,12 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; sg = cmd->tvc_sgl; + prot_sg = cmd->tvc_prot_sgl; pages = cmd->tvc_upages; memset(cmd, 0, sizeof(struct tcm_vhost_cmd)); cmd->tvc_sgl = sg; + cmd->tvc_prot_sgl = prot_sg; cmd->tvc_upages = pages; cmd->tvc_se_cmd.map_tag = tag; cmd->tvc_tag = v_req->tag; @@ -1703,6 +1707,7 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus, tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; kfree(tv_cmd->tvc_sgl); + kfree(tv_cmd->tvc_prot_sgl); kfree(tv_cmd->tvc_upages); } } @@ -1762,6 +1767,14 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, pr_err("Unable to allocate tv_cmd->tvc_upages\n"); goto out; } + + tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) * + TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL); + if (!tv_cmd->tvc_prot_sgl) { + mutex_unlock(&tpg->tv_tpg_mutex); + pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); + goto out; + } } /* * Since we are running in 'demo mode' this call with generate a -- cgit v1.2.3 From e31885dd901e80d5bd528c1cbedde07ebbf051b2 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 22 Feb 2014 18:34:43 -0800 Subject: vhost/scsi: Add T10 PI IOV -> SGL memory mapping logic This patch adds vhost_scsi_map_iov_to_prot() to perform the mapping of T10 data integrity memory between virtio iov + struct scatterlist using get_user_pages_fast() following existing code. As with vhost_scsi_map_iov_to_sgl(), this does sanity checks against the total prot_sgl_count vs. pre-allocated SGLs, and loops across protection iovs using vhost_scsi_map_to_sgl() to perform the actual memory mapping. Also update tcm_vhost_release_cmd() to release associated tvc_prot_sgl[] struct page. Cc: Michael S. Tsirkin Cc: Paolo Bonzini Cc: Martin K. 
Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: H. Peter Anvin Acked-by: Michael S. Tsirkin Signed-off-by: Nicholas Bellinger --- drivers/vhost/scsi.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 30402e618078..eabcf1875831 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -80,6 +80,7 @@ struct tcm_vhost_cmd { u64 tvc_tag; /* The number of scatterlists associated with this cmd */ u32 tvc_sgl_count; + u32 tvc_prot_sgl_count; /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */ u32 tvc_lun; /* Pointer to the SGL formatted memory from virtio-scsi */ @@ -458,12 +459,16 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, struct tcm_vhost_cmd, tvc_se_cmd); struct se_session *se_sess = se_cmd->se_sess; + int i; if (tv_cmd->tvc_sgl_count) { - u32 i; for (i = 0; i < tv_cmd->tvc_sgl_count; i++) put_page(sg_page(&tv_cmd->tvc_sgl[i])); } + if (tv_cmd->tvc_prot_sgl_count) { + for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++) + put_page(sg_page(&tv_cmd->tvc_prot_sgl[i])); + } tcm_vhost_put_inflight(tv_cmd->inflight); percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); @@ -861,6 +866,47 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, return 0; } +static int +vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, + struct iovec *iov, + int niov, + bool write) +{ + struct scatterlist *prot_sg = cmd->tvc_prot_sgl; + unsigned int prot_sgl_count = 0; + int ret, i; + + for (i = 0; i < niov; i++) + prot_sgl_count += iov_num_pages(&iov[i]); + + if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) { + pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than" + " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n", + prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS); + return -ENOBUFS; + } + + pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, + prot_sg, prot_sgl_count); + sg_init_table(prot_sg, prot_sgl_count); + cmd->tvc_prot_sgl_count = prot_sgl_count; + + for (i = 0; i < niov; i++) { + ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i], + cmd->tvc_upages, write); + if (ret < 0) { + for (i = 0; i < cmd->tvc_prot_sgl_count; i++) + put_page(sg_page(&cmd->tvc_prot_sgl[i])); + + cmd->tvc_prot_sgl_count = 0; + return ret; + } + prot_sg += ret; + prot_sgl_count -= ret; + } + return 0; +} + static void tcm_vhost_submission_work(struct work_struct *work) { struct tcm_vhost_cmd *cmd = -- cgit v1.2.3 From 95e7c4341b8e28dae5204378087c1e2a115abc82 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 22 Feb 2014 18:22:31 -0800 Subject: vhost/scsi: Enable T10 PI IOV -> SGL memory mapping This patch updates vhost_scsi_handle_vq() to check for the existance of virtio_scsi_cmd_req_pi comparing vq->iov[0].iov_len in order to calculate seperate data + protection SGLs from data_num. Also update tcm_vhost_submission_work() to pass the pre-allocated cmd->tvc_prot_sgl[] memory into target_submit_cmd_map_sgls(), and update vhost_scsi_get_tag() parameters to accept scsi_tag, lun, and task_attr. Cc: Michael S. Tsirkin Cc: Paolo Bonzini Cc: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: H. Peter Anvin Acked-by: Michael S. 
Tsirkin Signed-off-by: Nicholas Bellinger --- drivers/vhost/scsi.c | 183 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 124 insertions(+), 59 deletions(-) (limited to 'drivers') diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index eabcf1875831..667e72d46998 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -169,7 +169,8 @@ enum { }; enum { - VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) + VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | + (1ULL << VIRTIO_SCSI_F_T10_PI) }; #define VHOST_SCSI_MAX_TARGET 256 @@ -720,11 +721,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) } static struct tcm_vhost_cmd * -vhost_scsi_get_tag(struct vhost_virtqueue *vq, - struct tcm_vhost_tpg *tpg, - struct virtio_scsi_cmd_req *v_req, - u32 exp_data_len, - int data_direction) +vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, + unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, + u32 exp_data_len, int data_direction) { struct tcm_vhost_cmd *cmd; struct tcm_vhost_nexus *tv_nexus; @@ -756,13 +755,16 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, cmd->tvc_prot_sgl = prot_sg; cmd->tvc_upages = pages; cmd->tvc_se_cmd.map_tag = tag; - cmd->tvc_tag = v_req->tag; - cmd->tvc_task_attr = v_req->task_attr; + cmd->tvc_tag = scsi_tag; + cmd->tvc_lun = lun; + cmd->tvc_task_attr = task_attr; cmd->tvc_exp_data_len = exp_data_len; cmd->tvc_data_direction = data_direction; cmd->tvc_nexus = tv_nexus; cmd->inflight = tcm_vhost_get_inflight(vq); + memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE); + return cmd; } @@ -913,18 +915,17 @@ static void tcm_vhost_submission_work(struct work_struct *work) container_of(work, struct tcm_vhost_cmd, work); struct tcm_vhost_nexus *tv_nexus; struct se_cmd *se_cmd = &cmd->tvc_se_cmd; - struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL; - int rc, sg_no_bidi = 0; + struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; + int rc; + /* FIXME: BIDI operation */ if (cmd->tvc_sgl_count) { sg_ptr = cmd->tvc_sgl; -/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */ -#if 0 - if (se_cmd->se_cmd_flags & SCF_BIDI) { - sg_bidi_ptr = NULL; - sg_no_bidi = 0; - } -#endif + + if (cmd->tvc_prot_sgl_count) + sg_prot_ptr = cmd->tvc_prot_sgl; + else + se_cmd->prot_pto = true; } else { sg_ptr = NULL; } @@ -935,7 +936,7 @@ static void tcm_vhost_submission_work(struct work_struct *work) cmd->tvc_lun, cmd->tvc_exp_data_len, cmd->tvc_task_attr, cmd->tvc_data_direction, TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, - sg_bidi_ptr, sg_no_bidi, NULL, 0); + NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count); if (rc < 0) { transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); @@ -967,12 +968,18 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) { struct tcm_vhost_tpg **vs_tpg; struct virtio_scsi_cmd_req v_req; + struct virtio_scsi_cmd_req_pi v_req_pi; struct tcm_vhost_tpg *tpg; struct tcm_vhost_cmd *cmd; - u32 exp_data_len, data_first, data_num, data_direction; + u64 tag; + u32 exp_data_len, data_first, data_num, data_direction, prot_first; unsigned out, in, i; - int head, ret; - u8 target; + int head, ret, data_niov, prot_niov, prot_bytes; + size_t req_size; + u16 lun; + u8 *target, *lunp, task_attr; + bool hdr_pi; + void *req, *cdb; mutex_lock(&vq->mutex); /* @@ -1003,7 +1010,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) break; } -/* FIXME: BIDI operation */ + /* FIXME: BIDI operation */ 
if (out == 1 && in == 1) { data_direction = DMA_NONE; data_first = 0; @@ -1033,29 +1040,38 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) break; } - if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) { - vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu" - " bytes\n", vq->iov[0].iov_len); + if (vs->dev.acked_features & VIRTIO_SCSI_F_T10_PI) { + req = &v_req_pi; + lunp = &v_req_pi.lun[0]; + target = &v_req_pi.lun[1]; + req_size = sizeof(v_req_pi); + hdr_pi = true; + } else { + req = &v_req; + lunp = &v_req.lun[0]; + target = &v_req.lun[1]; + req_size = sizeof(v_req); + hdr_pi = false; + } + + if (unlikely(vq->iov[0].iov_len < req_size)) { + pr_err("Expecting virtio-scsi header: %zu, got %zu\n", + req_size, vq->iov[0].iov_len); break; } - pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p," - " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req)); - ret = __copy_from_user(&v_req, vq->iov[0].iov_base, - sizeof(v_req)); + ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size); if (unlikely(ret)) { vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); break; } /* virtio-scsi spec requires byte 0 of the lun to be 1 */ - if (unlikely(v_req.lun[0] != 1)) { + if (unlikely(*lunp != 1)) { vhost_scsi_send_bad_target(vs, vq, head, out); continue; } - /* Extract the tpgt */ - target = v_req.lun[1]; - tpg = ACCESS_ONCE(vs_tpg[target]); + tpg = ACCESS_ONCE(vs_tpg[*target]); /* Target does not exist, fail the request */ if (unlikely(!tpg)) { @@ -1063,17 +1079,78 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) continue; } + data_niov = data_num; + prot_niov = prot_first = prot_bytes = 0; + /* + * Determine if any protection information iovecs are preceeding + * the actual data payload, and adjust data_first + data_niov + * values accordingly for vhost_scsi_map_iov_to_sgl() below. + * + * Also extract virtio_scsi header bits for vhost_scsi_get_tag() + */ + if (hdr_pi) { + if (v_req_pi.pi_bytesout) { + if (data_direction != DMA_TO_DEVICE) { + vq_err(vq, "Received non zero do_pi_niov" + ", but wrong data_direction\n"); + goto err_cmd; + } + prot_bytes = v_req_pi.pi_bytesout; + } else if (v_req_pi.pi_bytesin) { + if (data_direction != DMA_FROM_DEVICE) { + vq_err(vq, "Received non zero di_pi_niov" + ", but wrong data_direction\n"); + goto err_cmd; + } + prot_bytes = v_req_pi.pi_bytesin; + } + if (prot_bytes) { + int tmp = 0; + + for (i = 0; i < data_num; i++) { + tmp += vq->iov[data_first + i].iov_len; + prot_niov++; + if (tmp >= prot_bytes) + break; + } + prot_first = data_first; + data_first += prot_niov; + data_niov = data_num - prot_niov; + } + tag = v_req_pi.tag; + task_attr = v_req_pi.task_attr; + cdb = &v_req_pi.cdb[0]; + lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF; + } else { + tag = v_req.tag; + task_attr = v_req.task_attr; + cdb = &v_req.cdb[0]; + lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; + } exp_data_len = 0; - for (i = 0; i < data_num; i++) + for (i = 0; i < data_niov; i++) exp_data_len += vq->iov[data_first + i].iov_len; + /* + * Check that the recieved CDB size does not exceeded our + * hardcoded max for vhost-scsi + * + * TODO what if cdb was too small for varlen cdb header? 
+ */ + if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) { + vq_err(vq, "Received SCSI CDB with command_size: %d that" + " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", + scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE); + goto err_cmd; + } - cmd = vhost_scsi_get_tag(vq, tpg, &v_req, + cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, exp_data_len, data_direction); if (IS_ERR(cmd)) { vq_err(vq, "vhost_scsi_get_tag failed %ld\n", PTR_ERR(cmd)); goto err_cmd; } + pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" ": %d\n", cmd, exp_data_len, data_direction); @@ -1081,40 +1158,28 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) cmd->tvc_vq = vq; cmd->tvc_resp = vq->iov[out].iov_base; - /* - * Copy in the recieved CDB descriptor into cmd->tvc_cdb - * that will be used by tcm_vhost_new_cmd_map() and down into - * target_setup_cmd_from_cdb() - */ - memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE); - /* - * Check that the recieved CDB size does not exceeded our - * hardcoded max for tcm_vhost - */ - /* TODO what if cdb was too small for varlen cdb header? */ - if (unlikely(scsi_command_size(cmd->tvc_cdb) > - TCM_VHOST_MAX_CDB_SIZE)) { - vq_err(vq, "Received SCSI CDB with command_size: %d that" - " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", - scsi_command_size(cmd->tvc_cdb), - TCM_VHOST_MAX_CDB_SIZE); - goto err_free; - } - cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; - pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", cmd->tvc_cdb[0], cmd->tvc_lun); + if (prot_niov) { + ret = vhost_scsi_map_iov_to_prot(cmd, + &vq->iov[prot_first], prot_niov, + data_direction == DMA_FROM_DEVICE); + if (unlikely(ret)) { + vq_err(vq, "Failed to map iov to" + " prot_sgl\n"); + goto err_free; + } + } if (data_direction != DMA_NONE) { ret = vhost_scsi_map_iov_to_sgl(cmd, - &vq->iov[data_first], data_num, + &vq->iov[data_first], data_niov, data_direction == DMA_FROM_DEVICE); if (unlikely(ret)) { vq_err(vq, "Failed to map iov to sgl\n"); goto err_free; } } - /* * Save the descriptor from vhost_get_vq_desc() to be used to * complete the virtio-scsi request in TCM callback context via @@ -1788,7 +1853,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, tv_nexus->tvn_se_sess = transport_init_session_tags( TCM_VHOST_DEFAULT_TAGS, sizeof(struct tcm_vhost_cmd), - TARGET_PROT_NORMAL); + TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); if (IS_ERR(tv_nexus->tvn_se_sess)) { mutex_unlock(&tpg->tv_tpg_mutex); kfree(tv_nexus); -- cgit v1.2.3 From e6dc783a38ec0f2a5a91edda3f76195dffb17a16 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 22 Feb 2014 18:23:33 -0800 Subject: virtio-scsi: Enable DIF/DIX modes in SCSI host LLD This patch updates virtscsi_probe() to setup necessary Scsi_Host level protection resources. (currently hardcoded to 1) It changes virtscsi_add_cmd() to attach outgoing / incoming protection SGLs preceeding the data payload, and is using the new virtio_scsi_cmd_req_pi->pi_bytes[out,in] field to signal to signal to vhost/scsi bytes to expect for protection data. (Add missing #include for blk_integrity - sfr + nab) Acked-by: Paolo Bonzini Cc: Michael S. Tsirkin Cc: Martin K. Petersen Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: H. 
Peter Anvin Signed-off-by: Nicholas Bellinger --- drivers/scsi/Kconfig | 1 + drivers/scsi/virtio_scsi.c | 87 ++++++++++++++++++++++++++++++++++++---------- 2 files changed, 70 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 02832d64d918..baca5897039f 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1773,6 +1773,7 @@ config SCSI_BFA_FC config SCSI_VIRTIO tristate "virtio-scsi support" depends on VIRTIO + select BLK_DEV_INTEGRITY help This is the virtual HBA driver for virtio. If the kernel will be used in a virtual machine, say Y or M. diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 16bfd50cd3fe..1c326b63ca55 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -37,6 +38,7 @@ struct virtio_scsi_cmd { struct completion *comp; union { struct virtio_scsi_cmd_req cmd; + struct virtio_scsi_cmd_req_pi cmd_pi; struct virtio_scsi_ctrl_tmf_req tmf; struct virtio_scsi_ctrl_an_req an; } req; @@ -440,7 +442,7 @@ static int virtscsi_add_cmd(struct virtqueue *vq, size_t req_size, size_t resp_size, gfp_t gfp) { struct scsi_cmnd *sc = cmd->sc; - struct scatterlist *sgs[4], req, resp; + struct scatterlist *sgs[6], req, resp; struct sg_table *out, *in; unsigned out_num = 0, in_num = 0; @@ -458,16 +460,24 @@ static int virtscsi_add_cmd(struct virtqueue *vq, sgs[out_num++] = &req; /* Data-out buffer. */ - if (out) + if (out) { + /* Place WRITE protection SGLs before Data OUT payload */ + if (scsi_prot_sg_count(sc)) + sgs[out_num++] = scsi_prot_sglist(sc); sgs[out_num++] = out->sgl; + } /* Response header. */ sg_init_one(&resp, &cmd->resp, resp_size); sgs[out_num + in_num++] = &resp; /* Data-in buffer */ - if (in) + if (in) { + /* Place READ protection SGLs before Data IN payload */ + if (scsi_prot_sg_count(sc)) + sgs[out_num + in_num++] = scsi_prot_sglist(sc); sgs[out_num + in_num++] = in->sgl; + } return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp); } @@ -492,12 +502,44 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, return err; } +static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd, + struct scsi_cmnd *sc) +{ + cmd->lun[0] = 1; + cmd->lun[1] = sc->device->id; + cmd->lun[2] = (sc->device->lun >> 8) | 0x40; + cmd->lun[3] = sc->device->lun & 0xff; + cmd->tag = (unsigned long)sc; + cmd->task_attr = VIRTIO_SCSI_S_SIMPLE; + cmd->prio = 0; + cmd->crn = 0; +} + +static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi, + struct scsi_cmnd *sc) +{ + struct request *rq = sc->request; + struct blk_integrity *bi; + + virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc); + + if (!rq || !scsi_prot_sg_count(sc)) + return; + + bi = blk_get_integrity(rq->rq_disk); + + if (sc->sc_data_direction == DMA_TO_DEVICE) + cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size; + else if (sc->sc_data_direction == DMA_FROM_DEVICE) + cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size; +} + static int virtscsi_queuecommand(struct virtio_scsi *vscsi, struct virtio_scsi_vq *req_vq, struct scsi_cmnd *sc) { struct virtio_scsi_cmd *cmd; - int ret; + int ret, req_size; struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); @@ -515,22 +557,20 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, memset(cmd, 0, sizeof(*cmd)); cmd->sc = sc; - cmd->req.cmd = (struct virtio_scsi_cmd_req){ - .lun[0] = 1, - 
.lun[1] = sc->device->id, - .lun[2] = (sc->device->lun >> 8) | 0x40, - .lun[3] = sc->device->lun & 0xff, - .tag = (unsigned long)sc, - .task_attr = VIRTIO_SCSI_S_SIMPLE, - .prio = 0, - .crn = 0, - }; BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); - memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); - if (virtscsi_kick_cmd(req_vq, cmd, - sizeof cmd->req.cmd, sizeof cmd->resp.cmd, + if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { + virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc); + memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); + req_size = sizeof(cmd->req.cmd_pi); + } else { + virtio_scsi_init_hdr(&cmd->req.cmd, sc); + memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); + req_size = sizeof(cmd->req.cmd); + } + + if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), GFP_ATOMIC) == 0) ret = 0; else @@ -871,7 +911,7 @@ static int virtscsi_probe(struct virtio_device *vdev) { struct Scsi_Host *shost; struct virtio_scsi *vscsi; - int err; + int err, host_prot; u32 sg_elems, num_targets; u32 cmd_per_lun; u32 num_queues; @@ -921,6 +961,16 @@ static int virtscsi_probe(struct virtio_device *vdev) shost->max_id = num_targets; shost->max_channel = 0; shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | + SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + + scsi_host_set_prot(shost, host_prot); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + } + err = scsi_add_host(shost, &vdev->dev); if (err) goto scsi_add_host_failed; @@ -990,6 +1040,7 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_SCSI_F_HOTPLUG, VIRTIO_SCSI_F_CHANGE, + VIRTIO_SCSI_F_T10_PI, }; static struct virtio_driver virtio_scsi_driver = { -- cgit v1.2.3 From 51a07f84649d2be206c4c2ad9a612956db0c2f8c Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 23 May 2014 02:00:56 -0700 Subject: qla2xxx: Convert to percpu_ida session tag pre-allocation This patch converts qla2xxx target code to use generic percpu_ida tag allocation provided by target-core, thus removing the original kmem_cache_zalloc() for each struct qla_tgt_cmd descriptor in the incoming ATIO packet fast-path. This includes the conversion of qlt_handle_cmd_for_atio() to perform qla_tgt_sess lookup before dispatching a command descriptor into qla_tgt_wq process context, along with handling the case where no active session exists, and subsequently kicking off a seperate process context for qlt_create_sess_from_atio() to create a new one. It also includes moving tag allocation into generic code within qlt_get_tag(), so that the same logic can be shared between qlt_handle_cmd_for_atio() + qlt_create_sess_from_atio() contexts. Also, __qlt_do_work() has been made generic between both normal process context in qlt_do_work() + qlt_create_sess_from_atio(). Next, update qlt_free_cmd() to release the percpu-ida tags, and drop the now-unused global qla_tgt_cmd_cachep. Finally in tcm_qla2xxx code, tcm_qla2xxx_check_initiator_node_acl() has been updated to use transport_init_session_tags() along with a hardcoded TCM_QLA2XXX_DEFAULT_TAGS=2088 as the number of qla_tgt_cmd descriptors to pre-allocate per qla_tgt_sess instance. 
(Use ha->fw_xcb_count if available to calculate num_tags, and also factor in extra pad tags - Quinn) Cc: Saurav Kashyap Cc: Quinn Tran Cc: Giridhar Malavali Cc: Chad Dupuis Cc: Roland Dreier Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_target.c | 195 +++++++++++++++++++++++++------------ drivers/scsi/qla2xxx/qla_target.h | 6 ++ drivers/scsi/qla2xxx/tcm_qla2xxx.c | 6 +- drivers/scsi/qla2xxx/tcm_qla2xxx.h | 5 + 4 files changed, 147 insertions(+), 65 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0cb73074c199..bd9c725c08e1 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -104,7 +104,6 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, /* * Global Variables */ -static struct kmem_cache *qla_tgt_cmd_cachep; static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; static mempool_t *qla_tgt_mgmt_cmd_mempool; static struct workqueue_struct *qla_tgt_wq; @@ -2165,11 +2164,18 @@ done: void qlt_free_cmd(struct qla_tgt_cmd *cmd) { + struct qla_tgt_sess *sess = cmd->sess; + BUG_ON(cmd->sg_mapped); if (unlikely(cmd->free_sg)) kfree(cmd->sg); - kmem_cache_free(qla_tgt_cmd_cachep, cmd); + + if (!sess || !sess->se_sess) { + WARN_ON(1); + return; + } + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); } EXPORT_SYMBOL(qlt_free_cmd); @@ -2489,13 +2495,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, /* * Process context for I/O path into tcm_qla2xxx code */ -static void qlt_do_work(struct work_struct *work) +static void __qlt_do_work(struct qla_tgt_cmd *cmd) { - struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); scsi_qla_host_t *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess = NULL; + struct qla_tgt_sess *sess = cmd->sess; struct atio_from_isp *atio = &cmd->atio; unsigned char *cdb; unsigned long flags; @@ -2505,41 +2510,6 @@ static void qlt_do_work(struct work_struct *work) if (tgt->tgt_stop) goto out_term; - spin_lock_irqsave(&ha->hardware_lock, flags); - sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, - atio->u.isp24.fcp_hdr.s_id); - /* Do kref_get() before dropping qla_hw_data->hardware_lock. */ - if (sess) - kref_get(&sess->se_sess->sess_kref); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - if (unlikely(!sess)) { - uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, - "qla_target(%d): Unable to find wwn login" - " (s_id %x:%x:%x), trying to create it manually\n", - vha->vp_idx, s_id[0], s_id[1], s_id[2]); - - if (atio->u.raw.entry_count > 1) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, - "Dropping multy entry cmd %p\n", cmd); - goto out_term; - } - - mutex_lock(&vha->vha_tgt.tgt_mutex); - sess = qlt_make_local_sess(vha, s_id); - /* sess has an extra creation ref. 
*/ - mutex_unlock(&vha->vha_tgt.tgt_mutex); - - if (!sess) - goto out_term; - } - - cmd->sess = sess; - cmd->loop_id = sess->loop_id; - cmd->conf_compl_supported = sess->conf_compl_supported; - cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cmd->tag = atio->u.isp24.exchange_addr; cmd->unpacked_lun = scsilun_to_int( @@ -2566,8 +2536,8 @@ static void qlt_do_work(struct work_struct *work) "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", cmd, cmd->unpacked_lun, cmd->tag); - ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, - fcp_task_attr, data_dir, bidi); + ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, + fcp_task_attr, data_dir, bidi); if (ret != 0) goto out_term; /* @@ -2586,17 +2556,114 @@ out_term: */ spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); - kmem_cache_free(qla_tgt_cmd_cachep, cmd); - if (sess) + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static void qlt_do_work(struct work_struct *work) +{ + struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + + __qlt_do_work(cmd); +} + +static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, + struct qla_tgt_sess *sess, + struct atio_from_isp *atio) +{ + struct se_session *se_sess = sess->se_sess; + struct qla_tgt_cmd *cmd; + int tag; + + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); + if (tag < 0) + return NULL; + + cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; + memset(cmd, 0, sizeof(struct qla_tgt_cmd)); + + memcpy(&cmd->atio, atio, sizeof(*atio)); + cmd->state = QLA_TGT_STATE_NEW; + cmd->tgt = vha->vha_tgt.qla_tgt; + cmd->vha = vha; + cmd->se_cmd.map_tag = tag; + cmd->sess = sess; + cmd->loop_id = sess->loop_id; + cmd->conf_compl_supported = sess->conf_compl_supported; + + return cmd; +} + +static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *, + uint16_t); + +static void qlt_create_sess_from_atio(struct work_struct *work) +{ + struct qla_tgt_sess_op *op = container_of(work, + struct qla_tgt_sess_op, work); + scsi_qla_host_t *vha = op->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess; + struct qla_tgt_cmd *cmd; + unsigned long flags; + uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, + "qla_target(%d): Unable to find wwn login" + " (s_id %x:%x:%x), trying to create it manually\n", + vha->vp_idx, s_id[0], s_id[1], s_id[2]); + + if (op->atio.u.raw.entry_count > 1) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, + "Dropping multy entry atio %p\n", &op->atio); + goto out_term; + } + + mutex_lock(&vha->vha_tgt.tgt_mutex); + sess = qlt_make_local_sess(vha, s_id); + /* sess has an extra creation ref. */ + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + if (!sess) + goto out_term; + /* + * Now obtain a pre-allocated session tag using the original op->atio + * packet header, and dispatch into __qlt_do_work() using the existing + * process context. 
+ */ + cmd = qlt_get_tag(vha, sess, &op->atio); + if (!cmd) { + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + kfree(op); + return; + } + /* + * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release + * the extra reference taken above by qlt_make_local_sess() + */ + __qlt_do_work(cmd); + kfree(op); + return; + +out_term: + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_term_exchange(vha, NULL, &op->atio, 1); spin_unlock_irqrestore(&ha->hardware_lock, flags); + kfree(op); + } /* ha->hardware_lock supposed to be held on entry */ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, struct atio_from_isp *atio) { + struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_tgt_sess *sess; struct qla_tgt_cmd *cmd; if (unlikely(tgt->tgt_stop)) { @@ -2605,18 +2672,31 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, return -EFAULT; } - cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC); + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); + if (unlikely(!sess)) { + struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op), + GFP_ATOMIC); + if (!op) + return -ENOMEM; + + memcpy(&op->atio, atio, sizeof(*atio)); + INIT_WORK(&op->work, qlt_create_sess_from_atio); + queue_work(qla_tgt_wq, &op->work); + return 0; + } + /* + * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. + */ + kref_get(&sess->se_sess->sess_kref); + + cmd = qlt_get_tag(vha, sess, atio); if (!cmd) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); + ha->tgt.tgt_ops->put_sess(sess); return -ENOMEM; } - memcpy(&cmd->atio, atio, sizeof(*atio)); - cmd->state = QLA_TGT_STATE_NEW; - cmd->tgt = vha->vha_tgt.qla_tgt; - cmd->vha = vha; - INIT_WORK(&cmd->work, qlt_do_work); queue_work(qla_tgt_wq, &cmd->work); return 0; @@ -4911,23 +4991,13 @@ int __init qlt_init(void) if (!QLA_TGT_MODE_ENABLED()) return 0; - qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep", - sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0, - NULL); - if (!qla_tgt_cmd_cachep) { - ql_log(ql_log_fatal, NULL, 0xe06c, - "kmem_cache_create for qla_tgt_cmd_cachep failed\n"); - return -ENOMEM; - } - qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL); if (!qla_tgt_mgmt_cmd_cachep) { ql_log(ql_log_fatal, NULL, 0xe06d, "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); - ret = -ENOMEM; - goto out; + return -ENOMEM; } qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, @@ -4955,8 +5025,6 @@ out_cmd_mempool: mempool_destroy(qla_tgt_mgmt_cmd_mempool); out_mgmt_cmd_cachep: kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); -out: - kmem_cache_destroy(qla_tgt_cmd_cachep); return ret; } @@ -4968,5 +5036,4 @@ void qlt_exit(void) destroy_workqueue(qla_tgt_wq); mempool_destroy(qla_tgt_mgmt_cmd_mempool); kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); - kmem_cache_destroy(qla_tgt_cmd_cachep); } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index ce33d8c26406..63283c58fb33 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -805,6 +805,12 @@ struct qla_tgt { struct list_head tgt_list_entry; }; +struct qla_tgt_sess_op { + struct scsi_qla_host *vha; + struct atio_from_isp atio; 
+ struct work_struct work; +}; + /* * Equivilant to IT Nexus (Initiator-Target) */ diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 68fb66fdb757..7b3a97026934 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1465,6 +1465,8 @@ static int tcm_qla2xxx_check_initiator_node_acl( struct qla_tgt_sess *sess = qla_tgt_sess; unsigned char port_name[36]; unsigned long flags; + int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count : + TCM_QLA2XXX_DEFAULT_TAGS; lport = vha->vha_tgt.target_lport_ptr; if (!lport) { @@ -1482,7 +1484,9 @@ static int tcm_qla2xxx_check_initiator_node_acl( } se_tpg = &tpg->se_tpg; - se_sess = transport_init_session(TARGET_PROT_NORMAL); + se_sess = transport_init_session_tags(num_tags, + sizeof(struct qla_tgt_cmd), + TARGET_PROT_NORMAL); if (IS_ERR(se_sess)) { pr_err("Unable to initialize struct se_session\n"); return PTR_ERR(se_sess); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 33aaac8c7d59..10c002145648 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -4,6 +4,11 @@ #define TCM_QLA2XXX_VERSION "v0.1" /* length of ASCII WWPNs including pad */ #define TCM_QLA2XXX_NAMELEN 32 +/* + * Number of pre-allocated per-session tags, based upon the worst-case + * per port number of iocbs + */ +#define TCM_QLA2XXX_DEFAULT_TAGS 2088 #include "qla_target.h" -- cgit v1.2.3 From 91f0abfda1709131edbd62ad3515da2b8e6c0261 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 28 May 2014 12:07:40 -0700 Subject: iscsi-target: Reject zero-length payloads during SecurityNegotiation This patch changes iscsi_target_handle_csg_zero() to explicitly reject login requests in SecurityNegotiation with a zero-length payload, following the language in RFC-3720 Section 8.2: Whenever an iSCSI target gets a response whose keys, or their values, are not according to the step definition, it MUST answer with a Login reject with the "Initiator Error" or "Missing Parameter" status. Previously when a zero-length login request in CSG=0 was received, the target would send a login response with CSG=0 + T_BIT=0 asking the initiator to complete authentication, and not fail the login until MAX_LOGIN_PDUS was reached. This change will now immediately fail the login attempt with ISCSI_STATUS_CLS_INITIATOR_ERR status. 
Reported-by: Tejas Vaykole Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_nego.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 75b685960e80..fcffd0412d83 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -773,6 +773,12 @@ static int iscsi_target_handle_csg_zero( } goto do_auth; + } else if (!payload_length) { + pr_err("Initiator sent zero length security payload," + " login failed\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_AUTH_FAILED); + return -1; } if (login->first_request) -- cgit v1.2.3 From cee6029ecfc2494e7869da20e3ff5aa7f5c4368e Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 29 May 2014 13:32:29 -0700 Subject: iscsi-target: Put length of failed allocation in error message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the message "Unable to allocate…" pops up, it's useful to know whether the problem is that the system is genuinely out of memory, or that some bug has led to a crazy allocation length. In particular this helped debug a corruption of login headers in iscsi_login_non_zero_tsih_s1(). Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_parameters.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 4d2e23fc76fd..8ca82ab43054 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -1605,7 +1605,7 @@ int iscsi_decode_text_input( tmpbuf = kzalloc(length + 1, GFP_KERNEL); if (!tmpbuf) { - pr_err("Unable to allocate memory for tmpbuf.\n"); + pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); return -1; } -- cgit v1.2.3 From 3160723c49605965628c3ee7699e5e956c4f8f51 Mon Sep 17 00:00:00 2001 From: Tejas Vaykole Date: Fri, 30 May 2014 11:13:47 +0530 Subject: iscsi-target: Fix CHAP_A parameter list handling The target fails to handle a list of values in the CHAP_A key-value pair from the initiator; it always expects CHAP_A=5. In cases where the initiator sends a list, for example CHAP_A=6,5, the target fails the security negotiation, which is incorrect. This patch handles the case (RFC 3720, section 11.1.4) 
where in the initiator may send list of CHAP_A values and target replies with appropriate CHAP_A value in response (Drop whitespaces + rename to chap_check_algorithm + save original pointer + add explicit check for CHAP_A key - nab) Signed-off-by: Tejas Vaykole Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_auth.c | 64 +++++++++++++++++++++++++------- drivers/target/iscsi/iscsi_target_auth.h | 1 + 2 files changed, 52 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index de77d9aa22c6..155f33848aa6 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -71,6 +71,40 @@ static void chap_gen_challenge( challenge_asciihex); } +static int chap_check_algorithm(const char *a_str) +{ + char *tmp, *orig, *token; + + tmp = kstrdup(a_str, GFP_KERNEL); + if (!tmp) { + pr_err("Memory allocation failed for CHAP_A temporary buffer\n"); + return CHAP_DIGEST_UNKNOWN; + } + orig = tmp; + + token = strsep(&tmp, "="); + if (!token) + goto out; + + if (strcmp(token, "CHAP_A")) { + pr_err("Unable to locate CHAP_A key\n"); + goto out; + } + while (token) { + token = strsep(&tmp, ","); + if (!token) + goto out; + + if (!strncmp(token, "5", 1)) { + pr_debug("Selected MD5 Algorithm\n"); + kfree(orig); + return CHAP_DIGEST_MD5; + } + } +out: + kfree(orig); + return CHAP_DIGEST_UNKNOWN; +} static struct iscsi_chap *chap_server_open( struct iscsi_conn *conn, @@ -79,6 +113,7 @@ static struct iscsi_chap *chap_server_open( char *aic_str, unsigned int *aic_len) { + int ret; struct iscsi_chap *chap; if (!(auth->naf_flags & NAF_USERID_SET) || @@ -93,21 +128,24 @@ static struct iscsi_chap *chap_server_open( return NULL; chap = conn->auth_protocol; - /* - * We only support MD5 MDA presently. - */ - if (strncmp(a_str, "CHAP_A=5", 8)) { - pr_err("CHAP_A is not MD5.\n"); + ret = chap_check_algorithm(a_str); + switch (ret) { + case CHAP_DIGEST_MD5: + pr_debug("[server] Got CHAP_A=5\n"); + /* + * Send back CHAP_A set to MD5. + */ + *aic_len = sprintf(aic_str, "CHAP_A=5"); + *aic_len += 1; + chap->digest_type = CHAP_DIGEST_MD5; + pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type); + break; + case CHAP_DIGEST_UNKNOWN: + default: + pr_err("Unsupported CHAP_A value\n"); return NULL; } - pr_debug("[server] Got CHAP_A=5\n"); - /* - * Send back CHAP_A set to MD5. - */ - *aic_len = sprintf(aic_str, "CHAP_A=5"); - *aic_len += 1; - chap->digest_type = CHAP_DIGEST_MD5; - pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type); + /* * Set Identifier. */ diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h index 2f463c09626d..d22f7b96a06c 100644 --- a/drivers/target/iscsi/iscsi_target_auth.h +++ b/drivers/target/iscsi/iscsi_target_auth.h @@ -1,6 +1,7 @@ #ifndef _ISCSI_CHAP_H_ #define _ISCSI_CHAP_H_ +#define CHAP_DIGEST_UNKNOWN 0 #define CHAP_DIGEST_MD5 5 #define CHAP_DIGEST_SHA 6 -- cgit v1.2.3 From 5256ffdbdc1cb7e5ea69727698079c6f31b59a4f Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 3 Jun 2014 18:28:25 -0700 Subject: iscsi-target: Remove no-op from iscsit_tpg_del_portal_group This patch removes a no-op iscsit_clear_tpg_np_login_threads() call in iscsit_tpg_del_portal_group(), which is unnecessary because iscsit_tpg_del_portal_group() can only ever be removed from configfs once all of the child network portals have been released. 
Also, go ahead and make iscsit_clear_tpg_np_login_threads() static. Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_tpg.c | 4 +--- drivers/target/iscsi/iscsi_target_tpg.h | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index ca1811858afd..71126761af5f 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -188,7 +188,7 @@ static void iscsit_clear_tpg_np_login_thread( iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); } -void iscsit_clear_tpg_np_login_threads( +static void iscsit_clear_tpg_np_login_threads( struct iscsi_portal_group *tpg, bool shutdown) { @@ -275,8 +275,6 @@ int iscsit_tpg_del_portal_group( tpg->tpg_state = TPG_STATE_INACTIVE; spin_unlock(&tpg->tpg_state_lock); - iscsit_clear_tpg_np_login_threads(tpg, true); - if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { pr_err("Unable to delete iSCSI Target Portal Group:" " %hu while active sessions exist, and force=0\n", diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index 0a182f2aa8a2..e7265337bc43 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -8,7 +8,6 @@ extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *, struct iscsi_np *, struct iscsi_tpg_np **); extern int iscsit_get_tpg(struct iscsi_portal_group *); extern void iscsit_put_tpg(struct iscsi_portal_group *); -extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *, bool); extern void iscsit_tpg_dump_params(struct iscsi_portal_group *); extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *); extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *, -- cgit v1.2.3 From 1d2b60a5545942b1376cb48c1d55843d71e3a08f Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 5 Jun 2014 18:08:57 -0700 Subject: iscsi-target: Reject mutual authentication with reflected CHAP_C This patch adds an explicit check in chap_server_compute_md5() to ensure the CHAP_C value received from the initiator during mutual authentication does not match the original CHAP_C provided by the target. This is in line with RFC-3720, section 8.2.1: Originators MUST NOT reuse the CHAP challenge sent by the Responder for the other direction of a bidirectional authentication. Responders MUST check for this condition and close the iSCSI TCP connection if it occurs. Reported-by: Tejas Vaykole Cc: stable@vger.kernel.org # 3.1+ Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_auth.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 155f33848aa6..19b842c3e0b3 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -351,6 +351,16 @@ static int chap_server_compute_md5( pr_err("Unable to convert incoming challenge\n"); goto out; } + /* + * During mutual authentication, the CHAP_C generated by the + * initiator must not match the original CHAP_C generated by + * the target. + */ + if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) { + pr_err("initiator CHAP_C matches target CHAP_C, failing" + " login attempt\n"); + goto out; + } /* * Generate CHAP_N and CHAP_R for mutual authentication.
*/ -- cgit v1.2.3 From b3e5fe1688b998ba5287a68667ef7cc568739e44 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 5 Jun 2014 16:27:53 -0700 Subject: tcm_fc: Generate TASK_SET_FULL status for DataIN failures This patch changes ft_queue_data_in() to set SAM_STAT_TASK_SET_FULL status upon a lport->tt.seq_send() failure, where it will now stop sending subsequent DataIN, and immediately attempt to send the response with exception status. Sending a response with SAM_STAT_TASK_SET_FULL status is useful in order to signal the initiator that it should try to reduce its current queue_depth, to lower the number of outstanding I/Os on the wire. Also, add a check to skip sending DataIN if TASK_SET_FULL status has already been set due to a response lport->tt.seq_send() failure that has asked target-core to requeue a response. Reported-by: Vasu Dev Reviewed-by: Vasu Dev Cc: Jun Wu Signed-off-by: Nicholas Bellinger --- drivers/target/tcm_fc/tfc_io.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index e415af32115a..97b486c3dda1 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -82,6 +82,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd) if (cmd->aborted) return 0; + + if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL) + goto queue_status; + ep = fc_seq_exch(cmd->seq); lport = ep->lp; cmd->seq = lport->tt.seq_start_next(cmd->seq); @@ -178,14 +182,23 @@ int ft_queue_data_in(struct se_cmd *se_cmd) FC_TYPE_FCP, f_ctl, fh_off); error = lport->tt.seq_send(lport, seq, fp); if (error) { - /* XXX For now, initiator will retry */ - pr_err_ratelimited("%s: Failed to send frame %p, " + pr_info_ratelimited("%s: Failed to send frame %p, " "xid <0x%x>, remaining %zu, " "lso_max <0x%x>\n", __func__, fp, ep->xid, remaining, lport->lso_max); + /* + * Go ahead and set TASK_SET_FULL status ignoring the + * rest of the DataIN, and immediately attempt to + * send the response via ft_queue_status() in order + * to notify the initiator that it should reduce it's + * per LUN queue_depth. + */ + se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + break; } } +queue_status: return ft_queue_status(se_cmd); } -- cgit v1.2.3 From 6dbe7f4e97d55eefcb471c41c16b62fca5f10c68 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 5 Jun 2014 16:37:38 -0700 Subject: tcm_fc: Generate TASK_SET_FULL status for response failures This patch changes ft_queue_status() to set SAM_STAT_TASK_SET_FULL status upon a lport->tt.seq_send() failure, and to return -ENOMEM so that target-core attempts to requeue the response. It also does the same for fc_frame_alloc() failures, in order to signal the initiator that it should try to reduce its current queue_depth, to lower the number of outstanding I/Os on the wire.
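For reference, the common pattern applied by these two tcm_fc patches can be reduced to the following sketch. This is a hypothetical helper written only for illustration: se_cmd->scsi_status and SAM_STAT_TASK_SET_FULL are taken from the hunks, while the helper name and its placement are assumptions, and the usual target-core and SCSI headers are assumed to be included.

	static int example_mark_queue_full(struct se_cmd *se_cmd, int send_error)
	{
		if (!send_error)
			return 0;
		/* Ask the initiator to back off its per-LUN queue_depth */
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		/* A negative return asks target-core to requeue the response later */
		return -ENOMEM;
	}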
Reported-by: Vasu Dev Reviewed-by: Vasu Dev Cc: Jun Wu Signed-off-by: Nicholas Bellinger --- drivers/target/tcm_fc/tfc_cmd.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index f5fd515b2bee..be0c0d08c56a 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -128,6 +128,7 @@ int ft_queue_status(struct se_cmd *se_cmd) struct fc_lport *lport; struct fc_exch *ep; size_t len; + int rc; if (cmd->aborted) return 0; @@ -137,9 +138,10 @@ int ft_queue_status(struct se_cmd *se_cmd) len = sizeof(*fcp) + se_cmd->scsi_sense_length; fp = fc_frame_alloc(lport, len); if (!fp) { - /* XXX shouldn't just drop it - requeue and retry? */ - return 0; + se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + return -ENOMEM; } + fcp = fc_frame_payload_get(fp, len); memset(fcp, 0, len); fcp->resp.fr_status = se_cmd->scsi_status; @@ -170,7 +172,18 @@ int ft_queue_status(struct se_cmd *se_cmd) fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP, FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0); - lport->tt.seq_send(lport, cmd->seq, fp); + rc = lport->tt.seq_send(lport, cmd->seq, fp); + if (rc) { + pr_info_ratelimited("%s: Failed to send response frame %p, " + "xid <0x%x>\n", __func__, fp, ep->xid); + /* + * Generate a TASK_SET_FULL status to notify the initiator + * to reduce it's queue_depth after the se_cmd response has + * been re-queued by target-core. + */ + se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + return -ENOMEM; + } lport->tt.exch_done(cmd->seq); return 0; } -- cgit v1.2.3 From 9aff64e13f3678015c4a08a4fbf320b3a65b5cf1 Mon Sep 17 00:00:00 2001 From: Christophe Vu-Brugier Date: Fri, 6 Jun 2014 17:15:15 +0200 Subject: target/spc: Simplify INQUIRY EVPD=0x80 Signed-off-by: Christophe Vu-Brugier Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_spc.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 8653666612a8..17b5b7e099fa 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -129,15 +129,10 @@ static sense_reason_t spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; - u16 len = 0; + u16 len; if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { - u32 unit_serial_len; - - unit_serial_len = strlen(dev->t10_wwn.unit_serial); - unit_serial_len++; /* For NULL Terminator */ - - len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial); + len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial); len++; /* Extra Byte for NULL Terminator */ buf[3] = len; } -- cgit v1.2.3 From 0bcc297e2b45c12baf735e1dc1f163e71ea55e16 Mon Sep 17 00:00:00 2001 From: Christophe Vu-Brugier Date: Fri, 6 Jun 2014 17:15:16 +0200 Subject: target: cleanup some boolean tests Convert "x == true" to "x" and "x == false" to "!x". 
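A minimal before/after illustration of the conversion, drawn from the iscsit_check_np_match() hunk that follows:

	/* before */
	if ((ip_match == true) && (np->np_port == port) &&
	    (np->np_network_transport == network_transport))
		return true;

	/* after */
	if (ip_match && (np->np_port == port) &&
	    (np->np_network_transport == network_transport))
		return true;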
Signed-off-by: Christophe Vu-Brugier Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 12 +++++------- drivers/target/iscsi/iscsi_target_login.c | 2 +- drivers/target/iscsi/iscsi_target_nego.c | 6 +++--- drivers/target/iscsi/iscsi_target_parameters.c | 12 ++++++------ drivers/target/iscsi/iscsi_target_tpg.c | 4 ++-- drivers/target/target_core_transport.c | 4 ++-- drivers/target/target_core_xcopy.c | 10 +++++----- 7 files changed, 24 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 46588c85d39b..2396216dd146 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -300,7 +300,7 @@ bool iscsit_check_np_match( port = ntohs(sock_in->sin_port); } - if ((ip_match == true) && (np->np_port == port) && + if (ip_match && (np->np_port == port) && (np->np_network_transport == network_transport)) return true; @@ -325,7 +325,7 @@ static struct iscsi_np *iscsit_get_np( } match = iscsit_check_np_match(sockaddr, np, network_transport); - if (match == true) { + if (match) { /* * Increment the np_exports reference count now to * prevent iscsit_del_np() below from being called @@ -1120,7 +1120,7 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, /* * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. */ - if (dump_payload == true) + if (dump_payload) goto after_immediate_data; immed_ret = iscsit_handle_immediate_data(cmd, hdr, @@ -3484,10 +3484,8 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) len = sprintf(buf, "TargetAddress=" "%s:%hu,%hu", - (inaddr_any == false) ? - np->np_ip : conn->local_ip, - (inaddr_any == false) ? - np->np_port : conn->local_port, + inaddr_any ? conn->local_ip : np->np_ip, + inaddr_any ? 
conn->local_port : np->np_port, tpg->tpgt); len += 1; diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index ca31fa1b8a4b..61519b9ff1e6 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1153,7 +1153,7 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t) void iscsi_target_login_sess_out(struct iscsi_conn *conn, struct iscsi_np *np, bool zero_tsih, bool new_sess) { - if (new_sess == false) + if (!new_sess) goto old_sess_out; pr_err("iSCSI Login negotiation failed.\n"); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index fcffd0412d83..62a095f36bf2 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -404,7 +404,7 @@ static void iscsi_target_sk_data_ready(struct sock *sk) } rc = schedule_delayed_work(&conn->login_work, 0); - if (rc == false) { + if (!rc) { pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work" " got false\n"); } @@ -513,7 +513,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work) state = (tpg->tpg_state == TPG_STATE_ACTIVE); spin_unlock(&tpg->tpg_state_lock); - if (state == false) { + if (!state) { pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); iscsi_target_restore_sock_callbacks(conn); iscsi_target_login_drop(conn, login); @@ -528,7 +528,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work) state = iscsi_target_sk_state_check(sk); read_unlock_bh(&sk->sk_callback_lock); - if (state == false) { + if (!state) { pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); iscsi_target_restore_sock_callbacks(conn); iscsi_target_login_drop(conn, login); diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 8ca82ab43054..02f9de26f38a 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -474,10 +474,10 @@ int iscsi_set_keys_to_negotiate( if (!strcmp(param->name, AUTHMETHOD)) { SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, HEADERDIGEST)) { - if (iser == false) + if (!iser) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, DATADIGEST)) { - if (iser == false) + if (!iser) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, MAXCONNECTIONS)) { SET_PSTATE_NEGOTIATE(param); @@ -497,7 +497,7 @@ int iscsi_set_keys_to_negotiate( } else if (!strcmp(param->name, IMMEDIATEDATA)) { SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) { - if (iser == false) + if (!iser) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) { continue; @@ -528,13 +528,13 @@ int iscsi_set_keys_to_negotiate( } else if (!strcmp(param->name, OFMARKINT)) { SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, RDMAEXTENSIONS)) { - if (iser == true) + if (iser) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { - if (iser == true) + if (iser) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) { - if (iser == true) + if (iser) SET_PSTATE_NEGOTIATE(param); } } diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 71126761af5f..2d4cb24bb1a0 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -450,7 +450,7 @@ static bool 
iscsit_tpg_check_network_portal( match = iscsit_check_np_match(sockaddr, np, network_transport); - if (match == true) + if (match) break; } spin_unlock(&tpg->tpg_np_lock); @@ -472,7 +472,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal( if (!tpg_np_parent) { if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr, - network_transport) == true) { + network_transport)) { pr_err("Network Portal: %s already exists on a" " different TPG on %s\n", ip_str, tpg->tpg_tiqn->tiqn); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 789aa9eb0a1e..5f192cafbb79 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -504,7 +504,7 @@ void transport_deregister_session(struct se_session *se_sess) * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group * removal context. */ - if (se_nacl && comp_nacl == true) + if (se_nacl && comp_nacl) target_put_nacl(se_nacl); transport_free_session(se_sess); @@ -2363,7 +2363,7 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, * fabric acknowledgement that requires two target_put_sess_cmd() * invocations before se_cmd descriptor release. */ - if (ack_kref == true) { + if (ack_kref) { kref_get(&se_cmd->cmd_kref); se_cmd->se_cmd_flags |= SCF_ACK_KREF; } diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 669c536fd959..e9186cdf35e9 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -70,7 +70,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; int rc; - if (src == true) + if (src) dev_wwn = &xop->dst_tid_wwn[0]; else dev_wwn = &xop->src_tid_wwn[0]; @@ -88,7 +88,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op if (rc != 0) continue; - if (src == true) { + if (src) { xop->dst_dev = se_dev; pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located" " se_dev\n", xop->dst_dev); @@ -166,7 +166,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op return -EINVAL; } - if (src == true) { + if (src) { memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); /* * Determine if the source designator matches the local device @@ -236,7 +236,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, /* * Assume target descriptors are in source -> destination order.. */ - if (src == true) + if (src) src = false; else src = true; @@ -560,7 +560,7 @@ static int target_xcopy_init_pt_lun( * reservations. The pt_cmd->se_lun pointer will be setup from within * target_xcopy_setup_pt_port() */ - if (remote_port == false) { + if (!remote_port) { pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; return 0; } -- cgit v1.2.3 From f15e9cd910c4d9da7de43f2181f362082fc45f0f Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 9 Jun 2014 23:13:20 +0000 Subject: target: Set CMD_T_ACTIVE bit for Task Management Requests This patch fixes a bug where se_cmd descriptors associated with a Task Management Request (TMR) where not setting CMD_T_ACTIVE before being dispatched into target_tmr_work() process context. This is required in order for transport_generic_free_cmd() -> transport_wait_for_tasks() to wait on se_cmd->t_transport_stop_comp if a session reset event occurs while an ABORT_TASK is outstanding waiting for another I/O to complete. 
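The reason the bit matters is the early return in transport_wait_for_tasks(); the sketch below is a simplified paraphrase of that check (it is not part of this patch, and details may differ from the exact 3.x code): without CMD_T_ACTIVE set on the TMR's se_cmd, the wait is skipped entirely and the descriptor can be freed while target_tmr_work() is still running.

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		/* nothing marked in flight, so there is no completion to wait for */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	cmd->transport_state |= CMD_T_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wait_for_completion(&cmd->t_transport_stop_comp);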
Cc: Thomas Glanzmann Cc: Charalampos Pournaris Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 5f192cafbb79..195435bf1140 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2934,6 +2934,12 @@ static void target_tmr_work(struct work_struct *work) int transport_generic_handle_tmr( struct se_cmd *cmd) { + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->transport_state |= CMD_T_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + INIT_WORK(&cmd->work, target_tmr_work); queue_work(cmd->se_dev->tmr_wq, &cmd->work); return 0; -- cgit v1.2.3 From a95d6511303b848da45ee27b35018bb58087bdc6 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 9 Jun 2014 23:36:51 +0000 Subject: target: Use complete_all for se_cmd->t_transport_stop_comp This patch fixes a bug where multiple waiters on ->t_transport_stop_comp occurs due to a concurrent ABORT_TASK and session reset both invoking transport_wait_for_tasks(), while waiting for the associated se_cmd descriptor backend processing to complete. For this case, complete_all() should be invoked in order to wake up both waiters in core_tmr_abort_task() + transport_generic_free_cmd() process contexts. Cc: Thomas Glanzmann Cc: Charalampos Pournaris Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 195435bf1140..15dbf6e97289 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -562,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return 1; } @@ -687,7 +687,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) if (cmd->transport_state & CMD_T_ABORTED && cmd->transport_state & CMD_T_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return; } else if (!success) { INIT_WORK(&cmd->work, target_complete_failure_work); @@ -1761,7 +1761,7 @@ void target_execute_cmd(struct se_cmd *cmd) cmd->se_tfo->get_task_tag(cmd)); spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return; } -- cgit v1.2.3 From bbc050488525e1ab1194c27355f63c66814385b8 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 10 Jun 2014 04:03:54 +0000 Subject: iscsi-target: Fix ABORT_TASK + connection reset iscsi_queue_req memory leak This patch fixes a iscsi_queue_req memory leak when ABORT_TASK response has been queued by TFO->queue_tm_rsp() -> lio_queue_tm_rsp() after a long standing I/O completes, but the connection has already reset and waiting for cleanup to complete in iscsit_release_commands_from_conn() -> transport_generic_free_cmd() -> transport_wait_for_tasks() code. 
It moves iscsit_free_queue_reqs_for_conn() after the per-connection command list has been released, so that the associated se_cmd tag can be completed + released by target-core before freeing any remaining iscsi_queue_req memory for the connection generated by lio_queue_tm_rsp(). Cc: Thomas Glanzmann Cc: Charalampos Pournaris Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 2396216dd146..e0d98344e4bd 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4231,8 +4231,6 @@ int iscsit_close_connection( if (conn->conn_transport->iscsit_wait_conn) conn->conn_transport->iscsit_wait_conn(conn); - iscsit_free_queue_reqs_for_conn(conn); - /* * During Connection recovery drop unacknowledged out of order * commands for this connection, and prepare the other commands @@ -4249,6 +4247,7 @@ int iscsit_close_connection( iscsit_clear_ooo_cmdsns_for_conn(conn); iscsit_release_commands_from_conn(conn); } + iscsit_free_queue_reqs_for_conn(conn); /* * Handle decrementing session or connection usage count if -- cgit v1.2.3 From e0546fc1ba66c90cb38a5764357366267d3e58e4 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 10 Jun 2014 13:41:41 +0300 Subject: Target/iser: Fix a wrong dereference in case discovery session is over iser In case the discovery session is carried over iser, we can't access the assumed network portal since the default portal is used. In this case we don't really need to allocate the fastreg pool, just prepare to the text pdu that will follow. Signed-off-by: Sagi Grimberg Reported-by: Alex Tabachnik Cc: stable@vger.kernel.org # 3.15+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index b622783ab198..647a5e2beee4 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1053,7 +1053,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, } if (!login->login_failed) { if (login->login_complete) { - if (isert_conn->conn_device->use_fastreg) { + if (!conn->sess->sess_ops->SessionType && + isert_conn->conn_device->use_fastreg) { + /* Normal Session and fastreg is used */ u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi; ret = isert_conn_create_fastreg_pool(isert_conn, -- cgit v1.2.3 From 22c7aaa57e80853b4904a46c18f97db0036a3b97 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 10 Jun 2014 18:27:59 +0300 Subject: Target/iscsi: Fix sendtargets response pdu for iser transport In case the transport is iser we should not include the iscsi target info in the sendtargets text response pdu. This causes sendtargets response to include the target info twice. Modify iscsit_build_sendtargets_response to filter transport types that don't match. 
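Usage after this change, as seen in the hunks that follow - each transport builds the SendTargets payload only from portals of its own type:

	/* isert (iSER) */
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);

	/* traditional iSCSI over TCP */
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);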
Signed-off-by: Sagi Grimberg Reported-by: Slava Shwartsman Cc: stable@vger.kernel.org # 3.11+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 2 +- drivers/target/iscsi/iscsi_target.c | 14 ++++++++++---- include/target/iscsi/iscsi_transport.h | 3 ++- 3 files changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 647a5e2beee4..ba619fa84662 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2318,7 +2318,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) int rc; isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); - rc = iscsit_build_text_rsp(cmd, conn, hdr); + rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND); if (rc < 0) return rc; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index e0d98344e4bd..b87721a01b74 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3389,7 +3389,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np) #define SENDTARGETS_BUF_LIMIT 32768U -static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) +static int +iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, + enum iscsit_transport_type network_transport) { char *payload = NULL; struct iscsi_conn *conn = cmd->conn; @@ -3466,6 +3468,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) struct iscsi_np *np = tpg_np->tpg_np; bool inaddr_any = iscsit_check_inaddr_any(np); + if (np->np_network_transport != network_transport) + continue; + if (!target_name_printed) { len = sprintf(buf, "TargetName=%s", tiqn->tiqn); @@ -3517,11 +3522,12 @@ eob: int iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, - struct iscsi_text_rsp *hdr) + struct iscsi_text_rsp *hdr, + enum iscsit_transport_type network_transport) { int text_length, padding; - text_length = iscsit_build_sendtargets_response(cmd); + text_length = iscsit_build_sendtargets_response(cmd, network_transport); if (text_length < 0) return text_length; @@ -3559,7 +3565,7 @@ static int iscsit_send_text_rsp( u32 tx_size = 0; int text_length, iov_count = 0, rc; - rc = iscsit_build_text_rsp(cmd, conn, hdr); + rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP); if (rc < 0) return rc; diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index 33b487b5da92..daef9daa500c 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -70,7 +70,8 @@ extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *, extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *, struct iscsi_tm_rsp *); extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *, - struct iscsi_text_rsp *); + struct iscsi_text_rsp *, + enum iscsit_transport_type); extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *, struct iscsi_reject *); extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *, -- cgit v1.2.3 From 6ef31dc720cff4dc6a67ccb8c73c56dbf2ea6a08 Mon Sep 17 00:00:00 2001 From: Christophe Vu-Brugier Date: Tue, 10 Jun 2014 17:53:21 +0200 Subject: target/sbc: Remove sbc_check_valid_sectors() A similar check is performed at the end of sbc_parse_cdb() and is now enforced if the SYNCHRONIZE CACHE command's backend supports ->execute_sync_cache(). 
(Add check_lba goto to avoid *_max_sectors checks - nab) Signed-off-by: Christophe Vu-Brugier Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_sbc.c | 45 ++++++---------------------------------- 1 file changed, 6 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index e0229592ec55..7675d2aaa0f7 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -176,24 +176,6 @@ static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) return cmd->se_dev->dev_attrib.block_size * sectors; } -static int sbc_check_valid_sectors(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - unsigned long long end_lba; - u32 sectors; - - sectors = cmd->data_length / dev->dev_attrib.block_size; - end_lba = dev->transport->get_blocks(dev) + 1; - - if (cmd->t_task_lba + sectors > end_lba) { - pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n", - cmd->t_task_lba, sectors, end_lba); - return -EINVAL; - } - - return 0; -} - static inline u32 transport_get_sectors_6(unsigned char *cdb) { /* @@ -877,15 +859,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) break; case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: - if (!ops->execute_sync_cache) { - size = 0; - cmd->execute_cmd = sbc_emulate_noop; - break; - } - - /* - * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE - */ if (cdb[0] == SYNCHRONIZE_CACHE) { sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); @@ -893,18 +866,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); } - - size = sbc_get_size(cmd, sectors); - - /* - * Check to ensure that LBA + Range does not exceed past end of - * device for IBLOCK and FILEIO ->do_sync_cache() backend calls - */ - if (cmd->t_task_lba || sectors) { - if (sbc_check_valid_sectors(cmd) < 0) - return TCM_ADDRESS_OUT_OF_RANGE; + if (ops->execute_sync_cache) { + cmd->execute_cmd = ops->execute_sync_cache; + goto check_lba; } - cmd->execute_cmd = ops->execute_sync_cache; + size = 0; + cmd->execute_cmd = sbc_emulate_noop; break; case UNMAP: if (!ops->execute_unmap) @@ -988,7 +955,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) dev->dev_attrib.hw_max_sectors); return TCM_INVALID_CDB_FIELD; } - +check_lba: end_lba = dev->transport->get_blocks(dev) + 1; if (cmd->t_task_lba + sectors > end_lba) { pr_err("cmd exceeds last lba %llu " -- cgit v1.2.3 From c52716defd6ec7e89c510c740de7ec3478008f28 Mon Sep 17 00:00:00 2001 From: Christophe Vu-Brugier Date: Tue, 10 Jun 2014 17:53:22 +0200 Subject: target/sbc: Check that the LBA and number of blocks are correct in VERIFY This patch extracts LBA + sectors for VERIFY, and adds a goto check_lba to perform the end-of-device checking. 
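Both this change and the preceding SYNCHRONIZE CACHE change funnel into the common end-of-device check at the check_lba label in sbc_parse_cdb(). A sketch of that check, using the variables from the surrounding code; the exact error message and the sense value returned on overflow are assumptions, as they are not fully visible in the hunks:

	check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu (lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE; /* assumed sense value */
		}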
(Update patch to drop lba_check usage - nab) Signed-off-by: Christophe Vu-Brugier Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_sbc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 7675d2aaa0f7..97a33603795d 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -914,8 +914,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) break; case VERIFY: size = 0; + sectors = transport_get_sectors_10(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->execute_cmd = sbc_emulate_noop; - break; + goto check_lba; case REZERO_UNIT: case SEEK_6: case SEEK_10: -- cgit v1.2.3 From 2426bd456a61407388b6e61fc5f98dbcbebc50e2 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Tue, 10 Jun 2014 11:07:47 -0700 Subject: target: Report correct response length for some commands When an initiator sends an allocation length bigger than what its command consumes, the target should only return the actual response data and set the residual length to the unused part of the allocation length. Add a helper function that command handlers (INQUIRY, READ CAPACITY, etc) can use to do this correctly, and use this code to get the correct residual for commands that don't use the full initiator allocation in the handlers for READ CAPACITY, READ CAPACITY(16), INQUIRY, MODE SENSE and REPORT LUNS. This addresses a handful of failures as reported by Christophe with the Windows Certification Kit: http://permalink.gmane.org/gmane.linux.scsi.target.devel/6515 Signed-off-by: Roland Dreier Tested-by: Christophe Vu-Brugier Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_sbc.c | 4 ++-- drivers/target/target_core_spc.c | 9 ++++++--- drivers/target/target_core_transport.c | 17 +++++++++++++++++ include/target/target_core_backend.h | 1 + 4 files changed, 26 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 97a33603795d..1d3a626bf24f 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -81,7 +81,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd) transport_kunmap_data_sg(cmd); } - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 8); return 0; } @@ -137,7 +137,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) transport_kunmap_data_sg(cmd); } - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 32); return 0; } diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 17b5b7e099fa..6cd7222738fc 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -716,6 +716,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) unsigned char *buf; sense_reason_t ret; int p; + int len = 0; buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); if (!buf) { @@ -737,6 +738,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) } ret = spc_emulate_inquiry_std(cmd, buf); + len = buf[4] + 5; goto out; } @@ -744,6 +746,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) if (cdb[2] == evpd_handlers[p].page) { buf[1] = cdb[2]; ret = evpd_handlers[p].emulate(cmd, buf); + len = get_unaligned_be16(&buf[2]) + 4; goto out; } } @@ -760,7 +763,7 @@ out: kfree(buf); if (!ret) - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, len); return ret; } @@ -1098,7 +1101,7 @@ set_length: transport_kunmap_data_sg(cmd); } - 
target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, length); return 0; } @@ -1274,7 +1277,7 @@ done: buf[3] = (lun_count & 0xff); transport_kunmap_data_sg(cmd); - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); return 0; } EXPORT_SYMBOL(spc_emulate_report_luns); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 15dbf6e97289..c9e8b35a954f 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -703,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) } EXPORT_SYMBOL(target_complete_cmd); +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +{ + if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + cmd->residual_count += cmd->data_length - length; + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = cmd->data_length - length; + } + + cmd->data_length = length; + } + + target_complete_cmd(cmd, scsi_status); +} +EXPORT_SYMBOL(target_complete_cmd_with_length); + static void target_add_to_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 3a1c1eea1fff..9adc1bca1178 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -59,6 +59,7 @@ int transport_subsystem_register(struct se_subsystem_api *); void transport_subsystem_release(struct se_subsystem_api *); void target_complete_cmd(struct se_cmd *, u8); +void target_complete_cmd_with_length(struct se_cmd *, u8, int); sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size); sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd); -- cgit v1.2.3 From d77e65350f2d82dfa0557707d505711f5a43c8fd Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 11 Jun 2014 12:09:58 +0300 Subject: libiscsi, iser: Adjust data_length to include protection information In case protection information exists over the wire iscsi header data length is required to include it. Use protection information aware scsi helpers to set the correct transfer length. In order to avoid breakage, remove iser transfer length checks for each task as they are not always true and somewhat redundant anyway. Signed-off-by: Sagi Grimberg Reviewed-by: Mike Christie Acked-by: Mike Christie Cc: stable@vger.kernel.org # 3.15+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/iser/iser_initiator.c | 34 ++++++++-------------------- drivers/scsi/libiscsi.c | 18 +++++++-------- 2 files changed, 19 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 2e2d903db838..8d44a4060634 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -41,11 +41,11 @@ #include "iscsi_iser.h" /* Register user buffer memory and initialize passive rdma - * dto descriptor. Total data size is stored in - * iser_task->data[ISER_DIR_IN].data_len + * dto descriptor. 
Data size is stored in + * task->data[ISER_DIR_IN].data_len, Protection size + * os stored in task->prot[ISER_DIR_IN].data_len */ -static int iser_prepare_read_cmd(struct iscsi_task *task, - unsigned int edtl) +static int iser_prepare_read_cmd(struct iscsi_task *task) { struct iscsi_iser_task *iser_task = task->dd_data; @@ -73,14 +73,6 @@ static int iser_prepare_read_cmd(struct iscsi_task *task, return err; } - if (edtl > iser_task->data[ISER_DIR_IN].data_len) { - iser_err("Total data length: %ld, less than EDTL: " - "%d, in READ cmd BHS itt: %d, conn: 0x%p\n", - iser_task->data[ISER_DIR_IN].data_len, edtl, - task->itt, iser_task->ib_conn); - return -EINVAL; - } - err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN); if (err) { iser_err("Failed to set up Data-IN RDMA\n"); @@ -100,8 +92,9 @@ static int iser_prepare_read_cmd(struct iscsi_task *task, } /* Register user buffer memory and initialize passive rdma - * dto descriptor. Total data size is stored in - * task->data[ISER_DIR_OUT].data_len + * dto descriptor. Data size is stored in + * task->data[ISER_DIR_OUT].data_len, Protection size + * is stored at task->prot[ISER_DIR_OUT].data_len */ static int iser_prepare_write_cmd(struct iscsi_task *task, @@ -135,14 +128,6 @@ iser_prepare_write_cmd(struct iscsi_task *task, return err; } - if (edtl > iser_task->data[ISER_DIR_OUT].data_len) { - iser_err("Total data length: %ld, less than EDTL: %d, " - "in WRITE cmd BHS itt: %d, conn: 0x%p\n", - iser_task->data[ISER_DIR_OUT].data_len, - edtl, task->itt, task->conn); - return -EINVAL; - } - err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT); if (err != 0) { iser_err("Failed to register write cmd RDMA mem\n"); @@ -417,11 +402,12 @@ int iser_send_command(struct iscsi_conn *conn, if (scsi_prot_sg_count(sc)) { prot_buf->buf = scsi_prot_sglist(sc); prot_buf->size = scsi_prot_sg_count(sc); - prot_buf->data_len = sc->prot_sdb->length; + prot_buf->data_len = data_buf->data_len >> + ilog2(sc->device->sector_size) * 8; } if (hdr->flags & ISCSI_FLAG_CMD_READ) { - err = iser_prepare_read_cmd(task, edtl); + err = iser_prepare_read_cmd(task); if (err) goto send_command_error; } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 26dc005bb0f0..3f462349b16c 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -338,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; struct iscsi_scsi_req *hdr; - unsigned hdrlength, cmd_len; + unsigned hdrlength, cmd_len, transfer_length; itt_t itt; int rc; @@ -391,11 +391,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) task->protected = true; + transfer_length = scsi_transfer_length(sc); + hdr->data_length = cpu_to_be32(transfer_length); if (sc->sc_data_direction == DMA_TO_DEVICE) { - unsigned out_len = scsi_out(sc)->length; struct iscsi_r2t_info *r2t = &task->unsol_r2t; - hdr->data_length = cpu_to_be32(out_len); hdr->flags |= ISCSI_FLAG_CMD_WRITE; /* * Write counters: @@ -414,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) memset(r2t, 0, sizeof(*r2t)); if (session->imm_data_en) { - if (out_len >= session->first_burst) + if (transfer_length >= session->first_burst) task->imm_count = min(session->first_burst, conn->max_xmit_dlength); else - task->imm_count = min(out_len, - conn->max_xmit_dlength); + task->imm_count = min(transfer_length, + conn->max_xmit_dlength); hton24(hdr->dlength, task->imm_count); } 
else zero_data(hdr->dlength); if (!session->initial_r2t_en) { - r2t->data_length = min(session->first_burst, out_len) - + r2t->data_length = min(session->first_burst, + transfer_length) - task->imm_count; r2t->data_offset = task->imm_count; r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); @@ -438,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) } else { hdr->flags |= ISCSI_FLAG_CMD_FINAL; zero_data(hdr->dlength); - hdr->data_length = cpu_to_be32(scsi_in(sc)->length); if (sc->sc_data_direction == DMA_FROM_DEVICE) hdr->flags |= ISCSI_FLAG_CMD_READ; @@ -466,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) scsi_bidi_cmnd(sc) ? "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", conn->id, sc, sc->cmnd[0], - task->itt, scsi_bufflen(sc), + task->itt, transfer_length, scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); -- cgit v1.2.3 From e2a4f55c6498b59a17a85a1bb6db122a993ffe02 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 11 Jun 2014 12:09:59 +0300 Subject: TARGET/sbc,loopback: Adjust command data length in case pi exists on the wire In various areas of the code, it is assumed that se_cmd->data_length describes pure data. In case that protection information exists over the wire (protect bits is are on) the target core re-calculates the data length from the CDB and the backed device block size (instead of each transport peeking in the cdb). Modify loopback device to include protection information in the transferred data length (like other scsi transports). Signed-off-by: Sagi Grimberg Cc: stable@vger.kernel.org # 3.15+ Signed-off-by: Nicholas Bellinger --- drivers/target/loopback/tcm_loop.c | 15 ++++++++++++--- drivers/target/target_core_sbc.c | 15 +++++++++++++-- 2 files changed, 25 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index c886ad1c39fb..1f4c015e9078 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work) struct tcm_loop_hba *tl_hba; struct tcm_loop_tpg *tl_tpg; struct scatterlist *sgl_bidi = NULL; - u32 sgl_bidi_count = 0; + u32 sgl_bidi_count = 0, transfer_length; int rc; tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); @@ -213,12 +213,21 @@ static void tcm_loop_submission_work(struct work_struct *work) } - if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) + transfer_length = scsi_transfer_length(sc); + if (!scsi_prot_sg_count(sc) && + scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) { se_cmd->prot_pto = true; + /* + * loopback transport doesn't support + * WRITE_GENERATE, READ_STRIP protection + * information operations, go ahead unprotected. 
+ */ + transfer_length = scsi_bufflen(sc); + } rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, - scsi_bufflen(sc), tcm_loop_sam_attr(sc), + transfer_length, tcm_loop_sam_attr(sc), sc->sc_data_direction, 0, scsi_sglist(sc), scsi_sg_count(sc), sgl_bidi, sgl_bidi_count, diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 1d3a626bf24f..bd78d9235ac6 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -647,8 +647,19 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, cmd->prot_type = dev->dev_attrib.pi_prot_type; cmd->prot_length = dev->prot_length * sectors; - pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n", - __func__, cmd->prot_type, cmd->prot_length, + + /** + * In case protection information exists over the wire + * we modify command data length to describe pure data. + * The actual transfer length is data length + protection + * length + **/ + if (protect) + cmd->data_length = sectors * dev->dev_attrib.block_size; + + pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " + "prot_op=%d prot_checks=%d\n", + __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, cmd->prot_op, cmd->prot_checks); return true; -- cgit v1.2.3 From 9f977ef7b671f6169eca78bf40f230fe84b7c7e5 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 10 Jun 2014 01:19:38 -0700 Subject: vhost-scsi: Include prot_bytes into expected data transfer length This patch updates vhost_scsi_get_tag() to accept the combined expected data transfer length + T10 PI bytes as the value passed into target_submit_cmd(). This is required now that target-core logic in commit 14ef9200 expects to subtract se_cmd->prot_length from se_cmd->data_length. Cc: Paolo Bonzini Cc: Michael S. Tsirkin Cc: Martin K. Petersen Cc: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/vhost/scsi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 667e72d46998..03e484fa1ef4 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1144,7 +1144,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) } cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, - exp_data_len, data_direction); + exp_data_len + prot_bytes, + data_direction); if (IS_ERR(cmd)) { vq_err(vq, "vhost_scsi_get_tag failed %ld\n", PTR_ERR(cmd)); -- cgit v1.2.3 From 0ed6e189e3f6ac3a25383ed5cc8b0ac24c9b97b7 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 12 Jun 2014 12:45:02 -0700 Subject: target: Fix NULL pointer dereference for XCOPY in target_put_sess_cmd This patch fixes a NULL pointer dereference regression bug that was introduced with: commit 1e1110c43b1cda9fe77fc4a04835e460550e6b3c Author: Mikulas Patocka Date: Sat May 17 06:49:22 2014 -0400 target: fix memory leak on XCOPY Now that target_put_sess_cmd() -> kref_put_spinlock_irqsave() is called with a valid se_cmd->cmd_kref, a NULL pointer dereference is triggered because the XCOPY passthrough commands don't have an associated se_session pointer. To address this bug, go ahead and checking for a NULL se_sess pointer within target_put_sess_cmd(), and call se_cmd->se_tfo->release_cmd() to release the XCOPY's xcopy_pt_cmd memory. 
Reported-by: Thomas Glanzmann Cc: Thomas Glanzmann Cc: Mikulas Patocka Cc: stable@vger.kernel.org # 3.12+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index c9e8b35a954f..4ef11145c746 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2424,6 +2424,10 @@ static void target_release_cmd_kref(struct kref *kref) */ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) { + if (!se_sess) { + se_cmd->se_tfo->release_cmd(se_cmd); + return 1; + } return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, &se_sess->sess_cmd_lock); } -- cgit v1.2.3