Diffstat (limited to 'drivers/vhost/scsi.c')
-rw-r--r-- | drivers/vhost/scsi.c | 174
1 file changed, 64 insertions(+), 110 deletions(-)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index b244e7c0f514..bb10fa4bb4f6 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -125,7 +125,6 @@ struct vhost_scsi_tpg {
 	struct se_portal_group se_tpg;
 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
 	struct vhost_scsi *vhost_scsi;
-	struct list_head tmf_queue;
 };
 
 struct vhost_scsi_tport {
@@ -206,10 +205,8 @@ struct vhost_scsi {
 
 struct vhost_scsi_tmf {
 	struct vhost_work vwork;
-	struct vhost_scsi_tpg *tpg;
 	struct vhost_scsi *vhost;
 	struct vhost_scsi_virtqueue *svq;
-	struct list_head queue_entry;
 
 	struct se_cmd se_cmd;
 	u8 scsi_resp;
@@ -232,7 +229,10 @@ struct vhost_scsi_ctx {
 	struct iov_iter out_iter;
 };
 
-/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
+/*
+ * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
+ * configfs management operations.
+ */
 static DEFINE_MUTEX(vhost_scsi_mutex);
 static LIST_HEAD(vhost_scsi_list);
 
@@ -294,11 +294,6 @@ static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
 	return 1;
 }
 
-static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
-{
-	return 0;
-}
-
 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
@@ -323,11 +318,6 @@ static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
 	return tpg->tv_fabric_prot_type;
 }
 
-static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
-{
-	return 1;
-}
-
 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 {
 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
@@ -352,12 +342,9 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 
 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
 {
-	struct vhost_scsi_tpg *tpg = tmf->tpg;
 	struct vhost_scsi_inflight *inflight = tmf->inflight;
 
-	mutex_lock(&tpg->tv_tpg_mutex);
-	list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
-	mutex_unlock(&tpg->tv_tpg_mutex);
+	kfree(tmf);
 
 	vhost_scsi_put_inflight(inflight);
 }
@@ -378,11 +365,6 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 	}
 }
 
-static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
-{
-	return 0;
-}
-
 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 {
 	/* Go ahead and process the write immediately */
@@ -390,16 +372,6 @@ static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 	return 0;
 }
 
-static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
-{
-	return;
-}
-
-static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
-{
-	return 0;
-}
-
 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 {
 	transport_generic_free_cmd(se_cmd, 0);
@@ -671,7 +643,7 @@ vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 {
 	int sgl_count = 0;
 
-	if (!iter || !iter->iov) {
+	if (!iter || !iter_iov(iter)) {
 		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
 		       " present\n", __func__, bytes);
 		return -EINVAL;
@@ -1194,19 +1166,11 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
 		goto send_reject;
 	}
 
-	mutex_lock(&tpg->tv_tpg_mutex);
-	if (list_empty(&tpg->tmf_queue)) {
-		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
-		mutex_unlock(&tpg->tv_tpg_mutex);
+	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+	if (!tmf)
 		goto send_reject;
-	}
-
-	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
-			       queue_entry);
-	list_del_init(&tmf->queue_entry);
-	mutex_unlock(&tpg->tv_tpg_mutex);
 
-	tmf->tpg = tpg;
+	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
 	tmf->vhost = vs;
 	tmf->svq = svq;
 	tmf->resp_iov = vq->iov[vc->out];
@@ -1540,7 +1504,7 @@ out:
  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
  *
  * The lock nesting rule is:
- *  vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
+ *  vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int
 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
@@ -1554,7 +1518,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 	int index, ret, i, len;
 	bool match = false;
 
-	mutex_lock(&vhost_scsi_mutex);
 	mutex_lock(&vs->dev.mutex);
 
 	/* Verify that ring has been setup correctly. */
@@ -1575,6 +1538,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 	if (vs->vs_tpg)
 		memcpy(vs_tpg, vs->vs_tpg, len);
 
+	mutex_lock(&vhost_scsi_mutex);
 	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
 		mutex_lock(&tpg->tv_tpg_mutex);
 		if (!tpg->tpg_nexus) {
@@ -1590,6 +1554,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
 			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
 				mutex_unlock(&tpg->tv_tpg_mutex);
+				mutex_unlock(&vhost_scsi_mutex);
 				ret = -EEXIST;
 				goto undepend;
 			}
@@ -1604,6 +1569,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 			if (ret) {
 				pr_warn("target_depend_item() failed: %d\n", ret);
 				mutex_unlock(&tpg->tv_tpg_mutex);
+				mutex_unlock(&vhost_scsi_mutex);
 				goto undepend;
 			}
 			tpg->tv_tpg_vhost_count++;
@@ -1613,6 +1579,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 		}
 		mutex_unlock(&tpg->tv_tpg_mutex);
 	}
+	mutex_unlock(&vhost_scsi_mutex);
 
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
@@ -1658,14 +1625,16 @@ undepend:
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
 		tpg = vs_tpg[i];
 		if (tpg) {
+			mutex_lock(&tpg->tv_tpg_mutex);
+			tpg->vhost_scsi = NULL;
 			tpg->tv_tpg_vhost_count--;
+			mutex_unlock(&tpg->tv_tpg_mutex);
 			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
 		}
 	}
 	kfree(vs_tpg);
 out:
 	mutex_unlock(&vs->dev.mutex);
-	mutex_unlock(&vhost_scsi_mutex);
 	return ret;
 }
 
@@ -1681,7 +1650,6 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 	int index, ret, i;
 	u8 target;
 
-	mutex_lock(&vhost_scsi_mutex);
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -1702,11 +1670,10 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 		if (!tpg)
 			continue;
 
-		mutex_lock(&tpg->tv_tpg_mutex);
 		tv_tport = tpg->tport;
 		if (!tv_tport) {
 			ret = -ENODEV;
-			goto err_tpg;
+			goto err_dev;
 		}
 
 		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
@@ -1715,35 +1682,51 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 				tv_tport->tport_name, tpg->tport_tpgt,
 				t->vhost_wwpn, t->vhost_tpgt);
 			ret = -EINVAL;
-			goto err_tpg;
+			goto err_dev;
 		}
+		match = true;
+	}
+	if (!match)
+		goto free_vs_tpg;
+
+	/* Prevent new cmds from starting and accessing the tpgs/sessions */
+	for (i = 0; i < vs->dev.nvqs; i++) {
+		vq = &vs->vqs[i].vq;
+		mutex_lock(&vq->mutex);
+		vhost_vq_set_backend(vq, NULL);
+		mutex_unlock(&vq->mutex);
+	}
+	/* Make sure cmds are not running before tearing them down. */
+	vhost_scsi_flush(vs);
+
+	for (i = 0; i < vs->dev.nvqs; i++) {
+		vq = &vs->vqs[i].vq;
+		vhost_scsi_destroy_vq_cmds(vq);
+	}
+
+	/*
+	 * We can now release our hold on the tpg and sessions and userspace
+	 * can free them after this point.
+	 */
+	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+		target = i;
+		tpg = vs->vs_tpg[target];
+		if (!tpg)
+			continue;
+
+		mutex_lock(&tpg->tv_tpg_mutex);
+
 		tpg->tv_tpg_vhost_count--;
 		tpg->vhost_scsi = NULL;
 		vs->vs_tpg[target] = NULL;
-		match = true;
+
 		mutex_unlock(&tpg->tv_tpg_mutex);
-		/*
-		 * Release se_tpg->tpg_group.cg_item configfs dependency now
-		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
-		 */
+
 		se_tpg = &tpg->se_tpg;
 		target_undepend_item(&se_tpg->tpg_group.cg_item);
 	}
-	if (match) {
-		for (i = 0; i < vs->dev.nvqs; i++) {
-			vq = &vs->vqs[i].vq;
-			mutex_lock(&vq->mutex);
-			vhost_vq_set_backend(vq, NULL);
-			mutex_unlock(&vq->mutex);
-		}
-		/* Make sure cmds are not running before tearing them down. */
-		vhost_scsi_flush(vs);
-
-		for (i = 0; i < vs->dev.nvqs; i++) {
-			vq = &vs->vqs[i].vq;
-			vhost_scsi_destroy_vq_cmds(vq);
-		}
-	}
+
+free_vs_tpg:
 	/*
 	 * Act as synchronize_rcu to make sure access to
 	 * old vs->vs_tpg is finished.
 	 */
@@ -1753,14 +1736,10 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 	vs->vs_tpg = NULL;
 	WARN_ON(vs->vs_events_nr);
 	mutex_unlock(&vs->dev.mutex);
-	mutex_unlock(&vhost_scsi_mutex);
 	return 0;
 
-err_tpg:
-	mutex_unlock(&tpg->tv_tpg_mutex);
 err_dev:
 	mutex_unlock(&vs->dev.mutex);
-	mutex_unlock(&vhost_scsi_mutex);
 	return ret;
 }
 
@@ -2001,8 +1980,6 @@ vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
 	if (!vs)
 		return;
 
-	mutex_lock(&vs->dev.mutex);
-
 	if (plug)
 		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
 	else
@@ -2010,11 +1987,18 @@ vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
 
 	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	mutex_lock(&vq->mutex);
+	/*
+	 * We can't queue events if the backend has been cleared, because
+	 * we could end up queueing an event after the flush.
+	 */
+	if (!vhost_vq_get_backend(vq))
+		goto unlock;
+
 	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
 		vhost_scsi_send_evt(vs, tpg, lun,
 				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+unlock:
 	mutex_unlock(&vq->mutex);
-	mutex_unlock(&vs->dev.mutex);
 }
 
 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
@@ -2032,24 +2016,11 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
 {
 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 				struct vhost_scsi_tpg, se_tpg);
-	struct vhost_scsi_tmf *tmf;
-
-	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
-	if (!tmf)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&tmf->queue_entry);
-	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
-
-	mutex_lock(&vhost_scsi_mutex);
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tpg->tv_tpg_port_count++;
-	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
-	mutex_unlock(&tpg->tv_tpg_mutex);
-
 	vhost_scsi_hotplug(tpg, lun);
-
-	mutex_unlock(&vhost_scsi_mutex);
+	mutex_unlock(&tpg->tv_tpg_mutex);
 
 	return 0;
 }
@@ -2059,21 +2030,11 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
 {
 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 				struct vhost_scsi_tpg, se_tpg);
-	struct vhost_scsi_tmf *tmf;
-
-	mutex_lock(&vhost_scsi_mutex);
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tpg->tv_tpg_port_count--;
-	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
-			       queue_entry);
-	list_del(&tmf->queue_entry);
-	kfree(tmf);
-	mutex_unlock(&tpg->tv_tpg_mutex);
-
 	vhost_scsi_hotunplug(tpg, lun);
-
-	mutex_unlock(&vhost_scsi_mutex);
+	mutex_unlock(&tpg->tv_tpg_mutex);
 }
 
 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
@@ -2329,7 +2290,6 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
 	}
 	mutex_init(&tpg->tv_tpg_mutex);
 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
-	INIT_LIST_HEAD(&tpg->tmf_queue);
 	tpg->tport = tport;
 	tpg->tport_tpgt = tpgt;
 
@@ -2460,17 +2420,11 @@ static const struct target_core_fabric_ops vhost_scsi_ops = {
 	.tpg_get_tag = vhost_scsi_get_tpgt,
 	.tpg_check_demo_mode = vhost_scsi_check_true,
 	.tpg_check_demo_mode_cache = vhost_scsi_check_true,
-	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
-	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
 	.tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
-	.tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
 	.release_cmd = vhost_scsi_release_cmd,
 	.check_stop_free = vhost_scsi_check_stop_free,
-	.sess_get_index = vhost_scsi_sess_get_index,
 	.sess_get_initiator_sid = NULL,
 	.write_pending = vhost_scsi_write_pending,
-	.set_default_node_attributes = vhost_scsi_set_default_node_attrs,
-	.get_cmd_state = vhost_scsi_get_cmd_state,
 	.queue_data_in = vhost_scsi_queue_data_in,
 	.queue_status = vhost_scsi_queue_status,
 	.queue_tm_rsp = vhost_scsi_queue_tm_rsp,
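The relocated lock-nesting comment is the thread that ties most of these hunks together: vhost_scsi_mutex is no longer the outermost lock held across whole ioctls, and the LIO port callouts (vhost_scsi_port_link()/vhost_scsi_port_unlink()) stop taking it entirely. Below is a minimal sketch, distilled from the hunks above, of the acquisition order a set_endpoint-style path now follows; the function name is hypothetical and the body illustrates only the ordering, it is not code from the patch:

	/* Illustrative only: the nesting order documented in the diff above. */
	static int example_endpoint_lock_order(struct vhost_scsi *vs)
	{
		struct vhost_scsi_tpg *tpg;

		mutex_lock(&vs->dev.mutex);		/* 1. per-device mutex first */

		mutex_lock(&vhost_scsi_mutex);		/* 2. global TPG-list mutex */
		list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
			mutex_lock(&tpg->tv_tpg_mutex);	/* 3. per-tpg mutex */
			/* ...match the WWPN, take tpg/configfs references... */
			mutex_unlock(&tpg->tv_tpg_mutex);
		}
		mutex_unlock(&vhost_scsi_mutex);	/* dropped before vq work */

		/* 4. vq->mutex is taken last, e.g. around vhost_vq_set_backend(). */

		mutex_unlock(&vs->dev.mutex);
		return 0;
	}

Confining the global mutex to the TPG-list walk means configfs port link/unlink no longer serializes against every vhost ioctl. The trade-off is visible in vhost_scsi_do_plug(): having lost vs->dev.mutex, it must now check vhost_vq_get_backend() under vq->mutex so an event cannot be queued after vhost_scsi_clear_endpoint() has flushed.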