Diffstat (limited to 'drivers/nvme/host/fc.c')
 -rw-r--r--  drivers/nvme/host/fc.c | 332
 1 file changed, 140 insertions(+), 192 deletions(-)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 92964cef0f4b..5c2a08ef08ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -36,7 +36,7 @@
*/
#define NVME_FC_NR_AEN_COMMANDS 1
#define NVME_FC_AQ_BLKMQ_DEPTH \
- (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
+ (NVME_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
enum nvme_fc_queue_flags {
@@ -148,25 +148,22 @@ struct nvme_fc_ctrl {
struct device *dev;
struct nvme_fc_lport *lport;
struct nvme_fc_rport *rport;
- u32 queue_count;
u32 cnum;
u64 association_id;
- u64 cap;
-
struct list_head ctrl_list; /* rport->ctrl_list */
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
struct work_struct delete_work;
- struct work_struct reset_work;
struct delayed_work connect_work;
struct kref ref;
u32 flags;
u32 iocnt;
+ wait_queue_head_t ioabort_wait;
struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
@@ -214,7 +211,6 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
-static struct workqueue_struct *nvme_fc_wq;
@@ -878,8 +874,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
/* Linux supports only Dynamic controllers */
assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
- memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
- min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
+ uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
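[The uuid_copy() change above swaps an open-coded, length-clamped memcpy for the kernel's typed UUID helper, which always copies all 16 bytes. A minimal sketch, assuming both sides are uuid_t as in the post-patch FC-NVME headers (example_set_hostid() is a hypothetical name):

	#include <linux/uuid.h>

	static void example_set_hostid(uuid_t *dst, const uuid_t *src)
	{
		/* type-safe 16-byte copy; no min_t() length clamp needed */
		uuid_copy(dst, src);
	}
]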
@@ -1242,8 +1237,10 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
spin_lock_irqsave(&ctrl->lock, flags);
if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
- if (ctrl->flags & FCCTRL_TERMIO)
- ctrl->iocnt--;
+ if (ctrl->flags & FCCTRL_TERMIO) {
+ if (!--ctrl->iocnt)
+ wake_up(&ctrl->ioabort_wait);
+ }
}
if (op->flags & FCOP_FLAGS_RELEASED)
complete_rq = true;
@@ -1450,18 +1447,8 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct nvme_fc_ctrl *ctrl = set->driver_data;
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
- struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
-
- return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
-}
-
-static int
-nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct nvme_fc_ctrl *ctrl = set->driver_data;
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
- struct nvme_fc_queue *queue = &ctrl->queues[0];
+ int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
+ struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}
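[The consolidation above lets one .init_request callback serve both tag sets by keying the queue index off which set owns the request. A minimal sketch of the dispatch (struct example_ctrl and example_init_one() are hypothetical), assuming one admin and one io tag set per controller:

	/*
	 * io tag set requests map to queues[hctx_idx + 1]; requests from
	 * the admin tag set map to queue 0, mirroring the hunk above.
	 */
	static int example_init_request(struct blk_mq_tag_set *set,
			struct request *rq, unsigned int hctx_idx,
			unsigned int numa_node)
	{
		struct example_ctrl *ctrl = set->driver_data;
		int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;

		return example_init_one(ctrl, &ctrl->queues[queue_idx], rq);
	}
]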
@@ -1624,7 +1611,7 @@ nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
int i;
- for (i = 1; i < ctrl->queue_count; i++)
+ for (i = 1; i < ctrl->ctrl.queue_count; i++)
nvme_fc_free_queue(&ctrl->queues[i]);
}
@@ -1645,10 +1632,10 @@ __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
- struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
+ struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
int i;
- for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
+ for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
__nvme_fc_delete_hw_queue(ctrl, queue, i);
}
@@ -1658,7 +1645,7 @@ nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
struct nvme_fc_queue *queue = &ctrl->queues[1];
int i, ret;
- for (i = 1; i < ctrl->queue_count; i++, queue++) {
+ for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
if (ret)
goto delete_queues;
@@ -1677,7 +1664,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
int i, ret = 0;
- for (i = 1; i < ctrl->queue_count; i++) {
+ for (i = 1; i < ctrl->ctrl.queue_count; i++) {
ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
(qsize / 5));
if (ret)
@@ -1695,7 +1682,7 @@ nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
int i;
- for (i = 1; i < ctrl->queue_count; i++)
+ for (i = 1; i < ctrl->ctrl.queue_count; i++)
nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
}
@@ -1716,6 +1703,7 @@ nvme_fc_ctrl_free(struct kref *ref)
list_del(&ctrl->ctrl_list);
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
blk_cleanup_queue(ctrl->ctrl.admin_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
@@ -1759,16 +1747,16 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
+ /* only proceed if in LIVE state - e.g. on first error */
+ if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+ return;
+
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport association error detected: %s\n",
ctrl->cnum, errmsg);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
- /* stop the queues on error, cleanup is in reset thread */
- if (ctrl->queue_count > 1)
- nvme_stop_queues(&ctrl->ctrl);
-
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: error_recovery: Couldn't change state "
@@ -1776,10 +1764,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
return;
}
- if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
- dev_err(ctrl->ctrl.device,
- "NVME-FC{%d}: error_recovery: Failed to schedule "
- "reset work\n", ctrl->cnum);
+ nvme_reset_ctrl(&ctrl->ctrl);
}
static enum blk_eh_timer_return
@@ -1888,7 +1873,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
* level FC exchange resource that is also outstanding. This must be
* considered in all cleanup operations.
*/
-static int
+static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
struct nvme_fc_fcp_op *op, u32 data_len,
enum nvmefc_fcp_datadir io_dir)
@@ -1903,10 +1888,10 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
* the target device is present
*/
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
- return BLK_MQ_RQ_QUEUE_ERROR;
+ goto busy;
if (!nvme_fc_ctrl_get(ctrl))
- return BLK_MQ_RQ_QUEUE_ERROR;
+ return BLK_STS_IOERR;
/* format the FC-NVME CMD IU and fcp_req */
cmdiu->connection_id = cpu_to_be64(queue->connection_id);
@@ -1954,8 +1939,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
if (ret < 0) {
nvme_cleanup_cmd(op->rq);
nvme_fc_ctrl_put(ctrl);
- return (ret == -ENOMEM || ret == -EAGAIN) ?
- BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+ if (ret == -ENOMEM || ret == -EAGAIN)
+ return BLK_STS_RESOURCE;
+ return BLK_STS_IOERR;
}
}
@@ -1972,28 +1958,28 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
queue->lldd_handle, &op->fcp_req);
if (ret) {
- if (op->rq) { /* normal request */
+ if (!(op->flags & FCOP_FLAGS_AEN))
nvme_fc_unmap_data(ctrl, op->rq, op);
- nvme_cleanup_cmd(op->rq);
- }
- /* else - aen. no cleanup needed */
nvme_fc_ctrl_put(ctrl);
- if (ret != -EBUSY)
- return BLK_MQ_RQ_QUEUE_ERROR;
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
+ ret != -EBUSY)
+ return BLK_STS_IOERR;
- if (op->rq) {
- blk_mq_stop_hw_queues(op->rq->q);
- blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
- }
- return BLK_MQ_RQ_QUEUE_BUSY;
+ goto busy;
}
- return BLK_MQ_RQ_QUEUE_OK;
+ return BLK_STS_OK;
+
+busy:
+ if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
+ blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
+
+ return BLK_STS_RESOURCE;
}
-static int
+static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
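[The conversion above tracks the blk-mq switch from BLK_MQ_RQ_QUEUE_* return codes to blk_status_t: retryable conditions (a remote port that is not FC_OBJSTATE_ONLINE, or -EBUSY from the LLDD) become BLK_STS_RESOURCE so blk-mq requeues the request after blk_mq_delay_run_hw_queue(), while hard failures become BLK_STS_IOERR. A minimal sketch of that mapping (a hypothetical helper, not part of the driver; the real code additionally treats -ENOMEM/-EAGAIN during command setup as retryable):

	#include <linux/blk-mq.h>
	#include <linux/errno.h>

	/*
	 * Hypothetical helper: translate an LLDD submission errno into a
	 * blk_status_t. BLK_STS_RESOURCE asks blk-mq to retry the request
	 * later; BLK_STS_IOERR fails it outright.
	 */
	static blk_status_t example_errno_to_blk_status(int ret, bool port_online)
	{
		if (!ret)
			return BLK_STS_OK;
		if (!port_online || ret == -EBUSY)
			return BLK_STS_RESOURCE;
		return BLK_STS_IOERR;
	}
]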
@@ -2006,7 +1992,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_command *sqe = &cmdiu->sqe;
enum nvmefc_fcp_datadir io_dir;
u32 data_len;
- int ret;
+ blk_status_t ret;
ret = nvme_setup_cmd(ns, rq, sqe);
if (ret)
@@ -2061,7 +2047,7 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
struct nvme_fc_fcp_op *aen_op;
unsigned long flags;
bool terminating = false;
- int ret;
+ blk_status_t ret;
if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
return;
@@ -2093,7 +2079,6 @@ __nvme_fc_final_op_cleanup(struct request *rq)
op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
FCOP_FLAGS_COMPLETE);
- nvme_cleanup_cmd(rq);
nvme_fc_unmap_data(ctrl, rq, op);
nvme_complete_rq(rq);
nvme_fc_ctrl_put(ctrl);
@@ -2193,17 +2178,20 @@ static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ unsigned int nr_io_queues;
int ret;
- ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
+ ctrl->lport->ops->max_hw_queues);
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret) {
dev_info(ctrl->ctrl.device,
"set_queue_count failed: %d\n", ret);
return ret;
}
- ctrl->queue_count = opts->nr_io_queues + 1;
- if (!opts->nr_io_queues)
+ ctrl->ctrl.queue_count = nr_io_queues + 1;
+ if (!nr_io_queues)
return 0;
nvme_fc_init_io_queues(ctrl);
@@ -2219,7 +2207,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
sizeof(struct scatterlist)) +
ctrl->lport->ops->fcprqst_priv_sz;
ctrl->tag_set.driver_data = ctrl;
- ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+ ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
@@ -2247,7 +2235,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
out_delete_hw_queues:
nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
- nvme_stop_keep_alive(&ctrl->ctrl);
blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
blk_mq_free_tag_set(&ctrl->tag_set);
@@ -2263,17 +2250,21 @@ static int
nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ unsigned int nr_io_queues;
int ret;
- ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
+ ctrl->lport->ops->max_hw_queues);
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret) {
dev_info(ctrl->ctrl.device,
"set_queue_count failed: %d\n", ret);
return ret;
}
+ ctrl->ctrl.queue_count = nr_io_queues + 1;
/* check for io queues existing */
- if (ctrl->queue_count == 1)
+ if (ctrl->ctrl.queue_count == 1)
return 0;
nvme_fc_init_io_queues(ctrl);
@@ -2290,6 +2281,8 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_delete_hw_queues;
+ blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+
return 0;
out_delete_hw_queues:
@@ -2311,7 +2304,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
int ret;
bool changed;
- ++ctrl->ctrl.opts->nr_reconnects;
+ ++ctrl->ctrl.nr_reconnects;
/*
* Create the admin queue
@@ -2331,7 +2324,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
goto out_delete_hw_queue;
if (ctrl->ctrl.state != NVME_CTRL_NEW)
- blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
if (ret)
@@ -2344,7 +2337,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
* prior connection values
*/
- ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+ ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
if (ret) {
dev_err(ctrl->ctrl.device,
"prop_get NVME_REG_CAP failed\n");
@@ -2352,9 +2345,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
}
ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+ min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
- ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
if (ret)
goto out_disconnect_admin_queue;
@@ -2375,8 +2368,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
goto out_disconnect_admin_queue;
}
- nvme_start_keep_alive(&ctrl->ctrl);
-
/* FC-NVME supports normal SGL Data Block Descriptors */
if (opts->queue_size > ctrl->ctrl.maxcmd) {
@@ -2396,7 +2387,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
* Create the io queues
*/
- if (ctrl->queue_count > 1) {
+ if (ctrl->ctrl.queue_count > 1) {
if (ctrl->ctrl.state == NVME_CTRL_NEW)
ret = nvme_fc_create_io_queues(ctrl);
else
@@ -2408,19 +2399,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
WARN_ON_ONCE(!changed);
- ctrl->ctrl.opts->nr_reconnects = 0;
+ ctrl->ctrl.nr_reconnects = 0;
- if (ctrl->queue_count > 1) {
- nvme_start_queues(&ctrl->ctrl);
- nvme_queue_scan(&ctrl->ctrl);
- nvme_queue_async_events(&ctrl->ctrl);
- }
+ nvme_start_ctrl(&ctrl->ctrl);
return 0; /* Success */
out_term_aen_ops:
nvme_fc_term_aen_ops(ctrl);
- nvme_stop_keep_alive(&ctrl->ctrl);
out_disconnect_admin_queue:
/* send a Disconnect(association) LS to fc-nvme target */
nvme_fc_xmt_disconnect_assoc(ctrl);
@@ -2443,8 +2429,6 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
unsigned long flags;
- nvme_stop_keep_alive(&ctrl->ctrl);
-
spin_lock_irqsave(&ctrl->lock, flags);
ctrl->flags |= FCCTRL_TERMIO;
ctrl->iocnt = 0;
@@ -2462,7 +2446,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
* io requests back to the block layer as part of normal completions
* (but with error status).
*/
- if (ctrl->queue_count > 1) {
+ if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -2485,7 +2469,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
* use blk_mq_tagset_busy_itr() and the transport routine to
* terminate the exchanges.
*/
- blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -2494,11 +2478,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
/* wait for all io that had to be aborted */
spin_lock_irqsave(&ctrl->lock, flags);
- while (ctrl->iocnt) {
- spin_unlock_irqrestore(&ctrl->lock, flags);
- msleep(1000);
- spin_lock_irqsave(&ctrl->lock, flags);
- }
+ wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
ctrl->flags &= ~FCCTRL_TERMIO;
spin_unlock_irqrestore(&ctrl->lock, flags);
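[The hunk above replaces a 1-second msleep() polling loop with wait_event_lock_irq(), which drops ctrl->lock while sleeping and re-acquires it before re-checking the condition; the matching wake_up() lands in __nvme_fc_fcpop_chk_teardowns() when the last outstanding aborted io drops iocnt to zero. A minimal sketch of the pairing, with hypothetical names, assuming plain kernel context:

	#include <linux/wait.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static DECLARE_WAIT_QUEUE_HEAD(example_wait);
	static unsigned int example_iocnt;

	/* completion side: the last in-flight abort wakes the waiter */
	static void example_io_done(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		if (!--example_iocnt)
			wake_up(&example_wait);
		spin_unlock_irqrestore(&example_lock, flags);
	}

	/* teardown side: sleeps (lock dropped internally) until iocnt hits 0 */
	static void example_wait_for_aborts(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		wait_event_lock_irq(example_wait, example_iocnt == 0, example_lock);
		spin_unlock_irqrestore(&example_lock, flags);
	}
]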
@@ -2528,9 +2508,10 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
struct nvme_fc_ctrl *ctrl =
container_of(work, struct nvme_fc_ctrl, delete_work);
- cancel_work_sync(&ctrl->reset_work);
+ cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
-
+ nvme_stop_ctrl(&ctrl->ctrl);
+ nvme_remove_namespaces(&ctrl->ctrl);
/*
* kill the association on the link side. this will block
* waiting for io to terminate
@@ -2555,7 +2536,7 @@ __nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
return true;
- if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
+ if (!queue_work(nvme_wq, &ctrl->delete_work))
return true;
return false;
@@ -2582,7 +2563,7 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
ret = __nvme_fc_del_ctrl(ctrl);
if (!ret)
- flush_workqueue(nvme_fc_wq);
+ flush_workqueue(nvme_wq);
nvme_put_ctrl(&ctrl->ctrl);
@@ -2607,13 +2588,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
- queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
+ queue_delayed_work(nvme_wq, &ctrl->connect_work,
ctrl->ctrl.opts->reconnect_delay * HZ);
} else {
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Max reconnect attempts (%d) "
"reached. Removing controller\n",
- ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
+ ctrl->cnum, ctrl->ctrl.nr_reconnects);
WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
}
}
@@ -2622,9 +2603,10 @@ static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
struct nvme_fc_ctrl *ctrl =
- container_of(work, struct nvme_fc_ctrl, reset_work);
+ container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
int ret;
+ nvme_stop_ctrl(&ctrl->ctrl);
/* will block while waiting for io to terminate */
nvme_fc_delete_association(ctrl);
@@ -2636,29 +2618,6 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
"NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
}
-/*
- * called by the nvme core layer, for sysfs interface that requests
- * a reset of the nvme controller
- */
-static int
-nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
-{
- struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
-
- dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
-
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
- return -EBUSY;
-
- if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
- return -EBUSY;
-
- flush_work(&ctrl->reset_work);
-
- return 0;
-}
-
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.name = "fc",
.module = THIS_MODULE,
@@ -2666,11 +2625,9 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
- .reset_ctrl = nvme_fc_reset_nvme_ctrl,
.free_ctrl = nvme_fc_nvme_ctrl_freed,
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_del_nvme_ctrl,
- .get_subsysnqn = nvmf_get_subsysnqn,
.get_address = nvmf_get_address,
};
@@ -2696,7 +2653,7 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
.complete = nvme_fc_complete_rq,
- .init_request = nvme_fc_init_admin_request,
+ .init_request = nvme_fc_init_request,
.exit_request = nvme_fc_exit_request,
.reinit_request = nvme_fc_reinit_request,
.init_hctx = nvme_fc_init_admin_hctx,
@@ -2741,23 +2698,22 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
kref_init(&ctrl->ref);
INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
- INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
+ INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
- ctrl->queue_count = min_t(unsigned int,
+ ctrl->ctrl.queue_count = min_t(unsigned int,
opts->nr_io_queues,
lport->ops->max_hw_queues);
- opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */
- ctrl->queue_count++; /* +1 for admin queue */
+ ctrl->ctrl.queue_count++; /* +1 for admin queue */
ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
ret = -ENOMEM;
- ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
- GFP_KERNEL);
+ ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
+ sizeof(struct nvme_fc_queue), GFP_KERNEL);
if (!ctrl->queues)
goto out_free_ida;
@@ -2808,6 +2764,9 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
+ /* Remove core ctrl ref. */
+ nvme_put_ctrl(&ctrl->ctrl);
+
/* as we're past the point where we transition to the ref
* counting teardown path, if we return a bad pointer here,
* the calling routine, thinking it's prior to the
@@ -2846,66 +2805,70 @@ out_fail:
return ERR_PTR(ret);
}
-enum {
- FCT_TRADDR_ERR = 0,
- FCT_TRADDR_WWNN = 1 << 0,
- FCT_TRADDR_WWPN = 1 << 1,
-};
struct nvmet_fc_traddr {
u64 nn;
u64 pn;
};
-static const match_table_t traddr_opt_tokens = {
- { FCT_TRADDR_WWNN, "nn-%s" },
- { FCT_TRADDR_WWPN, "pn-%s" },
- { FCT_TRADDR_ERR, NULL }
-};
-
static int
-nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
- substring_t args[MAX_OPT_ARGS];
- char *options, *o, *p;
- int token, ret = 0;
u64 token64;
- options = o = kstrdup(buf, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
+ if (match_u64(sstr, &token64))
+ return -EINVAL;
+ *val = token64;
- while ((p = strsep(&o, ":\n")) != NULL) {
- if (!*p)
- continue;
+ return 0;
+}
- token = match_token(p, traddr_opt_tokens, args);
- switch (token) {
- case FCT_TRADDR_WWNN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->nn = token64;
- break;
- case FCT_TRADDR_WWPN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->pn = token64;
- break;
- default:
- pr_warn("unknown traddr token or missing value '%s'\n",
- p);
- ret = -EINVAL;
- goto out;
- }
- }
+/*
+ * This routine validates and extracts the WWN's from the TRADDR string.
+ * As kernel parsers need the 0x to determine number base, universally
+ * build string to parse with 0x prefix before parsing name strings.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+ char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+ substring_t wwn = { name, &name[sizeof(name)-1] };
+ int nnoffset, pnoffset;
+
+ /* validate the string is one of the 2 allowed formats */
+ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+ !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+ "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+ nnoffset = NVME_FC_TRADDR_OXNNLEN;
+ pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+ NVME_FC_TRADDR_OXNNLEN;
+ } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+ !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+ "pn-", NVME_FC_TRADDR_NNLEN))) {
+ nnoffset = NVME_FC_TRADDR_NNLEN;
+ pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+ } else
+ goto out_einval;
-out:
- kfree(options);
- return ret;
+ name[0] = '0';
+ name[1] = 'x';
+ name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+ memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+ goto out_einval;
+
+ memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+ goto out_einval;
+
+ return 0;
+
+out_einval:
+ pr_warn("%s: bad traddr string\n", __func__);
+ return -EINVAL;
}
static struct nvme_ctrl *
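[The rewritten parser above accepts exactly two fixed-width layouts, "nn-0x<16 hex>:pn-0x<16 hex>" and "nn-<16 hex>:pn-<16 hex>", and copies each name into a "0x"-prefixed scratch buffer before calling match_u64(), since the kernel parser needs the prefix to pick the number base. A hypothetical usage sketch (the function is static to fc.c, so this is illustrative only), assuming the length constants from nvme-fc.h:

	static int example_parse(void)
	{
		struct nvmet_fc_traddr traddr = { 0, 0 };
		char buf[] = "nn-0x20000090fa931234:pn-0x10000090fa931234";

		if (nvme_fc_parse_traddr(&traddr, buf, sizeof(buf)))
			return -EINVAL;

		/* on success, traddr.nn/traddr.pn hold the 64-bit WWNN/WWPN */
		pr_info("wwnn=0x%016llx wwpn=0x%016llx\n",
			traddr.nn, traddr.pn);
		return 0;
	}
]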
@@ -2919,11 +2882,11 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
unsigned long flags;
int ret;
- ret = nvme_fc_parse_address(&raddr, opts->traddr);
+ ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
if (ret || !raddr.nn || !raddr.pn)
return ERR_PTR(-EINVAL);
- ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
+ ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
if (ret || !laddr.nn || !laddr.pn)
return ERR_PTR(-EINVAL);
@@ -2966,20 +2929,7 @@ static struct nvmf_transport_ops nvme_fc_transport = {
static int __init nvme_fc_init_module(void)
{
- int ret;
-
- nvme_fc_wq = create_workqueue("nvme_fc_wq");
- if (!nvme_fc_wq)
- return -ENOMEM;
-
- ret = nvmf_register_transport(&nvme_fc_transport);
- if (ret)
- goto err;
-
- return 0;
-err:
- destroy_workqueue(nvme_fc_wq);
- return ret;
+ return nvmf_register_transport(&nvme_fc_transport);
}
static void __exit nvme_fc_exit_module(void)
@@ -2990,8 +2940,6 @@ static void __exit nvme_fc_exit_module(void)
nvmf_unregister_transport(&nvme_fc_transport);
- destroy_workqueue(nvme_fc_wq);
-
ida_destroy(&nvme_fc_local_port_cnt);
ida_destroy(&nvme_fc_ctrl_cnt);
}