Diffstat (limited to 'drivers/nvme/host/tcp.c')
-rw-r--r--   drivers/nvme/host/tcp.c   36
1 file changed, 27 insertions, 9 deletions
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 69f59d2c5799..75435cdb156c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -287,7 +287,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
          * directly, otherwise queue io_work. Also, only do that if we
          * are on the same cpu, so we don't introduce contention.
          */
-        if (queue->io_cpu == __smp_processor_id() &&
+        if (queue->io_cpu == raw_smp_processor_id() &&
             sync && empty && mutex_trylock(&queue->send_mutex)) {
                 queue->more_requests = !last;
                 nvme_tcp_send_all(queue);
@@ -417,6 +417,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 {
         struct nvme_tcp_ctrl *ctrl = set->driver_data;
         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+        struct nvme_tcp_cmd_pdu *pdu;
         int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
         struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
         u8 hdgst = nvme_tcp_hdgst_len(queue);
@@ -427,8 +428,10 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
         if (!req->pdu)
                 return -ENOMEM;
 
+        pdu = req->pdu;
         req->queue = queue;
         nvme_req(rq)->ctrl = &ctrl->ctrl;
+        nvme_req(rq)->cmd = &pdu->cmd;
 
         return 0;
 }
@@ -568,6 +571,13 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
         req->pdu_len = le32_to_cpu(pdu->r2t_length);
         req->pdu_sent = 0;
 
+        if (unlikely(!req->pdu_len)) {
+                dev_err(queue->ctrl->ctrl.device,
+                        "req %d r2t len is %u, probably a bug...\n",
+                        rq->tag, req->pdu_len);
+                return -EPROTO;
+        }
+
         if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
                 dev_err(queue->ctrl->ctrl.device,
                         "req %d r2t len %u exceeded data len %u (%zu sent)\n",
@@ -867,7 +877,7 @@ static void nvme_tcp_state_change(struct sock *sk)
 {
         struct nvme_tcp_queue *queue;
 
-        read_lock(&sk->sk_callback_lock);
+        read_lock_bh(&sk->sk_callback_lock);
         queue = sk->sk_user_data;
         if (!queue)
                 goto done;
@@ -888,7 +898,7 @@ static void nvme_tcp_state_change(struct sock *sk)
 
         queue->state_change(sk);
 done:
-        read_unlock(&sk->sk_callback_lock);
+        read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
@@ -1575,7 +1585,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
                 memset(set, 0, sizeof(*set));
                 set->ops = &nvme_tcp_admin_mq_ops;
                 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-                set->reserved_tags = 2; /* connect + keep-alive */
+                set->reserved_tags = NVMF_RESERVED_TAGS;
                 set->numa_node = nctrl->numa_node;
                 set->flags = BLK_MQ_F_BLOCKING;
                 set->cmd_size = sizeof(struct nvme_tcp_request);
@@ -1587,7 +1597,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
                 memset(set, 0, sizeof(*set));
                 set->ops = &nvme_tcp_mq_ops;
                 set->queue_depth = nctrl->sqsize + 1;
-                set->reserved_tags = 1; /* fabric connect */
+                set->reserved_tags = NVMF_RESERVED_TAGS;
                 set->numa_node = nctrl->numa_node;
                 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
                 set->cmd_size = sizeof(struct nvme_tcp_request);
@@ -1745,8 +1755,11 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
                 return ret;
 
         ctrl->queue_count = nr_io_queues + 1;
-        if (ctrl->queue_count < 2)
-                return 0;
+        if (ctrl->queue_count < 2) {
+                dev_err(ctrl->device,
+                        "unable to set any I/O queues\n");
+                return -ENOMEM;
+        }
 
         dev_info(ctrl->device,
                 "creating %d I/O queues.\n", nr_io_queues);
@@ -1875,7 +1888,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 
         blk_mq_unquiesce_queue(ctrl->admin_q);
 
-        error = nvme_init_identify(ctrl);
+        error = nvme_init_ctrl_finish(ctrl);
         if (error)
                 goto out_quiesce_queue;
 
@@ -1963,6 +1976,11 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
                 goto destroy_admin;
         }
 
+        if (!(ctrl->sgls & ((1 << 0) | (1 << 1)))) {
+                dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
+                goto destroy_admin;
+        }
+
         if (opts->queue_size > ctrl->sqsize + 1)
                 dev_warn(ctrl->device,
                         "queue_size %zu > ctrl sqsize %u, clamping down\n",
@@ -2259,7 +2277,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
         u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
         blk_status_t ret;
 
-        ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
+        ret = nvme_setup_cmd(ns, rq);
         if (ret)
                 return ret;
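For readers following the nvme_tcp_setup_ctrl() hunk above: the new check requires bits 1:0 of the controller's SGLS identify field to be non-zero before setup continues, since NVMe over Fabrics transfers data with SGLs. Below is a rough standalone C sketch of that bit test; the helper name and sample values are made up for illustration and are not part of this patch or the kernel tree.

/*
 * Standalone sketch (not kernel code) of the mandatory-SGL check added to
 * nvme_tcp_setup_ctrl(): a zero value in bits 1:0 of the SGLS field means
 * the controller advertises no SGL support and setup should be aborted.
 */
#include <stdint.h>
#include <stdio.h>

/* Return 0 if SGL support is advertised, -1 if neither support bit is set. */
static int check_mandatory_sgls(uint32_t sgls)
{
        if (!(sgls & ((1 << 0) | (1 << 1))))
                return -1;
        return 0;
}

int main(void)
{
        uint32_t sgls_ok = 0x1;         /* hypothetical: SGL support advertised */
        uint32_t sgls_bad = 0x0;        /* hypothetical: no SGL support */

        printf("ok controller:  %d\n", check_mandatory_sgls(sgls_ok));
        printf("bad controller: %d\n", check_mandatory_sgls(sgls_bad));
        return 0;
}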
