Diffstat (limited to 'drivers/nvme/target')
 drivers/nvme/target/core.c   | 6 ++++++
 drivers/nvme/target/fc.c     | 4 +---
 drivers/nvme/target/fcloop.c | 1 -
 drivers/nvme/target/nvmet.h  | 1 +
 drivers/nvme/target/rdma.c   | 1 +
 5 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index cf90713043da..eb9399ac97cf 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -529,6 +529,12 @@ fail:
}
EXPORT_SYMBOL_GPL(nvmet_req_init);
+void nvmet_req_uninit(struct nvmet_req *req)
+{
+ percpu_ref_put(&req->sq->ref);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_uninit);
+
static inline bool nvmet_cc_en(u32 cc)
{
return cc & 0x1;
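[Editor's note] nvmet_req_uninit() is the counterpart of nvmet_req_init(): init takes a reference on the submission queue and nvmet_req_complete() normally drops it, so a request that is abandoned before completion needs this helper to keep the refcount balanced. A minimal caller sketch, hypothetical transport code only (example_abandon_request is an invented name, not part of this patch):

#include "nvmet.h"	/* target-local header: struct nvmet_req, nvmet_req_uninit() */

/* Hypothetical error path in a fabrics transport that must drop a request
 * after nvmet_req_init() succeeded but before it can be completed. */
static void example_abandon_request(struct nvmet_req *req)
{
	nvmet_req_uninit(req);	/* put the sq ref taken by nvmet_req_init() */
	/* ... free transport-private buffers/SGLs for this request ... */
}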
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 62eba29c85fb..2006fae61980 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -517,9 +517,7 @@ nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
{
int cpu, idx, cnt;
- if (!(tgtport->ops->target_features &
- NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
- tgtport->ops->max_hw_queues == 1)
+ if (tgtport->ops->max_hw_queues == 1)
return WORK_CPU_UNBOUND;
/* Simple cpu selection based on qid modulo active cpu count */
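[Editor's note] With NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED gone, nvmet_fc_queue_to_cpu() only falls back to WORK_CPU_UNBOUND when the LLDD exposes a single hardware queue; otherwise the qid-modulo selection described in the comment above always runs. A rough illustrative sketch of that scheme, not the in-tree function body (example_queue_to_cpu is an invented name):

#include <linux/cpumask.h>
#include <linux/workqueue.h>	/* WORK_CPU_UNBOUND */

/* Illustrative only: walk the online-CPU mask and return the
 * (qid % num_online_cpus())-th CPU, spreading queues round-robin. */
static int example_queue_to_cpu(int qid)
{
	int cpu, idx = 0;
	int target = qid % num_online_cpus();

	for_each_online_cpu(cpu) {
		if (idx++ == target)
			return cpu;
	}
	return WORK_CPU_UNBOUND;	/* not reached while CPUs stay online */
}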
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 15551ef79c8c..294a6611fb24 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -698,7 +698,6 @@ static struct nvmet_fc_target_template tgttemplate = {
.dma_boundary = FCLOOP_DMABOUND_4G,
/* optional features */
.target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
- NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
NVMET_FCTGTFEAT_OPDONE_IN_ISR,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7cb77ba5993b..cfc5c7fb0ab7 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -261,6 +261,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
+void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 99c69018a35f..9e45cde63376 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -567,6 +567,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
rsp->n_rdma = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_req_uninit(&rsp->req);
nvmet_rdma_release_rsp(rsp);
if (wc->status != IB_WC_WR_FLUSH_ERR) {
pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
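[Editor's note] The rdma.c hunk applies the same pairing on the failure path: a failed RDMA READ completion releases the response without ever calling nvmet_req_complete(), so the submission-queue reference taken in nvmet_req_init() has to be dropped via nvmet_req_uninit() first, or the queue's percpu_ref would never drain at teardown. A condensed sketch of the handler shape, with hypothetical names and assuming the usual nvmet_req execute() callback:

#include "nvmet.h"	/* struct nvmet_req, nvmet_req_uninit() */

/* Hypothetical data-transfer completion handler following the pattern above. */
static void example_read_data_done(struct nvmet_req *req, bool failed)
{
	if (failed) {
		nvmet_req_uninit(req);	/* balance the ref from nvmet_req_init() */
		/* ... transport-specific release of the response/context ... */
		return;
	}

	/* Data is in place: let the core execute and later complete the command. */
	req->execute(req);
}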