author    Jens Axboe <axboe@kernel.dk>  2018-05-17 19:31:49 +0300
committer Keith Busch <keith.busch@intel.com>  2018-05-18 23:41:36 +0300
commit    d1f06f4ae0410f8e5025f3c9129a52b86579e174 (patch)
tree      19443f23e425386a3b6f88bdc215cd78debec9de /drivers
parent    f9dde187fa921c12a8680089a77595b866e65455 (diff)
download  linux-d1f06f4ae0410f8e5025f3c9129a52b86579e174.tar.xz
nvme-pci: move ->cq_vector == -1 check outside of ->q_lock
We only clear it dynamically in nvme_suspend_queue(). When we do, ensure
to do a full flush so that any nvme_queue_rq() invocation will see it.

Ideally we'd kill this check completely, but we're using it to flush
requests on a dying queue.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Christoph Hellwig <hch@lst.de>
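For readers following the ordering argument above, here is a minimal, hypothetical userspace sketch of the same pattern: the suspend path clears a flag under the lock and then issues a full barrier, so the submission path can check the flag before taking the lock at all. It uses C11 atomics and pthreads in place of the kernel's spinlock_t and mb(), and the fake_queue / queue_submit() / queue_suspend() names are invented for this example, not taken from the driver.

/*
 * Illustrative only: a userspace analogue of the pattern in this patch.
 * The suspend path clears cq_vector under the lock and then issues a
 * full fence; the submit path may read the flag without the lock.
 * Uses C11 atomics/pthreads instead of the kernel's spinlock_t and mb();
 * all names here are made up for the example.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_queue {
	pthread_mutex_t lock;
	atomic_int cq_vector;		/* -1 means the queue is dying */
};

/* Submission path: bail out early, before taking the lock. */
static int queue_submit(struct fake_queue *q)
{
	if (atomic_load_explicit(&q->cq_vector, memory_order_acquire) < 0)
		return -1;		/* analogous to BLK_STS_IOERR */

	pthread_mutex_lock(&q->lock);
	/* ... build and submit the command here ... */
	pthread_mutex_unlock(&q->lock);
	return 0;
}

/* Suspend path: clear the vector under the lock, then fence. */
static void queue_suspend(struct fake_queue *q)
{
	pthread_mutex_lock(&q->lock);
	atomic_store_explicit(&q->cq_vector, -1, memory_order_release);
	pthread_mutex_unlock(&q->lock);

	/*
	 * Full barrier so a submitter on another CPU sees the store even
	 * though it checks the flag without grabbing the lock.
	 */
	atomic_thread_fence(memory_order_seq_cst);
}

int main(void)
{
	struct fake_queue q;

	pthread_mutex_init(&q.lock, NULL);
	atomic_init(&q.cq_vector, 1);	/* queue starts out live */

	printf("submit while live:    %d\n", queue_submit(&q));
	queue_suspend(&q);
	printf("submit after suspend: %d\n", queue_submit(&q));

	pthread_mutex_destroy(&q.lock);
	return 0;
}

Build with something like cc -pthread sketch.c; it prints 0 and then -1. In the real driver the barrier matters because nvme_queue_rq() can run concurrently on other CPUs, which this single-threaded demo does not exercise.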
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/nvme/host/pci.c   18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 577570d3e1f4..3dfedc84a921 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -872,6 +872,13 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_command cmnd;
 	blk_status_t ret;
 
+	/*
+	 * We should not need to do this, but we're still using this to
+	 * ensure we can drain requests on a dying queue.
+	 */
+	if (unlikely(nvmeq->cq_vector < 0))
+		return BLK_STS_IOERR;
+
 	ret = nvme_setup_cmd(ns, req, &cmnd);
 	if (ret)
 		return ret;
@@ -889,11 +896,6 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(req);
 
 	spin_lock_irq(&nvmeq->q_lock);
-	if (unlikely(nvmeq->cq_vector < 0)) {
-		ret = BLK_STS_IOERR;
-		spin_unlock_irq(&nvmeq->q_lock);
-		goto out_cleanup_iod;
-	}
 	__nvme_submit_cmd(nvmeq, &cmnd);
 	spin_unlock_irq(&nvmeq->q_lock);
 	return BLK_STS_OK;
@@ -1321,6 +1323,12 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 	nvmeq->cq_vector = -1;
 	spin_unlock_irq(&nvmeq->q_lock);
 
+	/*
+	 * Ensure that nvme_queue_rq() sees it ->cq_vector == -1 without
+	 * having to grab the lock.
+	 */
+	mb();
+
 	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
 		blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);