author    Christoph Hellwig <hch@lst.de>    2018-12-02 19:46:24 +0300
committer Jens Axboe <axboe@kernel.dk>      2018-12-04 21:38:18 +0300
commit    f9801a484ad6dcc33b10c61b143efc3352541802
tree      91699c9d5e4aee33d430be139dfa5b2e6b5d754a
parent    3a7afd8ee42a68d4f24ab9c947a4ef82d4d52375
nvme-rdma: remove I/O polling support
The code was always a bit of a hack that digs far too much into RDMA
core internals. Let's kick it out and reimplement proper dedicated poll
queues as needed.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
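To illustrate the layering problem the message alludes to, here is a minimal
sketch (not kernel code; the example_ helper name and its parameters are
illustrative) of how nvme-rdma's CQs are set up through the RDMA core. With a
softirq or workqueue poll context, the core's own handler calls ib_poll_cq()
and dispatches completions, so a driver polling the same CQ directly competes
with it:

#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch: the RDMA core owns completion processing for CQs
 * allocated with IB_POLL_SOFTIRQ (or IB_POLL_WORKQUEUE).  The core's
 * handler calls ib_poll_cq() itself and dispatches each completion via
 * wc.wr_cqe->done(), so a driver that also calls ib_poll_cq() on the
 * same CQ, as the removed nvme_rdma_poll() below does, races with the
 * core for completions.  Only a CQ allocated with IB_POLL_DIRECT is
 * meant to be reaped by its owner.
 */
static struct ib_cq *example_alloc_cq(struct ib_device *dev, void *priv,
		int nr_cqe, int comp_vector)
{
	return ib_alloc_cq(dev, priv, nr_cqe, comp_vector, IB_POLL_SOFTIRQ);
}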
-rw-r--r--  drivers/nvme/host/rdma.c  24
1 file changed, 0 insertions(+), 24 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3ca096c1a506..75c01d20a133 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1738,29 +1738,6 @@ err:
 	return BLK_STS_IOERR;
 }
 
-static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
-{
-	struct nvme_rdma_queue *queue = hctx->driver_data;
-	struct ib_cq *cq = queue->ib_cq;
-	struct ib_wc wc;
-	int found = 0;
-
-	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		struct ib_cqe *cqe = wc.wr_cqe;
-
-		if (cqe) {
-			if (cqe->done == nvme_rdma_recv_done) {
-				nvme_rdma_recv_done(cq, &wc);
-				found++;
-			} else {
-				cqe->done(cq, &wc);
-			}
-		}
-	}
-
-	return found;
-}
-
 static void nvme_rdma_complete_rq(struct request *rq)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
@@ -1782,7 +1759,6 @@ static const struct blk_mq_ops nvme_rdma_mq_ops = {
 	.init_request	= nvme_rdma_init_request,
 	.exit_request	= nvme_rdma_exit_request,
 	.init_hctx	= nvme_rdma_init_hctx,
-	.poll		= nvme_rdma_poll,
 	.timeout	= nvme_rdma_timeout,
 	.map_queues	= nvme_rdma_map_queues,
 };
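For context, a minimal sketch of the dedicated-poll-queue direction the
commit message points at, assuming a poll queue whose CQ is allocated with
IB_POLL_DIRECT; the queue->ib_cq field is taken from the removed code above,
the example_ name is illustrative, and this is not the actual follow-up patch:

#include <linux/blk-mq.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch of a blk-mq .poll callback for a dedicated poll queue whose CQ
 * was allocated with IB_POLL_DIRECT.  ib_process_cq_direct() is the RDMA
 * core's interface for owner-driven polling: it reaps the CQ and invokes
 * each wc.wr_cqe->done() itself, so the driver no longer has to dig into
 * completion dispatch by hand.  A budget of -1 processes all available
 * completions, and the number processed is returned, which matches what
 * blk-mq expects from .poll.
 */
static int example_rdma_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_rdma_queue *queue = hctx->driver_data;

	return ib_process_cq_direct(queue->ib_cq, -1);
}

Keeping poll queues separate this way means only CQs that are never touched
by the core's softirq path are polled by the driver, avoiding the completion
stealing that made the removed code a hack.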