author    Sagi Grimberg <sagi@grimberg.me>    2016-07-28 18:04:09 +0300
committer Sagi Grimberg <sagi@grimberg.me>    2016-08-04 17:44:40 +0300
commit    40e64e07213201710a51e270595d6e6c028f9502 (patch)
tree      f947b259e8c9c1a1d183603ead13d2c386ae8ec8 /drivers/nvme/target
parent    d8f7750a08968b105056328652d2c332bdfa062d (diff)
download  linux-40e64e07213201710a51e270595d6e6c028f9502.tar.xz
nvmet-rdma: Don't use the inline buffer in order to avoid allocation for small reads
Under extreme conditions using the inline buffer might cause data corruption: we repost the buffer for receive and then post the same buffer for the device to send. If we happen to use shared receive queues, the device might write to the buffer before it sends it (there is no ordering between the send and receive queues). Without SRQs we probably won't hit this as long as the host doesn't misbehave and send more than we allowed it to, but relying on that is not a good idea.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
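To make the hazard concrete, here is a hypothetical C sketch of the flow this patch removes. The two posting helpers are invented names standing in for the actual nvmet-rdma send/receive paths, and the struct layout is reduced to the one field the sketch needs:

/* Hypothetical sketch, not the actual nvmet-rdma code paths. */
struct nvmet_rdma_cmd;				/* receive-side command + inline buffer */
struct nvmet_rdma_rsp { struct nvmet_rdma_cmd *cmd; };

void post_send_with_inline_buffer(struct nvmet_rdma_rsp *rsp);	/* invented helper */
void repost_recv_buffer(struct nvmet_rdma_cmd *cmd);		/* invented helper */

static void racy_inline_read(struct nvmet_rdma_rsp *rsp)
{
	/* 1. Queue the command's inline buffer as the RDMA send payload. */
	post_send_with_inline_buffer(rsp);

	/* 2. Repost the same buffer so a new command can land in it. */
	repost_recv_buffer(rsp->cmd);

	/*
	 * The send and receive queues are not ordered with respect to each
	 * other (notably with a shared receive queue), so the HCA may fill
	 * the buffer with a new inbound command in step 2 before the send
	 * from step 1 has read it, corrupting the data returned to the host.
	 */
}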
Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/rdma.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 48c811850c29..b4d648536c3e 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -616,15 +616,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 	if (!len)
 		return 0;
 
-	/* use the already allocated data buffer if possible */
-	if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) {
-		nvmet_rdma_use_inline_sg(rsp, len, 0);
-	} else {
-		status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
-				len);
-		if (status)
-			return status;
-	}
+	status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
+			len);
+	if (status)
+		return status;
 
 	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
 			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
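With the inline-buffer shortcut gone, every keyed-SGL transfer maps into a freshly allocated, page-backed scatterlist that the HCA then transfers into via rdma_rw_ctx_init(). As a rough guide to what that allocation involves, here is a minimal sketch assuming standard kernel scatterlist APIs; alloc_payload_sgl is an illustrative name, and the in-tree nvmet_rdma_alloc_sgl may differ in detail:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/nvme.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Sketch: allocate a page-backed SGL covering 'length' bytes.
 * Returns 0 on success or an NVMe status code on failure.
 */
static u16 alloc_payload_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length)
{
	unsigned int nent = DIV_ROUND_UP(length, PAGE_SIZE);
	struct scatterlist *sg;
	unsigned int i = 0;

	sg = kmalloc_array(nent, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NVME_SC_INTERNAL;
	sg_init_table(sg, nent);

	/* Back each entry with a freshly allocated page. */
	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			goto out_free;
		sg_set_page(&sg[i++], page, page_len, 0);
		length -= page_len;
	}

	*sgl = sg;
	*nents = nent;
	return 0;

out_free:
	/* Unwind the pages allocated so far. */
	while (i--)
		__free_page(sg_page(&sg[i]));
	kfree(sg);
	return NVME_SC_INTERNAL;
}

Allocating per request costs a little on small reads, which is exactly the trade the commit message argues for: a predictable allocation beats a buffer-reuse scheme whose correctness depends on send/receive ordering the hardware does not guarantee.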