author    Sagi Grimberg <sagig@mellanox.com>      2015-10-13 19:12:58 +0300
committer Doug Ledford <dledford@redhat.com>      2015-10-28 19:26:06 +0300
commit    dd0107a08996c0ab8cac2b98ddbed5313e118e81 (patch)
tree      1248e196a4cfc38d3c06b736171249e395e19fe7 /drivers/infiniband/ulp/iser/iser_initiator.c
parent    6c760b3dd576329e776b353f2eaefbe2034361b9 (diff)
IB/iser: set block queue_virt_boundary
The block layer can reliably guarantee that SG lists won't contain gaps
(page unaligned) if a driver sets the queue virt_boundary. With this
setting the block layer will:

- refuse merges if bios are not aligned to the virtual boundary
- split bios/requests that are not aligned to the virtual boundary
- or, bounce buffer SG_IOs that are not aligned to the virtual boundary

Since iser works with a 4K page size, set the virt_boundary to 4K pages.
With this setting, we can now safely remove the bounce buffering logic
in iser.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Doug Ledford <dledford@redhat.com>
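The rule behind the three bullet points above can be summed up as: once a
virt_boundary mask is set, consecutive SG elements must meet exactly on the
boundary. The helper below is an illustration written for this page, not the
block layer's actual code; its name and signature are made up.

/*
 * Illustration only (not block-layer code): with a mask of SZ_4K - 1,
 * element N may directly follow element N-1 only if N-1 ends exactly on
 * a 4K boundary and N starts exactly on one; otherwise the block layer
 * refuses the merge, splits the request, or bounce-buffers the SG_IO.
 */
static bool sg_gap_at_virt_boundary(unsigned long prev_end_addr,
				    unsigned long next_start_addr,
				    unsigned long boundary_mask)
{
	return (prev_end_addr & boundary_mask) != 0 ||
	       (next_start_addr & boundary_mask) != 0;
}

The first element of a list may still start, and the last may still end, at
an arbitrary offset; only the joints between elements are constrained.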
Diffstat (limited to 'drivers/infiniband/ulp/iser/iser_initiator.c')
-rw-r--r--   drivers/infiniband/ulp/iser/iser_initiator.c   |  51
1 file changed, 8 insertions(+), 43 deletions(-)
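Note that the diffstat above is limited to iser_initiator.c; the queue
setting named in the subject is applied from the iser SCSI host glue, which
is not part of this diff. Below is a minimal sketch of how a SCSI LLD applies
a 4K virtual boundary, assuming a slave_alloc-style hook: the hook name and
constant are illustrative, and only blk_queue_virt_boundary() and the 4K page
size are taken from the commit message.

#include <linux/blkdev.h>
#include <linux/sizes.h>
#include <scsi/scsi_device.h>

/*
 * Sketch only: tell the block layer that SG lists for this device must
 * not contain gaps within a 4K page. The hook name is an assumption;
 * the real iser change lives outside the file shown in this diff.
 */
static int iser_slave_alloc_sketch(struct scsi_device *sdev)
{
	blk_queue_virt_boundary(sdev->request_queue, SZ_4K - 1);
	return 0;
}

Once this mask is set, the unaligned-SG bounce path removed below can never
be reached, which is what allows iser_task_rdma_finalize() to drop its
alignment bookkeeping.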
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index d511879d8cdf..ffd00c420729 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -661,48 +661,14 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
-	int is_rdma_data_aligned = 1;
-	int is_rdma_prot_aligned = 1;
 	int prot_count = scsi_prot_sg_count(iser_task->sc);
-	/* if we were reading, copy back to unaligned sglist,
-	 * anyway dma_unmap and free the copy
-	 */
-	if (iser_task->data[ISER_DIR_IN].orig_sg) {
-		is_rdma_data_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_task,
-						&iser_task->data[ISER_DIR_IN],
-						ISER_DIR_IN);
-	}
-
-	if (iser_task->data[ISER_DIR_OUT].orig_sg) {
-		is_rdma_data_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_task,
-						&iser_task->data[ISER_DIR_OUT],
-						ISER_DIR_OUT);
-	}
-
-	if (iser_task->prot[ISER_DIR_IN].orig_sg) {
-		is_rdma_prot_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_task,
-						&iser_task->prot[ISER_DIR_IN],
-						ISER_DIR_IN);
-	}
-
-	if (iser_task->prot[ISER_DIR_OUT].orig_sg) {
-		is_rdma_prot_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_task,
-						&iser_task->prot[ISER_DIR_OUT],
-						ISER_DIR_OUT);
-	}
-
 	if (iser_task->dir[ISER_DIR_IN]) {
 		iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
-		if (is_rdma_data_aligned)
-			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->data[ISER_DIR_IN],
-						 DMA_FROM_DEVICE);
-		if (prot_count && is_rdma_prot_aligned)
+		iser_dma_unmap_task_data(iser_task,
+					 &iser_task->data[ISER_DIR_IN],
+					 DMA_FROM_DEVICE);
+		if (prot_count)
 			iser_dma_unmap_task_data(iser_task,
 						 &iser_task->prot[ISER_DIR_IN],
 						 DMA_FROM_DEVICE);
@@ -710,11 +676,10 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 	if (iser_task->dir[ISER_DIR_OUT]) {
 		iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
-		if (is_rdma_data_aligned)
-			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->data[ISER_DIR_OUT],
-						 DMA_TO_DEVICE);
-		if (prot_count && is_rdma_prot_aligned)
+		iser_dma_unmap_task_data(iser_task,
+					 &iser_task->data[ISER_DIR_OUT],
+					 DMA_TO_DEVICE);
+		if (prot_count)
 			iser_dma_unmap_task_data(iser_task,
 						 &iser_task->prot[ISER_DIR_OUT],
 						 DMA_TO_DEVICE);