From e622f2f4ad2142d2a613a57fb85f8cf737935ef5 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 8 Oct 2015 09:16:33 +0100
Subject: IB: split struct ib_send_wr

This patch splits up struct ib_send_wr so that all non-trivial verbs
use their own structure which embeds struct ib_send_wr.  This dramatically
shrinks the size of a WR for the most common operations:

sizeof(struct ib_send_wr) (old):	96

sizeof(struct ib_send_wr):		48
sizeof(struct ib_rdma_wr):		64
sizeof(struct ib_atomic_wr):		96
sizeof(struct ib_ud_wr):		88
sizeof(struct ib_fast_reg_wr):		88
sizeof(struct ib_bind_mw_wr):		96
sizeof(struct ib_sig_handover_wr):	80

And with Sagi's pending MR rework, the fast registration WR will also come
down to a reasonable size:

sizeof(struct ib_fastreg_wr):		64

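For reference, the embedding pattern looks roughly like this.  This is a
minimal, compilable userspace sketch with simplified stand-in types, not
the verbatim include/rdma/ib_verbs.h definitions; the opcode value and the
field subset shown are illustrative only:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the generic WR header. */
	struct ib_send_wr {
		struct ib_send_wr *next;
		uint64_t wr_id;
		int opcode;
		int send_flags;
	};

	/* A verb-specific WR embeds the generic header instead of
	 * carrying a union of every per-verb field, so the common
	 * case stays small.
	 */
	struct ib_rdma_wr {
		struct ib_send_wr wr;
		uint64_t remote_addr;
		uint32_t rkey;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Providers receive the generic pointer from the post path and
	 * recover the per-verb WR; the tree grows small helpers of this
	 * shape (rdma_wr(), ud_wr(), ...) for that purpose.
	 */
	static struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
	{
		return container_of(wr, struct ib_rdma_wr, wr);
	}

	int main(void)
	{
		struct ib_rdma_wr read_wr = {
			.wr.wr_id = 1,
			.wr.opcode = 4,	/* stand-in for IB_WR_RDMA_READ */
			.remote_addr = 0x1000,
			.rkey = 42,
		};
		/* Consumers post the embedded generic WR, as the
		 * svc_rdma hunks below now do with &read_wr.wr.
		 */
		struct ib_send_wr *posted = &read_wr.wr;

		printf("recovered rkey=%u\n",
		       (unsigned)rdma_wr(posted)->rkey);
		return 0;
	}

The upside of embedding over a union is that each WR only pays for the
fields its verb actually uses, while code paths that handle generic WRs
keep working on the embedded struct ib_send_wr unchanged.
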
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com> [srp, srpt]
Reviewed-by: Chuck Lever <chuck.lever@oracle.com> [sunrpc]
Tested-by: Haggai Eran <haggaie@mellanox.com>
Tested-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
---
 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 70 ++++++++++++++++-----------------
 1 file changed, 35 insertions(+), 35 deletions(-)

diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index cb5174284074..7be42d0da19e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -126,7 +126,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
 			u64 rs_offset,
 			bool last)
 {
-	struct ib_send_wr read_wr;
+	struct ib_rdma_wr read_wr;
 	int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
 	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
 	int ret, read, pno;
@@ -179,16 +179,16 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
 		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
 
 	memset(&read_wr, 0, sizeof(read_wr));
-	read_wr.wr_id = (unsigned long)ctxt;
-	read_wr.opcode = IB_WR_RDMA_READ;
-	ctxt->wr_op = read_wr.opcode;
-	read_wr.send_flags = IB_SEND_SIGNALED;
-	read_wr.wr.rdma.rkey = rs_handle;
-	read_wr.wr.rdma.remote_addr = rs_offset;
-	read_wr.sg_list = ctxt->sge;
-	read_wr.num_sge = pages_needed;
-
-	ret = svc_rdma_send(xprt, &read_wr);
+	read_wr.wr.wr_id = (unsigned long)ctxt;
+	read_wr.wr.opcode = IB_WR_RDMA_READ;
+	ctxt->wr_op = read_wr.wr.opcode;
+	read_wr.wr.send_flags = IB_SEND_SIGNALED;
+	read_wr.rkey = rs_handle;
+	read_wr.remote_addr = rs_offset;
+	read_wr.wr.sg_list = ctxt->sge;
+	read_wr.wr.num_sge = pages_needed;
+
+	ret = svc_rdma_send(xprt, &read_wr.wr);
 	if (ret) {
 		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
 		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
@@ -218,9 +218,9 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
 			 u64 rs_offset,
 			 bool last)
 {
-	struct ib_send_wr read_wr;
+	struct ib_rdma_wr read_wr;
 	struct ib_send_wr inv_wr;
-	struct ib_send_wr fastreg_wr;
+	struct ib_fast_reg_wr fastreg_wr;
 	u8 key;
 	int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
 	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
@@ -289,31 +289,31 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
 
 	/* Prepare FASTREG WR */
 	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
-	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
-	fastreg_wr.send_flags = IB_SEND_SIGNALED;
-	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
-	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
-	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
-	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-	fastreg_wr.wr.fast_reg.length = frmr->map_len;
-	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
-	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
-	fastreg_wr.next = &read_wr;
+	fastreg_wr.wr.opcode = IB_WR_FAST_REG_MR;
+	fastreg_wr.wr.send_flags = IB_SEND_SIGNALED;
+	fastreg_wr.iova_start = (unsigned long)frmr->kva;
+	fastreg_wr.page_list = frmr->page_list;
+	fastreg_wr.page_list_len = frmr->page_list_len;
+	fastreg_wr.page_shift = PAGE_SHIFT;
+	fastreg_wr.length = frmr->map_len;
+	fastreg_wr.access_flags = frmr->access_flags;
+	fastreg_wr.rkey = frmr->mr->lkey;
+	fastreg_wr.wr.next = &read_wr.wr;
 
 	/* Prepare RDMA_READ */
 	memset(&read_wr, 0, sizeof(read_wr));
-	read_wr.send_flags = IB_SEND_SIGNALED;
-	read_wr.wr.rdma.rkey = rs_handle;
-	read_wr.wr.rdma.remote_addr = rs_offset;
-	read_wr.sg_list = ctxt->sge;
-	read_wr.num_sge = 1;
+	read_wr.wr.send_flags = IB_SEND_SIGNALED;
+	read_wr.rkey = rs_handle;
+	read_wr.remote_addr = rs_offset;
+	read_wr.wr.sg_list = ctxt->sge;
+	read_wr.wr.num_sge = 1;
 	if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
-		read_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
-		read_wr.wr_id = (unsigned long)ctxt;
-		read_wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
+		read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
+		read_wr.wr.wr_id = (unsigned long)ctxt;
+		read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
 	} else {
-		read_wr.opcode = IB_WR_RDMA_READ;
-		read_wr.next = &inv_wr;
+		read_wr.wr.opcode = IB_WR_RDMA_READ;
+		read_wr.wr.next = &inv_wr;
 		/* Prepare invalidate */
 		memset(&inv_wr, 0, sizeof(inv_wr));
 		inv_wr.wr_id = (unsigned long)ctxt;
@@ -321,10 +321,10 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
 		inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
 		inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
 	}
-	ctxt->wr_op = read_wr.opcode;
+	ctxt->wr_op = read_wr.wr.opcode;
 
 	/* Post the chain */
-	ret = svc_rdma_send(xprt, &fastreg_wr);
+	ret = svc_rdma_send(xprt, &fastreg_wr.wr);
 	if (ret) {
 		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
 		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);