summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2026-03-07 00:56:25 +0300
committerTrond Myklebust <trond.myklebust@hammerspace.com>2026-04-13 21:56:27 +0300
commit6f2e565fb3bd68636e4920223e599d70861f8ba6 (patch)
treef08e6be551fbc2e0c22c0359bd9df36015d17647
parent765bde47fe7f197dabeb12da76831f40d0b20377 (diff)
downloadlinux-6f2e565fb3bd68636e4920223e599d70861f8ba6.tar.xz
xprtrdma: Decouple frwr_wp_create from frwr_map
frwr_wp_create is the only caller of frwr_map outside the encode path. It registers a single 4-byte write-pad region from a stack-local rpcrdma_mr_seg. Inlining the registration logic directly (sg_init_table + sg_set_page + ib_dma_map_sg + ib_map_mr_sg + IOVA mangle + reg_wr setup) eliminates the coupling that would otherwise complicate the removal of rpcrdma_mr_seg from frwr_map's interface. The inlined version adds a proper error-unwind ladder: on failure, the DMA mapping (if established) is released, ep->re_write_pad_mr is cleared, and the MR is returned to the transport free list. The old frwr_map-based code relied on rpcrdma_mrs_destroy at teardown to reclaim partially-initialized MRs. This is a one-time setup path; duplicating ~20 lines is a reasonable tradeoff for decoupling the write-pad registration from the data-path MR registration. Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c57
1 files changed, 50 insertions, 7 deletions
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 31434aeb8e29..4331b0b65f4c 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -669,9 +669,13 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
*/
int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
{
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_ep *ep = r_xprt->rx_ep;
- struct rpcrdma_mr_seg seg;
+ struct ib_reg_wr *reg_wr;
struct rpcrdma_mr *mr;
+ struct ib_mr *ibmr;
+ int dma_nents;
+ int ret;
mr = rpcrdma_mr_get(r_xprt);
if (!mr)
@@ -679,11 +683,39 @@ int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
mr->mr_req = NULL;
ep->re_write_pad_mr = mr;
- seg.mr_len = XDR_UNIT;
- seg.mr_page = virt_to_page(ep->re_write_pad);
- seg.mr_offset = offset_in_page(ep->re_write_pad);
- if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr)))
- return -EIO;
+ sg_init_table(mr->mr_sg, 1);
+ sg_set_page(mr->mr_sg, virt_to_page(ep->re_write_pad),
+ XDR_UNIT, offset_in_page(ep->re_write_pad));
+
+ mr->mr_dir = DMA_FROM_DEVICE;
+ mr->mr_nents = 1;
+ dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg,
+ mr->mr_nents, mr->mr_dir);
+ if (!dma_nents) {
+ ret = -EIO;
+ goto out_mr;
+ }
+ mr->mr_device = ep->re_id->device;
+
+ ibmr = mr->mr_ibmr;
+ if (ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL,
+ PAGE_SIZE) != dma_nents) {
+ ret = -EIO;
+ goto out_unmap;
+ }
+
+ /* IOVA is not tagged with an XID; the write-pad is not RPC-specific. */
+ ib_update_fast_reg_key(ibmr, ib_inc_rkey(ibmr->rkey));
+
+ reg_wr = &mr->mr_regwr;
+ reg_wr->mr = ibmr;
+ reg_wr->key = ibmr->rkey;
+ reg_wr->access = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
+
+ mr->mr_handle = ibmr->rkey;
+ mr->mr_length = ibmr->length;
+ mr->mr_offset = ibmr->iova;
+
trace_xprtrdma_mr_fastreg(mr);
mr->mr_cqe.done = frwr_wc_fastreg;
@@ -693,5 +725,16 @@ int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
mr->mr_regwr.wr.send_flags = 0;
- return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
+ ret = ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
+ if (!ret)
+ return 0;
+
+out_unmap:
+ frwr_mr_unmap(mr);
+out_mr:
+ ep->re_write_pad_mr = NULL;
+ spin_lock(&buf->rb_lock);
+ rpcrdma_mr_push(mr, &buf->rb_mrs);
+ spin_unlock(&buf->rb_lock);
+ return ret;
}