author | Bart Van Assche <bart.vanassche@wdc.com> | 2018-07-18 19:25:25 +0300
committer | Jason Gunthorpe <jgg@mellanox.com> | 2018-07-25 01:06:36 +0300
commit | 73930595066c0996f39ab750cf9fc0963149a1e0 (patch)
tree | de83e00bccc38c41e5096f7cbde61024d0ddae09 /fs/cifs
parent | 23f96d1f15a70e2e8ba5449d1c77b634426c4b80 (diff)
download | linux-73930595066c0996f39ab750cf9fc0963149a1e0.tar.xz
fs/cifs: Simplify ib_post_(send|recv|srq_recv)() calls
Instead of declaring and passing a dummy 'bad_wr' pointer, pass NULL
as the third argument to ib_post_(send|recv|srq_recv)().
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
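For readers unfamiliar with the idiom being removed: before this change, callers had to supply an output pointer that ib_post_send()/ib_post_recv() would set to the first failed work request, even when the caller never looked at it. The sketch below illustrates the old and new calling patterns; the helper name smbd_post_one_send() and its arguments are made up for illustration and are not code from this patch.

/* Illustrative sketch only; the helper name is hypothetical. */
#include <rdma/ib_verbs.h>

static int smbd_post_one_send(struct ib_qp *qp, struct ib_send_wr *send_wr)
{
	/*
	 * Old style: declare a dummy 'bad_wr' that is never inspected.
	 *
	 *	struct ib_send_wr *bad_wr;
	 *	return ib_post_send(qp, send_wr, &bad_wr);
	 *
	 * New style: callers that do not care which WR failed simply
	 * pass NULL as the third argument.
	 */
	return ib_post_send(qp, send_wr, NULL);
}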
Diffstat (limited to 'fs/cifs')
-rw-r--r-- | fs/cifs/smbdirect.c | 19
1 file changed, 9 insertions, 10 deletions
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index c5a1cddd8856..c4a6c6976aab 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -801,7 +801,7 @@ out1:
  */
 static int smbd_post_send_negotiate_req(struct smbd_connection *info)
 {
-	struct ib_send_wr send_wr, *send_wr_fail;
+	struct ib_send_wr send_wr;
 	int rc = -ENOMEM;
 	struct smbd_request *request;
 	struct smbd_negotiate_req *packet;
@@ -853,7 +853,7 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
 
 	request->has_payload = false;
 	atomic_inc(&info->send_pending);
-	rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+	rc = ib_post_send(info->id->qp, &send_wr, NULL);
 	if (!rc)
 		return 0;
 
@@ -1023,7 +1023,7 @@ static void smbd_destroy_header(struct smbd_connection *info,
 static int smbd_post_send(struct smbd_connection *info,
 		struct smbd_request *request, bool has_payload)
 {
-	struct ib_send_wr send_wr, *send_wr_fail;
+	struct ib_send_wr send_wr;
 	int rc, i;
 
 	for (i = 0; i < request->num_sge; i++) {
@@ -1054,7 +1054,7 @@ static int smbd_post_send(struct smbd_connection *info,
 		atomic_inc(&info->send_pending);
 	}
 
-	rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+	rc = ib_post_send(info->id->qp, &send_wr, NULL);
 	if (rc) {
 		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
 		if (has_payload) {
@@ -1183,7 +1183,7 @@ static int smbd_post_send_data(
 static int smbd_post_recv(
 		struct smbd_connection *info, struct smbd_response *response)
 {
-	struct ib_recv_wr recv_wr, *recv_wr_fail = NULL;
+	struct ib_recv_wr recv_wr;
 	int rc = -EIO;
 
 	response->sge.addr = ib_dma_map_single(
@@ -1202,7 +1202,7 @@ static int smbd_post_recv(
 	recv_wr.sg_list = &response->sge;
 	recv_wr.num_sge = 1;
 
-	rc = ib_post_recv(info->id->qp, &recv_wr, &recv_wr_fail);
+	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
 	if (rc) {
 		ib_dma_unmap_single(info->id->device, response->sge.addr,
 			response->sge.length, DMA_FROM_DEVICE);
@@ -2488,7 +2488,6 @@ struct smbd_mr *smbd_register_mr(
 	int rc, i;
 	enum dma_data_direction dir;
 	struct ib_reg_wr *reg_wr;
-	struct ib_send_wr *bad_wr;
 
 	if (num_pages > info->max_frmr_depth) {
 		log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
@@ -2562,7 +2561,7 @@ skip_multiple_pages:
 	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
 	 * on the next ib_post_send when we actaully send I/O to remote peer
 	 */
-	rc = ib_post_send(info->id->qp, &reg_wr->wr, &bad_wr);
+	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
 	if (!rc)
 		return smbdirect_mr;
 
@@ -2607,7 +2606,7 @@ static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
  */
 int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
 {
-	struct ib_send_wr *wr, *bad_wr;
+	struct ib_send_wr *wr;
 	struct smbd_connection *info = smbdirect_mr->conn;
 	int rc = 0;
 
@@ -2622,7 +2621,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
 	wr->send_flags = IB_SEND_SIGNALED;
 
 	init_completion(&smbdirect_mr->invalidate_done);
-	rc = ib_post_send(info->id->qp, wr, &bad_wr);
+	rc = ib_post_send(info->id->qp, wr, NULL);
 	if (rc) {
 		log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
 		smbd_disconnect_rdma_connection(info);
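Passing a bare NULL is safe because the ib_post_(send|recv|srq_recv)() wrappers in the RDMA core were changed earlier in the same series to accept NULL and substitute a throwaway pointer of their own. Conceptually the core-side handling looks like the simplified sketch below; this is not the exact code from include/rdma/ib_verbs.h, only an illustration of the idea, and the function name is made up.

/* Simplified illustration of how a NULL 'bad_wr' argument can be tolerated;
 * the real wrapper in include/rdma/ib_verbs.h differs in detail. */
static inline int post_send_sketch(struct ib_qp *qp,
				   struct ib_send_wr *send_wr,
				   struct ib_send_wr **bad_send_wr)
{
	struct ib_send_wr *dummy;

	/* If the caller passed NULL, hand the driver a throwaway local
	 * instead of letting it write through a NULL pointer on error. */
	return qp->device->post_send(qp, send_wr,
				     bad_send_wr ? bad_send_wr : &dummy);
}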