author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-17 22:44:48 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-17 22:44:48 +0300
commit    | 9bd553929f68921be0f2014dd06561e0c8249a0d (patch)
tree      | 720e556374e3500af9a0210178fabfc6bd0f754c /net
parent    | 022ff62c3d8c3758d15ccc6b58615fd8f257ba85 (diff)
parent    | 0a3173a5f09bc58a3638ecfd0a80bdbae55e123c (diff)
download  | linux-9bd553929f68921be0f2014dd06561e0c8249a0d.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"This has been a large cycle for RDMA, with several major patch series
reworking parts of the core code.
- Rework the so-called 'gid cache' and internal APIs to use a kref'd
pointer to a struct instead of copying, push this upwards into the
callers and add more stuff to the struct. The new design avoids
some ugly races the old one suffered from. This is part of the
namespace enablement work, as the new struct is learning to be
namespace aware.
- Various uapi cleanups, moving more stuff to include/uapi and fixing
some long-standing bugs that have recently been discovered.
- Driver updates for mlx5, mlx4, i40iw, rxe, cxgb4, hfi1, usnic,
pvrdma, and hns
- Provide max_send_sge and max_recv_sge attributes to better support
HW where these values are asymmetric.
- mlx5 user API 'devx' allows sending commands directly to the device
FW, instead of trying to cram every wild and niche feature into the
common API. Sort of like what GPUs do.
- Major write() and ioctl() API rework to cleanly support PCI device
hot unplug and advance the ioctl conversion work
- Sparse and compile warning cleanups
- Add 'const' to the ib_post_send()/ib_post_recv() signatures, and
permit a NULL 'bad_wr', which is the common use case (see the sketch
after the quoted message)
- Various patches to avoid high order allocations across the stack
- SRQ support for cxgb4, hns and qedr
- Changes to IPoIB to better follow the netdev model for working with
struct net_device lifetime"
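The 'bad_wr' cleanup is what most of the net/ hunks below mechanically apply. As a quick illustration, here is a minimal sketch of the two calling conventions; the helper functions post_one() and post_chain() are hypothetical, but the ib_post_send() signature shown is the one this series introduces:

```c
#include <rdma/ib_verbs.h>

/* After this series:
 *   int ib_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
 *                    const struct ib_send_wr **bad_wr);
 * and a NULL 'bad_wr' is permitted.  ib_post_recv() changes the same way.
 */

/* Hypothetical caller posting a single WR: no dummy out-parameter needed. */
static int post_one(struct ib_qp *qp, const struct ib_send_wr *wr)
{
        return ib_post_send(qp, wr, NULL);      /* was: &bad_wr boilerplate */
}

/* Hypothetical caller posting a chain: 'bad_wr' is still available,
 * now const-qualified, for unwinding from the first unposted WR. */
static int post_chain(struct ib_qp *qp, const struct ib_send_wr *first)
{
        const struct ib_send_wr *bad_wr;
        const struct ib_send_wr *wr;
        int rc;

        rc = ib_post_send(qp, first, &bad_wr);
        if (rc)
                for (wr = bad_wr; wr; wr = wr->next)
                        ;       /* release per-WR resources here */
        return rc;
}
```

This mirrors what the trans_rdma.c, smc_wr.c, and fmr_ops.c hunks in the diff below do with NULL, while frwr_op_unmap_sync() and rds_ib_xmit() keep a const 'bad_wr' because they genuinely walk the failed chain.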
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (312 commits)
Revert "net/smc: Replace ib_query_gid with rdma_get_gid_attr"
RDMA/hns: Fix usage of bitmap allocation functions return values
IB/core: Change filter function return type from int to bool
IB/core: Update GID entries for netdevice whose mac address changes
IB/core: Add default GIDs of the bond master netdev
IB/core: Consider adding default GIDs of bond device
IB/core: Delete lower netdevice default GID entries in bonding scenario
IB/core: Avoid confusing del_netdev_default_ips
IB/core: Add comment for change upper netevent handling
qedr: Add user space support for SRQ
qedr: Add support for kernel mode SRQ's
qedr: Add wrapping generic structure for qpidr and adjust idr routines.
IB/mlx5: Fix leaking stack memory to userspace
Update the e-mail address of Bart Van Assche
IB/ucm: Fix compiling ucm.c
IB/uverbs: Do not check for device disassociation during ioctl
IB/uverbs: Remove struct uverbs_root_spec and all supporting code
IB/uverbs: Use uverbs_api to unmarshal ioctl commands
IB/uverbs: Use uverbs_alloc for allocations
IB/uverbs: Add a simple allocator to uverbs_attr_bundle
...
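For context on the GID-cache rework referenced in the commits above (including the reverted net/smc conversion), consumers now take a reference-counted pointer to the cache entry instead of copying the GID out. A minimal sketch under that API; my_use_gid() is a hypothetical consumer, not a function from the tree:

```c
#include <linux/err.h>
#include <rdma/ib_cache.h>

/* Hypothetical consumer of the reworked GID cache: hold a kref'd
 * pointer to the entry while using it, then drop the reference. */
static int my_use_gid(struct ib_device *dev, u8 port, int index)
{
        const struct ib_gid_attr *attr;

        attr = rdma_get_gid_attr(dev, port, index);
        if (IS_ERR(attr))
                return PTR_ERR(attr);

        /* ... use attr->gid, attr->gid_type, attr->ndev ... */

        rdma_put_gid_attr(attr);
        return 0;
}
```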
Diffstat (limited to 'net')
-rw-r--r-- | net/9p/trans_rdma.c | 8
-rw-r--r-- | net/core/secure_seq.c | 1
-rw-r--r-- | net/rds/ib.c | 2
-rw-r--r-- | net/rds/ib_frmr.c | 11
-rw-r--r-- | net/rds/ib_recv.c | 6
-rw-r--r-- | net/rds/ib_send.c | 6
-rw-r--r-- | net/smc/smc_core.c | 1
-rw-r--r-- | net/smc/smc_ib.c | 1
-rw-r--r-- | net/smc/smc_tx.c | 3
-rw-r--r-- | net/smc/smc_wr.c | 9
-rw-r--r-- | net/smc/smc_wr.h | 3
-rw-r--r-- | net/sunrpc/xprtrdma/fmr_ops.c | 4
-rw-r--r-- | net/sunrpc/xprtrdma/frwr_ops.c | 7
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 3
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_rw.c | 3
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_sendto.c | 3
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_transport.c | 2
-rw-r--r-- | net/sunrpc/xprtrdma/verbs.c | 5
18 files changed, 33 insertions(+), 45 deletions(-)
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 3d414acb7015..b06286f253cb 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -396,7 +396,7 @@ static int
 post_recv(struct p9_client *client, struct p9_rdma_context *c)
 {
        struct p9_trans_rdma *rdma = client->trans;
-       struct ib_recv_wr wr, *bad_wr;
+       struct ib_recv_wr wr;
        struct ib_sge sge;

        c->busa = ib_dma_map_single(rdma->cm_id->device,
@@ -415,7 +415,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
        wr.wr_cqe = &c->cqe;
        wr.sg_list = &sge;
        wr.num_sge = 1;
-       return ib_post_recv(rdma->qp, &wr, &bad_wr);
+       return ib_post_recv(rdma->qp, &wr, NULL);

 error:
        p9_debug(P9_DEBUG_ERROR, "EIO\n");
@@ -425,7 +425,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
 static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 {
        struct p9_trans_rdma *rdma = client->trans;
-       struct ib_send_wr wr, *bad_wr;
+       struct ib_send_wr wr;
        struct ib_sge sge;
        int err = 0;
        unsigned long flags;
@@ -520,7 +520,7 @@ dont_need_post_recv:
         * status in case of a very fast reply.
         */
        req->status = REQ_STATUS_SENT;
-       err = ib_post_send(rdma->qp, &wr, &bad_wr);
+       err = ib_post_send(rdma->qp, &wr, NULL);
        if (err)
                goto send_error;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 7232274de334..af6ad467ed61 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -140,6 +140,7 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
                            &net_secret);
        return seq_scale(hash);
 }
+EXPORT_SYMBOL_GPL(secure_tcp_seq);

 u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 89c6333ecd39..c1d97640c0be 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -144,7 +144,7 @@ static void rds_ib_add_one(struct ib_device *device)
        INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);

        rds_ibdev->max_wrs = device->attrs.max_qp_wr;
-       rds_ibdev->max_sge = min(device->attrs.max_sge, RDS_IB_MAX_SGE);
+       rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);

        has_fr = (device->attrs.device_cap_flags &
                  IB_DEVICE_MEM_MGT_EXTENSIONS);
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 8596eed6d9a8..6431a023ac89 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -103,7 +103,6 @@ static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
 static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
 {
        struct rds_ib_frmr *frmr = &ibmr->u.frmr;
-       struct ib_send_wr *failed_wr;
        struct ib_reg_wr reg_wr;
        int ret, off = 0;
@@ -136,9 +135,7 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
                        IB_ACCESS_REMOTE_WRITE;
        reg_wr.wr.send_flags = IB_SEND_SIGNALED;

-       failed_wr = &reg_wr.wr;
-       ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, &failed_wr);
-       WARN_ON(failed_wr != &reg_wr.wr);
+       ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
        if (unlikely(ret)) {
                /* Failure here can be because of -ENOMEM as well */
                frmr->fr_state = FRMR_IS_STALE;
@@ -231,7 +228,7 @@ out_unmap:

 static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
 {
-       struct ib_send_wr *s_wr, *failed_wr;
+       struct ib_send_wr *s_wr;
        struct rds_ib_frmr *frmr = &ibmr->u.frmr;
        struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
        int ret = -EINVAL;
@@ -256,9 +253,7 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
        s_wr->ex.invalidate_rkey = frmr->mr->rkey;
        s_wr->send_flags = IB_SEND_SIGNALED;

-       failed_wr = s_wr;
-       ret = ib_post_send(i_cm_id->qp, s_wr, &failed_wr);
-       WARN_ON(failed_wr != s_wr);
+       ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
        if (unlikely(ret)) {
                frmr->fr_state = FRMR_IS_STALE;
                frmr->fr_inv = false;
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index d300186b8dc0..2f16146e4ec9 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -381,7 +381,6 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
 {
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_recv_work *recv;
-       struct ib_recv_wr *failed_wr;
        unsigned int posted = 0;
        int ret = 0;
        bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
@@ -415,7 +414,7 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
                               &recv->r_frag->f_sg));

                /* XXX when can this fail? */
-               ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
+               ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
                if (ret) {
                        rds_ib_conn_error(conn, "recv post on "
                               "%pI6c returned %d, disconnecting and "
@@ -648,7 +647,6 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
 {
        struct rds_header *hdr = ic->i_ack;
-       struct ib_send_wr *failed_wr;
        u64 seq;
        int ret;
@@ -661,7 +659,7 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
        rds_message_make_checksum(hdr);

        ic->i_ack_queued = jiffies;
-       ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
+       ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
        if (unlikely(ret)) {
                /* Failed to send. Release the WR, and
                 * force another ACK.
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index c8dd3125d398..2dcb555e6350 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -492,7 +492,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
        struct rds_ib_send_work *send = NULL;
        struct rds_ib_send_work *first;
        struct rds_ib_send_work *prev;
-       struct ib_send_wr *failed_wr;
+       const struct ib_send_wr *failed_wr;
        struct scatterlist *scat;
        u32 pos;
        u32 i;
@@ -758,7 +758,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
 {
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_send_work *send = NULL;
-       struct ib_send_wr *failed_wr;
+       const struct ib_send_wr *failed_wr;
        u32 pos;
        u32 work_alloc;
        int ret;
@@ -846,7 +846,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
        struct rds_ib_send_work *send = NULL;
        struct rds_ib_send_work *first;
        struct rds_ib_send_work *prev;
-       struct ib_send_wr *failed_wr;
+       const struct ib_send_wr *failed_wr;
        struct scatterlist *scat;
        unsigned long len;
        u64 remote_addr = op->op_remote_addr;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index a46418f45ecd..e871368500e3 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -16,6 +16,7 @@
 #include <net/tcp.h>
 #include <net/sock.h>
 #include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>

 #include "smc.h"
 #include "smc_clc.h"
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 2cc64bc8ae20..9bb5274a244e 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -16,6 +16,7 @@
 #include <linux/workqueue.h>
 #include <linux/scatterlist.h>
 #include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>

 #include "smc_pnet.h"
 #include "smc_ib.h"
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 2f5e324e54b9..d8366ed51757 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -270,7 +270,6 @@ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
                             int num_sges, struct ib_sge sges[])
 {
        struct smc_link_group *lgr = conn->lgr;
-       struct ib_send_wr *failed_wr = NULL;
        struct ib_rdma_wr rdma_wr;
        struct smc_link *link;
        int rc;
@@ -288,7 +287,7 @@ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
                /* offset within RMBE */
                peer_rmbe_offset;
        rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
-       rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
+       rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL);
        if (rc) {
                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
                smc_lgr_terminate(lgr);
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index f856b8402b3f..3c458d279855 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -232,15 +232,13 @@ int smc_wr_tx_put_slot(struct smc_link *link,
  */
 int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
 {
-       struct ib_send_wr *failed_wr = NULL;
        struct smc_wr_tx_pend *pend;
        int rc;

        ib_req_notify_cq(link->smcibdev->roce_cq_send,
                         IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
        pend = container_of(priv, struct smc_wr_tx_pend, priv);
-       rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
-                         &failed_wr);
+       rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
        if (rc) {
                smc_wr_tx_put_slot(link, priv);
                smc_lgr_terminate(smc_get_lgr(link));
@@ -251,7 +249,6 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
 /* Register a memory region and wait for result. */
 int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
 {
-       struct ib_send_wr *failed_wr = NULL;
        int rc;

        ib_req_notify_cq(link->smcibdev->roce_cq_send,
@@ -260,9 +257,7 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
        link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
        link->wr_reg.mr = mr;
        link->wr_reg.key = mr->rkey;
-       failed_wr = &link->wr_reg.wr;
-       rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, &failed_wr);
-       WARN_ON(failed_wr != &link->wr_reg.wr);
+       rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL);
        if (rc)
                return rc;
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 210bec3c3ebe..1d85bb14fd6f 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -63,7 +63,6 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
 /* post a new receive work request to fill a completed old work request entry */
 static inline int smc_wr_rx_post(struct smc_link *link)
 {
-       struct ib_recv_wr *bad_recv_wr = NULL;
        int rc;
        u64 wr_id, temp_wr_id;
        u32 index;
@@ -72,7 +71,7 @@ static inline int smc_wr_rx_post(struct smc_link *link)
        temp_wr_id = wr_id;
        index = do_div(temp_wr_id, link->wr_rx_cnt);
        link->wr_rx_ibs[index].wr_id = wr_id;
-       rc = ib_post_recv(link->roce_qp, &link->wr_rx_ibs[index], &bad_recv_wr);
+       rc = ib_post_recv(link->roce_qp, &link->wr_rx_ibs[index], NULL);
        return rc;
 }
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 17fb1e025654..0f7c465d9a5a 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -279,9 +279,7 @@ out_maperr:
 static int
 fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 {
-       struct ib_send_wr *bad_wr;
-
-       return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr);
+       return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, NULL);
 }

 /* Invalidate all memory regions that were registered for "req".
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index c040de196e13..1bb00dd6ccdb 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -464,7 +464,7 @@ out_mapmr_err:
 static int
 frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 {
-       struct ib_send_wr *post_wr, *bad_wr;
+       struct ib_send_wr *post_wr;
        struct rpcrdma_mr *mr;

        post_wr = &req->rl_sendctx->sc_wr;
@@ -486,7 +486,7 @@ frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
        /* If ib_post_send fails, the next ->send_request for
         * @req will queue these MWs for recovery.
         */
-       return ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
+       return ib_post_send(ia->ri_id->qp, post_wr, NULL);
 }

 /* Handle a remotely invalidated mr on the @mrs list
@@ -517,7 +517,8 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
 static void
 frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
 {
-       struct ib_send_wr *first, **prev, *last, *bad_wr;
+       struct ib_send_wr *first, **prev, *last;
+       const struct ib_send_wr *bad_wr;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_frwr *frwr;
        struct rpcrdma_mr *mr;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 841fca143804..2ef75e885411 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -229,11 +229,10 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
                                struct svc_rdma_recv_ctxt *ctxt)
 {
-       struct ib_recv_wr *bad_recv_wr;
        int ret;

        svc_xprt_get(&rdma->sc_xprt);
-       ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr);
+       ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
        trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
        if (ret)
                goto err_post;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index ce3ea8419704..04cb3363172a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -307,7 +307,8 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
 {
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_xprt *xprt = &rdma->sc_xprt;
-       struct ib_send_wr *first_wr, *bad_wr;
+       struct ib_send_wr *first_wr;
+       const struct ib_send_wr *bad_wr;
        struct list_head *tmp;
        struct ib_cqe *cqe;
        int ret;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 4a3efaea277c..ffef0c508f1a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -291,7 +291,6 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
  */
 int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
 {
-       struct ib_send_wr *bad_wr;
        int ret;

        might_sleep();
@@ -311,7 +310,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
        }

        svc_xprt_get(&rdma->sc_xprt);
-       ret = ib_post_send(rdma->sc_qp, wr, &bad_wr);
+       ret = ib_post_send(rdma->sc_qp, wr, NULL);
        trace_svcrdma_post_send(wr, ret);
        if (ret) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index e9535a66bab0..547b2cdf1427 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -476,7 +476,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        /* Qualify the transport resource defaults with the
         * capabilities of this particular device */
-       newxprt->sc_max_send_sges = dev->attrs.max_sge;
+       newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
        /* transport hdr, head iovec, one page list entry, tail iovec */
        if (newxprt->sc_max_send_sges < 4) {
                pr_err("svcrdma: too few Send SGEs available (%d)\n",
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 16161a36dc73..5efeba08918b 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -508,7 +508,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
        unsigned int max_sge;
        int rc;

-       max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+       max_sge = min_t(unsigned int, ia->ri_device->attrs.max_send_sge,
                        RPCRDMA_MAX_SEND_SGES);
        if (max_sge < RPCRDMA_MIN_SEND_SGES) {
                pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
@@ -1559,7 +1559,8 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
        if (!count)
                return;

-       rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr, &bad_wr);
+       rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
+                         (const struct ib_recv_wr **)&bad_wr);
        if (rc) {
                for (wr = bad_wr; wr; wr = wr->next) {
                        struct rpcrdma_rep *rep;
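The max_sge conversions in rds/ib.c, svc_rdma_transport.c, and verbs.c above all follow the same pattern: ib_device_attr now reports max_send_sge and max_recv_sge separately, and each transport caps its own limit against the relevant side. A minimal sketch of that pattern; MY_MAX_SGE and my_cap_sges() are illustrative names, not from the tree:

```c
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

#define MY_MAX_SGE 8    /* illustrative transport-side ceiling */

/* Cap send and receive SGE counts against the device's asymmetric
 * limits rather than the old combined attrs.max_sge. */
static void my_cap_sges(struct ib_device *dev,
                        unsigned int *send_sge, unsigned int *recv_sge)
{
        *send_sge = min_t(unsigned int, dev->attrs.max_send_sge, MY_MAX_SGE);
        *recv_sge = min_t(unsigned int, dev->attrs.max_recv_sge, MY_MAX_SGE);
}
```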