author		David S. Miller <davem@davemloft.net>	2018-03-23 18:24:57 +0300
committer	David S. Miller <davem@davemloft.net>	2018-03-23 18:31:58 +0300
commit		03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch)
tree		fbaf8738296b2e9dcba81c6daef2d515b6c4948c /drivers/infiniband/hw/mlx5
parent		6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff)
parent		f36b7534b83357cf52e747905de6d65b4f7c2512 (diff)
download	linux-03fe2debbb2771fb90881e4ce8109b09cf772a5c.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds. Trivially resolved.
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
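The resolution keeps the 'net' placement of the function together with
the 'net-next' behavior. A minimal sketch of how the resolved helper
reads, assuming the 4.17-era phy_config_interrupt() and
phy_clear_interrupt() helpers in drivers/net/phy/phy.c:

	static int phy_disable_interrupts(struct phy_device *phydev)
	{
		int err;

		/* Disable PHY interrupts */
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
		if (err)
			return err;

		/* Clear the interrupt; per the 'net-next' change there
		 * is no phy_error() fallback here any more.
		 */
		return phy_clear_interrupt(phydev);
	}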
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
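Concretely, the resolved xfrm4_fill_dst() keeps the new member copy
while the rt_table_id copy goes away. An abbreviated sketch, trimmed
to the relevant member copies:

	static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				  const struct flowi *fl)
	{
		struct rtable *rt = (struct rtable *)xdst->route;
		...
		/* the rt_table_id copy is gone ('net-next' removed the member) */
		xdst->u.rt.rt_pmtu = rt->rt_pmtu;
		/* bug fix from 'net': carry the MTU-locked bit over too */
		xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
		...
	}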
The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial; the
RDMA tree's merge commit was used as a guide here. Their notes
follow (a short sketch of the resulting stage list comes after
them):
====================
Because bug fixes found by the syzkaller bot were taken into the
for-rc branch after development for the 4.17 merge window had already
started landing in the for-next branch, there were fairly non-trivial
merge issues that needed to be resolved between the for-rc branch
and the for-next branch. This merge resolves those conflicts and
provides a unified base upon which ongoing development for 4.17 can
be based.
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
(IB/mlx5: Fix cleanup order on unload), added to for-rc, and
commit b5ca15ad7e61 (IB/mlx5: Add proper representors support),
added as part of the devel cycle, both needed to modify the
init/de-init functions used by mlx5. To support the new
representors, the new functions added by the cleanup patch
needed to be made non-static, and the init/de-init list
added by the representors patch needed to be modified to
match the init/de-init list changes made by the cleanup
patch.
Updates:
drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
prototypes added by representors patch to reflect new function
names as changed by cleanup patch
drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
stage list to match new order from cleanup patch
====================
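The net effect of that resolution on the stage lists (the same
ordering appears in pf_profile, nic_rep_profile and rep_profile in
the diff below): the old UMR_RESOURCES stage is split around IB_REG,
so UMR resources are torn down before the device is unregistered but
created only after registration:

	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,				/* no init step */
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup), /* destroy_umrc_res() */
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init, /* create_umr_res() */
		     NULL),				/* no cleanup step */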
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--	drivers/infiniband/hw/mlx5/cq.c		10
-rw-r--r--	drivers/infiniband/hw/mlx5/ib_rep.c	 9
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	51
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	 7
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c		14
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c		34
-rw-r--r--	drivers/infiniband/hw/mlx5/srq.c	15
7 files changed, 88 insertions(+), 52 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index c4c7b82f4ac1..94a27d89a303 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -221,7 +221,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
 		break;
 	}
-	wc->slid = be16_to_cpu(cqe->slid);
 	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
 	wc->dlid_path_bits = cqe->ml_path;
 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -236,10 +235,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	}
 
 	if (ll != IB_LINK_LAYER_ETHERNET) {
+		wc->slid = be16_to_cpu(cqe->slid);
 		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
 		return;
 	}
 
+	wc->slid = 0;
 	vlan_present = cqe->l4_l3_hdr_type & 0x1;
 	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
 	if (vlan_present) {
@@ -1188,7 +1189,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (ucmd.reserved0 || ucmd.reserved1)
 		return -EINVAL;
 
-	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+	/* check multiplication overflow */
+	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+		return -EINVAL;
+
+	umem = ib_umem_get(context, ucmd.buf_addr,
+			   (size_t)ucmd.cqe_size * entries,
 			   IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(umem)) {
 		err = PTR_ERR(umem);
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 61cc3d7db257..0e04fdddf670 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -30,12 +30,15 @@ static const struct mlx5_ib_profile rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
 		     mlx5_ib_stage_class_attr_init,
 		     NULL),
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d9474b95d8e5..390e4375647e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -256,12 +256,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
 	struct mlx5_ib_multiport_info *mpi;
 	struct mlx5_ib_port *port;
 
+	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+	    ll != IB_LINK_LAYER_ETHERNET) {
+		if (native_port_num)
+			*native_port_num = ib_port_num;
+		return ibdev->mdev;
+	}
+
 	if (native_port_num)
 		*native_port_num = 1;
 
-	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
-		return ibdev->mdev;
-
 	port = &ibdev->port[ib_port_num - 1];
 	if (!port)
 		return NULL;
@@ -3297,7 +3301,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	struct mlx5_ib_dev *ibdev;
 	struct ib_event ibev;
 	bool fatal = false;
-	u8 port = 0;
+	u8 port = (u8)work->param;
 
 	if (mlx5_core_is_mp_slave(work->dev)) {
 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3317,8 +3321,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		port = (u8)work->param;
-
 		/* In RoCE, port up/down events are handled in
 		 * mlx5_netdev_event().
 		 */
@@ -3332,24 +3334,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 
 	case MLX5_DEV_EVENT_LID_CHANGE:
 		ibev.event = IB_EVENT_LID_CHANGE;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_PKEY_CHANGE:
 		ibev.event = IB_EVENT_PKEY_CHANGE;
-		port = (u8)work->param;
-
 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
 		break;
 
 	case MLX5_DEV_EVENT_GUID_CHANGE:
 		ibev.event = IB_EVENT_GID_CHANGE;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_CLIENT_REREG:
 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
-		port = (u8)work->param;
 		break;
 	case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
 		schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3361,7 +3358,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	ibev.device	      = &ibdev->ib_dev;
 	ibev.element.port_num = port;
 
-	if (port < 1 || port > ibdev->num_ports) {
+	if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
 		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
 		goto out;
 	}
@@ -4999,19 +4996,19 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 	return ib_register_device(&dev->ib_dev, NULL);
 }
 
-void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 {
-	ib_unregister_device(&dev->ib_dev);
+	destroy_umrc_res(dev);
 }
 
-int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
 {
-	return create_umr_res(dev);
+	ib_unregister_device(&dev->ib_dev);
 }
 
-void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 {
-	destroy_umrc_res(dev);
+	return create_umr_res(dev);
 }
 
 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
@@ -5130,12 +5127,15 @@ static const struct mlx5_ib_profile pf_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
 		     mlx5_ib_stage_delay_drop_init,
 		     mlx5_ib_stage_delay_drop_cleanup),
@@ -5172,12 +5172,15 @@ static const struct mlx5_ib_profile nic_rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
 		     mlx5_ib_stage_class_attr_init,
 		     NULL),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e0bad28e0f09..c33bf1523d67 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -742,8 +742,9 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_CONG_DEBUGFS,
 	MLX5_IB_STAGE_UAR,
 	MLX5_IB_STAGE_BFREG,
+	MLX5_IB_STAGE_PRE_IB_REG_UMR,
 	MLX5_IB_STAGE_IB_REG,
-	MLX5_IB_STAGE_UMR_RESOURCES,
+	MLX5_IB_STAGE_POST_IB_REG_UMR,
 	MLX5_IB_STAGE_DELAY_DROP,
 	MLX5_IB_STAGE_CLASS_ATTR,
 	MLX5_IB_STAGE_REP_REG,
@@ -1068,10 +1069,10 @@ int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
+void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 		      const struct mlx5_ib_profile *profile,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a5fad3e87ff7..95a36e9ea552 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -839,7 +839,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 	*umem = ib_umem_get(pd->uobject->context, start, length,
 			    access_flags, 0);
 	err = PTR_ERR_OR_ZERO(*umem);
-	if (err < 0) {
+	if (err) {
+		*umem = NULL;
 		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
 		return err;
 	}
@@ -1416,6 +1417,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		if (err) {
 			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
 			ib_umem_release(mr->umem);
+			mr->umem = NULL;
 			clean_mr(dev, mr);
 			return err;
 		}
@@ -1499,14 +1501,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		u32 key = mr->mmkey.key;
 
 		err = destroy_mkey(dev, mr);
-		kfree(mr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
 				     key, err);
 			return err;
 		}
-	} else {
-		mlx5_mr_cache_free(dev, mr);
 	}
 
 	return 0;
@@ -1549,6 +1548,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		atomic_sub(npages, &dev->mdev->priv.reg_pages);
 	}
 
+	if (!mr->allocated_from_cache)
+		kfree(mr);
+	else
+		mlx5_mr_cache_free(dev, mr);
+
 	return 0;
 }
@@ -1817,7 +1821,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
 	mr->ibmr.length = 0;
-	mr->ndescs = sg_nents;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
 		if (unlikely(i >= mr->max_descs))
@@ -1829,6 +1832,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 		sg_offset = 0;
 	}
 
+	mr->ndescs = i;
 	if (sg_offset_p)
 		*sg_offset_p = sg_offset;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0e67e3682bca..85c612ac547a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1177,7 +1177,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	ib_umem_release(sq->ubuffer.umem);
 }
 
-static int get_rq_pas_size(void *qpc)
+static size_t get_rq_pas_size(void *qpc)
 {
 	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
 	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
@@ -1193,7 +1193,8 @@ static int get_rq_pas_size(void *qpc)
 }
 
 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
-				   struct mlx5_ib_rq *rq, void *qpin)
+				   struct mlx5_ib_rq *rq, void *qpin,
+				   size_t qpinlen)
 {
 	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
 	__be64 *pas;
@@ -1202,9 +1203,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	void *rqc;
 	void *wq;
 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
-	int inlen;
+	size_t rq_pas_size = get_rq_pas_size(qpc);
+	size_t inlen;
 	int err;
-	u32 rq_pas_size = get_rq_pas_size(qpc);
+
+	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
+		return -EINVAL;
 
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
 	in = kvzalloc(inlen, GFP_KERNEL);
@@ -1297,7 +1301,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 }
 
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-				u32 *in,
+				u32 *in, size_t inlen,
 				struct ib_pd *pd)
 {
 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1329,7 +1333,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
 		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
 			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
-		err = create_raw_packet_qp_rq(dev, rq, in);
+		err = create_raw_packet_qp_rq(dev, rq, in, inlen);
 		if (err)
 			goto err_destroy_sq;
@@ -1608,6 +1612,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
 	struct mlx5_ib_create_qp ucmd;
 	struct mlx5_ib_qp_base *base;
+	int mlx5_st;
 	void *qpc;
 	u32 *in;
 	int err;
@@ -1616,6 +1621,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
+	mlx5_st = to_mlx5_st(init_attr->qp_type);
+	if (mlx5_st < 0)
+		return -EINVAL;
+
 	if (init_attr->rwq_ind_tbl) {
 		if (!udata)
 			return -ENOSYS;
@@ -1777,7 +1786,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
-	MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+	MLX5_SET(qpc, qpc, st, mlx5_st);
 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 
 	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -1891,11 +1900,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		}
 	}
 
+	if (inlen < 0) {
+		err = -EINVAL;
+		goto err;
+	}
+
 	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
 		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
 		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
-		err = create_raw_packet_qp(dev, qp, in, pd);
+		err = create_raw_packet_qp(dev, qp, in, inlen, pd);
 	} else {
 		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
 	}
@@ -3116,8 +3130,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		goto out;
 
 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
-	    !optab[mlx5_cur][mlx5_new])
+	    !optab[mlx5_cur][mlx5_new]) {
+		err = -EINVAL;
 		goto out;
+	}
 
 	op = optab[mlx5_cur][mlx5_new];
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 6d5fadad9090..3c7522d025f2 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_srq *srq;
-	int desc_size;
-	int buf_size;
+	size_t desc_size;
+	size_t buf_size;
 	int err;
 	struct mlx5_srq_attr in = {0};
 	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
@@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
 		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+		return ERR_PTR(-EINVAL);
 	desc_size = roundup_pow_of_two(desc_size);
-	desc_size = max_t(int, 32, desc_size);
+	desc_size = max_t(size_t, 32, desc_size);
+	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+		return ERR_PTR(-EINVAL);
 	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	srq->msrq.wqe_shift = ilog2(desc_size);
 	buf_size = srq->msrq.max * desc_size;
-	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
-		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
-		    srq->msrq.max_avail_gather);
+	if (buf_size < desc_size)
+		return ERR_PTR(-EINVAL);
 	in.type = init_attr->srq_type;
 
 	if (pd->uobject)