From 7d11b4787d538295f65dcd002954a0f8ed2c393c Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Thu, 27 Aug 2020 17:16:55 +0300 Subject: RDMA/qedr: Fix reported max_pkeys MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As the qedr driver supports both RoCE and iWARP, make sure to set max_pkeys only when running in RoCE mode. Link: https://lore.kernel.org/r/20200827141655.406185-1-kamalheib1@gmail.com Signed-off-by: Kamal Heib Acked-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index b49bef94637e..0bdfa300865d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -157,7 +157,7 @@ int qedr_query_device(struct ib_device *ibdev, attr->local_ca_ack_delay = qattr->dev_ack_delay; attr->max_fast_reg_page_list_len = qattr->max_mr / 8; - attr->max_pkeys = QEDR_ROCE_PKEY_MAX; + attr->max_pkeys = qattr->max_pkey; attr->max_ah = qattr->max_ah; return 0; -- cgit v1.2.3 From 91a7c58fce065506fd98954d27694e5d83c96638 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 7 Sep 2020 15:09:13 +0300 Subject: RDMA: Restore ability to fail on PD deallocate IB verbs objects are refcounted by the kernel, and ib_core ensures that PD deallocation will succeed because it is called only once all other objects that depend on the PD have been released. This is achieved by managing various reference counters on such objects. The mlx5 driver didn't follow this standard flow when it allowed DEVX objects, which are not managed by ib_core, to be interleaved with the ones under ib_core responsibility. In such interleaved scenarios the deallocate command can fail, and ib_core will leave the uobject in its internal DB and attempt to clean it up later to free the resources anyway. This change partially restores the returned value from dealloc_pd() for all drivers, while keeping in mind that non-DEVX devices and kernel verbs paths shouldn't fail. 
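To make the new driver contract concrete, here is a minimal hypothetical sketch (not taken from the patch; the foo_* device, PD and command helpers are invented for illustration) of a dealloc_pd callback once it is allowed to fail. The driver reports a command failure to ib_core instead of swallowing it, and ib_core frees the ib_pd only when the callback returns 0:

static int foo_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct foo_dev *dev = to_foo_dev(ibpd->device);
	struct foo_pd *pd = to_foo_pd(ibpd);
	int err;

	/* A DEVX-style interleaving can make this firmware command fail. */
	err = foo_cmd_dealloc_pd(dev, pd->pdn);
	if (err)
		/* ib_core keeps the uobject and retries the cleanup later. */
		return err;

	return 0;
}

Kernel-only callers keep using ib_dealloc_pd(), which, as the ib_verbs.h hunk further down shows, now just warns, since kernel verbs paths are not expected to fail.
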
Fixes: 21a428a019c9 ("RDMA: Handle PD allocations by IB/core") Link: https://lore.kernel.org/r/20200907120921.476363-2-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_std_types.c | 3 +-- drivers/infiniband/core/verbs.c | 8 ++++++-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 3 ++- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 2 +- drivers/infiniband/hw/cxgb4/provider.c | 3 ++- drivers/infiniband/hw/efa/efa.h | 2 +- drivers/infiniband/hw/efa/efa_verbs.c | 3 ++- drivers/infiniband/hw/hns/hns_roce_device.h | 2 +- drivers/infiniband/hw/hns/hns_roce_pd.c | 3 ++- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 3 ++- drivers/infiniband/hw/mlx4/main.c | 3 ++- drivers/infiniband/hw/mlx5/cmd.c | 4 ++-- drivers/infiniband/hw/mlx5/cmd.h | 2 +- drivers/infiniband/hw/mlx5/main.c | 4 ++-- drivers/infiniband/hw/mthca/mthca_provider.c | 3 ++- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 5 +++-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 2 +- drivers/infiniband/hw/qedr/verbs.c | 3 ++- drivers/infiniband/hw/qedr/verbs.h | 2 +- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 3 ++- drivers/infiniband/hw/usnic/usnic_ib_verbs.h | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 5 +++-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 2 +- drivers/infiniband/sw/rdmavt/pd.c | 3 ++- drivers/infiniband/sw/rdmavt/pd.h | 2 +- drivers/infiniband/sw/rxe/rxe_verbs.c | 3 ++- drivers/infiniband/sw/siw/siw_verbs.c | 3 ++- drivers/infiniband/sw/siw/siw_verbs.h | 2 +- include/rdma/ib_verbs.h | 13 +++++-------- 29 files changed, 56 insertions(+), 42 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index 08c39cfb1bd9..2932e832f48f 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -122,8 +122,7 @@ static int uverbs_free_pd(struct ib_uobject *uobject, if (ret) return ret; - ib_dealloc_pd_user(pd, &attrs->driver_udata); - return 0; + return ib_dealloc_pd_user(pd, &attrs->driver_udata); } void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 3096e73797b7..a4a2cd378cb4 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -329,7 +329,7 @@ EXPORT_SYMBOL(__ib_alloc_pd); * exist. The caller is responsible to synchronously destroy them and * guarantee no new allocations will happen. */ -void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata) +int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata) { int ret; @@ -343,9 +343,13 @@ void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata) requires the caller to guarantee we can't race here. 
*/ WARN_ON(atomic_read(&pd->usecnt)); + ret = pd->device->ops.dealloc_pd(pd, udata); + if (ret) + return ret; + rdma_restrack_del(&pd->res); - pd->device->ops.dealloc_pd(pd, udata); kfree(pd); + return ret; } EXPORT_SYMBOL(ib_dealloc_pd_user); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 5ee272d27aaa..c53f6e329d84 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -532,7 +532,7 @@ fail: } /* Protection Domains */ -void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata) +int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; @@ -542,6 +542,7 @@ void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata) if (pd->qplib_pd.id) bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, &pd->qplib_pd); + return 0; } int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 1daeb30e06fd..d9e2e406f66a 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -163,7 +163,7 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num, enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, u8 port_num); int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); -void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata); int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 6c579d2d3997..5f2b30624512 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -190,7 +190,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) return ret; } -static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata) +static int c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_pd *php; @@ -202,6 +202,7 @@ static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata) mutex_lock(&rhp->rdev.stats.lock); rhp->rdev.stats.pd.cur--; mutex_unlock(&rhp->rdev.stats.lock); + return 0; } static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata) diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h index 1889dd172a25..8547f9d543df 100644 --- a/drivers/infiniband/hw/efa/efa.h +++ b/drivers/infiniband/hw/efa/efa.h @@ -134,7 +134,7 @@ int efa_query_gid(struct ib_device *ibdev, u8 port, int index, int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey); int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); -void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); struct ib_qp *efa_create_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *init_attr, diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index de9a22f0fcc2..383ce126d82f 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -383,13 +383,14 @@ 
err_out: return err; } -void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibpd->device); struct efa_pd *pd = to_epd(ibpd); ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn); efa_pd_dealloc(dev, pd->pdn); + return 0; } static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 4f1dd916d05f..9688240d7fce 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1182,7 +1182,7 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags); int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); -void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc); struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index b10c50b8736e..98f69496adb4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -82,9 +82,10 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) return 0; } -void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) +int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn); + return 0; } int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index b51339328a51..c0f796cb6e5e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -328,12 +328,13 @@ error: * @ibpd: ptr of pd to be deallocated * @udata: user data or null for kernel object */ -static void i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +static int i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct i40iw_pd *iwpd = to_iwpd(ibpd); struct i40iw_device *iwdev = to_iwdev(ibpd->device); i40iw_rem_pdusecount(iwpd, iwdev); + return 0; } /** diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 2543062c0cb0..1be0108db992 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1214,9 +1214,10 @@ static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) return 0; } -static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) +static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn); + return 0; } static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata) diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index ebb2f108b64f..f5aac53cebf0 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -209,14 +209,14 @@ void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn, mlx5_cmd_exec_in(dev, dealloc_transport_domain, in); } -void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid) +int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid) { u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {}; 
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD); MLX5_SET(dealloc_pd_in, in, pd, pdn); MLX5_SET(dealloc_pd_in, in, uid, uid); - mlx5_cmd_exec_in(dev, dealloc_pd, in); + return mlx5_cmd_exec_in(dev, dealloc_pd, in); } int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h index 1d192a8ca87d..ca3afa7d73a3 100644 --- a/drivers/infiniband/hw/mlx5/cmd.h +++ b/drivers/infiniband/hw/mlx5/cmd.h @@ -44,7 +44,7 @@ int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point, int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr, u64 length, u32 alignment); void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length); -void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid); +int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid); void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid); void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid); void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d60d63221b14..bfa8b6b3c681 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2569,12 +2569,12 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) return 0; } -static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) +static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { struct mlx5_ib_dev *mdev = to_mdev(pd->device); struct mlx5_ib_pd *mpd = to_mpd(pd); - mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid); + return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid); } static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 9fa2f9164a47..d3ed7c19b2ef 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -373,9 +373,10 @@ static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) return 0; } -static void mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) +static int mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); + return 0; } static int mthca_ah_create(struct ib_ah *ibah, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index c1751c9a0f62..a9d2f7a40b51 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -664,7 +664,7 @@ exit: return status; } -void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +int ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); @@ -682,10 +682,11 @@ void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) if (is_ucontext_pd(uctx, pd)) { ocrdma_release_ucontext_pd(uctx); - return; + return 0; } } _ocrdma_dealloc_pd(dev, pd); + return 0; } static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index df8e3b923a44..4c85be43507c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -67,7 +67,7 @@ 
void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx); int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma); int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); -void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +int ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 0bdfa300865d..c81d1e547295 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -471,13 +471,14 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) return 0; } -void qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibpd->device); struct qedr_pd *pd = get_qedr_pd(ibpd); DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id); dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id); + return 0; } static void qedr_free_pbl(struct qedr_dev *dev, diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 39dd6286ba39..1b450919ba9c 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -47,7 +47,7 @@ void qedr_dealloc_ucontext(struct ib_ucontext *uctx); int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma); void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry); int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); -void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +int qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 02a49f661c8d..8af3212101be 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -449,9 +449,10 @@ int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) return 0; } -void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) +int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd); + return 0; } struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index 9195f2b901ce..f8911c0330e2 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -49,7 +49,7 @@ int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid); int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); -void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 65ac3693ad12..678c94531e68 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -479,9 +479,9 @@ err: * @pd: the protection domain to be released * @udata: user data or null for kernel object * - * @return: 0 
on success, otherwise errno. + * @return: Always 0 */ -void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) +int pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { struct pvrdma_dev *dev = to_vdev(pd->device); union pvrdma_cmd_req req = {}; @@ -498,6 +498,7 @@ void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) ret); atomic_dec(&dev->num_pds); + return 0; } /** diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index 699b20849a7e..7bf33a654275 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -399,7 +399,7 @@ int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); void pvrdma_dealloc_ucontext(struct ib_ucontext *context); int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); -void pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +int pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc); struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c index a403718f0b5e..01b7abf91520 100644 --- a/drivers/infiniband/sw/rdmavt/pd.c +++ b/drivers/infiniband/sw/rdmavt/pd.c @@ -95,11 +95,12 @@ bail: * * Return: always 0 */ -void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +int rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct rvt_dev_info *dev = ib_to_rvt(ibpd->device); spin_lock(&dev->n_pds_lock); dev->n_pds_allocated--; spin_unlock(&dev->n_pds_lock); + return 0; } diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h index 71ba76d72b1d..06a6a38beedc 100644 --- a/drivers/infiniband/sw/rdmavt/pd.h +++ b/drivers/infiniband/sw/rdmavt/pd.h @@ -51,6 +51,6 @@ #include int rvt_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); -void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +int rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); #endif /* DEF_RDMAVTPD_H */ diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 36edc294e105..7fe7316bd287 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -148,11 +148,12 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem); } -static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct rxe_pd *pd = to_rpd(ibpd); rxe_drop_ref(pd); + return 0; } static int rxe_create_ah(struct ib_ah *ibah, diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index adafa1b8bebe..2d2b6df0b027 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -234,12 +234,13 @@ int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) return 0; } -void siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) +int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { struct siw_device *sdev = to_siw_dev(pd->device); siw_dbg_pd(pd, "free PD\n"); atomic_dec(&sdev->num_pd); + return 0; } void siw_qp_get_ref(struct ib_qp *base_qp) diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h 
index d9572275a6b6..3dbab78579cb 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.h +++ b/drivers/infiniband/sw/siw/siw_verbs.h @@ -49,7 +49,7 @@ int siw_query_port(struct ib_device *base_dev, u8 port, int siw_query_gid(struct ib_device *base_dev, u8 port, int idx, union ib_gid *gid); int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata); -void siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata); +int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata); struct ib_qp *siw_create_qp(struct ib_pd *base_pd, struct ib_qp_init_attr *attr, struct ib_udata *udata); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 3781d36df31c..a49aaebaed58 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2398,7 +2398,7 @@ struct ib_device_ops { void (*mmap_free)(struct rdma_user_mmap_entry *entry); void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata); - void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); + int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr, struct ib_udata *udata); int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); @@ -3456,12 +3456,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, #define ib_alloc_pd(device, flags) \ __ib_alloc_pd((device), (flags), KBUILD_MODNAME) -/** - * ib_dealloc_pd_user - Deallocate kernel/user PD - * @pd: The protection domain - * @udata: Valid user data or NULL for kernel objects - */ -void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); +int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); /** * ib_dealloc_pd - Deallocate kernel PD @@ -3471,7 +3466,9 @@ void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); */ static inline void ib_dealloc_pd(struct ib_pd *pd) { - ib_dealloc_pd_user(pd, NULL); + int ret = ib_dealloc_pd_user(pd, NULL); + + WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail"); } enum rdma_create_ah_flags { -- cgit v1.2.3 From 9a9ebf8cd72b809405ad571fb2f635ffc9df2420 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 7 Sep 2020 15:09:14 +0300 Subject: RDMA: Restore ability to fail on AH destroy Like any other IB verbs object, an AH is refcounted by ib_core. The release of those objects is controlled by ib_core with the promise that AH destroy can't fail. As the AH is a SW object for now, this change makes dealloc_ah() behave like any other destroy IB flow. 
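The efa hunk later in this patch shows one concrete use of the new return value: refusing a destroy that would need to sleep when called from atomic context. A hypothetical driver following the same pattern (foo_* names invented for illustration) might do:

static int foo_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct foo_dev *dev = to_foo_dev(ibah->device);

	/* Teardown needs a sleeping firmware command. */
	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE))
		return -EOPNOTSUPP;

	foo_ah_destroy(dev, to_foo_ah(ibah));
	return 0;
}

rdma_destroy_ah_user() only drops the PD reference and frees the ib_ah when this callback returns 0.
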
Fixes: d345691471b4 ("RDMA: Handle AH allocations by IB/core") Link: https://lore.kernel.org/r/20200907120921.476363-3-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/verbs.c | 8 ++++++-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 3 ++- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 2 +- drivers/infiniband/hw/efa/efa.h | 2 +- drivers/infiniband/hw/efa/efa_verbs.c | 5 +++-- drivers/infiniband/hw/hns/hns_roce_ah.c | 5 ----- drivers/infiniband/hw/hns/hns_roce_device.h | 5 ++++- drivers/infiniband/hw/mlx4/ah.c | 5 ----- drivers/infiniband/hw/mlx4/mlx4_ib.h | 5 ++++- drivers/infiniband/hw/mlx5/ah.c | 5 ----- drivers/infiniband/hw/mlx5/mlx5_ib.h | 5 ++++- drivers/infiniband/hw/mthca/mthca_provider.c | 3 ++- drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 3 ++- drivers/infiniband/hw/ocrdma/ocrdma_ah.h | 2 +- drivers/infiniband/hw/qedr/verbs.c | 3 ++- drivers/infiniband/hw/qedr/verbs.h | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 3 ++- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 2 +- drivers/infiniband/sw/rdmavt/ah.c | 3 ++- drivers/infiniband/sw/rdmavt/ah.h | 2 +- drivers/infiniband/sw/rxe/rxe_verbs.c | 3 ++- include/rdma/ib_verbs.h | 8 +++++--- 22 files changed, 46 insertions(+), 38 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index a4a2cd378cb4..bd345e7ce913 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -968,18 +968,22 @@ int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata) { const struct ib_gid_attr *sgid_attr = ah->sgid_attr; struct ib_pd *pd; + int ret; might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE); pd = ah->pd; - ah->device->ops.destroy_ah(ah, flags); + ret = ah->device->ops.destroy_ah(ah, flags); + if (ret) + return ret; + atomic_dec(&pd->usecnt); if (sgid_attr) rdma_put_gid_attr(sgid_attr); kfree(ah); - return 0; + return ret; } EXPORT_SYMBOL(rdma_destroy_ah_user); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index c53f6e329d84..67ebf1996700 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -602,13 +602,14 @@ fail: } /* Address Handles */ -void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags) +int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags) { struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); struct bnxt_re_dev *rdev = ah->rdev; bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, !(flags & RDMA_DESTROY_AH_SLEEPABLE)); + return 0; } static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index d9e2e406f66a..b6b56a92b78e 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -168,7 +168,7 @@ int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata); int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); -void bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags); +int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags); int bnxt_re_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *srq_init_attr, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h index 8547f9d543df..6b06ce87fbfc 100644 --- 
a/drivers/infiniband/hw/efa/efa.h +++ b/drivers/infiniband/hw/efa/efa.h @@ -156,7 +156,7 @@ void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry); int efa_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata); -void efa_destroy_ah(struct ib_ah *ibah, u32 flags); +int efa_destroy_ah(struct ib_ah *ibah, u32 flags); int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_udata *udata); enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 383ce126d82f..a03e3514bd8a 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1873,7 +1873,7 @@ err_out: return err; } -void efa_destroy_ah(struct ib_ah *ibah, u32 flags) +int efa_destroy_ah(struct ib_ah *ibah, u32 flags) { struct efa_dev *dev = to_edev(ibah->pd->device); struct efa_ah *ah = to_eah(ibah); @@ -1883,10 +1883,11 @@ void efa_destroy_ah(struct ib_ah *ibah, u32 flags) if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) { ibdev_dbg(&dev->ibdev, "Destroy address handle is not supported in atomic context\n"); - return; + return -EOPNOTSUPP; } efa_ah_destroy(dev, ah); + return 0; } struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num) diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index 54cadbc0724e..75b06db60f7c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -116,8 +116,3 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) return 0; } - -void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags) -{ - return; -} diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 9688240d7fce..da3e8ed916f8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1179,7 +1179,10 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap, int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata); int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); -void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags); +static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags) +{ + return 0; +} int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 5f8f8d5c0ce0..7321d6ab5fe1 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -232,8 +232,3 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) return 0; } - -void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags) -{ - return; -} diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index bcac8fc50317..6d51653edaf8 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -753,7 +753,10 @@ int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, int slave_sgid_index, u8 *s_mac, u16 vlan_tag); int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); -void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags); +static inline int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags) +{ + return 0; +} int 
mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c index 4a60e693a04d..505bc47fd575 100644 --- a/drivers/infiniband/hw/mlx5/ah.c +++ b/drivers/infiniband/hw/mlx5/ah.c @@ -147,8 +147,3 @@ int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) return 0; } - -void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags) -{ - return; -} diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 5287fc868662..1e5f77d3e86b 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1119,7 +1119,10 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata); int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); -void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags); +static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags) +{ + return 0; +} int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr, struct ib_udata *udata); int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index d3ed7c19b2ef..12b7c5349004 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -390,9 +390,10 @@ static int mthca_ah_create(struct ib_ah *ibah, init_attr->ah_attr, ah); } -static void mthca_ah_destroy(struct ib_ah *ah, u32 flags) +static int mthca_ah_destroy(struct ib_ah *ah, u32 flags) { mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); + return 0; } static int mthca_create_srq(struct ib_srq *ibsrq, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 6eea02b18968..699a8b719ed6 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -215,12 +215,13 @@ av_err: return status; } -void ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags) +int ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags) { struct ocrdma_ah *ah = get_ocrdma_ah(ibah); struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device); ocrdma_free_av(dev, ah); + return 0; } int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h index 8b73b3489f3a..35cf2e2ff391 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h @@ -53,7 +53,7 @@ enum { int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata); -void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags); +int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags); int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags, diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index c81d1e547295..f85e916bec7d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2767,11 +2767,12 @@ int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, return 0; } -void qedr_destroy_ah(struct ib_ah *ibah, u32 flags) +int qedr_destroy_ah(struct ib_ah *ibah, u32 flags) { struct qedr_ah *ah = get_qedr_ah(ibah); rdma_destroy_ah_attr(&ah->attr); + return 0; } 
static void free_mr_info(struct qedr_dev *dev, struct mr_info *info) diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 1b450919ba9c..1b4ed8d37f5e 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -72,7 +72,7 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_recv_wr); int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata); -void qedr_destroy_ah(struct ib_ah *ibah, u32 flags); +int qedr_destroy_ah(struct ib_ah *ibah, u32 flags); int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 678c94531e68..fc412cbfd042 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -548,9 +548,10 @@ int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, * @flags: destroy address handle flags (see enum rdma_destroy_ah_flags) * */ -void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags) +int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags) { struct pvrdma_dev *dev = to_vdev(ah->device); atomic_dec(&dev->num_ahs); + return 0; } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index 7bf33a654275..58b41a3e8b7e 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -416,7 +416,7 @@ int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata); -void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags); +int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags); int pvrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr, struct ib_udata *udata); diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c index 75a04b1497c4..b938c4ffa99a 100644 --- a/drivers/infiniband/sw/rdmavt/ah.c +++ b/drivers/infiniband/sw/rdmavt/ah.c @@ -132,7 +132,7 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, * * Return: 0 on success */ -void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags) +int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags) { struct rvt_dev_info *dev = ib_to_rvt(ibah->device); struct rvt_ah *ah = ibah_to_rvtah(ibah); @@ -143,6 +143,7 @@ void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags) spin_unlock_irqrestore(&dev->n_ahs_lock, flags); rdma_destroy_ah_attr(&ah->attr); + return 0; } /** diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h index 40b7123fec76..5a85edd06491 100644 --- a/drivers/infiniband/sw/rdmavt/ah.h +++ b/drivers/infiniband/sw/rdmavt/ah.h @@ -52,7 +52,7 @@ int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata); -void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags); +int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags); int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 7fe7316bd287..c346b0295a99 100644 --- 
a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -201,11 +201,12 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) return 0; } -static void rxe_destroy_ah(struct ib_ah *ibah, u32 flags) +static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags) { struct rxe_ah *ah = to_rah(ibah); rxe_drop_ref(ah); + return 0; } static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index a49aaebaed58..71b145f569a8 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2403,7 +2403,7 @@ struct ib_device_ops { struct ib_udata *udata); int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); - void (*destroy_ah)(struct ib_ah *ah, u32 flags); + int (*destroy_ah)(struct ib_ah *ah, u32 flags); int (*create_srq)(struct ib_srq *srq, struct ib_srq_init_attr *srq_init_attr, struct ib_udata *udata); @@ -3596,9 +3596,11 @@ int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata); * * NOTE: for user ah use rdma_destroy_ah_user with valid udata! */ -static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags) +static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags) { - return rdma_destroy_ah_user(ah, flags, NULL); + int ret = rdma_destroy_ah_user(ah, flags, NULL); + + WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail"); } struct ib_srq *ib_create_srq_user(struct ib_pd *pd, -- cgit v1.2.3 From 119181d1d4327d3259ab25aa0ea3d3bc364afcdc Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 7 Sep 2020 15:09:16 +0300 Subject: RDMA: Restore ability to fail on SRQ destroy In a similar way to other IB objects, restore the ability to return an error on SRQ destroy. Strictly speaking, this change is not necessary; it is provided here to ensure a symmetrical interface like the other destroy functions. 
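For drivers whose SRQ teardown involves a firmware command, the new return value lets that failure propagate instead of being lost. A rough hypothetical sketch (foo_* names invented, not from the patch) of such a destroy_srq callback:

static int foo_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct foo_dev *dev = to_foo_dev(ibsrq->device);
	struct foo_srq *srq = to_foo_srq(ibsrq);
	int err;

	err = foo_cmd_destroy_srq(dev, srq->srqn);
	if (err)
		/* ib_core leaves the ib_srq and its reference counts intact. */
		return err;

	foo_free_srq_buf(dev, srq);
	return 0;
}
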
Fixes: 68e326dea1db ("RDMA: Handle SRQ allocations by IB/core") Link: https://lore.kernel.org/r/20200907120921.476363-5-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/verbs.c | 8 ++++++-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 3 ++- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 2 +- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2 +- drivers/infiniband/hw/cxgb4/qp.c | 3 ++- drivers/infiniband/hw/hns/hns_roce_device.h | 2 +- drivers/infiniband/hw/hns/hns_roce_srq.c | 3 ++- drivers/infiniband/hw/mlx4/mlx4_ib.h | 2 +- drivers/infiniband/hw/mlx4/srq.c | 3 ++- drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 +- drivers/infiniband/hw/mlx5/srq.c | 23 ++++++++++------------- drivers/infiniband/hw/mlx5/srq.h | 2 +- drivers/infiniband/hw/mlx5/srq_cmd.c | 7 ++++--- drivers/infiniband/hw/mthca/mthca_provider.c | 3 ++- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 3 ++- drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 2 +- drivers/infiniband/hw/qedr/verbs.c | 3 ++- drivers/infiniband/hw/qedr/verbs.h | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | 3 ++- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 2 +- drivers/infiniband/sw/rdmavt/srq.c | 3 ++- drivers/infiniband/sw/rdmavt/srq.h | 2 +- drivers/infiniband/sw/rxe/rxe_verbs.c | 3 ++- drivers/infiniband/sw/siw/siw_verbs.c | 3 ++- drivers/infiniband/sw/siw/siw_verbs.h | 2 +- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 6 +----- include/rdma/ib_verbs.h | 8 +++++--- 27 files changed, 59 insertions(+), 48 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index bd345e7ce913..41e2e35fa090 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1068,10 +1068,14 @@ EXPORT_SYMBOL(ib_query_srq); int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata) { + int ret; + if (atomic_read(&srq->usecnt)) return -EBUSY; - srq->device->ops.destroy_srq(srq, udata); + ret = srq->device->ops.destroy_srq(srq, udata); + if (ret) + return ret; atomic_dec(&srq->pd->usecnt); if (srq->srq_type == IB_SRQT_XRC) @@ -1080,7 +1084,7 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata) atomic_dec(&srq->ext.cq->usecnt); kfree(srq); - return 0; + return ret; } EXPORT_SYMBOL(ib_destroy_srq_user); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 67ebf1996700..cb5074575ba9 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1570,7 +1570,7 @@ static enum ib_mtu __to_ib_mtu(u32 mtu) } /* Shared Receive Queues */ -void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) +int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) { struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); @@ -1585,6 +1585,7 @@ void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) atomic_dec(&rdev->srq_count); if (nq) nq->budget--; + return 0; } static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index b6b56a92b78e..7ca232809466 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -176,7 +176,7 @@ int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr, enum ib_srq_attr_mask srq_attr_mask, struct ib_udata *udata); int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); -void 
bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); +int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr, const struct ib_recv_wr **bad_recv_wr); struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd, diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 2b2b009b371a..fa91e80869c0 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -999,7 +999,7 @@ int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask srq_attr_mask, struct ib_udata *udata); -void c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata); +int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata); int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs, struct ib_udata *udata); int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index cbddb20c6121..f20379e4e2ec 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2797,7 +2797,7 @@ err_free_wr_wait: return ret; } -void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) +int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_srq *srq; @@ -2813,4 +2813,5 @@ void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) srq->wr_waitp); c4iw_free_srq_idx(&rhp->rdev, srq->idx); c4iw_put_wr_wait(srq->wr_waitp); + return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index da3e8ed916f8..462a6a5cd92a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1224,7 +1224,7 @@ int hns_roce_create_srq(struct ib_srq *srq, int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr, enum ib_srq_attr_mask srq_attr_mask, struct ib_udata *udata); -void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); +int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 686b0c89a8de..8caf74e44efd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -363,7 +363,7 @@ err_buf_alloc: return ret; } -void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) +int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); struct hns_roce_srq *srq = to_hr_srq(ibsrq); @@ -372,6 +372,7 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) free_srq_idx(hr_dev, srq); free_srq_wrid(srq); free_srq_buf(hr_dev, srq); + return 0; } int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev) diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 6d51653edaf8..392a5a7c2a31 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -763,7 +763,7 @@ int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr, int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct 
ib_udata *udata); int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); -void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); +int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index); int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr); diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 8f9d5035142d..2651b68a1c04 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -260,7 +260,7 @@ int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) return 0; } -void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) +int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(srq->device); struct mlx4_ib_srq *msrq = to_msrq(srq); @@ -282,6 +282,7 @@ void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) mlx4_db_free(dev->dev, &msrq->db); } ib_umem_release(msrq->umem); + return 0; } void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 1e5f77d3e86b..b7b00e9e180b 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1128,7 +1128,7 @@ int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr, int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr); -void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); +int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr); int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 7e10cbcb6d5c..e2f720eec1e1 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -389,24 +389,21 @@ out_box: return ret; } -void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) +int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(srq->device); struct mlx5_ib_srq *msrq = to_msrq(srq); + int ret; + + ret = mlx5_cmd_destroy_srq(dev, &msrq->msrq); + if (ret) + return ret; - mlx5_cmd_destroy_srq(dev, &msrq->msrq); - - if (srq->uobject) { - mlx5_ib_db_unmap_user( - rdma_udata_to_drv_context( - udata, - struct mlx5_ib_ucontext, - ibucontext), - &msrq->db); - ib_umem_release(msrq->umem); - } else { + if (udata) + destroy_srq_user(srq->pd, msrq, udata); + else destroy_srq_kernel(dev, msrq); - } + return 0; } void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index) diff --git a/drivers/infiniband/hw/mlx5/srq.h b/drivers/infiniband/hw/mlx5/srq.h index af197c36d757..2c3627b2509d 100644 --- a/drivers/infiniband/hw/mlx5/srq.h +++ b/drivers/infiniband/hw/mlx5/srq.h @@ -56,7 +56,7 @@ struct mlx5_srq_table { int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *in); -void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq); +int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq); int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct 
mlx5_core_srq *srq, struct mlx5_srq_attr *out); int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c index c53acbc63d0b..db889ec3fd48 100644 --- a/drivers/infiniband/hw/mlx5/srq_cmd.c +++ b/drivers/infiniband/hw/mlx5/srq_cmd.c @@ -590,7 +590,7 @@ err_destroy_srq_split: return err; } -void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) +int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) { struct mlx5_srq_table *table = &dev->srq_table; struct mlx5_core_srq *tmp; @@ -599,7 +599,7 @@ void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) /* Delete entry, but leave index occupied */ tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0); if (WARN_ON(tmp != srq)) - return; + return xa_err(tmp) ?: -EINVAL; err = destroy_srq_split(dev, srq); if (err) { @@ -609,12 +609,13 @@ void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) * entry and it can't fail at this stage. */ xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0); - return; + return err; } xa_erase_irq(&table->array, srq->srqn); mlx5_core_res_put(&srq->common); wait_for_completion(&srq->common.free); + return 0; } int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 12b7c5349004..5d1e17214f0c 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -442,7 +442,7 @@ static int mthca_create_srq(struct ib_srq *ibsrq, return 0; } -static void mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) +static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) { if (udata) { struct mthca_ucontext *context = @@ -456,6 +456,7 @@ static void mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) } mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); + return 0; } static struct ib_qp *mthca_create_qp(struct ib_pd *pd, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index a9d2f7a40b51..ed8c89c0b3e8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1858,7 +1858,7 @@ int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) return status; } -void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) +int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) { struct ocrdma_srq *srq; struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device); @@ -1873,6 +1873,7 @@ void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) kfree(srq->idx_bit_fields); kfree(srq->rqe_wr_id_tbl); + return 0; } /* unprivileged verbs and their support functions. 
*/ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index 4c85be43507c..4f6806f16e61 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -92,7 +92,7 @@ int ocrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attr, int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *, enum ib_srq_attr_mask, struct ib_udata *); int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *); -void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); +int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *, const struct ib_recv_wr **bad_recv_wr); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index f85e916bec7d..7d65824f77bc 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1592,7 +1592,7 @@ err0: return -EFAULT; } -void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) +int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) { struct qed_rdma_destroy_srq_in_params in_params = {}; struct qedr_dev *dev = get_qedr_dev(ibsrq->device); @@ -1610,6 +1610,7 @@ void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) DP_DEBUG(dev, QEDR_MSG_SRQ, "destroy srq: destroyed srq with srq_id=0x%0x\n", srq->srq_id); + return 0; } int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 1b4ed8d37f5e..a78b206d8b5a 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -67,7 +67,7 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *attr, int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); -void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); +int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_recv_wr); int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c index f60a8e81bddd..f6802276fc04 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c @@ -240,7 +240,7 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq) * * @return: 0 for success. 
*/ -void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) +int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) { struct pvrdma_srq *vsrq = to_vsrq(srq); union pvrdma_cmd_req req; @@ -259,6 +259,7 @@ void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) ret); pvrdma_free_srq(dev, vsrq); + return 0; } /** diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index 58b41a3e8b7e..f9edce71b79b 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -423,7 +423,7 @@ int pvrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr, int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); -void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); +int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c index f547c115af03..64d98bf238ab 100644 --- a/drivers/infiniband/sw/rdmavt/srq.c +++ b/drivers/infiniband/sw/rdmavt/srq.c @@ -332,7 +332,7 @@ int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) * @ibsrq: srq object to destroy * */ -void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) +int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) { struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device); @@ -343,4 +343,5 @@ void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) if (srq->ip) kref_put(&srq->ip->ref, rvt_release_mmap_info); kvfree(srq->rq.kwq); + return 0; } diff --git a/drivers/infiniband/sw/rdmavt/srq.h b/drivers/infiniband/sw/rdmavt/srq.h index 6427d7d62a9a..d5a1a053b1b9 100644 --- a/drivers/infiniband/sw/rdmavt/srq.h +++ b/drivers/infiniband/sw/rdmavt/srq.h @@ -56,6 +56,6 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); -void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); +int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); #endif /* DEF_RVTSRQ_H */ diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index c346b0295a99..df5e9b0157ef 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -340,7 +340,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) return 0; } -static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) +static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) { struct rxe_srq *srq = to_rsrq(ibsrq); @@ -349,6 +349,7 @@ static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) rxe_drop_ref(srq->pd); rxe_drop_ref(srq); + return 0; } static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 2d2b6df0b027..a6ec1e968fb4 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -1691,7 +1691,7 @@ int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs) * QP anymore - the code trusts the RDMA 
core environment to keep track * of QP references. */ -void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata) +int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata) { struct siw_srq *srq = to_siw_srq(base_srq); struct siw_device *sdev = to_siw_dev(base_srq->device); @@ -1703,6 +1703,7 @@ void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata) rdma_user_mmap_entry_remove(srq->srq_entry); vfree(srq->recvq); atomic_dec(&sdev->num_srq); + return 0; } /* diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h index 3dbab78579cb..ed2d8ac2f967 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.h +++ b/drivers/infiniband/sw/siw/siw_verbs.h @@ -78,7 +78,7 @@ int siw_create_srq(struct ib_srq *base_srq, struct ib_srq_init_attr *attr, int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct ib_udata *udata); int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attr); -void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata); +int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata); int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr); int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 7c41fb040f7c..8f0b598a46ec 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1647,17 +1647,13 @@ int ipoib_cm_dev_init(struct net_device *dev) void ipoib_cm_dev_cleanup(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); - int ret; if (!priv->cm.srq) return; ipoib_dbg(priv, "Cleanup ipoib connected mode.\n"); - ret = ib_destroy_srq(priv->cm.srq); - if (ret) - ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret); - + ib_destroy_srq(priv->cm.srq); priv->cm.srq = NULL; if (!priv->cm.srq_ring) return; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 71b145f569a8..40bb6a455960 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2411,7 +2411,7 @@ struct ib_device_ops { enum ib_srq_attr_mask srq_attr_mask, struct ib_udata *udata); int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); - void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); + int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); struct ib_qp *(*create_qp)(struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr, struct ib_udata *udata); @@ -3654,9 +3654,11 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata); * * NOTE: for user srq use ib_destroy_srq_user with valid udata! */ -static inline int ib_destroy_srq(struct ib_srq *srq) +static inline void ib_destroy_srq(struct ib_srq *srq) { - return ib_destroy_srq_user(srq, NULL); + int ret = ib_destroy_srq_user(srq, NULL); + + WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail"); } /** -- cgit v1.2.3 From 43d781b9fa562f0c6e50f62c870fbfeb9dc85213 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 7 Sep 2020 15:09:18 +0300 Subject: RDMA: Allow fail of destroy CQ Like any other verbs objects, CQ shouldn't fail during destroy, but mlx5_ib didn't follow this contract with mixed IB verbs objects with DEVX. Such mix causes to the situation where FW and kernel are fully interdependent on the reference counting of each side. Kernel verbs and drivers that don't have DEVX flows shouldn't fail. 
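For a driver that has no firmware objects interleaved with ib_core state, the converted callback is trivial: release the resources and report success. A minimal sketch of the new contract follows (illustrative only; the struct and helper names here are hypothetical, the real conversions are in the hunks below):

        static int example_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
        {
                struct example_cq *cq = to_example_cq(ibcq);    /* hypothetical container_of() helper */

                example_free_cq_resources(cq);                  /* hypothetical cleanup */
                return 0;       /* kernel CQ destroy must not fail; the core WARNs if it does */
        }

Only a driver such as mlx5, where a DEVX object owned by userspace can still reference the CQ in firmware, is expected to actually return an error, and only on user paths.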
Fixes: e39afe3d6dbd ("RDMA: Convert CQ allocations to be under core responsibility") Link: https://lore.kernel.org/r/20200907120921.476363-7-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cq.c | 5 ++++- drivers/infiniband/core/verbs.c | 9 +++++++-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 3 ++- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 2 +- drivers/infiniband/hw/cxgb4/cq.c | 3 ++- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2 +- drivers/infiniband/hw/efa/efa.h | 2 +- drivers/infiniband/hw/efa/efa_verbs.c | 3 ++- drivers/infiniband/hw/hns/hns_roce_cq.c | 3 ++- drivers/infiniband/hw/hns/hns_roce_device.h | 4 ++-- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 3 ++- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 3 ++- drivers/infiniband/hw/mlx4/cq.c | 3 ++- drivers/infiniband/hw/mlx4/mlx4_ib.h | 2 +- drivers/infiniband/hw/mlx5/cq.c | 9 +++++++-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 +- drivers/infiniband/hw/mthca/mthca_provider.c | 3 ++- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 3 ++- drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 2 +- drivers/infiniband/hw/qedr/verbs.c | 5 +++-- drivers/infiniband/hw/qedr/verbs.h | 2 +- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 4 ++-- drivers/infiniband/hw/usnic/usnic_ib_verbs.h | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 3 ++- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 2 +- drivers/infiniband/sw/rdmavt/cq.c | 3 ++- drivers/infiniband/sw/rdmavt/cq.h | 2 +- drivers/infiniband/sw/rxe/rxe_verbs.c | 3 ++- drivers/infiniband/sw/siw/siw_verbs.c | 3 ++- drivers/infiniband/sw/siw/siw_verbs.h | 2 +- include/rdma/ib_verbs.h | 6 ++++-- 31 files changed, 66 insertions(+), 37 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index ab556407803c..11edf7308eac 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -319,6 +319,8 @@ EXPORT_SYMBOL(__ib_alloc_cq_any); */ void ib_free_cq(struct ib_cq *cq) { + int ret; + if (WARN_ON_ONCE(atomic_read(&cq->usecnt))) return; if (WARN_ON_ONCE(cq->cqe_used)) @@ -340,8 +342,9 @@ void ib_free_cq(struct ib_cq *cq) rdma_dim_destroy(cq); trace_cq_free(cq); + ret = cq->device->ops.destroy_cq(cq, NULL); + WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail"); rdma_restrack_del(&cq->res); - cq->device->ops.destroy_cq(cq, NULL); kfree(cq->wc); kfree(cq); } diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 41e2e35fa090..93503f10bcbb 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2023,16 +2023,21 @@ EXPORT_SYMBOL(rdma_set_cq_moderation); int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata) { + int ret; + if (WARN_ON_ONCE(cq->shared)) return -EOPNOTSUPP; if (atomic_read(&cq->usecnt)) return -EBUSY; + ret = cq->device->ops.destroy_cq(cq, udata); + if (ret) + return ret; + rdma_restrack_del(&cq->res); - cq->device->ops.destroy_cq(cq, udata); kfree(cq); - return 0; + return ret; } EXPORT_SYMBOL(ib_destroy_cq_user); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index cb5074575ba9..4f07011e04eb 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -2803,7 +2803,7 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr, } /* Completion Queues */ -void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) +int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct 
ib_udata *udata) { struct bnxt_re_cq *cq; struct bnxt_qplib_nq *nq; @@ -2819,6 +2819,7 @@ void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) atomic_dec(&rdev->cq_count); nq->budget--; kfree(cq->cql); + return 0; } int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 7ca232809466..9a8130b79256 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -193,7 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, const struct ib_recv_wr **bad_recv_wr); int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); -void bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); +int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags); diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 352b8af1998a..28349ed50885 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -967,7 +967,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) return !err || err == -ENODATA ? npolled : err; } -void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) +int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) { struct c4iw_cq *chp; struct c4iw_ucontext *ucontext; @@ -985,6 +985,7 @@ void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx, chp->destroy_skb, chp->wr_waitp); c4iw_put_wr_wait(chp->wr_waitp); + return 0; } int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index fa91e80869c0..dc65811e6a93 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -992,7 +992,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, struct ib_udata *udata); struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc); int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); -void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); +int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h index 6b06ce87fbfc..64ae8ba6a7f6 100644 --- a/drivers/infiniband/hw/efa/efa.h +++ b/drivers/infiniband/hw/efa/efa.h @@ -139,7 +139,7 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); struct ib_qp *efa_create_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata); -void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); +int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index a03e3514bd8a..57910bcfc572 100644 --- 
a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -973,7 +973,7 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx) return efa_com_destroy_cq(&dev->edev, ¶ms); } -void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) +int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibcq->device); struct efa_cq *cq = to_ecq(ibcq); @@ -986,6 +986,7 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) efa_destroy_cq_idx(dev, cq->cq_idx); efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE); + return 0; } static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq, diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index e87d616f7988..c5acf3332519 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -311,7 +311,7 @@ err_cq_buf: return ret; } -void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) +int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); @@ -322,6 +322,7 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) free_cq_buf(hr_dev, hr_cq); free_cq_db(hr_dev, hr_cq, udata); free_cqc(hr_dev, hr_cq); + return 0; } void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 462a6a5cd92a..30290a7ce286 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -931,7 +931,7 @@ struct hns_roce_hw { int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, struct ib_udata *udata); - void (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata); + int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata); int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); int (*init_eq)(struct hns_roce_dev *hr_dev); void (*cleanup_eq)(struct hns_roce_dev *hr_dev); @@ -1251,7 +1251,7 @@ int to_hr_qp_type(int qp_type); int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); -void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); +int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); int hns_roce_db_map_user(struct hns_roce_ucontext *context, struct ib_udata *udata, unsigned long virt, struct hns_roce_db *db); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 7e4b63c520e0..96c14e5fb7ba 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -3572,7 +3572,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) return 0; } -static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) +static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); @@ -3603,6 +3603,7 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) } wait_time++; } + return 0; } static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not) diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c 
b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index c0f796cb6e5e..6f40d1d82a25 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -1053,7 +1053,7 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq) * @ib_cq: cq pointer * @udata: user data or NULL for kernel object */ -static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) +static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) { struct i40iw_cq *iwcq; struct i40iw_device *iwdev; @@ -1065,6 +1065,7 @@ static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) i40iw_cq_wq_destroy(iwdev, cq); cq_free_resources(iwdev, iwcq); i40iw_rem_devusecount(iwdev); + return 0; } /** diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 8a3436994f80..ee50dd823a8e 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -475,7 +475,7 @@ out: return err; } -void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) +int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(cq->device); struct mlx4_ib_cq *mcq = to_mcq(cq); @@ -495,6 +495,7 @@ void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) mlx4_db_free(dev->dev, &mcq->db); } ib_umem_release(mcq->umem); + return 0; } static void dump_cqe(void *cqe) diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 392a5a7c2a31..32a024f765ea 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -742,7 +742,7 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); -void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); +int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index b318bde2e565..35e5bbb44d3d 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -1024,16 +1024,21 @@ err_cqb: return err; } -void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) +int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(cq->device); struct mlx5_ib_cq *mcq = to_mcq(cq); + int ret; + + ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq); + if (ret) + return ret; - mlx5_core_destroy_cq(dev->mdev, &mcq->mcq); if (udata) destroy_cq_user(mcq, udata); else destroy_cq_kernel(dev, mcq); + return 0; } static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index b7b00e9e180b..0a65f7ba40c4 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1151,7 +1151,7 @@ int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer, size_t buflen, size_t *bc); int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); -void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); +int mlx5_ib_destroy_cq(struct ib_cq 
*cq, struct ib_udata *udata); int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 5d1e17214f0c..4624b975fee2 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -792,7 +792,7 @@ out: return ret; } -static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) +static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { if (udata) { struct mthca_ucontext *context = @@ -811,6 +811,7 @@ static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) to_mcq(cq)->set_ci_db_index); } mthca_free_cq(to_mdev(cq->device), to_mcq(cq)); + return 0; } static inline u32 convert_access(int acc) diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index ed8c89c0b3e8..b24437619412 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1057,7 +1057,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq) spin_unlock_irqrestore(&cq->cq_lock, flags); } -void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) +int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); struct ocrdma_eq *eq = NULL; @@ -1082,6 +1082,7 @@ void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) ocrdma_get_db_addr(dev, pdid), dev->nic_info.db_page_size); } + return 0; } static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index 4f6806f16e61..425d554e7f3f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -72,7 +72,7 @@ int ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); -void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); +int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); struct ib_qp *ocrdma_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs, diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 7d65824f77bc..02368c3df802 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1052,7 +1052,7 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata) #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10) #define QEDR_DESTROY_CQ_ITER_DURATION (10) -void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) +int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibcq->device); struct qed_rdma_destroy_cq_out_params oparams; @@ -1067,7 +1067,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) /* GSIs CQs are handled by driver, so they don't exist in the FW */ if (cq->cq_type == QEDR_CQ_TYPE_GSI) { qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data); - return; + return 0; } iparams.icid = cq->icid; @@ -1115,6 +1115,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) * Since the destroy CQ ramrod has also been received on the EQ we can * be 
certain that there's no event handler in process. */ + return 0; } static inline int get_gid_info_from_table(struct ib_qp *ibqp, diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index a78b206d8b5a..4620fba34d5f 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -52,7 +52,7 @@ int qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); -void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); +int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs, struct ib_udata *); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 8af3212101be..9e961f8ffa10 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -586,9 +586,9 @@ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, return 0; } -void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) +int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { - return; + return 0; } struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index f8911c0330e2..11fe1ba6bbc9 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -58,7 +58,7 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); -void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); +int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index 01cd122a8b69..32aede5a3381 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -235,7 +235,7 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq) * @cq: the completion queue to destroy. 
* @udata: user data or null for kernel object */ -void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) +int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { struct pvrdma_cq *vcq = to_vcq(cq); union pvrdma_cmd_req req; @@ -261,6 +261,7 @@ void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) pvrdma_free_cq(dev, vcq); atomic_dec(&dev->num_cqs); + return 0; } static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i) diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index f9edce71b79b..97ed8f952f6e 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -411,7 +411,7 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); -void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); +int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index 04d2e72017fe..19248be14093 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -315,7 +315,7 @@ bail_wc: * * Called by ib_destroy_cq() in the generic verbs code. */ -void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) +int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); struct rvt_dev_info *rdi = cq->rdi; @@ -328,6 +328,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) kref_put(&cq->ip->ref, rvt_release_mmap_info); else vfree(cq->kqueue); + return 0; } /** diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h index 5e26a2eb19a4..feb01e7ee004 100644 --- a/drivers/infiniband/sw/rdmavt/cq.h +++ b/drivers/infiniband/sw/rdmavt/cq.h @@ -53,7 +53,7 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); -void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); +int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index df5e9b0157ef..5a4087b01757 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -779,13 +779,14 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem); } -static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) +static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct rxe_cq *cq = to_rcq(ibcq); rxe_cq_disable(cq); rxe_drop_ref(cq); + return 0; } static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index a6ec1e968fb4..7cf3242ffb41 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ 
b/drivers/infiniband/sw/siw/siw_verbs.c @@ -1056,7 +1056,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr, return rv > 0 ? 0 : rv; } -void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata) +int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata) { struct siw_cq *cq = to_siw_cq(base_cq); struct siw_device *sdev = to_siw_dev(base_cq->device); @@ -1074,6 +1074,7 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata) atomic_dec(&sdev->num_cq); vfree(cq->queue); + return 0; } /* diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h index ed2d8ac2f967..637454529357 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.h +++ b/drivers/infiniband/sw/siw/siw_verbs.h @@ -62,7 +62,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr); int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr); -void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata); +int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata); int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc); int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags); struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index b74fd1a5ccb6..bec05baaeaed 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2423,7 +2423,7 @@ struct ib_device_ops { int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); - void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); + int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, @@ -3890,7 +3890,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata); */ static inline void ib_destroy_cq(struct ib_cq *cq) { - ib_destroy_cq_user(cq, NULL); + int ret = ib_destroy_cq_user(cq, NULL); + + WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail"); } /** -- cgit v1.2.3 From 68363052ff5addd3817a104cc453f4e2045704c9 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 4 Sep 2020 19:41:50 -0300 Subject: RDMA/qedr: Use rdma_umem_for_each_dma_block() instead of open-coding This loop is splitting the DMA SGL into pg_shift sized pages, use the core code for this directly. 
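The iterator hides the PAGE_SIZE-to-device-page splitting that the old loop did by hand. The usage pattern, condensed from the hunk below (pg_shift is the device page shift passed into qedr_populate_pbls()):

        struct ib_block_iter biter;

        rdma_umem_for_each_dma_block(umem, &biter, BIT(pg_shift)) {
                u64 pg_addr = rdma_block_iter_dma_address(&biter);

                /* program one PBE of BIT(pg_shift) bytes starting at pg_addr */
        }

Each iteration yields one aligned DMA block of the requested size, regardless of how the umem's scatterlist happens to be segmented.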
Link: https://lore.kernel.org/r/9-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com Acked-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 41 +++++++++++++++----------------------- 1 file changed, 16 insertions(+), 25 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 02368c3df802..6605841e2739 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -601,11 +601,9 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, struct qedr_pbl_info *pbl_info, u32 pg_shift) { int pbe_cnt, total_num_pbes = 0; - u32 fw_pg_cnt, fw_pg_per_umem_pg; struct qedr_pbl *pbl_tbl; - struct sg_dma_page_iter sg_iter; + struct ib_block_iter biter; struct regpair *pbe; - u64 pg_addr; if (!pbl_info->num_pbes) return; @@ -626,32 +624,25 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, pbe_cnt = 0; - fw_pg_per_umem_pg = BIT(PAGE_SHIFT - pg_shift); + rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) { + u64 pg_addr = rdma_block_iter_dma_address(&biter); - for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) { - pg_addr = sg_page_iter_dma_address(&sg_iter); - for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) { - pbe->lo = cpu_to_le32(pg_addr); - pbe->hi = cpu_to_le32(upper_32_bits(pg_addr)); + pbe->lo = cpu_to_le32(pg_addr); + pbe->hi = cpu_to_le32(upper_32_bits(pg_addr)); - pg_addr += BIT(pg_shift); - pbe_cnt++; - total_num_pbes++; - pbe++; + pbe_cnt++; + total_num_pbes++; + pbe++; - if (total_num_pbes == pbl_info->num_pbes) - return; + if (total_num_pbes == pbl_info->num_pbes) + return; - /* If the given pbl is full storing the pbes, - * move to next pbl. - */ - if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { - pbl_tbl++; - pbe = (struct regpair *)pbl_tbl->va; - pbe_cnt = 0; - } - - fw_pg_cnt++; + /* If the given pbl is full storing the pbes, move to next pbl. + */ + if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { + pbl_tbl++; + pbe = (struct regpair *)pbl_tbl->va; + pbe_cnt = 0; } } } -- cgit v1.2.3 From 901bca71cd5bb940c7a57042c4c1cc15bc847ffe Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 4 Sep 2020 19:41:51 -0300 Subject: RDMA/qedr: Use ib_umem_num_dma_blocks() instead of ib_umem_page_count() The length of the list populated by qedr_populate_pbls() should be calculated using ib_umem_num_dma_blocks() with the same size/shift passed to qedr_populate_pbls(). 
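In other words, the element count and the block iterator must agree on the block size. A condensed sketch of the pairing (pg_size is a stand-in; the hunks below use 1 << FW_PAGE_SHIFT and PAGE_SIZE respectively):

        unsigned long num_blocks = ib_umem_num_dma_blocks(umem, pg_size);

        /* size the PBL for num_blocks entries ... */

        rdma_umem_for_each_dma_block(umem, &biter, pg_size) {
                /* ... then fill exactly one entry per block */
        }

ib_umem_page_count() counted PAGE_SIZE units, which only matches when the device block size equals the CPU page size.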
Link: https://lore.kernel.org/r/10-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com Acked-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 6605841e2739..9319778f5df4 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -784,9 +784,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata, return PTR_ERR(q->umem); } - fw_pages = ib_umem_page_count(q->umem) << - (PAGE_SHIFT - FW_PAGE_SHIFT); - + fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT); rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0); if (rc) goto err0; @@ -2856,7 +2854,8 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, goto err0; } - rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1); + rc = init_mr_info(dev, &mr->info, + ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1); if (rc) goto err1; -- cgit v1.2.3 From b3003a74456f0c1f614a46c07e16abe33bfdd087 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 4 Sep 2020 19:41:57 -0300 Subject: RDMA/qedr: Remove fbo and zbva from the MR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit zbva is always false, so fbo is never read. A 'zero-based-virtual-address' is simply IOVA == 0, and the driver already supports this. Link: https://lore.kernel.org/r/16-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com Acked-by: Michal KalderonĀ  Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 4 ---- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 12 ++---------- include/linux/qed/qed_rdma_if.h | 2 -- 3 files changed, 2 insertions(+), 16 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 9319778f5df4..f7bfc43f0f5c 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2882,10 +2882,8 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered; mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size); mr->hw_mr.page_size_log = PAGE_SHIFT; - mr->hw_mr.fbo = ib_umem_offset(mr->umem); mr->hw_mr.length = len; mr->hw_mr.vaddr = usr_addr; - mr->hw_mr.zbva = false; mr->hw_mr.phy_mr = false; mr->hw_mr.dma_mr = false; @@ -2978,10 +2976,8 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, mr->hw_mr.pbl_ptr = 0; mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered; mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size); - mr->hw_mr.fbo = 0; mr->hw_mr.length = 0; mr->hw_mr.vaddr = 0; - mr->hw_mr.zbva = false; mr->hw_mr.phy_mr = true; mr->hw_mr.dma_mr = false; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 03894584415d..0df6e0587752 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1521,7 +1521,7 @@ qed_rdma_register_tid(void *rdma_cxt, params->pbl_two_level); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, - params->zbva); + false); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); @@ -1583,15 +1583,7 @@ qed_rdma_register_tid(void *rdma_cxt, p_ramrod->pd = cpu_to_le16(params->pd); p_ramrod->length_hi = (u8)(params->length >> 32); p_ramrod->length_lo = 
DMA_LO_LE(params->length); - if (params->zbva) { - /* Lower 32 bits of the registered MR address. - * In case of zero based MR, will hold FBO - */ - p_ramrod->va.hi = 0; - p_ramrod->va.lo = cpu_to_le32(params->fbo); - } else { - DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); - } + DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr); /* DIF */ diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index f464d85e88a4..aeb242cefebf 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -242,10 +242,8 @@ struct qed_rdma_register_tid_in_params { bool pbl_two_level; u8 pbl_page_size_log; u8 page_size_log; - u32 fbo; u64 length; u64 vaddr; - bool zbva; bool phy_mr; bool dma_mr; -- cgit v1.2.3 From 098e345a1a8faaad6e4e54d138773466cecc45d4 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 2 Sep 2020 19:57:34 +0300 Subject: RDMA/qedr: Fix qp structure memory leak The qedr_qp structure wasn't freed when the protocol was RoCE. kmemleak output when running basic RoCE scenario. unreferenced object 0xffff927ad7e22c00 (size 1024): comm "ib_send_bw", pid 7082, jiffies 4384133693 (age 274.698s) hex dump (first 32 bytes): 00 b0 cd a2 79 92 ff ff 00 3f a1 a2 79 92 ff ff ....y....?..y... 00 ee 5c dd 80 92 ff ff 00 f6 5c dd 80 92 ff ff ..\.......\..... backtrace: [<00000000b2ba0f35>] qedr_create_qp+0xb3/0x6c0 [qedr] [<00000000e85a43dd>] ib_uverbs_handler_UVERBS_METHOD_QP_CREATE+0x555/0xad0 [ib_uverbs] [<00000000fee4d029>] ib_uverbs_cmd_verbs+0xa5a/0xb80 [ib_uverbs] [<000000005d622660>] ib_uverbs_ioctl+0xa4/0x110 [ib_uverbs] [<00000000eb4cdc71>] ksys_ioctl+0x87/0xc0 [<00000000abe6b23a>] __x64_sys_ioctl+0x16/0x20 [<0000000046e7cef4>] do_syscall_64+0x4d/0x90 [<00000000c6948f76>] entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fixes: 1212767e23bb ("qedr: Add wrapping generic structure for qpidr and adjust idr routines.") Link: https://lore.kernel.org/r/20200902165741.8355-2-michal.kalderon@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index f7bfc43f0f5c..bf5fa8db7837 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2744,6 +2744,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) if (rdma_protocol_iwarp(&dev->ibdev, 1)) qedr_iw_qp_rem_ref(&qp->ibqp); + else + kfree(qp); return 0; } -- cgit v1.2.3 From 0b1eddc1964351cd5ce57aff46853ed4ce9ebbff Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 2 Sep 2020 19:57:35 +0300 Subject: RDMA/qedr: Fix doorbell setting Change the doorbell setting so that the maximum value between the last and current value is set. This is to avoid doorbells being lost. 
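The intent described above is that racing doorbell writes aggregate with MAX, so a stale value can never overwrite a newer one and move the index backwards. The change, pulled out of the hunk below for readability:

        cq->db.data.params = DB_AGG_CMD_MAX <<
                             RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;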
Fixes: a7efd7773e31 ("qedr: Add support for PD,PKEY and CQ verbs") Link: https://lore.kernel.org/r/20200902165741.8355-3-michal.kalderon@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index bf5fa8db7837..1da0131d5042 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -989,7 +989,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, /* Generate doorbell address. */ cq->db.data.icid = cq->icid; cq->db_addr = dev->db_addr + db_offset; - cq->db.data.params = DB_AGG_CMD_SET << + cq->db.data.params = DB_AGG_CMD_MAX << RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT; /* point to the very last element, passing it we will toggle */ -- cgit v1.2.3 From a379ad54e55a12618cae7f6333fd1b3071de9606 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 2 Sep 2020 19:57:36 +0300 Subject: RDMA/qedr: Fix use of uninitialized field dev->attr.page_size_caps was used uninitialized when setting device attributes Fixes: ec72fce401c6 ("qedr: Add support for RoCE HW init") Link: https://lore.kernel.org/r/20200902165741.8355-4-michal.kalderon@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index d85f992bac29..8e1365951fb6 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -602,7 +602,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev) qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx); /* Part 2 - check capabilities */ - page_size = ~dev->attr.page_size_caps + 1; + page_size = ~qed_attr->page_size_caps + 1; if (page_size > PAGE_SIZE) { DP_ERR(dev, "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n", -- cgit v1.2.3 From 8a5a10a1a74465065c75d9de1aa6685e1f1aa117 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 2 Sep 2020 19:57:37 +0300 Subject: RDMA/qedr: Fix return code if accept is called on a destroyed qp In iWARP, accept could be called after a QP is already destroyed. In this case an error should be returned and not success. 
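The destroyed-QP case is already detected by the existing test_and_set_bit() on the connection-manager flags; the fix is to set an error code on that path instead of leaving rc at its previously initialized zero. Condensed from the hunk below:

        if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
                             &qp->iwarp_cm_flags)) {
                rc = -EINVAL;           /* QP already destroyed */
                goto err;
        }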
Fixes: 82af6d19d8d9 ("RDMA/qedr: Fix synchronization methods and memory leaks in qedr") Link: https://lore.kernel.org/r/20200902165741.8355-5-michal.kalderon@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/qedr_iw_cm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 97fc7dd353b0..c7169d2c69e5 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -736,7 +736,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct qedr_dev *dev = ep->dev; struct qedr_qp *qp; struct qed_iwarp_accept_in params; - int rc = 0; + int rc; DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn); @@ -759,8 +759,10 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) params.ord = conn_param->ord; if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT, - &qp->iwarp_cm_flags)) + &qp->iwarp_cm_flags)) { + rc = -EINVAL; goto err; /* QP already destroyed */ + } rc = dev->ops->iwarp_accept(dev->rdma_ctx, ¶ms); if (rc) { -- cgit v1.2.3 From cc293f5420e5726bdf2bb0fcb165c73e007e426e Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 2 Sep 2020 19:57:39 +0300 Subject: RDMA/qedr: Fix iWARP active mtu display Currently iWARP does not support mtu-change. Notify user when MTU changes that reload of qedr is required for mtu to change. Display the correct active mtu. Fixes: f5b1b1775be6 ("RDMA/qedr: Add iWARP support in existing verbs") Link: https://lore.kernel.org/r/20200902165741.8355-7-michal.kalderon@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/main.c | 7 +++++++ drivers/infiniband/hw/qedr/verbs.c | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 8e1365951fb6..1f32e8b8cc11 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -1026,6 +1026,13 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event) case QEDE_CHANGE_ADDR: qedr_mac_address_change(dev); break; + case QEDE_CHANGE_MTU: + if (rdma_protocol_iwarp(&dev->ibdev, 1)) + if (dev->ndev->mtu != dev->iwarp_max_mtu) + DP_NOTICE(dev, + "Mtu was changed from %d to %d. 
This will not take affect for iWARP until qedr is reloaded\n", + dev->iwarp_max_mtu, dev->ndev->mtu); + break; default: pr_err("Event not supported\n"); } diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 1da0131d5042..9edc42281824 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -231,15 +231,16 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr) attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; } attr->max_mtu = IB_MTU_4096; - attr->active_mtu = iboe_get_mtu(dev->ndev->mtu); attr->lid = 0; attr->lmc = 0; attr->sm_lid = 0; attr->sm_sl = 0; attr->ip_gids = true; if (rdma_protocol_iwarp(&dev->ibdev, 1)) { + attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu); attr->gid_tbl_len = 1; } else { + attr->active_mtu = iboe_get_mtu(dev->ndev->mtu); attr->gid_tbl_len = QEDR_MAX_SGID; attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN; } -- cgit v1.2.3 From fbf58026b2256e9cd5f241a4801d79d3b2b7b89d Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 2 Sep 2020 19:57:40 +0300 Subject: RDMA/qedr: Fix inline size returned for iWARP commit 59e8970b3798 ("RDMA/qedr: Return max inline data in QP query result") changed query_qp max_inline size to return the max roce inline size. When iwarp was introduced, this should have been modified to return the max inline size based on protocol. This size is cached in the device attributes Fixes: 69ad0e7fe845 ("RDMA/qedr: Add support for iWARP in user space") Link: https://lore.kernel.org/r/20200902165741.8355-8-michal.kalderon@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 9edc42281824..ed487b0419f0 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2629,7 +2629,7 @@ int qedr_query_qp(struct ib_qp *ibqp, qp_attr->cap.max_recv_wr = qp->rq.max_wr; qp_attr->cap.max_send_sge = qp->sq.max_sges; qp_attr->cap.max_recv_sge = qp->rq.max_sges; - qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE; + qp_attr->cap.max_inline_data = dev->attr.max_inline; qp_init_attr->cap = qp_attr->cap; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; -- cgit v1.2.3 From 9e054b13b2f747868c28539b3eb28256e237755f Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 2 Sep 2020 19:57:41 +0300 Subject: RDMA/qedr: Fix function prototype parameters alignment Alignment of parameters was off by one Link: https://lore.kernel.org/r/20200902165741.8355-9-michal.kalderon@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index ed487b0419f0..50f971d38aa0 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1240,8 +1240,8 @@ static int qedr_copy_srq_uresp(struct qedr_dev *dev, } static void qedr_copy_rq_uresp(struct qedr_dev *dev, - struct qedr_create_qp_uresp *uresp, - struct qedr_qp *qp) + struct qedr_create_qp_uresp *uresp, + struct qedr_qp *qp) { /* iWARP requires two doorbells per RQ. 
*/ if (rdma_protocol_iwarp(&dev->ibdev, 1)) { -- cgit v1.2.3 From 06e8d1df46ed52eca6915a2a76341ca65cc428b9 Mon Sep 17 00:00:00 2001 From: Yuval Basson Date: Wed, 22 Jul 2020 13:23:39 +0300 Subject: RDMA/qedr: Add support for user mode XRC-SRQ's Implement the XRC specific verbs. The additional QP type introduced new logic to the rest of the verbs that now require distinguishing whether a QP has an "RQ" or an "SQ" or both. Link: https://lore.kernel.org/r/20200722102339.30104-1-ybason@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Yuval Basson Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/main.c | 19 +++ drivers/infiniband/hw/qedr/qedr.h | 33 +++++ drivers/infiniband/hw/qedr/verbs.c | 291 +++++++++++++++++++++++++------------ drivers/infiniband/hw/qedr/verbs.h | 3 +- 4 files changed, 254 insertions(+), 92 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 1f32e8b8cc11..7c0aac3e635b 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -177,6 +177,8 @@ static int qedr_iw_register_device(struct qedr_dev *dev) } static const struct ib_device_ops qedr_roce_dev_ops = { + .alloc_xrcd = qedr_alloc_xrcd, + .dealloc_xrcd = qedr_dealloc_xrcd, .get_port_immutable = qedr_roce_port_immutable, .query_pkey = qedr_query_pkey, }; @@ -186,6 +188,10 @@ static void qedr_roce_register_device(struct qedr_dev *dev) dev->ibdev.node_type = RDMA_NODE_IB_CA; ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops); + + dev->ibdev.uverbs_cmd_mask |= QEDR_UVERBS(OPEN_XRCD) | + QEDR_UVERBS(CLOSE_XRCD) | + QEDR_UVERBS(CREATE_XSRQ); } static const struct ib_device_ops qedr_dev_ops = { @@ -232,6 +238,7 @@ static const struct ib_device_ops qedr_dev_ops = { INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq), + INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd), INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext), }; @@ -705,6 +712,18 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle) event.event = IB_EVENT_SRQ_ERR; event_type = EVENT_TYPE_SRQ; break; + case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR: + event.event = IB_EVENT_QP_ACCESS_ERR; + event_type = EVENT_TYPE_QP; + break; + case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR: + event.event = IB_EVENT_QP_ACCESS_ERR; + event_type = EVENT_TYPE_QP; + break; + case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR: + event.event = IB_EVENT_CQ_ERR; + event_type = EVENT_TYPE_CQ; + break; default: DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code, roce_handle64); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 460292179b32..9dde70373a55 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -310,6 +310,11 @@ struct qedr_pd { struct qedr_ucontext *uctx; }; +struct qedr_xrcd { + struct ib_xrcd ibxrcd; + u16 xrcd_id; +}; + struct qedr_qp_hwq_info { /* WQE Elements */ struct qed_chain pbl; @@ -361,6 +366,7 @@ struct qedr_srq { struct ib_umem *prod_umem; u16 srq_id; u32 srq_limit; + bool is_xrc; /* lock to protect srq recv post */ spinlock_t lock; }; @@ -573,6 +579,11 @@ static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd) return container_of(ibpd, struct qedr_pd, ibpd); } +static inline struct qedr_xrcd *get_qedr_xrcd(struct ib_xrcd *ibxrcd) +{ + return container_of(ibxrcd, struct qedr_xrcd, ibxrcd); +} + static inline struct qedr_cq *get_qedr_cq(struct ib_cq 
*ibcq) { return container_of(ibcq, struct qedr_cq, ibcq); @@ -598,6 +609,28 @@ static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq) return container_of(ibsrq, struct qedr_srq, ibsrq); } +static inline bool qedr_qp_has_srq(struct qedr_qp *qp) +{ + return qp->srq; +} + +static inline bool qedr_qp_has_sq(struct qedr_qp *qp) +{ + if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_TGT) + return 0; + + return 1; +} + +static inline bool qedr_qp_has_rq(struct qedr_qp *qp) +{ + if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_INI || + qp->qp_type == IB_QPT_XRC_TGT || qedr_qp_has_srq(qp)) + return 0; + + return 1; +} + static inline struct qedr_user_mmap_entry * get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry) { diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 50f971d38aa0..1b241fee4605 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -136,6 +136,8 @@ int qedr_query_device(struct ib_device *ibdev, IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS; + if (!rdma_protocol_iwarp(&dev->ibdev, 1)) + attr->device_cap_flags |= IB_DEVICE_XRC; attr->max_send_sge = qattr->max_sge; attr->max_recv_sge = qattr->max_sge; attr->max_sge_rd = qattr->max_sge; @@ -482,6 +484,23 @@ int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) return 0; } + +int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibxrcd->device); + struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd); + + return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id); +} + +int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibxrcd->device); + u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id; + + dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id); + return 0; +} static void qedr_free_pbl(struct qedr_dev *dev, struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl) { @@ -1178,7 +1197,10 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, struct qedr_device_attr *qattr = &dev->attr; /* QP0... attrs->qp_type == IB_QPT_GSI */ - if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) { + if (attrs->qp_type != IB_QPT_RC && + attrs->qp_type != IB_QPT_GSI && + attrs->qp_type != IB_QPT_XRC_INI && + attrs->qp_type != IB_QPT_XRC_TGT) { DP_DEBUG(dev, QEDR_MSG_QP, "create qp: unsupported qp type=0x%x requested\n", attrs->qp_type); @@ -1221,6 +1243,22 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, return -EINVAL; } + /* verify consumer QPs are not trying to use GSI QP's CQ. 
+ * TGT QP isn't associated with RQ/SQ + */ + if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) && + (attrs->qp_type != IB_QPT_XRC_TGT)) { + struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq); + struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq); + + if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) || + (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) { + DP_ERR(dev, + "create qp: consumer QP cannot use GSI CQs.\n"); + return -EINVAL; + } + } + return 0; } @@ -1283,8 +1321,12 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev, int rc; memset(uresp, 0, sizeof(*uresp)); - qedr_copy_sq_uresp(dev, uresp, qp); - qedr_copy_rq_uresp(dev, uresp, qp); + + if (qedr_qp_has_sq(qp)) + qedr_copy_sq_uresp(dev, uresp, qp); + + if (qedr_qp_has_rq(qp)) + qedr_copy_rq_uresp(dev, uresp, qp); uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE; uresp->qp_id = qp->qp_id; @@ -1308,18 +1350,25 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev, kref_init(&qp->refcnt); init_completion(&qp->iwarp_cm_comp); } + qp->pd = pd; qp->qp_type = attrs->qp_type; qp->max_inline_data = attrs->cap.max_inline_data; - qp->sq.max_sges = attrs->cap.max_send_sge; qp->state = QED_ROCE_QP_STATE_RESET; qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; - qp->sq_cq = get_qedr_cq(attrs->send_cq); qp->dev = dev; + if (qedr_qp_has_sq(qp)) { + qp->sq.max_sges = attrs->cap.max_send_sge; + qp->sq_cq = get_qedr_cq(attrs->send_cq); + DP_DEBUG(dev, QEDR_MSG_QP, + "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n", + qp->sq.max_sges, qp->sq_cq->icid); + } - if (attrs->srq) { + if (attrs->srq) qp->srq = get_qedr_srq(attrs->srq); - } else { + + if (qedr_qp_has_rq(qp)) { qp->rq_cq = get_qedr_cq(attrs->recv_cq); qp->rq.max_sges = attrs->cap.max_recv_sge; DP_DEBUG(dev, QEDR_MSG_QP, @@ -1338,30 +1387,26 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev, static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp) { - int rc; + int rc = 0; - qp->sq.db = dev->db_addr + - DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); - qp->sq.db_data.data.icid = qp->icid + 1; - rc = qedr_db_recovery_add(dev, qp->sq.db, - &qp->sq.db_data, - DB_REC_WIDTH_32B, - DB_REC_KERNEL); - if (rc) - return rc; + if (qedr_qp_has_sq(qp)) { + qp->sq.db = dev->db_addr + + DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); + qp->sq.db_data.data.icid = qp->icid + 1; + rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data, + DB_REC_WIDTH_32B, DB_REC_KERNEL); + if (rc) + return rc; + } - if (!qp->srq) { + if (qedr_qp_has_rq(qp)) { qp->rq.db = dev->db_addr + DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); qp->rq.db_data.data.icid = qp->icid; - - rc = qedr_db_recovery_add(dev, qp->rq.db, - &qp->rq.db_data, - DB_REC_WIDTH_32B, - DB_REC_KERNEL); - if (rc) - qedr_db_recovery_del(dev, qp->sq.db, - &qp->sq.db_data); + rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data, + DB_REC_WIDTH_32B, DB_REC_KERNEL); + if (rc && qedr_qp_has_sq(qp)) + qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data); } return rc; @@ -1384,6 +1429,10 @@ static int qedr_check_srq_params(struct qedr_dev *dev, DP_ERR(dev, "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n", attrs->attr.max_sge, qattr->max_sge); + } + + if (!udata && attrs->srq_type == IB_SRQT_XRC) { + DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n"); return -EINVAL; } @@ -1508,6 +1557,7 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr, return -EINVAL; srq->dev = dev; + srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC); hw_srq = 
&srq->hw_srq; spin_lock_init(&srq->lock); @@ -1549,6 +1599,14 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr, in_params.prod_pair_addr = phy_prod_pair_addr; in_params.num_pages = page_cnt; in_params.page_size = page_size; + if (srq->is_xrc) { + struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd); + struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq); + + in_params.is_xrc = 1; + in_params.xrcd_id = xrcd->xrcd_id; + in_params.cq_cid = cq->icid; + } rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params); if (rc) @@ -1591,6 +1649,7 @@ int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) xa_erase_irq(&dev->srqs, srq->srq_id); in_params.srq_id = srq->srq_id; + in_params.is_xrc = srq->is_xrc; dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params); if (ibsrq->uobject) @@ -1642,6 +1701,20 @@ int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, return 0; } +static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type) +{ + switch (ib_qp_type) { + case IB_QPT_RC: + return QED_RDMA_QP_TYPE_RC; + case IB_QPT_XRC_INI: + return QED_RDMA_QP_TYPE_XRC_INI; + case IB_QPT_XRC_TGT: + return QED_RDMA_QP_TYPE_XRC_TGT; + default: + return QED_RDMA_QP_TYPE_INVAL; + } +} + static inline void qedr_init_common_qp_in_params(struct qedr_dev *dev, struct qedr_pd *pd, @@ -1656,20 +1729,27 @@ qedr_init_common_qp_in_params(struct qedr_dev *dev, params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR); params->fmr_and_reserved_lkey = fmr_and_reserved_lkey; - params->pd = pd->pd_id; - params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi; - params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid; + params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type); params->stats_queue = 0; - params->srq_id = 0; - params->use_srq = false; - if (!qp->srq) { + if (pd) { + params->pd = pd->pd_id; + params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi; + } + + if (qedr_qp_has_sq(qp)) + params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid; + + if (qedr_qp_has_rq(qp)) params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid; - } else { + if (qedr_qp_has_srq(qp)) { params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid; params->srq_id = qp->srq->srq_id; params->use_srq = true; + } else { + params->srq_id = 0; + params->use_srq = false; } } @@ -1683,8 +1763,10 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp) "rq_len=%zd" "\n", qp, - qp->usq.buf_addr, - qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len); + qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0, + qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0, + qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0, + qedr_qp_has_sq(qp) ? qp->urq.buf_len : 0); } static inline void @@ -1710,11 +1792,15 @@ static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_ucontext *ctx, struct qedr_qp *qp) { - ib_umem_release(qp->usq.umem); - qp->usq.umem = NULL; + if (qedr_qp_has_sq(qp)) { + ib_umem_release(qp->usq.umem); + qp->usq.umem = NULL; + } - ib_umem_release(qp->urq.umem); - qp->urq.umem = NULL; + if (qedr_qp_has_rq(qp)) { + ib_umem_release(qp->urq.umem); + qp->urq.umem = NULL; + } if (rdma_protocol_roce(&dev->ibdev, 1)) { qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl); @@ -1749,28 +1835,38 @@ static int qedr_create_user_qp(struct qedr_dev *dev, { struct qed_rdma_create_qp_in_params in_params; struct qed_rdma_create_qp_out_params out_params; - struct qedr_pd *pd = get_qedr_pd(ibpd); - struct qedr_create_qp_uresp uresp; - struct qedr_ucontext *ctx = pd ? 
pd->uctx : NULL; - struct qedr_create_qp_ureq ureq; + struct qedr_create_qp_uresp uresp = {}; + struct qedr_create_qp_ureq ureq = {}; int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1); - int rc = -EINVAL; + struct qedr_ucontext *ctx = NULL; + struct qedr_pd *pd = NULL; + int rc = 0; qp->create_type = QEDR_QP_CREATE_USER; - memset(&ureq, 0, sizeof(ureq)); - rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen)); - if (rc) { - DP_ERR(dev, "Problem copying data from user space\n"); - return rc; + + if (ibpd) { + pd = get_qedr_pd(ibpd); + ctx = pd->uctx; } - /* SQ - read access only (0) */ - rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr, - ureq.sq_len, true, 0, alloc_and_init); - if (rc) - return rc; + if (udata) { + rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), + udata->inlen)); + if (rc) { + DP_ERR(dev, "Problem copying data from user space\n"); + return rc; + } + } - if (!qp->srq) { + if (qedr_qp_has_sq(qp)) { + /* SQ - read access only (0) */ + rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr, + ureq.sq_len, true, 0, alloc_and_init); + if (rc) + return rc; + } + + if (qedr_qp_has_rq(qp)) { /* RQ - read access only (0) */ rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr, ureq.rq_len, true, 0, alloc_and_init); @@ -1782,9 +1878,21 @@ static int qedr_create_user_qp(struct qedr_dev *dev, qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params); in_params.qp_handle_lo = ureq.qp_handle_lo; in_params.qp_handle_hi = ureq.qp_handle_hi; - in_params.sq_num_pages = qp->usq.pbl_info.num_pbes; - in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa; - if (!qp->srq) { + + if (qp->qp_type == IB_QPT_XRC_TGT) { + struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd); + + in_params.xrcd_id = xrcd->xrcd_id; + in_params.qp_handle_lo = qp->qp_id; + in_params.use_srq = 1; + } + + if (qedr_qp_has_sq(qp)) { + in_params.sq_num_pages = qp->usq.pbl_info.num_pbes; + in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa; + } + + if (qedr_qp_has_rq(qp)) { in_params.rq_num_pages = qp->urq.pbl_info.num_pbes; in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa; } @@ -1806,39 +1914,32 @@ static int qedr_create_user_qp(struct qedr_dev *dev, qp->qp_id = out_params.qp_id; qp->icid = out_params.icid; - rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp); - if (rc) - goto err; + if (udata) { + rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp); + if (rc) + goto err; + } /* db offset was calculated in copy_qp_uresp, now set in the user q */ - ctx = pd->uctx; - qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset; - qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset; - - if (rdma_protocol_iwarp(&dev->ibdev, 1)) { - qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset; - - /* calculate the db_rec_db2 data since it is constant so no - * need to reflect from user - */ - qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid); - qp->urq.db_rec_db2_data.data.value = - cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD); + if (qedr_qp_has_sq(qp)) { + qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset; + rc = qedr_db_recovery_add(dev, qp->usq.db_addr, + &qp->usq.db_rec_data->db_data, + DB_REC_WIDTH_32B, + DB_REC_USER); + if (rc) + goto err; } - rc = qedr_db_recovery_add(dev, qp->usq.db_addr, - &qp->usq.db_rec_data->db_data, - DB_REC_WIDTH_32B, - DB_REC_USER); - if (rc) - goto err; - - rc = qedr_db_recovery_add(dev, qp->urq.db_addr, - &qp->urq.db_rec_data->db_data, - DB_REC_WIDTH_32B, - DB_REC_USER); - if (rc) - goto err; + if (qedr_qp_has_rq(qp)) { + qp->urq.db_addr = ctx->dpi_addr + 
uresp.rq_db_offset; + rc = qedr_db_recovery_add(dev, qp->urq.db_addr, + &qp->urq.db_rec_data->db_data, + DB_REC_WIDTH_32B, + DB_REC_USER); + if (rc) + goto err; + } if (rdma_protocol_iwarp(&dev->ibdev, 1)) { rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr, @@ -1849,7 +1950,6 @@ static int qedr_create_user_qp(struct qedr_dev *dev, goto err; } qedr_qp_user_print(dev, qp); - return rc; err: rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); @@ -2109,12 +2209,21 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *attrs, struct ib_udata *udata) { - struct qedr_dev *dev = get_qedr_dev(ibpd->device); - struct qedr_pd *pd = get_qedr_pd(ibpd); + struct qedr_xrcd *xrcd = NULL; + struct qedr_pd *pd = NULL; + struct qedr_dev *dev; struct qedr_qp *qp; struct ib_qp *ibqp; int rc = 0; + if (attrs->qp_type == IB_QPT_XRC_TGT) { + xrcd = get_qedr_xrcd(attrs->xrcd); + dev = get_qedr_dev(xrcd->ibxrcd.device); + } else { + pd = get_qedr_pd(ibpd); + dev = get_qedr_dev(ibpd->device); + } + DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n", udata ? "user library" : "kernel", pd); @@ -2145,7 +2254,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, return ibqp; } - if (udata) + if (udata || xrcd) rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs); else rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs); diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 4620fba34d5f..2672c32bc2f7 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -48,7 +48,8 @@ int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma); void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry); int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); int qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); - +int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata); +int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata); int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); -- cgit v1.2.3 From 376ceb31ff879c2ee6b48eef841d6fa7720f6f43 Mon Sep 17 00:00:00 2001 From: Aharon Landau Date: Thu, 17 Sep 2020 12:02:23 +0300 Subject: RDMA: Fix link active_speed size According to the IB spec active_speed size should be u16 and not u8 as before. Changing it to allow further extensions in offered speeds. 
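The constraint driving this change: enum ib_port_speed uses power-of-two values, and IB_SPEED_NDR (128) is the largest speed the existing u8-sized uverbs field can carry, so the kernel now keeps the wider u16 internally and clamps what it reports through the old ABI. A minimal sketch of that clamping idea, as a hypothetical helper rather than code from this patch:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Report at most IB_SPEED_NDR through the legacy u8-sized ABI field. */
static u16 legacy_abi_speed(u16 active_speed)
{
        return min_t(u16, active_speed, IB_SPEED_NDR);
}

The same clamp appears in copy_port_attr_to_resp() in the diff below; reporting anything faster than NDR will require an extended ABI.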
Link: https://lore.kernel.org/r/20200917090223.1018224-4-leon@kernel.org Signed-off-by: Aharon Landau Reviewed-by: Michael Guralnik Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_std_types_device.c | 3 ++- drivers/infiniband/core/verbs.c | 2 +- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 2 +- drivers/infiniband/hw/hfi1/verbs.c | 2 +- drivers/infiniband/hw/mlx5/main.c | 8 ++------ drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2 +- drivers/infiniband/hw/qedr/verbs.c | 2 +- drivers/infiniband/hw/qib/qib.h | 6 +++--- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 2 +- include/rdma/ib_verbs.h | 7 ++++--- 10 files changed, 17 insertions(+), 19 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c index 75df2094a010..9f43c0161a8e 100644 --- a/drivers/infiniband/core/uverbs_std_types_device.c +++ b/drivers/infiniband/core/uverbs_std_types_device.c @@ -165,7 +165,8 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr, resp->subnet_timeout = attr->subnet_timeout; resp->init_type_reply = attr->init_type_reply; resp->active_width = attr->active_width; - resp->active_speed = attr->active_speed; + /* This ABI needs to be extended to provide any speed more than IB_SPEED_NDR */ + resp->active_speed = min_t(u16, attr->active_speed, IB_SPEED_NDR); resp->phys_state = attr->phys_state; resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num); } diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 307886737646..f5f8959a4592 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1781,7 +1781,7 @@ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr, } EXPORT_SYMBOL(ib_modify_qp_with_udata); -int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width) +int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width) { int rc; u32 netdev_speed; diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index a300588634c5..b930ea3dab7a 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -150,7 +150,7 @@ struct bnxt_re_dev { struct delayed_work worker; u8 cur_prio_map; - u8 active_speed; + u16 active_speed; u8 active_width; /* FP Notification Queue (CQ & SRQ) */ diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 30865635b449..3591923abebb 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1424,7 +1424,7 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num, props->gid_tbl_len = HFI1_GUIDS_PER_PORT; props->active_width = (u8)opa_width_to_ib(ppd->link_width_active); /* see rate_show() in ib core/sysfs.c */ - props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active); + props->active_speed = opa_speed_to_ib(ppd->link_speed_active); props->max_vl_num = ppd->vls_supported; /* Once we are a "first class" citizen and have added the OPA MTUs to diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 2dbacf0ebda3..b13d1be1ef52 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -457,7 +457,6 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, bool put_mdev = true; u16 qkey_viol_cntr; u32 eth_prot_oper; - u16 active_speed; u8 mdev_port_num; bool ext; int err; @@ 
-491,12 +490,9 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, props->active_width = IB_WIDTH_4X; props->active_speed = IB_SPEED_QDR; - translate_eth_proto_oper(eth_prot_oper, &active_speed, + translate_eth_proto_oper(eth_prot_oper, &props->active_speed, &props->active_width, ext); - WARN_ON_ONCE(active_speed & ~0xFF); - props->active_speed = (u8)active_speed; - props->port_cap_flags |= IB_PORT_CM_SUP; props->ip_gids = true; @@ -1307,7 +1303,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, props->port_cap_flags2 = rep->cap_mask2; err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper, - (u16 *)&props->active_speed, port); + &props->active_speed, port); if (err) goto out; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index c1751c9a0f62..8cf237deaef7 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -112,7 +112,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, } static inline void get_link_speed_and_width(struct ocrdma_dev *dev, - u8 *ib_speed, u8 *ib_width) + u16 *ib_speed, u8 *ib_width) { int status; u8 speed; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index b49bef94637e..b1384512ff48 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -163,7 +163,7 @@ int qedr_query_device(struct ib_device *ibdev, return 0; } -static inline void get_link_speed_and_width(int speed, u8 *ib_speed, +static inline void get_link_speed_and_width(int speed, u16 *ib_speed, u8 *ib_width) { switch (speed) { diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 432d6d0fd7f4..ee211423058a 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -619,11 +619,11 @@ struct qib_pportdata { /* LID mask control */ u8 lmc; u8 link_width_supported; - u8 link_speed_supported; + u16 link_speed_supported; u8 link_width_enabled; - u8 link_speed_enabled; + u16 link_speed_enabled; u8 link_width_active; - u8 link_speed_active; + u16 link_speed_active; u8 vls_supported; u8 vls_operational; /* Rx Polarity inversion (compensate for ~tx on partner) */ diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index 699b20849a7e..f3d642199194 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -176,7 +176,7 @@ struct pvrdma_port_attr { u8 subnet_timeout; u8 init_type_reply; u8 active_width; - u8 active_speed; + u16 active_speed; u8 phys_state; u8 reserved[2]; }; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c0b2fa7e9b95..a7e203bcb012 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -535,7 +535,8 @@ enum ib_port_speed { IB_SPEED_FDR10 = 8, IB_SPEED_FDR = 16, IB_SPEED_EDR = 32, - IB_SPEED_HDR = 64 + IB_SPEED_HDR = 64, + IB_SPEED_NDR = 128, }; /** @@ -669,7 +670,7 @@ struct ib_port_attr { u8 subnet_timeout; u8 init_type_reply; u8 active_width; - u8 active_speed; + u16 active_speed; u8 phys_state; u16 port_cap_flags2; }; @@ -4410,7 +4411,7 @@ void ib_drain_rq(struct ib_qp *qp); void ib_drain_sq(struct ib_qp *qp); void ib_drain_qp(struct ib_qp *qp); -int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width); +int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width); static inline u8 
*rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) { -- cgit v1.2.3 From 3e45410fe3c202ffb619f301beff0644f717e132 Mon Sep 17 00:00:00 2001 From: Keita Suzuki Date: Fri, 11 Sep 2020 12:51:59 +0000 Subject: RDMA/qedr: Fix resource leak in qedr_create_qp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When xa_insert() fails, the acquired resource in qedr_create_qp should also be freed. However, the current implementation does not handle the error. Fix this by adding a new goto label that calls qedr_free_qp_resources. Fixes: 1212767e23bb ("qedr: Add wrapping generic structure for qpidr and adjust idr routines.") Link: https://lore.kernel.org/r/20200911125159.4577-1-keitasuzuki.park@sslab.ics.keio.ac.jp Signed-off-by: Keita Suzuki Acked-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 52 ++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 25 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index ba8626847cf6..da42cc70e372 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2205,6 +2205,28 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev, return rc; } +static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp, + struct ib_udata *udata) +{ + struct qedr_ucontext *ctx = + rdma_udata_to_drv_context(udata, struct qedr_ucontext, + ibucontext); + int rc; + + if (qp->qp_type != IB_QPT_GSI) { + rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); + if (rc) + return rc; + } + + if (qp->create_type == QEDR_QP_CREATE_USER) + qedr_cleanup_user(dev, ctx, qp); + else + qedr_cleanup_kernel(dev, qp); + + return 0; +} + struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *attrs, struct ib_udata *udata) @@ -2260,19 +2282,21 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs); if (rc) - goto err; + goto out_free_qp; qp->ibqp.qp_num = qp->qp_id; if (rdma_protocol_iwarp(&dev->ibdev, 1)) { rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL); if (rc) - goto err; + goto out_free_qp_resources; } return &qp->ibqp; -err: +out_free_qp_resources: + qedr_free_qp_resources(dev, qp, udata); +out_free_qp: kfree(qp); return ERR_PTR(-EFAULT); @@ -2773,28 +2797,6 @@ err: return rc; } -static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp, - struct ib_udata *udata) -{ - struct qedr_ucontext *ctx = - rdma_udata_to_drv_context(udata, struct qedr_ucontext, - ibucontext); - int rc; - - if (qp->qp_type != IB_QPT_GSI) { - rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); - if (rc) - return rc; - } - - if (qp->create_type == QEDR_QP_CREATE_USER) - qedr_cleanup_user(dev, ctx, qp); - else - qedr_cleanup_kernel(dev, qp); - - return 0; -} - int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct qedr_qp *qp = get_qedr_qp(ibqp); -- cgit v1.2.3 From b925c555a15de8443ecb2e147b54b1bbe9b71fe3 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sat, 26 Sep 2020 13:24:48 +0300 Subject: RDMA/drivers: Remove udata check from special QP GSI QP can't be created from user space, hence the udata check is always false (udata == NULL). Remove that check and simplify the flow.
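The invariant behind the cleanup: ib_core only creates special (GSI) QPs from kernel consumers, so by the time a driver's create_qp sees IB_QPT_GSI the udata pointer is guaranteed to be NULL and the per-driver guard is dead code. A rough sketch of the pattern being deleted, as a hypothetical condensed example rather than any one driver's exact function:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* The now-redundant guard: for IB_QPT_GSI, udata is always NULL here. */
static int example_check_special_qp(struct ib_qp_init_attr *attrs,
                                    struct ib_udata *udata)
{
        if (udata && attrs->qp_type == IB_QPT_GSI)
                return -EINVAL; /* unreachable via ib_core */
        return 0;
}

The hunks below remove this kind of check from the hns, mlx4, mthca, qedr and pvrdma paths, plus a related kernel-side type check in mlx5.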
Link: https://lore.kernel.org/r/20200926102450.2966017-9-leon@kernel.org Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_qp.c | 57 +++++++++------------------- drivers/infiniband/hw/mlx4/qp.c | 3 -- drivers/infiniband/hw/mlx5/qp.c | 12 ------ drivers/infiniband/hw/mthca/mthca_provider.c | 4 -- drivers/infiniband/hw/qedr/verbs.c | 8 ---- drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 3 +- 6 files changed, 19 insertions(+), 68 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 7c3b54881d46..6e06f42be5eb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1015,53 +1015,32 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, int ret; switch (init_attr->qp_type) { - case IB_QPT_RC: { - hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); - if (!hr_qp) - return ERR_PTR(-ENOMEM); - - ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, - hr_qp); - if (ret) { - ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n", - hr_qp->qpn, ret); - kfree(hr_qp); - return ERR_PTR(ret); - } - + case IB_QPT_RC: + case IB_QPT_GSI: break; + default: + ibdev_err(ibdev, "not support QP type %d\n", + init_attr->qp_type); + return ERR_PTR(-EOPNOTSUPP); } - case IB_QPT_GSI: { - /* Userspace is not allowed to create special QPs: */ - if (udata) { - ibdev_err(ibdev, "not support usr space GSI\n"); - return ERR_PTR(-EINVAL); - } - hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); - if (!hr_qp) - return ERR_PTR(-ENOMEM); + hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); + if (!hr_qp) + return ERR_PTR(-ENOMEM); + if (init_attr->qp_type == IB_QPT_GSI) { hr_qp->port = init_attr->port_num - 1; hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; - - ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, - hr_qp); - if (ret) { - ibdev_err(ibdev, "Create GSI QP failed!\n"); - kfree(hr_qp); - return ERR_PTR(ret); - } - - break; - } - default:{ - ibdev_err(ibdev, "not support QP type %d\n", - init_attr->qp_type); - return ERR_PTR(-EOPNOTSUPP); - } } + ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); + if (ret) { + ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n", + init_attr->qp_type, ret); + ibdev_err(ibdev, "Create GSI QP failed!\n"); + kfree(hr_qp); + return ERR_PTR(ret); + } return &hr_qp->ibqp; } diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index f810e4df3fbf..5cb8e602294c 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1544,9 +1544,6 @@ static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp, { int sqpn; - /* Userspace is not allowed to create special QPs: */ - if (udata) - return -EINVAL; if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) { int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev, 1, 1, &sqpn, 0, diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index a251aa9da201..600e056798c0 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -2511,18 +2511,6 @@ static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd, return -EINVAL; } - switch (attr->qp_type) { - case IB_QPT_SMI: - case MLX5_IB_QPT_HW_GSI: - case MLX5_IB_QPT_REG_UMR: - case IB_QPT_GSI: - mlx5_ib_dbg(dev, "Kernel doesn't support QP type %d\n", - attr->qp_type); - return -EINVAL; - default: - break; - } - /* * We don't need to see 
this warning, it means that kernel code * missing ib_pd. Placed here to catch developer's mistakes. diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 82ee252fe5aa..5dbddf8faf99 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -535,10 +535,6 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, case IB_QPT_SMI: case IB_QPT_GSI: { - /* Don't allow userspace to create special QPs */ - if (udata) - return ERR_PTR(-EINVAL); - qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index da42cc70e372..23559f1fe96e 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1235,14 +1235,6 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, return -EINVAL; } - /* Unprivileged user space cannot create special QP */ - if (udata && attrs->qp_type == IB_QPT_GSI) { - DP_ERR(dev, - "create qp: userspace can't create special QPs of type=0x%x\n", - attrs->qp_type); - return -EINVAL; - } - /* verify consumer QPs are not trying to use GSI QP's CQ. * TGT QP isn't associated with RQ/SQ */ diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 8a385acf6f0c..428256c55065 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -232,8 +232,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, switch (init_attr->qp_type) { case IB_QPT_GSI: if (init_attr->port_num == 0 || - init_attr->port_num > pd->device->phys_port_cnt || - udata) { + init_attr->port_num > pd->device->phys_port_cnt) { dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n"); ret = -EINVAL; goto err_qp; -- cgit v1.2.3 From f45271acdf9eeb003296862b017806d41ec4ec55 Mon Sep 17 00:00:00 2001 From: Alok Prasad Date: Thu, 1 Oct 2020 10:09:59 +0000 Subject: RDMA/qedr: Endianness warnings cleanup Making a change to fix following sparse warnings reported by kbuild bot. CHECK drivers/infiniband/hw/qedr/verbs.c drivers/infiniband/hw/qedr/verbs.c:3872:59: warning: incorrect type in assignment (different base types) drivers/infiniband/hw/qedr/verbs.c:3872:59: expected restricted __le32 [usertype] sge_prod drivers/infiniband/hw/qedr/verbs.c:3872:59: got unsigned int [usertype] sge_prod drivers/infiniband/hw/qedr/verbs.c:3875:59: warning: incorrect type in assignment (different base types) drivers/infiniband/hw/qedr/verbs.c:3875:59: expected restricted __le32 [usertype] wqe_prod drivers/infiniband/hw/qedr/verbs.c:3875:59: got unsigned int [usertype] wqe_prod Link: https://lore.kernel.org/r/20201001100959.19940-1-palok@marvell.com Reported-by: kbuild test robot Fixes: acca72e2b031 ("RDMA/qedr: SRQ's bug fixes") Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: Alok Prasad Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 23559f1fe96e..b5603b3ed6a4 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3861,10 +3861,10 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, * in first 4 bytes and need to update WQE producer in * next 4 bytes. 
*/ - srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod; + srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod); /* Make sure sge producer is updated first */ dma_wmb(); - srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod; + srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod); wr = wr->next; } -- cgit v1.2.3 From 1c15b4f2a42ff6697767c22c8ff5f9bcc22fdbe5 Mon Sep 17 00:00:00 2001 From: Avihai Horon Date: Wed, 23 Sep 2020 19:50:13 +0300 Subject: RDMA/core: Modify enum ib_gid_type and enum rdma_network_type Separate IB_GID_TYPE_IB and IB_GID_TYPE_ROCE to two different values, so enum ib_gid_type will match the gid types of the new query GID table API which will be introduced in the following patches. This change in enum ib_gid_type requires to change also enum rdma_network_type by separating RDMA_NETWORK_IB and RDMA_NETWORK_ROCE_V1 values. Link: https://lore.kernel.org/r/20200923165015.2491894-3-leon@kernel.org Signed-off-by: Avihai Horon Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cache.c | 4 ++++ drivers/infiniband/core/cma.c | 4 ++++ drivers/infiniband/core/cma_configfs.c | 9 +++++---- drivers/infiniband/core/verbs.c | 2 +- drivers/infiniband/hw/mlx5/cq.c | 2 +- drivers/infiniband/hw/mlx5/main.c | 4 ++-- drivers/infiniband/hw/qedr/verbs.c | 4 +++- include/rdma/ib_verbs.h | 17 ++++++++++------- 8 files changed, 30 insertions(+), 16 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 6079f1f7e678..cf49ac0b0aa6 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -133,7 +133,11 @@ static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port) } static const char * const gid_type_str[] = { + /* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for + * user space compatibility reasons. 
+ */ [IB_GID_TYPE_IB] = "IB/RoCE v1", + [IB_GID_TYPE_ROCE] = "IB/RoCE v1", [IB_GID_TYPE_ROCE_UDP_ENCAP] = "RoCE v2", }; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 6419b798cd2e..09a844755882 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -304,6 +304,10 @@ int cma_set_default_gid_type(struct cma_device *cma_dev, if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; + if (default_gid_type == IB_GID_TYPE_IB && + rdma_protocol_roce_eth_encap(cma_dev->device, port)) + default_gid_type = IB_GID_TYPE_ROCE; + supported_gids = roce_gid_type_mask_support(cma_dev->device, port); if (!(supported_gids & 1 << default_gid_type)) diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index 3c1e2ca564fe..7ec4af2ed87a 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -123,16 +123,17 @@ static ssize_t default_roce_mode_store(struct config_item *item, { struct cma_device *cma_dev; struct cma_dev_port_group *group; - int gid_type = ib_cache_gid_parse_type_str(buf); + int gid_type; ssize_t ret; - if (gid_type < 0) - return -EINVAL; - ret = cma_configfs_params_get(item, &cma_dev, &group); if (ret) return ret; + gid_type = ib_cache_gid_parse_type_str(buf); + if (gid_type < 0) + return -EINVAL; + ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type); cma_configfs_params_put(cma_dev); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 53dd8284260a..740f8454b6b4 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -733,7 +733,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, (struct in6_addr *)dgid); return 0; } else if (net_type == RDMA_NETWORK_IPV6 || - net_type == RDMA_NETWORK_IB) { + net_type == RDMA_NETWORK_IB || RDMA_NETWORK_ROCE_V1) { *dgid = hdr->ibgrh.dgid; *sgid = hdr->ibgrh.sgid; return 0; diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 35e5bbb44d3d..fb62f1d04afa 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -255,7 +255,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, switch (roce_packet_type) { case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH: - wc->network_hdr_type = RDMA_NETWORK_IB; + wc->network_hdr_type = RDMA_NETWORK_ROCE_V1; break; case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6: wc->network_hdr_type = RDMA_NETWORK_IPV6; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index db602ee8f730..7082172b5b61 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -546,7 +546,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num, unsigned int index, const union ib_gid *gid, const struct ib_gid_attr *attr) { - enum ib_gid_type gid_type = IB_GID_TYPE_IB; + enum ib_gid_type gid_type = IB_GID_TYPE_ROCE; u16 vlan_id = 0xffff; u8 roce_version = 0; u8 roce_l3_type = 0; @@ -561,7 +561,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num, } switch (gid_type) { - case IB_GID_TYPE_IB: + case IB_GID_TYPE_ROCE: roce_version = MLX5_ROCE_VERSION_1; break; case IB_GID_TYPE_ROCE_UDP_ENCAP: diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index b5603b3ed6a4..019642ff24a7 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1157,7 +1157,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp, 
SET_FIELD(qp_params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); break; - case RDMA_NETWORK_IB: + case RDMA_NETWORK_ROCE_V1: memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0], sizeof(qp_params->sgid)); memcpy(&qp_params->dgid.bytes[0], @@ -1177,6 +1177,8 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); qp_params->roce_mode = ROCE_V2_IPV4; break; + default: + return -EINVAL; } for (i = 0; i < 4; i++) { diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 5ad997346f7f..3b61fba531d0 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -138,10 +138,9 @@ union ib_gid { extern union ib_gid zgid; enum ib_gid_type { - /* If link layer is Ethernet, this is RoCE V1 */ IB_GID_TYPE_IB = 0, - IB_GID_TYPE_ROCE = 0, - IB_GID_TYPE_ROCE_UDP_ENCAP = 1, + IB_GID_TYPE_ROCE = 1, + IB_GID_TYPE_ROCE_UDP_ENCAP = 2, IB_GID_TYPE_SIZE }; @@ -180,7 +179,7 @@ rdma_node_get_transport(unsigned int node_type); enum rdma_network_type { RDMA_NETWORK_IB, - RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB, + RDMA_NETWORK_ROCE_V1, RDMA_NETWORK_IPV4, RDMA_NETWORK_IPV6 }; @@ -190,9 +189,10 @@ static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type net if (network_type == RDMA_NETWORK_IPV4 || network_type == RDMA_NETWORK_IPV6) return IB_GID_TYPE_ROCE_UDP_ENCAP; - - /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */ - return IB_GID_TYPE_IB; + else if (network_type == RDMA_NETWORK_ROCE_V1) + return IB_GID_TYPE_ROCE; + else + return IB_GID_TYPE_IB; } static inline enum rdma_network_type @@ -201,6 +201,9 @@ rdma_gid_attr_network_type(const struct ib_gid_attr *attr) if (attr->gid_type == IB_GID_TYPE_IB) return RDMA_NETWORK_IB; + if (attr->gid_type == IB_GID_TYPE_ROCE) + return RDMA_NETWORK_ROCE_V1; + if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid)) return RDMA_NETWORK_IPV4; else -- cgit v1.2.3 From e0477b34d9d11c1a7b1f80bfdbcdc8952ce2adb7 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 8 Oct 2020 11:27:52 +0300 Subject: RDMA: Explicitly pass in the dma_device to ib_register_device The code in setup_dma_device has become rather convoluted; move all of this to the drivers. Drivers now pass in a DMA capable struct device which will be used to setup DMA, or drivers must fully configure the ibdev for DMA and pass in NULL. Other than setting the masks in rvt all drivers were doing this already anyhow. mthca, mlx4 and mlx5 were already setting up maximum DMA segment size for DMA based on their hardware limits in: __mthca_init_one() dma_set_max_seg_size (1G) __mlx4_init_one() dma_set_max_seg_size (1G) mlx5_pci_init() set_dma_caps() dma_set_max_seg_size (2G) Other non-software drivers (except usnic) were extended to UINT_MAX [1, 2] instead of 2G as was before. 
[1] https://lore.kernel.org/linux-rdma/20200924114940.GE9475@nvidia.com/ [2] https://lore.kernel.org/linux-rdma/20200924114940.GE9475@nvidia.com/ Link: https://lore.kernel.org/r/20201008082752.275846-1-leon@kernel.org Link: https://lore.kernel.org/r/6b2ed339933d066622d5715903870676d8cc523a.1602590106.git.mchehab+huawei@kernel.org Suggested-by: Christoph Hellwig Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Reviewed-by: Christoph Hellwig Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 75 ++++++++------------------ drivers/infiniband/hw/bnxt_re/main.c | 3 +- drivers/infiniband/hw/cxgb4/provider.c | 4 +- drivers/infiniband/hw/efa/efa_main.c | 4 +- drivers/infiniband/hw/hns/hns_roce_main.c | 3 +- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 3 +- drivers/infiniband/hw/mlx4/main.c | 3 +- drivers/infiniband/hw/mlx5/main.c | 2 +- drivers/infiniband/hw/mthca/mthca_provider.c | 2 +- drivers/infiniband/hw/ocrdma/ocrdma_main.c | 4 +- drivers/infiniband/hw/qedr/main.c | 3 +- drivers/infiniband/hw/usnic/usnic_ib_main.c | 3 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 4 +- drivers/infiniband/sw/rdmavt/vt.c | 6 ++- drivers/infiniband/sw/rxe/rxe_verbs.c | 9 ++-- drivers/infiniband/sw/siw/siw_main.c | 8 +-- include/rdma/ib_verbs.h | 3 +- 17 files changed, 59 insertions(+), 80 deletions(-) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index dab1f9d65880..a3b1fc84cdca 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1177,58 +1177,23 @@ out: return ret; } -static void setup_dma_device(struct ib_device *device) +static void setup_dma_device(struct ib_device *device, + struct device *dma_device) { - struct device *parent = device->dev.parent; - - WARN_ON_ONCE(device->dma_device); - -#ifdef CONFIG_DMA_OPS - if (device->dev.dma_ops) { - /* - * The caller provided custom DMA operations. Copy the - * DMA-related fields that are used by e.g. dma_alloc_coherent() - * into device->dev. - */ - device->dma_device = &device->dev; - if (!device->dev.dma_mask) { - if (parent) - device->dev.dma_mask = parent->dma_mask; - else - WARN_ON_ONCE(true); - } - if (!device->dev.coherent_dma_mask) { - if (parent) - device->dev.coherent_dma_mask = - parent->coherent_dma_mask; - else - WARN_ON_ONCE(true); - } - } else -#endif /* CONFIG_DMA_OPS */ - { - /* - * The caller did not provide custom DMA operations. Use the - * DMA mapping operations of the parent device. - */ - WARN_ON_ONCE(!parent); - device->dma_device = parent; - } - - if (!device->dev.dma_parms) { - if (parent) { - /* - * The caller did not provide DMA parameters, so - * 'parent' probably represents a PCI device. The PCI - * core sets the maximum segment size to 64 - * KB. Increase this parameter to 2 GB. - */ - device->dev.dma_parms = parent->dma_parms; - dma_set_max_seg_size(device->dma_device, SZ_2G); - } else { - WARN_ON_ONCE(true); - } + /* + * If the caller does not provide a DMA capable device then the IB + * device will be used. In this case the caller should fully setup the + * ibdev for DMA. This usually means using dma_virt_ops. 
+ */ +#ifdef CONFIG_DMA_VIRT_OPS + if (!dma_device) { + device->dev.dma_ops = &dma_virt_ops; + dma_device = &device->dev; } +#endif + WARN_ON(!dma_device); + device->dma_device = dma_device; + WARN_ON(!device->dma_device->dma_parms); } /* @@ -1241,7 +1206,6 @@ static int setup_device(struct ib_device *device) struct ib_udata uhw = {.outlen = 0, .inlen = 0}; int ret; - setup_dma_device(device); ib_device_check_mandatory(device); ret = setup_port_data(device); @@ -1354,7 +1318,10 @@ static void prevent_dealloc_device(struct ib_device *ib_dev) * ib_register_device - Register an IB device with IB core * @device: Device to register * @name: unique string device name. This may include a '%' which will - * cause a unique index to be added to the passed device name. + * cause a unique index to be added to the passed device name. + * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB + * device will be used. In this case the caller should fully + * setup the ibdev for DMA. This usually means using dma_virt_ops. * * Low-level drivers use ib_register_device() to register their * devices with the IB core. All registered clients will receive a @@ -1365,7 +1332,8 @@ static void prevent_dealloc_device(struct ib_device *ib_dev) * asynchronously then the device pointer may become freed as soon as this * function returns. */ -int ib_register_device(struct ib_device *device, const char *name) +int ib_register_device(struct ib_device *device, const char *name, + struct device *dma_device) { int ret; @@ -1373,6 +1341,7 @@ int ib_register_device(struct ib_device *device, const char *name) if (ret) return ret; + setup_dma_device(device, dma_device); ret = setup_device(device); if (ret) return ret; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 53aee5a42ab8..04621ba8fa76 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -736,7 +736,8 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) if (ret) return ret; - return ib_register_device(ibdev, "bnxt_re%d"); + dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX); + return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev); } static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev) diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 4b76f2f3f4e4..8138c57a1e43 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -570,7 +570,9 @@ void c4iw_register_device(struct work_struct *work) ret = set_netdevs(&dev->ibdev, &dev->rdev); if (ret) goto err_dealloc_ctx; - ret = ib_register_device(&dev->ibdev, "cxgb4_%d"); + dma_set_max_seg_size(&dev->rdev.lldi.pdev->dev, UINT_MAX); + ret = ib_register_device(&dev->ibdev, "cxgb4_%d", + &dev->rdev.lldi.pdev->dev); if (ret) goto err_dealloc_ctx; return; diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c index 92d701146320..6faed3a81e08 100644 --- a/drivers/infiniband/hw/efa/efa_main.c +++ b/drivers/infiniband/hw/efa/efa_main.c @@ -331,7 +331,7 @@ static int efa_ib_device_add(struct efa_dev *dev) ib_set_device_ops(&dev->ibdev, &efa_dev_ops); - err = ib_register_device(&dev->ibdev, "efa_%d"); + err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev); if (err) goto err_release_doorbell_bar; @@ -418,7 +418,7 @@ static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev) err); return err; } - + dma_set_max_seg_size(&pdev->dev, UINT_MAX); return 0; } diff --git 
a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 467c82900019..afeffafc59f9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -549,7 +549,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) if (ret) return ret; } - ret = ib_register_device(ib_dev, "hns_%d"); + dma_set_max_seg_size(dev, UINT_MAX); + ret = ib_register_device(ib_dev, "hns_%d", dev); if (ret) { dev_err(dev, "ib_register_device failed!\n"); return ret; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 747b4de6faca..581ecbadf586 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -2761,7 +2761,8 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev) if (ret) goto error; - ret = ib_register_device(&iwibdev->ibdev, "i40iw%d"); + dma_set_max_seg_size(&iwdev->hw.pcidev->dev, UINT_MAX); + ret = ib_register_device(&iwibdev->ibdev, "i40iw%d", &iwdev->hw.pcidev->dev); if (ret) goto error; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 753c70402498..cd0fba6b0964 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2841,7 +2841,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) goto err_steer_free_bitmap; rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group); - if (ib_register_device(&ibdev->ib_dev, "mlx4_%d")) + if (ib_register_device(&ibdev->ib_dev, "mlx4_%d", + &dev->persist->pdev->dev)) goto err_diag_counters; if (mlx4_ib_mad_init(ibdev)) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 7082172b5b61..89e04ca62ae0 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4380,7 +4380,7 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) name = "mlx5_%d"; else name = "mlx5_bond_%d"; - return ib_register_device(&dev->ib_dev, name); + return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev); } static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev) diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 31b558ff8218..c4d9cdc4ee97 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1206,7 +1206,7 @@ int mthca_register_device(struct mthca_dev *dev) mutex_init(&dev->cap_mask_mutex); rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group); - ret = ib_register_device(&dev->ib_dev, "mthca%d"); + ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev); if (ret) return ret; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index d8c47d24d6d6..9b96661a7143 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -255,7 +255,9 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) if (ret) return ret; - return ib_register_device(&dev->ibdev, "ocrdma%d"); + dma_set_max_seg_size(&dev->nic_info.pdev->dev, UINT_MAX); + return ib_register_device(&dev->ibdev, "ocrdma%d", + &dev->nic_info.pdev->dev); } static int ocrdma_alloc_resources(struct ocrdma_dev *dev) diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 7c0aac3e635b..967641662b24 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -293,7 +293,8 @@ static int 
qedr_register_device(struct qedr_dev *dev) if (rc) return rc; - return ib_register_device(&dev->ibdev, "qedr%d"); + dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX); + return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev); } /* This function allocates fast-path status block memory */ diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 462ed71abf53..aa2e65fc5cd6 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -425,7 +425,8 @@ static void *usnic_ib_device_add(struct pci_dev *dev) if (ret) goto err_fwd_dealloc; - if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d")) + dma_set_max_seg_size(&dev->dev, SZ_2G); + if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", &dev->dev)) goto err_fwd_dealloc; usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 780fd2dfc07e..fa2a3fa0c3e4 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -270,7 +270,7 @@ static int pvrdma_register_device(struct pvrdma_dev *dev) spin_lock_init(&dev->srq_tbl_lock); rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group); - ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d"); + ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", &dev->pdev->dev); if (ret) goto err_srq_free; @@ -854,7 +854,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, goto err_free_resource; } } - + dma_set_max_seg_size(&pdev->dev, UINT_MAX); pci_set_master(pdev); /* Map register space */ diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index 2d534c450f3c..52218684ad4a 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -579,7 +579,9 @@ int rvt_register_device(struct rvt_dev_info *rdi) spin_lock_init(&rdi->n_cqs_lock); /* DMA Operations */ - rdi->ibdev.dev.dma_ops = rdi->ibdev.dev.dma_ops ? 
: &dma_virt_ops; + rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms; + dma_set_coherent_mask(&rdi->ibdev.dev, + rdi->ibdev.dev.parent->coherent_dma_mask); /* Protection Domain */ spin_lock_init(&rdi->n_pds_lock); @@ -627,7 +629,7 @@ int rvt_register_device(struct rvt_dev_info *rdi) rdi->ibdev.num_comp_vectors = 1; /* We are now good to announce we exist */ - ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev)); + ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev), NULL); if (ret) { rvt_pr_err(rdi, "Failed to register driver with ib core.\n"); goto bail_wss; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index ba8faa34969b..1fc022362fbe 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -1128,12 +1128,9 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name) dev->local_dma_lkey = 0; addrconf_addr_eui48((unsigned char *)&dev->node_guid, rxe->ndev->dev_addr); - dev->dev.dma_ops = &dma_virt_ops; dev->dev.dma_parms = &rxe->dma_parms; - rxe->dma_parms = (struct device_dma_parameters) - { .max_segment_size = SZ_2G }; - dma_coerce_mask_and_coherent(&dev->dev, - dma_get_required_mask(&dev->dev)); + dma_set_max_seg_size(&dev->dev, UINT_MAX); + dma_set_coherent_mask(&dev->dev, dma_get_required_mask(&dev->dev)); dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) @@ -1182,7 +1179,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name) rxe->tfm = tfm; rdma_set_device_sysfs_group(dev, &rxe_attr_group); - err = ib_register_device(dev, ibdev_name); + err = ib_register_device(dev, ibdev_name, NULL); if (err) pr_warn("%s failed with error %d\n", __func__, err); diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index d862bec84376..ca8bc7296867 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -69,7 +69,7 @@ static int siw_device_register(struct siw_device *sdev, const char *name) sdev->vendor_part_id = dev_id++; - rv = ib_register_device(base_dev, name); + rv = ib_register_device(base_dev, name, NULL); if (rv) { pr_warn("siw: device registration error %d\n", rv); return rv; @@ -382,10 +382,10 @@ static struct siw_device *siw_device_create(struct net_device *netdev) */ base_dev->phys_port_cnt = 1; base_dev->dev.parent = parent; - base_dev->dev.dma_ops = &dma_virt_ops; base_dev->dev.dma_parms = &sdev->dma_parms; - sdev->dma_parms = (struct device_dma_parameters) - { .max_segment_size = SZ_2G }; + dma_set_max_seg_size(&base_dev->dev, UINT_MAX); + dma_set_coherent_mask(&base_dev->dev, + dma_get_required_mask(&base_dev->dev)); base_dev->num_comp_vectors = num_possible_cpus(); xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ce935d70fdc8..9bf6c319a670 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2782,7 +2782,8 @@ void ib_dealloc_device(struct ib_device *device); void ib_get_device_fw_str(struct ib_device *device, char *str); -int ib_register_device(struct ib_device *device, const char *name); +int ib_register_device(struct ib_device *device, const char *name, + struct device *dma_device); void ib_unregister_device(struct ib_device *device); void ib_unregister_driver(enum rdma_driver_id driver_id); void ib_unregister_device_and_put(struct ib_device *device); -- cgit v1.2.3 From a2267f8a52eea9096861affd463f691be0f0e8c9 Mon Sep 
17 00:00:00 2001 From: Alok Prasad Date: Wed, 21 Oct 2020 11:50:08 +0000 Subject: RDMA/qedr: Fix memory leak in iWARP CM Fixes memory leak in iWARP CM Fixes: e411e0587e0d ("RDMA/qedr: Add iWARP connection management functions") Link: https://lore.kernel.org/r/20201021115008.28138-1-palok@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Igor Russkikh Signed-off-by: Alok Prasad Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/qedr_iw_cm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/infiniband/hw/qedr') diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index c7169d2c69e5..c4bc58736e48 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -727,6 +727,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id) listener->qed_handle); cm_id->rem_ref(cm_id); + kfree(listener); return rc; } -- cgit v1.2.3
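Taken together with qedr_iw_create_listen(), the fix restores a simple ownership rule: the listener object allocated at listen-create time must be freed when the listen is destroyed, after the hardware listen is torn down and the cm_id reference is dropped. A condensed illustration of that teardown order, with simplified names rather than the exact upstream function:

#include <linux/slab.h>
#include <rdma/iw_cm.h>

static int example_destroy_listen(struct iw_cm_id *cm_id)
{
        void *listener = cm_id->provider_data; /* allocated in create_listen */

        /* 1. driver-specific call tearing down the qed hardware listen */
        /* 2. drop the iw_cm core's reference on the cm_id */
        cm_id->rem_ref(cm_id);
        /* 3. free the listener object: the kfree() this patch adds */
        kfree(listener);
        return 0;
}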