Diffstat (limited to 'drivers/infiniband/hw')
21 files changed, 120 insertions, 109 deletions
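Every hunk below applies the same conversion: verbs handlers stop testing ib_pd->uobject (or pd->ibpd.uobject) to decide whether a call came from userspace and instead test the struct ib_udata *udata argument, which is non-NULL only for user calls; helpers that never received udata (mthca, ocrdma, qedr) gain it as a new parameter so they can make the same check. A minimal sketch of the target idiom, with hypothetical foo_* names that are not part of this patch:

/*
 * Sketch only -- the foo_* names are invented.  A verbs entry point
 * that receives struct ib_udata * can tell user from kernel callers
 * by testing udata itself, instead of reaching through ib_pd->uobject.
 */
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

struct foo_create_req {			/* hypothetical user ABI struct */
	__aligned_u64 buf_addr;
};

static int foo_create_queue(struct ib_pd *pd, struct ib_udata *udata)
{
	struct foo_create_req req;

	if (udata) {
		/* Userspace caller: its request arrives through udata. */
		if (ib_copy_from_udata(&req, udata, sizeof(req)))
			return -EFAULT;
		/* ... pin and map the user buffer at req.buf_addr ... */
	} else {
		/* Kernel consumer: allocate the queue in the kernel. */
		/* ... */
	}
	return 0;
}

Testing udata rather than the uobject pointer keeps the user/kernel decision tied to the call itself rather than to how the PD was created.
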
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 54fdd4cf5288..611bacd00b80 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -698,7 +698,7 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd, ah->qplib_ah.flow_label = grh->flow_label; ah->qplib_ah.hop_limit = grh->hop_limit; ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr); - if (ib_pd->uobject && + if (udata && !rdma_is_multicast_addr((struct in6_addr *) grh->dgid.raw) && !rdma_link_local_addr((struct in6_addr *) @@ -729,7 +729,7 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd, } /* Write AVID to shared page. */ - if (ib_pd->uobject) { + if (udata) { struct ib_ucontext *ib_uctx = ib_pd->uobject->context; struct bnxt_re_ucontext *uctx; unsigned long flag; diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 7a1dc83ba588..b34b1a1bd94b 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -836,7 +836,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd, * Kernel users need more wq space for fastreg WRs which can take * 2 WR fragments. */ - ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL; + ucontext = udata ? to_iwch_ucontext(pd->uobject->context) : NULL; if (!ucontext && wqsize < (rqsize + (2 * sqsize))) wqsize = roundup_pow_of_two(rqsize + roundup_pow_of_two(attrs->cap.max_send_wr * 2)); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 5a8030bd4208..981ff5cfb5d1 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2163,7 +2163,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, if (sqsize < 8) sqsize = 8; - ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL; + ucontext = udata ? to_c4iw_ucontext(pd->uobject->context) : NULL; qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); if (!qhp) @@ -2712,7 +2712,7 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs, rqsize = attrs->attr.max_wr + 1; rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16)); - ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL; + ucontext = udata ? 
to_c4iw_ucontext(pd->uobject->context) : NULL; srq = kzalloc(sizeof(*srq), GFP_KERNEL); if (!srq) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index d17a7ce3c93a..b74c742b000c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -3926,7 +3926,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp) struct hns_roce_qp_work *qp_work; struct hns_roce_v1_priv *priv; struct hns_roce_cq *send_cq, *recv_cq; - int is_user = !!ibqp->pd->uobject; + bool is_user = ibqp->uobject; int is_timeout = 0; int ret; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 22236ebf033d..3a669451cf86 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4133,7 +4133,7 @@ out: static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, - int is_user) + bool is_user) { struct hns_roce_cq *send_cq, *recv_cq; struct device *dev = hr_dev->dev; @@ -4210,7 +4210,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp) struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); int ret; - ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject); + ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, ibqp->uobject); if (ret) { dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret); return ret; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 52d2b299b3be..54031c5b53fa 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -280,7 +280,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, EXPORT_SYMBOL_GPL(hns_roce_release_range_qp); static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, - struct ib_qp_cap *cap, int is_user, int has_rq, + struct ib_qp_cap *cap, bool is_user, int has_rq, struct hns_roce_qp *hr_qp) { struct device *dev = hr_dev->dev; @@ -560,7 +560,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, else hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR); - ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject, + ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata, hns_roce_qp_has_rq(init_attr), hr_qp); if (ret) { dev_err(dev, "hns_roce_set_rq_size failed\n"); @@ -598,7 +598,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, init_attr->cap.max_recv_sge]; } - if (ib_pd->uobject) { + if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { dev_err(dev, "ib_copy_from_udata error for create qp\n"); ret = -EFAULT; @@ -783,7 +783,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, else hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn); - if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) && + if (udata && (udata->outlen >= sizeof(resp)) && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) { /* indicate kernel supports rq record db */ @@ -810,7 +810,7 @@ err_qpn: hns_roce_release_range_qp(hr_dev, qpn, 1); err_wrid: - if (ib_pd->uobject) { + if (udata) { if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && (udata->outlen >= sizeof(resp)) && hns_roce_qp_has_rq(init_attr)) @@ -823,7 +823,7 @@ err_wrid: } err_sq_dbmap: - if (ib_pd->uobject) + if (udata) if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && (udata->inlen >= sizeof(ucmd)) && (udata->outlen >= sizeof(resp)) && @@ -836,13 +836,13 @@ err_mtt: hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt); 
err_buf: - if (ib_pd->uobject) + if (hr_qp->umem) ib_umem_release(hr_qp->umem); else hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); err_db: - if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) && + if (!udata && hns_roce_qp_has_rq(init_attr) && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) hns_roce_free_db(hr_dev, &hr_qp->rdb); @@ -888,7 +888,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, } case IB_QPT_GSI: { /* Userspace is not allowed to create special QPs: */ - if (pd->uobject) { + if (udata) { dev_err(dev, "not support usr space GSI\n"); return ERR_PTR(-EINVAL); } diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 6377e734e28e..960b1946c365 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -379,7 +379,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, srq->event = hns_roce_ib_srq_event; srq->ibsrq.ext.xrc.srq_num = srq->srqn; - if (pd->uobject) { + if (udata) { if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { ret = -EFAULT; goto err_wrid; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 475f32cf447e..0b675b0742c2 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -673,28 +673,26 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, goto error; } iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; - if (ibpd->uobject && ibpd->uobject->context) { - iwqp->user_mode = 1; - ucontext = to_ucontext(ibpd->uobject->context); - - if (req.user_wqe_buffers) { - struct i40iw_pbl *iwpbl; - - spin_lock_irqsave( - &ucontext->qp_reg_mem_list_lock, flags); - iwpbl = i40iw_get_pbl( - (unsigned long)req.user_wqe_buffers, - &ucontext->qp_reg_mem_list); - spin_unlock_irqrestore( - &ucontext->qp_reg_mem_list_lock, flags); - - if (!iwpbl) { - err_code = -ENODATA; - i40iw_pr_err("no pbl info\n"); - goto error; - } - memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl)); + iwqp->user_mode = 1; + ucontext = to_ucontext(ibpd->uobject->context); + + if (req.user_wqe_buffers) { + struct i40iw_pbl *iwpbl; + + spin_lock_irqsave( + &ucontext->qp_reg_mem_list_lock, flags); + iwpbl = i40iw_get_pbl( + (unsigned long)req.user_wqe_buffers, + &ucontext->qp_reg_mem_list); + spin_unlock_irqrestore( + &ucontext->qp_reg_mem_list_lock, flags); + + if (!iwpbl) { + err_code = -ENODATA; + i40iw_pr_err("no pbl info\n"); + goto error; } + memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl)); } err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info); } else { @@ -768,7 +766,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, iwdev->qp_table[qp_num] = iwqp; i40iw_add_pdusecount(iwqp->iwpd); i40iw_add_devusecount(iwdev); - if (ibpd->uobject && udata) { + if (udata) { memset(&uresp, 0, sizeof(uresp)); uresp.actual_sq_size = sq_size; uresp.actual_rq_size = rq_size; @@ -2092,7 +2090,8 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr) ib_umem_release(iwmr->region); if (iwmr->type != IW_MEMREG_TYPE_MEM) { - if (ibpd->uobject) { + /* region is released. only test for userness. 
*/ + if (iwmr->region) { struct i40iw_ucontext *ucontext; ucontext = to_ucontext(ibpd->uobject->context); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 651d8594dc3f..24ee30f1cb45 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -323,7 +323,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) } static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, - int is_user, int has_rq, struct mlx4_ib_qp *qp, + bool is_user, int has_rq, struct mlx4_ib_qp *qp, u32 inl_recv_sz) { /* Sanity check RQ size before proceeding */ @@ -942,7 +942,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); - if (pd->uobject) { + if (udata) { union { struct mlx4_ib_create_qp qp; struct mlx4_ib_create_wq wq; @@ -991,7 +991,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, qp->flags |= MLX4_IB_QP_SCATTER_FCS; } - err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, + err = set_rq_size(dev, &init_attr->cap, udata, qp_has_rq(init_attr), qp, qp->inl_recv_sz); if (err) goto err; @@ -1043,7 +1043,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, } qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; } else { - err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, + err = set_rq_size(dev, &init_attr->cap, udata, qp_has_rq(init_attr), qp, 0); if (err) goto err; @@ -1189,7 +1189,7 @@ err_proxy: if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) free_proxy_bufs(pd->device, qp); err_wrid: - if (pd->uobject) { + if (udata) { if (qp_has_rq(init_attr)) mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); } else { @@ -1201,13 +1201,13 @@ err_mtt: mlx4_mtt_cleanup(dev->dev, &qp->mtt); err_buf: - if (pd->uobject) + if (qp->umem) ib_umem_release(qp->umem); else mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); err_db: - if (!pd->uobject && qp_has_rq(init_attr)) + if (!udata && qp_has_rq(init_attr)) mlx4_db_free(dev->dev, &qp->db); err: @@ -1332,7 +1332,7 @@ static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) } static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, - enum mlx4_ib_source_type src, int is_user) + enum mlx4_ib_source_type src, bool is_user) { struct mlx4_ib_cq *send_cq, *recv_cq; unsigned long flags; @@ -1612,7 +1612,7 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp) struct mlx4_ib_pd *pd; pd = get_pd(mqp); - destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, !!pd->ibpd.uobject); + destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, qp->uobject); } if (is_sqp(dev, mqp)) @@ -4044,7 +4044,7 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, struct mlx4_ib_create_wq ucmd; int err, required_cmd_sz; - if (!(udata && pd->uobject)) + if (!udata) return ERR_PTR(-EINVAL); required_cmd_sz = offsetof(typeof(ucmd), comp_mask) + diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 3731b31c3653..4456f1b8921d 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -105,7 +105,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, buf_size = srq->msrq.max * desc_size; - if (pd->uobject) { + if (udata) { struct mlx4_ib_create_srq ucmd; if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { @@ -191,7 +191,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, srq->msrq.event = mlx4_ib_srq_event; srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; - if (pd->uobject) + if (udata) if 
(ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) { err = -EFAULT; goto err_wrid; @@ -202,7 +202,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, return &srq->ibsrq; err_wrid: - if (pd->uobject) + if (udata) mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); else kvfree(srq->wrid); @@ -211,13 +211,13 @@ err_mtt: mlx4_mtt_cleanup(dev->dev, &srq->mtt); err_buf: - if (pd->uobject) + if (srq->umem) ib_umem_release(srq->umem); else mlx4_buf_free(dev->dev, buf_size, &srq->buf); err_db: - if (!pd->uobject) + if (!udata) mlx4_db_free(dev->dev, &srq->db); err_srq: diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 2ceb8f68f605..b26ddb147643 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1904,7 +1904,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING; } - if (pd && pd->uobject) { + if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { mlx5_ib_dbg(dev, "copy failed\n"); return -EFAULT; @@ -1986,14 +1986,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, qp->has_rq = qp_has_rq(init_attr); err = set_rq_size(dev, &init_attr->cap, qp->has_rq, - qp, (pd && pd->uobject) ? &ucmd : NULL); + qp, udata ? &ucmd : NULL); if (err) { mlx5_ib_dbg(dev, "err %d\n", err); return err; } if (pd) { - if (pd->uobject) { + if (udata) { __u32 max_wqes = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count); @@ -2064,7 +2064,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (qp->scat_cqe && is_connected(init_attr->qp_type)) { configure_responder_scat_cqe(init_attr, qpc); configure_requester_scat_cqe(dev, init_attr, - (pd && pd->uobject) ? &ucmd : NULL, + udata ? &ucmd : NULL, qpc); } @@ -2504,7 +2504,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, dev = to_mdev(pd->device); if (init_attr->qp_type == IB_QPT_RAW_PACKET) { - if (!pd->uobject) { + if (!udata) { mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n"); return ERR_PTR(-EINVAL); } else if (!to_mucontext(pd->uobject->context)->cqe_version) { diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 0413b10dea71..4e8d18009f58 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -260,14 +260,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, } in.type = init_attr->srq_type; - if (pd->uobject) + if (udata) err = create_srq_user(pd, srq, &in, udata, buf_size); else err = create_srq_kernel(dev, srq, &in, buf_size); if (err) { mlx5_ib_warn(dev, "create srq %s failed, err %d\n", - pd->uobject ? "user" : "kernel", err); + udata ? 
"user" : "kernel", err); goto err_srq; } @@ -312,7 +312,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, srq->msrq.event = mlx5_ib_srq_event; srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; - if (pd->uobject) + if (udata) if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) { mlx5_ib_dbg(dev, "copy to user failed\n"); err = -EFAULT; @@ -327,7 +327,7 @@ err_core: mlx5_cmd_destroy_srq(dev, &srq->msrq); err_usr_kern_srq: - if (pd->uobject) + if (udata) destroy_srq_user(pd, srq); else destroy_srq_kernel(dev, srq); diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 220a3e4717a3..bfd4eebc1182 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -510,7 +510,8 @@ int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe); int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, - struct ib_srq_attr *attr, struct mthca_srq *srq); + struct ib_srq_attr *attr, struct mthca_srq *srq, + struct ib_udata *udata); void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq); int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); @@ -547,7 +548,8 @@ int mthca_alloc_qp(struct mthca_dev *dev, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, - struct mthca_qp *qp); + struct mthca_qp *qp, + struct ib_udata *udata); int mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, @@ -556,7 +558,8 @@ int mthca_alloc_sqp(struct mthca_dev *dev, struct ib_qp_cap *cap, int qpn, int port, - struct mthca_sqp *sqp); + struct mthca_sqp *sqp, + struct ib_udata *udata); void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp); int mthca_create_ah(struct mthca_dev *dev, struct mthca_pd *pd, diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index c697ec54ea5f..443521cf8107 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -455,7 +455,7 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd, if (!srq) return ERR_PTR(-ENOMEM); - if (pd->uobject) { + if (udata) { context = to_mucontext(pd->uobject->context); if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { @@ -475,9 +475,9 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd, } err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), - &init_attr->attr, srq); + &init_attr->attr, srq, udata); - if (err && pd->uobject) + if (err && udata) mthca_unmap_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.db_index); @@ -537,7 +537,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, if (!qp) return ERR_PTR(-ENOMEM); - if (pd->uobject) { + if (udata) { context = to_mucontext(pd->uobject->context); if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { @@ -574,9 +574,9 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq), init_attr->qp_type, init_attr->sq_sig_type, - &init_attr->cap, qp); + &init_attr->cap, qp, udata); - if (err && pd->uobject) { + if (err && udata) { context = to_mucontext(pd->uobject->context); mthca_unmap_user_db(to_mdev(pd->device), @@ -596,7 +596,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, case IB_QPT_GSI: { /* Don't allow userspace to create special QPs */ - if (pd->uobject) + if (udata) return 
ERR_PTR(-EINVAL); qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); @@ -610,7 +610,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, to_mcq(init_attr->recv_cq), init_attr->sq_sig_type, &init_attr->cap, qp->ibqp.qp_num, init_attr->port_num, - to_msqp(qp)); + to_msqp(qp), udata); break; } default: diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 9d178ee3c96a..4e5b5cc17f1d 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -981,7 +981,8 @@ static void mthca_adjust_qp_caps(struct mthca_dev *dev, */ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_pd *pd, - struct mthca_qp *qp) + struct mthca_qp *qp, + struct ib_udata *udata) { int size; int err = -ENOMEM; @@ -1048,7 +1049,7 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, * allocate anything. All we need is to calculate the WQE * sizes and the send_wqe_offset, so we're done now. */ - if (pd->ibpd.uobject) + if (udata) return 0; size = PAGE_ALIGN(qp->send_wqe_offset + @@ -1155,7 +1156,8 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, - struct mthca_qp *qp) + struct mthca_qp *qp, + struct ib_udata *udata) { int ret; int i; @@ -1178,7 +1180,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, if (ret) return ret; - ret = mthca_alloc_wqe_buf(dev, pd, qp); + ret = mthca_alloc_wqe_buf(dev, pd, qp, udata); if (ret) { mthca_unmap_memfree(dev, qp); return ret; @@ -1191,7 +1193,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, * will be allocated and buffers will be initialized in * userspace. */ - if (pd->ibpd.uobject) + if (udata) return 0; ret = mthca_alloc_memfree(dev, qp); @@ -1285,7 +1287,8 @@ int mthca_alloc_qp(struct mthca_dev *dev, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, - struct mthca_qp *qp) + struct mthca_qp *qp, + struct ib_udata *udata) { int err; @@ -1308,7 +1311,7 @@ int mthca_alloc_qp(struct mthca_dev *dev, qp->port = 0; err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, - send_policy, qp); + send_policy, qp, udata); if (err) { mthca_free(&dev->qp_table.alloc, qp->qpn); return err; @@ -1360,7 +1363,8 @@ int mthca_alloc_sqp(struct mthca_dev *dev, struct ib_qp_cap *cap, int qpn, int port, - struct mthca_sqp *sqp) + struct mthca_sqp *sqp, + struct ib_udata *udata) { u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; int err; @@ -1391,7 +1395,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev, sqp->qp.transport = MLX; err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, - send_policy, &sqp->qp); + send_policy, &sqp->qp, udata); if (err) goto err_out_free; diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index 9a3fc6fb0d7e..b8333c79e3fa 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c @@ -95,7 +95,8 @@ static inline int *wqe_to_link(void *wqe) static void mthca_tavor_init_srq_context(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, - struct mthca_tavor_srq_context *context) + struct mthca_tavor_srq_context *context, + bool is_user) { memset(context, 0, sizeof *context); @@ -103,7 +104,7 @@ static void mthca_tavor_init_srq_context(struct mthca_dev *dev, context->state_pd = cpu_to_be32(pd->pd_num); context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); - if (pd->ibpd.uobject) + if (is_user) context->uar = 
cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); else @@ -113,7 +114,8 @@ static void mthca_tavor_init_srq_context(struct mthca_dev *dev, static void mthca_arbel_init_srq_context(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, - struct mthca_arbel_srq_context *context) + struct mthca_arbel_srq_context *context, + bool is_user) { int logsize, max; @@ -129,7 +131,7 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev, context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); context->db_index = cpu_to_be32(srq->db_index); context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29); - if (pd->ibpd.uobject) + if (is_user) context->logstride_usrpage |= cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); else @@ -145,14 +147,14 @@ static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) } static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, - struct mthca_srq *srq) + struct mthca_srq *srq, struct ib_udata *udata) { struct mthca_data_seg *scatter; void *wqe; int err; int i; - if (pd->ibpd.uobject) + if (udata) return 0; srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL); @@ -197,7 +199,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, } int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, - struct ib_srq_attr *attr, struct mthca_srq *srq) + struct ib_srq_attr *attr, struct mthca_srq *srq, + struct ib_udata *udata) { struct mthca_mailbox *mailbox; int ds; @@ -235,7 +238,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, if (err) goto err_out; - if (!pd->ibpd.uobject) { + if (!udata) { srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ, srq->srqn, &srq->db); if (srq->db_index < 0) { @@ -251,7 +254,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, goto err_out_db; } - err = mthca_alloc_srq_buf(dev, pd, srq); + err = mthca_alloc_srq_buf(dev, pd, srq, udata); if (err) goto err_out_mailbox; @@ -261,9 +264,9 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, mutex_init(&srq->mutex); if (mthca_is_memfree(dev)) - mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf); + mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata); else - mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf); + mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata); err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn); @@ -297,14 +300,14 @@ err_out_free_srq: mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); err_out_free_buf: - if (!pd->ibpd.uobject) + if (!udata) mthca_free_srq_buf(dev, srq); err_out_mailbox: mthca_free_mailbox(dev, mailbox); err_out_db: - if (!pd->ibpd.uobject && mthca_is_memfree(dev)) + if (!udata && mthca_is_memfree(dev)) mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); err_out_icm: diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index f9d510431900..4e7f08ee1907 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1066,7 +1066,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, } if (req.user_qp_buffer) nesqp->nesuqp_addr = req.user_qp_buffer; - if ((ibpd->uobject) && (ibpd->uobject->context)) { + if (udata && (ibpd->uobject->context)) { nesqp->user_mode = 1; nes_ucontext = to_nesucontext(ibpd->uobject->context); if (virt_wqs) { @@ -1257,7 +1257,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, nes_put_cqp_request(nesdev, cqp_request); - if (ibpd->uobject) { + if (udata) { 
uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index; uresp.mmap_rq_db_index = 0; uresp.actual_sq_size = sq_size; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 1a5c048e1d67..c46bed0c5513 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1157,7 +1157,8 @@ static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) } static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, - struct ib_qp_init_attr *attrs) + struct ib_qp_init_attr *attrs, + struct ib_udata *udata) { if ((attrs->qp_type != IB_QPT_GSI) && (attrs->qp_type != IB_QPT_RC) && @@ -1205,7 +1206,7 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, return -EINVAL; } /* unprivileged user space cannot create special QP */ - if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { + if (udata && attrs->qp_type == IB_QPT_GSI) { pr_err ("%s(%d) Userspace can't create special QPs of type=0x%x\n", __func__, dev->id, attrs->qp_type); @@ -1362,7 +1363,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, struct ocrdma_create_qp_ureq ureq; u16 dpp_credit_lmt, dpp_offset; - status = ocrdma_check_qp_params(ibpd, dev, attrs); + status = ocrdma_check_qp_params(ibpd, dev, attrs, udata); if (status) goto gen_err; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 28c6d5b11487..8056121e9f69 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1136,7 +1136,8 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp, } static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, - struct ib_qp_init_attr *attrs) + struct ib_qp_init_attr *attrs, + struct ib_udata *udata) { struct qedr_device_attr *qattr = &dev->attr; @@ -1177,7 +1178,7 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, } /* Unprivileged user space cannot create special QP */ - if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { + if (udata && attrs->qp_type == IB_QPT_GSI) { DP_ERR(dev, "create qp: userspace can't create special QPs of type=0x%x\n", attrs->qp_type); @@ -1540,7 +1541,7 @@ int qedr_destroy_srq(struct ib_srq *ibsrq) in_params.srq_id = srq->srq_id; dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params); - if (ibsrq->pd->uobject) + if (ibsrq->uobject) qedr_free_srq_user_params(srq); else qedr_free_srq_kernel_params(srq); @@ -1993,7 +1994,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n", udata ? 
"user library" : "kernel", pd); - rc = qedr_check_qp_attrs(ibpd, dev, attrs); + rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata); if (rc) return ERR_PTR(rc); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index cf22f57a9f0d..3acf74cbe266 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -249,7 +249,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, init_completion(&qp->free); qp->state = IB_QPS_RESET; - qp->is_kernel = !(pd->uobject && udata); + qp->is_kernel = !udata; if (!qp->is_kernel) { dev_dbg(&dev->pdev->dev, diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c index dc0ce877c7a3..06ba7c7a2235 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c @@ -111,7 +111,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, unsigned long flags; int ret; - if (!(pd->uobject && udata)) { + if (!udata) { /* No support for kernel clients. */ dev_warn(&dev->pdev->dev, "no shared receive queue support for kernel client\n"); |