diff options
| author | Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com> | 2026-03-02 14:00:34 +0300 |
|---|---|---|
| committer | Jason Gunthorpe <jgg@nvidia.com> | 2026-03-08 13:20:25 +0300 |
| commit | 3d4a42360c338203b26eeaba7762a4e6a9ebce01 (patch) | |
| tree | f2d765153e91b591b7cebc48404226840bd99f86 | |
| parent | 1234a9d8aebbf24a46ef5d323bf9074bc911423e (diff) | |
| download | linux-3d4a42360c338203b26eeaba7762a4e6a9ebce01.tar.xz | |
RDMA/bnxt_re: Refactor bnxt_re_create_cq()
Some applications may allocate dmabuf based memory for CQs. To support
this, update the existing code to use SZ_4K to specify supported HW
page size for CQs, as we support only 4K pages for now.
Call ib_umem_find_best_pgsz() to ensure the umem supports the requested
page size. These changes are consolidated into a helper function.
Link: https://patch.msgid.link/r/20260302110036.36387-5-sriharsha.basavapatna@broadcom.com
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
| -rw-r--r-- | drivers/infiniband/hw/bnxt_re/ib_verbs.c | 29 |
1 file changed, 26 insertions, 3 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index cc082e8125c8..99200c7da660 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3349,6 +3349,26 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) return ib_respond_empty_udata(udata); } +static int bnxt_re_setup_sginfo(struct bnxt_re_dev *rdev, + struct ib_umem *umem, + struct bnxt_qplib_sg_info *sginfo) +{ + unsigned long page_size; + + if (!umem) + return -EINVAL; + + page_size = ib_umem_find_best_pgsz(umem, SZ_4K, 0); + if (!page_size || page_size != SZ_4K) + return -EINVAL; + + sginfo->umem = umem; + sginfo->npages = ib_umem_num_dma_blocks(umem, page_size); + sginfo->pgsize = page_size; + sginfo->pgshft = __builtin_ctz(page_size); + return 0; +} + int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct uverbs_attr_bundle *attrs) { @@ -3380,8 +3400,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, if (entries > dev_attr->max_cq_wqes + 1) entries = dev_attr->max_cq_wqes + 1; - cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; - cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; if (udata) { struct bnxt_re_cq_req req; @@ -3396,7 +3414,10 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, rc = PTR_ERR(cq->umem); goto fail; } - cq->qplib_cq.sg_info.umem = cq->umem; + rc = bnxt_re_setup_sginfo(rdev, cq->umem, &cq->qplib_cq.sg_info); + if (rc) + goto fail; + cq->qplib_cq.dpi = &uctx->dpi; } else { cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); @@ -3406,6 +3427,8 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, goto fail; } + cq->qplib_cq.sg_info.pgsize = SZ_4K; + cq->qplib_cq.sg_info.pgshft = __builtin_ctz(SZ_4K); cq->qplib_cq.dpi = &rdev->dpi_privileged; } cq->qplib_cq.max_wqe = entries; |
