| author | Selvin Xavier <selvin.xavier@broadcom.com> | 2019-03-28 19:49:43 +0300 |
| --- | --- | --- |
| committer | Jason Gunthorpe <jgg@mellanox.com> | 2019-03-28 20:13:27 +0300 |
| commit | 5aa8484080115cff2da68080ad1d115613648504 (patch) | |
| tree | dfd69350691e899b676149e8851b3fa197291b9c /drivers/infiniband/hw/bnxt_re/ib_verbs.c | |
| parent | 196b4ce57d1612ca03be3c7f14bfb6b0740c5c53 (diff) | |
| download | linux-5aa8484080115cff2da68080ad1d115613648504.tar.xz | |
RDMA/bnxt_re: Use correct sizing on buffers holding page DMA addresses
umem->nmap is used to size the internal buffer that stores page DMA
addresses. With umem page combining, umem->nmap (the number of DMA-mapped
SGL entries) can be less than the number of system pages in the umem, so
iterating the umem DMA-mapped SGL per page causes an out-of-bounds array
access.
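To make the sizing mismatch concrete, here is a hedged sketch (not the driver's actual allocator, which lives in the qplib layer; the helper name is illustrative): with page combining, each mapped SGL entry may cover several contiguous system pages, so an array sized by umem->nmap is too small for a per-page fill.

```c
#include <linux/slab.h>
#include <rdma/ib_umem.h>

/*
 * Illustrative helper only. umem->nmap counts DMA-mapped SGL entries,
 * each of which may span multiple contiguous system pages, so sizing a
 * per-page DMA-address array by nmap undercounts.
 */
static dma_addr_t *bnxt_re_alloc_page_array(struct ib_umem *umem)
{
	/* Buggy sizing: kcalloc(umem->nmap, sizeof(dma_addr_t), ...) */

	/* Fixed sizing: one slot per system page in the umem. */
	return kcalloc(ib_umem_num_pages(umem), sizeof(dma_addr_t),
		       GFP_KERNEL);
}
```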
Use ib_umem_num_pages() instead of umem->nmap to size the page array.
Add a new structure (bnxt_qplib_sg_info) to pass the sglist, npages, and
nmap together.
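For reference, a sketch of the new carrier structure as the changelog describes it; the authoritative definition lands in qplib_res.h, and the field order and types here are assumptions:

```c
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Assumed layout based on the changelog; see qplib_res.h for the
 * actual definition introduced by this commit.
 */
struct bnxt_qplib_sg_info {
	struct scatterlist	*sglist;	/* umem->sg_head.sgl */
	u32			nmap;		/* DMA-mapped SGL entries (umem->nmap) */
	u32			npages;		/* system pages, from ib_umem_num_pages() */
};
```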
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/ib_verbs.c')
| -rw-r--r-- | drivers/infiniband/hw/bnxt_re/ib_verbs.c | 25 |

1 file changed, 13 insertions(+), 12 deletions(-)
```diff
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 071b2fc38b0b..33b2a06c6dde 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -895,8 +895,9 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		return PTR_ERR(umem);
 
 	qp->sumem = umem;
-	qplib_qp->sq.sglist = umem->sg_head.sgl;
-	qplib_qp->sq.nmap = umem->nmap;
+	qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
+	qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
+	qplib_qp->sq.sg_info.nmap = umem->nmap;
 	qplib_qp->qp_handle = ureq.qp_handle;
 
 	if (!qp->qplib_qp.srq) {
@@ -907,8 +908,9 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		if (IS_ERR(umem))
 			goto rqfail;
 		qp->rumem = umem;
-		qplib_qp->rq.sglist = umem->sg_head.sgl;
-		qplib_qp->rq.nmap = umem->nmap;
+		qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
+		qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
+		qplib_qp->rq.sg_info.nmap = umem->nmap;
 	}
 
 	qplib_qp->dpi = &cntx->dpi;
@@ -916,8 +918,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 rqfail:
 	ib_umem_release(qp->sumem);
 	qp->sumem = NULL;
-	qplib_qp->sq.sglist = NULL;
-	qplib_qp->sq.nmap = 0;
+	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
 
 	return PTR_ERR(umem);
 }
@@ -1374,8 +1375,9 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
 		return PTR_ERR(umem);
 
 	srq->umem = umem;
-	qplib_srq->nmap = umem->nmap;
-	qplib_srq->sglist = umem->sg_head.sgl;
+	qplib_srq->sg_info.sglist = umem->sg_head.sgl;
+	qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
+	qplib_srq->sg_info.nmap = umem->nmap;
 	qplib_srq->srq_handle = ureq.srq_handle;
 	qplib_srq->dpi = &cntx->dpi;
 
@@ -2632,8 +2634,9 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 			rc = PTR_ERR(cq->umem);
 			goto fail;
 		}
-		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
-		cq->qplib_cq.nmap = cq->umem->nmap;
+		cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
+		cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
+		cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
 		cq->qplib_cq.dpi = &uctx->dpi;
 	} else {
 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
@@ -2645,8 +2648,6 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 		}
 		cq->qplib_cq.dpi = &rdev->dpi_privileged;
-		cq->qplib_cq.sghead = NULL;
-		cq->qplib_cq.nmap = 0;
 	}
 
 	/*
 	 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
```
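For context on how the qplib side can consume sg_info, here is a hedged sketch loosely modeled on the PBL fill in qplib_res.c (the helper name and details are illustrative, not the commit's code): it walks the combined SGL entry by entry but records one DMA address per PAGE_SIZE chunk, filling exactly npages slots of an array sized with ib_umem_num_pages().

```c
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: expand a page-combined SGL into per-page DMA
 * addresses. 'pbl' must have room for npages entries, i.e. it must be
 * sized with ib_umem_num_pages(), not umem->nmap.
 */
static u32 bnxt_re_fill_page_array(struct scatterlist *sglist, u32 nmap,
				   dma_addr_t *pbl)
{
	struct scatterlist *sg;
	unsigned int off;
	u32 i, n = 0;

	for_each_sg(sglist, sg, nmap, i)
		/* One address per PAGE_SIZE chunk of the combined entry. */
		for (off = 0; off < sg_dma_len(sg); off += PAGE_SIZE)
			pbl[n++] = sg_dma_address(sg) + off;

	return n;	/* equals npages for a page-aligned umem */
}
```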