author | Jiri Kosina <jkosina@suse.cz> | 2023-04-26 23:52:34 +0300 |
---|---|---|
committer | Jiri Kosina <jkosina@suse.cz> | 2023-04-26 23:52:34 +0300 |
commit | cdc780f044a803aff8845b949f800f0f3d095d5f (patch) | |
tree | 6695a80568f6b4aef414070f17859f434e229957 /drivers/infiniband/sw/rxe/rxe_verbs.c | |
parent | 38518593ec55e897abda4b4be77b2ec8ec4447d1 (diff) | |
parent | 37386669887d3f2ccf021322c5558353d20f2387 (diff) | |
download | linux-cdc780f044a803aff8845b949f800f0f3d095d5f.tar.xz | |
Merge branch 'for-6.4/amd-sfh' into for-linus
- assorted functional fixes for amd-sfh driver (Basavaraj Natikar)
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_verbs.c')
-rw-r--r-- | drivers/infiniband/sw/rxe/rxe_verbs.c | 115 |
1 file changed, 53 insertions, 62 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 025b35bf014e..e14050a69276 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -245,7 +245,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
 	int num_sge = ibwr->num_sge;
 	int full;
 
-	full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER);
+	full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
 	if (unlikely(full))
 		return -ENOMEM;
 
@@ -256,7 +256,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
 	for (i = 0; i < num_sge; i++)
 		length += ibwr->sg_list[i].length;
 
-	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
+	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
 	recv_wqe->wr_id = ibwr->wr_id;
 
 	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
@@ -268,7 +268,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
 	recv_wqe->dma.cur_sge = 0;
 	recv_wqe->dma.sge_offset = 0;
 
-	queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER);
+	queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
 
 	return 0;
 }
@@ -623,17 +623,17 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 
 	spin_lock_irqsave(&qp->sq.sq_lock, flags);
 
-	full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
+	full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
 
 	if (unlikely(full)) {
 		spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
 		return -ENOMEM;
 	}
 
-	send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_TO_DRIVER);
+	send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
 	init_send_wqe(qp, ibwr, mask, length, send_wqe);
 
-	queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
+	queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);
 
 	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
 
@@ -821,12 +821,12 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
 	for (i = 0; i < num_entries; i++) {
-		cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
+		cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
 		if (!cqe)
 			break;
 
 		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
-		queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
+		queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
 	}
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
 
@@ -838,7 +838,7 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
 	struct rxe_cq *cq = to_rcq(ibcq);
 	int count;
 
-	count = queue_count(cq->queue, QUEUE_TYPE_FROM_DRIVER);
+	count = queue_count(cq->queue, QUEUE_TYPE_TO_ULP);
 
 	return (count > wc_cnt) ? wc_cnt : count;
 }
@@ -854,7 +854,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	if (cq->notify != IB_CQ_NEXT_COMP)
 		cq->notify = flags & IB_CQ_SOLICITED_MASK;
 
-	empty = queue_empty(cq->queue, QUEUE_TYPE_FROM_DRIVER);
+	empty = queue_empty(cq->queue, QUEUE_TYPE_TO_ULP);
 
 	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
 		ret = 1;
@@ -869,10 +869,17 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
 	struct rxe_mr *mr;
+	int err;
 
-	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr)
-		return ERR_PTR(-ENOMEM);
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = rxe_add_to_pool(&rxe->mr_pool, mr);
+	if (err)
+		goto err_free;
 
 	rxe_get(pd);
 	mr->ibmr.pd = ibpd;
@@ -880,8 +887,12 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 
 	rxe_mr_init_dma(access, mr);
 	rxe_finalize(mr);
-
 	return &mr->ibmr;
+
+err_free:
+	kfree(mr);
+err_out:
+	return ERR_PTR(err);
 }
 
 static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
@@ -895,9 +906,15 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 	struct rxe_pd *pd = to_rpd(ibpd);
 	struct rxe_mr *mr;
 
-	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr)
-		return ERR_PTR(-ENOMEM);
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = rxe_add_to_pool(&rxe->mr_pool, mr);
+	if (err)
+		goto err_free;
 
 	rxe_get(pd);
 	mr->ibmr.pd = ibpd;
@@ -905,14 +922,16 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 
 	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
 	if (err)
-		goto err1;
+		goto err_cleanup;
 
 	rxe_finalize(mr);
-
 	return &mr->ibmr;
 
-err1:
+err_cleanup:
 	rxe_cleanup(mr);
+err_free:
+	kfree(mr);
+err_out:
 	return ERR_PTR(err);
 }
 
@@ -927,9 +946,15 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 	if (mr_type != IB_MR_TYPE_MEM_REG)
 		return ERR_PTR(-EINVAL);
 
-	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr)
-		return ERR_PTR(-ENOMEM);
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = rxe_add_to_pool(&rxe->mr_pool, mr);
+	if (err)
+		goto err_free;
 
 	rxe_get(pd);
 	mr->ibmr.pd = ibpd;
@@ -937,53 +962,19 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 
 	err = rxe_mr_init_fast(max_num_sg, mr);
 	if (err)
-		goto err1;
+		goto err_cleanup;
 
 	rxe_finalize(mr);
-
 	return &mr->ibmr;
 
-err1:
+err_cleanup:
 	rxe_cleanup(mr);
+err_free:
+	kfree(mr);
+err_out:
 	return ERR_PTR(err);
 }
 
-static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
-{
-	struct rxe_mr *mr = to_rmr(ibmr);
-	struct rxe_map *map;
-	struct rxe_phys_buf *buf;
-
-	if (unlikely(mr->nbuf == mr->num_buf))
-		return -ENOMEM;
-
-	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
-	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
-
-	buf->addr = addr;
-	buf->size = ibmr->page_size;
-	mr->nbuf++;
-
-	return 0;
-}
-
-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
-			 int sg_nents, unsigned int *sg_offset)
-{
-	struct rxe_mr *mr = to_rmr(ibmr);
-	int n;
-
-	mr->nbuf = 0;
-
-	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
-
-	mr->page_shift = ilog2(ibmr->page_size);
-	mr->page_mask = ibmr->page_size - 1;
-	mr->offset = ibmr->iova & mr->page_mask;
-
-	return n;
-}
-
 static ssize_t parent_show(struct device *device,
 			   struct device_attribute *attr, char *buf)
 {
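The memory-region changes above all follow the same kernel error-unwind idiom: allocate the object with kzalloc(), register it in its pool with rxe_add_to_pool(), and on failure jump to a label that undoes only the steps already completed, in reverse order. Below is a minimal stand-alone sketch of that shape in plain C; the demo_pool type and demo_pool_add() helper are hypothetical stand-ins for illustration, not the in-kernel rxe_pool API.

```c
/* Illustrative userspace sketch of the kzalloc + pool-add + goto-unwind
 * pattern used in the MR changes above. The pool type and helpers here
 * are made-up stand-ins, not the in-kernel rxe_pool API.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_pool {
	int used;
	int capacity;
};

struct demo_mr {
	int index;
};

/* Stand-in for rxe_add_to_pool(): fails when the pool is exhausted. */
static int demo_pool_add(struct demo_pool *pool, struct demo_mr *mr)
{
	if (pool->used >= pool->capacity)
		return -ENOMEM;
	mr->index = pool->used++;
	return 0;
}

/* Mirrors the shape of the reworked rxe_get_dma_mr(): each failure jumps
 * to the label that undoes only the steps completed so far.
 */
static struct demo_mr *demo_alloc_mr(struct demo_pool *pool, int *errp)
{
	struct demo_mr *mr;
	int err;

	mr = calloc(1, sizeof(*mr));	/* kzalloc() stand-in */
	if (!mr) {
		err = -ENOMEM;
		goto err_out;
	}

	err = demo_pool_add(pool, mr);	/* rxe_add_to_pool() stand-in */
	if (err)
		goto err_free;

	return mr;

err_free:
	free(mr);			/* kfree() stand-in */
err_out:
	*errp = err;
	return NULL;
}

int main(void)
{
	struct demo_pool pool = { .used = 0, .capacity = 1 };
	int err = 0;

	struct demo_mr *a = demo_alloc_mr(&pool, &err);
	struct demo_mr *b = demo_alloc_mr(&pool, &err);	/* pool full -> NULL */

	printf("a=%p b=%p err=%d (%s)\n", (void *)a, (void *)b, err, strerror(-err));
	free(a);
	return 0;
}
```

The three-label variant in rxe_reg_user_mr() and rxe_alloc_mr() extends the same shape: err_cleanup undoes the pool registration via rxe_cleanup() before falling through to err_free, which releases the memory.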