Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_qp.c')
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_qp.c	94
1 file changed, 67 insertions(+), 27 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index a6565b674801..3257ad11be48 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -393,40 +393,38 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
 	/* Get buf size, SQ and RQ are aligned to page_szie */
 	if (hr_dev->caps.max_sq_sg <= 2) {
-		hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
+		hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
 					hr_qp->rq.wqe_shift), PAGE_SIZE) +
-				   HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+				   round_up((hr_qp->sq.wqe_cnt <<
 					hr_qp->sq.wqe_shift), PAGE_SIZE);
 		hr_qp->sq.offset = 0;
-		hr_qp->rq.offset = HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+		hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
 					hr_qp->sq.wqe_shift), PAGE_SIZE);
 	} else {
 		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
 		hr_qp->sge.sge_cnt = ex_sge_num ?
 		    max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
-		hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
+		hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
 					hr_qp->rq.wqe_shift), page_size) +
-				   HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
+				   round_up((hr_qp->sge.sge_cnt <<
 					hr_qp->sge.sge_shift), page_size) +
-				   HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+				   round_up((hr_qp->sq.wqe_cnt <<
 					hr_qp->sq.wqe_shift), page_size);
 		hr_qp->sq.offset = 0;
 		if (ex_sge_num) {
-			hr_qp->sge.offset = HNS_ROCE_ALIGN_UP(
-					    (hr_qp->sq.wqe_cnt <<
-					    hr_qp->sq.wqe_shift),
-					    page_size);
+			hr_qp->sge.offset = round_up((hr_qp->sq.wqe_cnt <<
+						      hr_qp->sq.wqe_shift),
+						     page_size);
 			hr_qp->rq.offset = hr_qp->sge.offset +
-					   HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
-					   hr_qp->sge.sge_shift),
-					   page_size);
+					   round_up((hr_qp->sge.sge_cnt <<
+						     hr_qp->sge.sge_shift),
+						    page_size);
 		} else {
-			hr_qp->rq.offset = HNS_ROCE_ALIGN_UP(
-					    (hr_qp->sq.wqe_cnt <<
-					    hr_qp->sq.wqe_shift),
-					    page_size);
+			hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
+						     hr_qp->sq.wqe_shift),
+						    page_size);
 		}
 	}
@@ -593,20 +591,18 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
 	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
 	hr_qp->sq.offset = 0;
-	size = HNS_ROCE_ALIGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
-				 page_size);
+	size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
 
 	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
 		hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
-					(u32)hr_qp->sge.sge_cnt);
+					 (u32)hr_qp->sge.sge_cnt);
 		hr_qp->sge.offset = size;
-		size += HNS_ROCE_ALIGN_UP(hr_qp->sge.sge_cnt <<
-					  hr_qp->sge.sge_shift, page_size);
+		size += round_up(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift,
+				 page_size);
 	}
 
 	hr_qp->rq.offset = size;
-	size += HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
-				  page_size);
+	size += round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size);
 	hr_qp->buff_size = size;
 
 	/* Get wr and sge number which send */
@@ -681,6 +677,29 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
 	kfree(hr_qp->rq_inl_buf.wqe_list);
 }
 
+static void add_qp_to_list(struct hns_roce_dev *hr_dev,
+			   struct hns_roce_qp *hr_qp,
+			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
+{
+	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
+	unsigned long flags;
+
+	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
+	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
+
+	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
+
+	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
+	if (hr_send_cq)
+		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
+	if (hr_recv_cq)
+		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
+
+	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
+	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
 static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 				     struct ib_pd *ib_pd,
 				     struct ib_qp_init_attr *init_attr,
@@ -744,7 +763,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			goto err_alloc_rq_inline_buf;
 		}
 
-		hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
+		hr_qp->umem = ib_umem_get(ib_pd->device, ucmd.buf_addr,
 					  hr_qp->buff_size, 0);
 		if (IS_ERR(hr_qp->umem)) {
 			dev_err(dev, "ib_umem_get error for create qp\n");
@@ -950,6 +969,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	}
 
 	hr_qp->event = hns_roce_ib_qp_event;
+
+	add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, init_attr->recv_cq);
+
 	hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
 
 	return 0;
@@ -1232,7 +1254,16 @@ out:
 void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
 		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-	if (send_cq == recv_cq) {
+	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
+		__acquire(&send_cq->lock);
+		__acquire(&recv_cq->lock);
+	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
+		spin_lock_irq(&send_cq->lock);
+		__acquire(&recv_cq->lock);
+	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
+		spin_lock_irq(&recv_cq->lock);
+		__acquire(&send_cq->lock);
+	} else if (send_cq == recv_cq) {
 		spin_lock_irq(&send_cq->lock);
 		__acquire(&recv_cq->lock);
 	} else if (send_cq->cqn < recv_cq->cqn) {
@@ -1248,7 +1279,16 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
 			 struct hns_roce_cq *recv_cq)
 			 __releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-	if (send_cq == recv_cq) {
+	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
+		__release(&recv_cq->lock);
+		__release(&send_cq->lock);
+	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
+		__release(&recv_cq->lock);
+		spin_unlock(&send_cq->lock);
+	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
+		__release(&send_cq->lock);
+		spin_unlock(&recv_cq->lock);
+	} else if (send_cq == recv_cq) {
 		__release(&recv_cq->lock);
 		spin_unlock_irq(&send_cq->lock);
 	} else if (send_cq->cqn < recv_cq->cqn) {
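A note on the two techniques this patch relies on. First, the buffer-size math: round_up(x, p) rounds x up to the next multiple of the power-of-two p, which for the page sizes used here yields exactly what the driver-private HNS_ROCE_ALIGN_UP() macro computed; the change simply switches to the kernel's generic helper. Second, the deadlock-avoidance rule in hns_roce_lock_cqs(): whenever two distinct CQ locks must be held together, every path takes the lock of the CQ with the smaller cqn first, so no two contexts can nest the pair in opposite orders. The patch extends that rule so a missing (NULL) send or receive CQ takes only the lock that exists, with __acquire()/__release() keeping sparse's lock annotations balanced. The sketch below restates just that ordering rule as self-contained userspace C with pthread mutexes; struct cq, lock_cq_pair() and unlock_cq_pair() are illustrative names for this note, not part of the kernel driver.

	#include <pthread.h>
	#include <stdint.h>

	struct cq {
		uint32_t cqn;           /* unique CQ number; defines the lock order */
		pthread_mutex_t lock;
	};

	/*
	 * Lock up to two CQs without risking an AB/BA deadlock: the CQ with
	 * the smaller cqn is always locked first, and NULL or duplicate
	 * arguments take at most one lock.
	 */
	static void lock_cq_pair(struct cq *send_cq, struct cq *recv_cq)
	{
		if (!send_cq && !recv_cq)
			return;                         /* nothing to lock */

		if (!recv_cq || send_cq == recv_cq) {
			pthread_mutex_lock(&send_cq->lock);  /* only one real lock */
		} else if (!send_cq) {
			pthread_mutex_lock(&recv_cq->lock);
		} else if (send_cq->cqn < recv_cq->cqn) {
			pthread_mutex_lock(&send_cq->lock);  /* lower cqn first */
			pthread_mutex_lock(&recv_cq->lock);
		} else {
			pthread_mutex_lock(&recv_cq->lock);  /* lower cqn first */
			pthread_mutex_lock(&send_cq->lock);
		}
	}

	/* Release whatever lock_cq_pair() took; unlock order is irrelevant
	 * for mutexes, so only the NULL/duplicate cases need care. */
	static void unlock_cq_pair(struct cq *send_cq, struct cq *recv_cq)
	{
		if (send_cq == recv_cq) {               /* same CQ, or both NULL */
			if (send_cq)
				pthread_mutex_unlock(&send_cq->lock);
			return;
		}
		if (send_cq)
			pthread_mutex_unlock(&send_cq->lock);
		if (recv_cq)
			pthread_mutex_unlock(&recv_cq->lock);
	}

	int main(void)
	{
		struct cq a = { .cqn = 1, .lock = PTHREAD_MUTEX_INITIALIZER };
		struct cq b = { .cqn = 2, .lock = PTHREAD_MUTEX_INITIALIZER };

		lock_cq_pair(&b, &a);   /* locks a (cqn 1) before b (cqn 2) */
		unlock_cq_pair(&b, &a);
		return 0;
	}

This same ordering is what lets the new add_qp_to_list() take qp_list_lock and then both CQ locks: because every path agrees on the nesting, the QP is attached to the device list and to both CQs' lists as one atomic step from the point of view of anyone else going through hns_roce_lock_cqs().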