Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cq.c')
-rw-r--r--   drivers/infiniband/hw/cxgb4/cq.c | 117
1 file changed, 59 insertions, 58 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index be07da1997e6..8e2d490e757a 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -33,12 +33,12 @@
 #include "iw_cxgb4.h"
 
 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-                      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
+                      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
+                      struct c4iw_wr_wait *wr_waitp)
 {
         struct fw_ri_res_wr *res_wr;
         struct fw_ri_res *res;
         int wr_len;
-        struct c4iw_wr_wait wr_wait;
         int ret;
 
         wr_len = sizeof *res_wr + sizeof *res;
@@ -50,17 +50,14 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                         FW_RI_RES_WR_NRES_V(1) |
                         FW_WR_COMPL_F);
         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-        res_wr->cookie = (uintptr_t)&wr_wait;
+        res_wr->cookie = (uintptr_t)wr_waitp;
         res = res_wr->res;
         res->u.cq.restype = FW_RI_RES_TYPE_CQ;
         res->u.cq.op = FW_RI_RES_OP_RESET;
         res->u.cq.iqid = cpu_to_be32(cq->cqid);
 
-        c4iw_init_wr_wait(&wr_wait);
-        ret = c4iw_ofld_send(rdev, skb);
-        if (!ret) {
-                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
-        }
+        c4iw_init_wr_wait(wr_waitp);
+        ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
 
         kfree(cq->sw_queue);
         dma_free_coherent(&(rdev->lldi.pdev->dev),
@@ -71,13 +68,13 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 }
 
 static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-                     struct c4iw_dev_ucontext *uctx)
+                     struct c4iw_dev_ucontext *uctx,
+                     struct c4iw_wr_wait *wr_waitp)
 {
         struct fw_ri_res_wr *res_wr;
         struct fw_ri_res *res;
         int wr_len;
         int user = (uctx != &rdev->uctx);
-        struct c4iw_wr_wait wr_wait;
         int ret;
         struct sk_buff *skb;
 
@@ -119,7 +116,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                         FW_RI_RES_WR_NRES_V(1) |
                         FW_WR_COMPL_F);
         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-        res_wr->cookie = (uintptr_t)&wr_wait;
+        res_wr->cookie = (uintptr_t)wr_waitp;
         res = res_wr->res;
         res->u.cq.restype = FW_RI_RES_TYPE_CQ;
         res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -139,13 +136,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
         res->u.cq.iqsize = cpu_to_be16(cq->size);
         res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
 
-        c4iw_init_wr_wait(&wr_wait);
-
-        ret = c4iw_ofld_send(rdev, skb);
-        if (ret)
-                goto err4;
-        pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
-        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+        c4iw_init_wr_wait(wr_waitp);
+        ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
         if (ret)
                 goto err4;
 
@@ -178,7 +170,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
 {
         struct t4_cqe cqe;
 
-        pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+        pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
                  wq, cq, cq->sw_cidx, cq->sw_pidx);
         memset(&cqe, 0, sizeof(cqe));
         cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -197,7 +189,7 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
         int in_use = wq->rq.in_use - count;
 
         BUG_ON(in_use < 0);
-        pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+        pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
                  wq, cq, wq->rq.in_use, count);
         while (in_use--) {
                 insert_recv_cqe(wq, cq);
@@ -211,7 +203,7 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
 {
         struct t4_cqe cqe;
 
-        pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+        pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
                  wq, cq, cq->sw_cidx, cq->sw_pidx);
         memset(&cqe, 0, sizeof(cqe));
         cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -281,8 +273,8 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
                         /*
                          * Insert this completed cqe into the swcq.
                          */
-                        pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
-                                 __func__, cidx, cq->sw_pidx);
+                        pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
+                                 cidx, cq->sw_pidx);
                         swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
                         cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                         t4_swcq_produce(cq);
@@ -337,7 +329,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
         struct t4_swsqe *swsqe;
         int ret;
 
-        pr_debug("%s cqid 0x%x\n", __func__, chp->cq.cqid);
+        pr_debug("cqid 0x%x\n", chp->cq.cqid);
         ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
 
         /*
@@ -430,7 +422,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
         u32 ptr;
 
         *count = 0;
-        pr_debug("%s count zero %d\n", __func__, *count);
+        pr_debug("count zero %d\n", *count);
         ptr = cq->sw_cidx;
         while (ptr != cq->sw_pidx) {
                 cqe = &cq->sw_queue[ptr];
@@ -440,7 +432,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
                 if (++ptr == cq->size)
                         ptr = 0;
         }
-        pr_debug("%s cq %p count %d\n", __func__, cq, *count);
+        pr_debug("cq %p count %d\n", cq, *count);
 }
 
 /*
@@ -471,8 +463,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
         if (ret)
                 return ret;
 
-        pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
-                 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+        pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+                 CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
                  CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
                  CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
                  CQE_WRID_LOW(hw_cqe));
@@ -603,8 +595,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
         if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                 struct t4_swsqe *swsqe;
 
-                pr_debug("%s out of order completion going in sw_sq at idx %u\n",
-                         __func__, CQE_WRID_SQ_IDX(hw_cqe));
+                pr_debug("out of order completion going in sw_sq at idx %u\n",
+                         CQE_WRID_SQ_IDX(hw_cqe));
                 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                 swsqe->cqe = *hw_cqe;
                 swsqe->complete = 1;
@@ -638,13 +630,13 @@ proc_cqe:
                 BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
 
                 wq->sq.cidx = (uint16_t)idx;
-                pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+                pr_debug("completing sq idx %u\n", wq->sq.cidx);
                 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                 if (c4iw_wr_log)
                         c4iw_log_wr_stats(wq, hw_cqe);
                 t4_sq_consume(wq);
         } else {
-                pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+                pr_debug("completing rq idx %u\n", wq->rq.cidx);
                 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                 BUG_ON(t4_rq_empty(wq));
                 if (c4iw_wr_log)
@@ -661,12 +653,12 @@ flush_wq:
 
 skip_cqe:
         if (SW_CQE(hw_cqe)) {
-                pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
-                         __func__, cq, cq->cqid, cq->sw_cidx);
+                pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
+                         cq, cq->cqid, cq->sw_cidx);
                 t4_swcq_consume(cq);
         } else {
-                pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
-                         __func__, cq, cq->cqid, cq->cidx);
+                pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
+                         cq, cq->cqid, cq->cidx);
                 t4_hwcq_consume(cq);
         }
         return ret;
@@ -712,8 +704,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
         wc->vendor_err = CQE_STATUS(&cqe);
         wc->wc_flags = 0;
 
-        pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
-                 __func__, CQE_QPID(&cqe),
+        pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
+                 CQE_QPID(&cqe),
                  CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
                  CQE_STATUS(&cqe), CQE_LEN(&cqe),
                  CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
@@ -857,7 +849,7 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
         struct c4iw_cq *chp;
         struct c4iw_ucontext *ucontext;
 
-        pr_debug("%s ib_cq %p\n", __func__, ib_cq);
+        pr_debug("ib_cq %p\n", ib_cq);
         chp = to_c4iw_cq(ib_cq);
 
         remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -868,8 +860,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
                                   : NULL;
         destroy_cq(&chp->rhp->rdev, &chp->cq,
                    ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
-                   chp->destroy_skb);
-        chp->destroy_skb = NULL;
+                   chp->destroy_skb, chp->wr_waitp);
+        c4iw_put_wr_wait(chp->wr_waitp);
         kfree(chp);
         return 0;
 }
@@ -889,7 +881,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
         size_t memsize, hwentries;
         struct c4iw_mm_entry *mm, *mm2;
 
-        pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+        pr_debug("ib_dev %p entries %d\n", ibdev, entries);
         if (attr->flags)
                 return ERR_PTR(-EINVAL);
 
@@ -901,12 +893,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
         chp = kzalloc(sizeof(*chp), GFP_KERNEL);
         if (!chp)
                 return ERR_PTR(-ENOMEM);
+        chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+        if (!chp->wr_waitp) {
+                ret = -ENOMEM;
+                goto err_free_chp;
+        }
+        c4iw_init_wr_wait(chp->wr_waitp);
 
         wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
         chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
         if (!chp->destroy_skb) {
                 ret = -ENOMEM;
-                goto err1;
+                goto err_free_wr_wait;
         }
 
         if (ib_context)
@@ -947,9 +945,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
         chp->cq.vector = vector;
 
         ret = create_cq(&rhp->rdev, &chp->cq,
-                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+                        chp->wr_waitp);
         if (ret)
-                goto err2;
+                goto err_free_skb;
 
         chp->rhp = rhp;
         chp->cq.size--;                         /* status page */
@@ -960,16 +959,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
         init_waitqueue_head(&chp->wait);
         ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
         if (ret)
-                goto err3;
+                goto err_destroy_cq;
 
         if (ucontext) {
                 ret = -ENOMEM;
                 mm = kmalloc(sizeof *mm, GFP_KERNEL);
                 if (!mm)
-                        goto err4;
+                        goto err_remove_handle;
                 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                 if (!mm2)
-                        goto err5;
+                        goto err_free_mm;
 
                 uresp.qid_mask = rhp->rdev.cqmask;
                 uresp.cqid = chp->cq.cqid;
@@ -984,7 +983,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                 ret = ib_copy_to_udata(udata, &uresp,
                                        sizeof(uresp) - sizeof(uresp.reserved));
                 if (ret)
-                        goto err6;
+                        goto err_free_mm2;
 
                 mm->key = uresp.key;
                 mm->addr = virt_to_phys(chp->cq.queue);
@@ -996,23 +995,25 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                 mm2->len = PAGE_SIZE;
                 insert_mmap(ucontext, mm2);
         }
-        pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
-                 __func__, chp->cq.cqid, chp, chp->cq.size,
+        pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+                 chp->cq.cqid, chp, chp->cq.size,
                  chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
         return &chp->ibcq;
-err6:
+err_free_mm2:
         kfree(mm2);
-err5:
+err_free_mm:
         kfree(mm);
-err4:
+err_remove_handle:
         remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err3:
+err_destroy_cq:
         destroy_cq(&chp->rhp->rdev, &chp->cq,
                    ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
-                   chp->destroy_skb);
-err2:
+                   chp->destroy_skb, chp->wr_waitp);
+err_free_skb:
         kfree_skb(chp->destroy_skb);
-err1:
+err_free_wr_wait:
+        c4iw_put_wr_wait(chp->wr_waitp);
+err_free_chp:
         kfree(chp);
         return ERR_PTR(ret);
 }
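The conversion above replaces the open-coded c4iw_ofld_send() plus c4iw_wait_for_reply() sequence with c4iw_ref_send_wait(), and moves the wait object off the stack into a reference-counted allocation owned by the CQ (c4iw_alloc_wr_wait() at create time, c4iw_put_wr_wait() at destroy time and on the error paths). Those helpers are defined outside this file; the sketch below is only an illustration of the pattern inferred from the call sites in this diff, not a copy of the driver's definitions. In particular, the struct layout and the names c4iw_get_wr_wait() and _free_wr_wait() are assumptions here; only c4iw_alloc_wr_wait(), c4iw_put_wr_wait() and c4iw_ref_send_wait() appear in the diff itself.

/*
 * Sketch only -- inferred from the call sites in this diff, not copied
 * from the driver.  Refcounting lets the firmware-reply path complete
 * the wait object safely even if its owner has already dropped its own
 * reference.  Assumes the driver header for struct c4iw_rdev,
 * c4iw_ofld_send() and c4iw_wait_for_reply().
 */
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct c4iw_wr_wait {
        struct completion completion;   /* signalled by the reply handler */
        int ret;                        /* status carried back to the waiter */
        struct kref kref;               /* shared by waiter and reply path */
};

static void _free_wr_wait(struct kref *kref)
{
        kfree(container_of(kref, struct c4iw_wr_wait, kref));
}

static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        kref_put(&wr_waitp->kref, _free_wr_wait);
}

static inline void c4iw_get_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        kref_get(&wr_waitp->kref);
}

static inline struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
{
        struct c4iw_wr_wait *wr_waitp;

        /* One reference for the owner (here, the CQ object). */
        wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
        if (wr_waitp)
                kref_init(&wr_waitp->kref);
        return wr_waitp;
}

/*
 * Take a reference across the firmware round trip, send the work
 * request, then block until the reply (or a timeout) resolves it.
 */
static inline int c4iw_ref_send_wait(struct c4iw_rdev *rdev,
                                     struct sk_buff *skb,
                                     struct c4iw_wr_wait *wr_waitp,
                                     u32 hwtid, u32 qpid,
                                     const char *func)
{
        int ret;

        c4iw_get_wr_wait(wr_waitp);
        ret = c4iw_ofld_send(rdev, skb);
        if (ret) {
                /* Send failed: no reply will drop the reference for us. */
                c4iw_put_wr_wait(wr_waitp);
                return ret;
        }
        return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
}

With this shape, the caller no longer has to open-code the send/wait pair, and the CQ keeps its own reference for the object's lifetime, which is why c4iw_destroy_cq() ends with c4iw_put_wr_wait() instead of clearing destroy_skb.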