author     Linus Torvalds <torvalds@linux-foundation.org>   2019-07-16 06:38:15 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-07-16 06:38:15 +0300
commit     2a3c389a0fde49b241430df806a34276568cfb29 (patch)
tree       9cf35829317e8cc2aaffc4341fb824dad63fce02 /drivers/infiniband/hw/cxgb4/qp.c
parent     8de262531f5fbb7458463224a7587429800c24bf (diff)
parent     0b043644c0ca601cb19943a81aa1f1455dbe9461 (diff)
download   linux-2a3c389a0fde49b241430df806a34276568cfb29.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"A smaller cycle this time. Notably we see another new driver, 'Soft
iWarp', and the deletion of an ancient unused driver for nes.
- Revise and simplify the signature offload RDMA MR APIs
- More progress on hoisting object allocation boilerplate code out
of the drivers
- Driver bug fixes and revisions for hns, hfi1, efa, cxgb4, qib,
i40iw
- Tree wide cleanups: struct_size, put_user_page, xarray, rst doc
conversion
- Removal of obsolete ib_ucm chardev and nes driver
- netlink based discovery of chardevs and autoloading of the modules
providing them
- Move more of the rdmavt/hfi1 uapi to include/uapi/rdma
- New driver 'siw' for software based iWarp running on top of netdev,
much like rxe's software RoCE.
- mlx5 feature to report events in their raw devx format to userspace
- Expose per-object counters through rdma tool
- Adaptive interrupt moderation for RDMA (DIM), sharing the DIM core
from netdev"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (194 commits)
RMDA/siw: Require a 64 bit arch
RDMA/siw: Mark expected switch fall-throughs
RDMA/core: Fix -Wunused-const-variable warnings
rdma/siw: Remove set but not used variable 's'
rdma/siw: Add missing dependencies on LIBCRC32C and DMA_VIRT_OPS
RDMA/siw: Add missing rtnl_lock around access to ifa
rdma/siw: Use proper enumerated type in map_cqe_status
RDMA/siw: Remove unnecessary kthread create/destroy printouts
IB/rdmavt: Fix variable shadowing issue in rvt_create_cq
RDMA/core: Fix race when resolving IP address
RDMA/core: Make rdma_counter.h compile stand alone
IB/core: Work on the caller socket net namespace in nldev_newlink()
RDMA/rxe: Fill in wc byte_len with IB_WC_RECV_RDMA_WITH_IMM
RDMA/mlx5: Set RDMA DIM to be enabled by default
RDMA/nldev: Added configuration of RDMA dynamic interrupt moderation to netlink
RDMA/core: Provide RDMA DIM support for ULPs
linux/dim: Implement RDMA adaptive moderation (DIM)
IB/mlx5: Report correctly tag matching rendezvous capability
docs: infiniband: add it to the driver-api bookset
IB/mlx5: Implement VHCA tunnel mechanism in DEVX
...
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/qp.c')
-rw-r--r--   drivers/infiniband/hw/cxgb4/qp.c | 95
1 file changed, 39 insertions(+), 56 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e92b9544357a..eb9368be28c1 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -274,7 +274,6 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		 (unsigned long long)virt_to_phys(wq->sq.queue),
 		 wq->rq.queue,
 		 (unsigned long long)virt_to_phys(wq->rq.queue));
-		memset(wq->rq.queue, 0, wq->rq.memsize);
 		dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
 	}
 
@@ -303,7 +302,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		wq->rq.msn = 1;
 
 	/* build fw_ri_res_wr */
-	wr_len = sizeof *res_wr + 2 * sizeof *res;
+	wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
 	if (need_rq)
 		wr_len += sizeof(*res);
 	skb = alloc_skb(wr_len, GFP_KERNEL);
@@ -439,7 +438,7 @@ static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
 			rem -= len;
 		}
 	}
-	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
+	len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
 	if (len)
 		memset(dstp, 0, len);
 	immdp->op = FW_RI_DATA_IMMD;
@@ -528,7 +527,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
 					 T4_MAX_SEND_INLINE, &plen);
 			if (ret)
 				return ret;
-			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
+			size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
 			       plen;
 		} else {
 			ret = build_isgl((__be64 *)sq->queue,
@@ -537,7 +536,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
 					 wr->sg_list, wr->num_sge, &plen);
 			if (ret)
 				return ret;
-			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
+			size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
 			       wr->num_sge * sizeof(struct fw_ri_sge);
 		}
 	} else {
@@ -545,7 +544,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
 		wqe->send.u.immd_src[0].r1 = 0;
 		wqe->send.u.immd_src[0].r2 = 0;
 		wqe->send.u.immd_src[0].immdlen = 0;
-		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+		size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
 		plen = 0;
 	}
 	*len16 = DIV_ROUND_UP(size, 16);
@@ -579,7 +578,7 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
 					 T4_MAX_WRITE_INLINE, &plen);
 			if (ret)
 				return ret;
-			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
+			size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
 			       plen;
 		} else {
 			ret = build_isgl((__be64 *)sq->queue,
@@ -588,7 +587,7 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
 					 wr->sg_list, wr->num_sge, &plen);
 			if (ret)
 				return ret;
-			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
+			size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
 			       wr->num_sge * sizeof(struct fw_ri_sge);
 		}
 	} else {
@@ -596,7 +595,7 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
 		wqe->write.u.immd_src[0].r1 = 0;
 		wqe->write.u.immd_src[0].r2 = 0;
 		wqe->write.u.immd_src[0].immdlen = 0;
-		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+		size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
 		plen = 0;
 	}
 	*len16 = DIV_ROUND_UP(size, 16);
@@ -683,7 +682,7 @@ static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
 	}
 	wqe->read.r2 = 0;
 	wqe->read.r5 = 0;
-	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+	*len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
 	return 0;
 }
 
@@ -766,8 +765,8 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
 	if (ret)
 		return ret;
-	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
-			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
+	*len16 = DIV_ROUND_UP(
+		sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
 	return 0;
 }
 
@@ -886,47 +885,21 @@ static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
 {
 	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
 	wqe->inv.r2 = 0;
-	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
+	*len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
 	return 0;
 }
 
-static void free_qp_work(struct work_struct *work)
-{
-	struct c4iw_ucontext *ucontext;
-	struct c4iw_qp *qhp;
-	struct c4iw_dev *rhp;
-
-	qhp = container_of(work, struct c4iw_qp, free_work);
-	ucontext = qhp->ucontext;
-	rhp = qhp->rhp;
-
-	pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
-	destroy_qp(&rhp->rdev, &qhp->wq,
-		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
-
-	c4iw_put_wr_wait(qhp->wr_waitp);
-	kfree(qhp);
-}
-
-static void queue_qp_free(struct kref *kref)
-{
-	struct c4iw_qp *qhp;
-
-	qhp = container_of(kref, struct c4iw_qp, kref);
-	pr_debug("qhp %p\n", qhp);
-	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
-}
-
 void c4iw_qp_add_ref(struct ib_qp *qp)
 {
 	pr_debug("ib_qp %p\n", qp);
-	kref_get(&to_c4iw_qp(qp)->kref);
+	refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
 }
 
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
 	pr_debug("ib_qp %p\n", qp);
-	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
+	if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt))
+		complete(&to_c4iw_qp(qp)->qp_rel_comp);
 }
 
 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -1606,7 +1579,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 			   FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 
 	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
-	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
+	wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
 	term = (struct terminate_message *)wqe->u.terminate.termmsg;
 	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
 		term->layer_etype = qhp->attr.layer_etype;
@@ -1751,16 +1724,15 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
 {
 	pr_debug("p2p_type = %d\n", p2p_type);
-	memset(&init->u, 0, sizeof init->u);
+	memset(&init->u, 0, sizeof(init->u));
 	switch (p2p_type) {
 	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
 		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
 		init->u.write.stag_sink = cpu_to_be32(1);
 		init->u.write.to_sink = cpu_to_be64(1);
 		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
-		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
-						   sizeof(struct fw_ri_immd),
-						   16);
+		init->u.write.len16 = DIV_ROUND_UP(
+			sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16);
 		break;
 	case FW_RI_INIT_P2PTYPE_READ_REQ:
 		init->u.write.opcode = FW_RI_RDMA_READ_WR;
@@ -1768,7 +1740,7 @@ static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
 		init->u.read.to_src_lo = cpu_to_be32(1);
 		init->u.read.stag_sink = cpu_to_be32(1);
 		init->u.read.to_sink_lo = cpu_to_be32(1);
-		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
+		init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16);
 		break;
 	}
 }
@@ -1782,7 +1754,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
 		 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
+	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
 		ret = -ENOMEM;
 		goto out;
@@ -2099,10 +2071,12 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 {
 	struct c4iw_dev *rhp;
 	struct c4iw_qp *qhp;
+	struct c4iw_ucontext *ucontext;
 	struct c4iw_qp_attributes attrs;
 
 	qhp = to_c4iw_qp(ib_qp);
 	rhp = qhp->rhp;
+	ucontext = qhp->ucontext;
 
 	attrs.next_state = C4IW_QP_STATE_ERROR;
 	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
@@ -2120,7 +2094,17 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 
 	c4iw_qp_rem_ref(ib_qp);
 
+	wait_for_completion(&qhp->qp_rel_comp);
+
 	pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
+	pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
+
+	destroy_qp(&rhp->rdev, &qhp->wq,
+		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
+
+	c4iw_put_wr_wait(qhp->wr_waitp);
+
+	kfree(qhp);
 	return 0;
 }
 
@@ -2230,8 +2214,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	spin_lock_init(&qhp->lock);
 	mutex_init(&qhp->mutex);
 	init_waitqueue_head(&qhp->wait);
-	kref_init(&qhp->kref);
-	INIT_WORK(&qhp->free_work, free_qp_work);
+	init_completion(&qhp->qp_rel_comp);
+	refcount_set(&qhp->qp_refcnt, 1);
 
 	ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
 	if (ret)
@@ -2302,7 +2286,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 			ucontext->key += PAGE_SIZE;
 		}
 		spin_unlock(&ucontext->mmap_lock);
-		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 		if (ret)
 			goto err_free_ma_sync_key;
 		sq_key_mm->key = uresp.sq_key;
@@ -2386,7 +2370,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct c4iw_dev *rhp;
 	struct c4iw_qp *qhp;
 	enum c4iw_qp_attr_mask mask = 0;
-	struct c4iw_qp_attributes attrs;
+	struct c4iw_qp_attributes attrs = {};
 
 	pr_debug("ib_qp %p\n", ibqp);
 
@@ -2398,7 +2382,6 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (!attr_mask)
 		return 0;
 
-	memset(&attrs, 0, sizeof attrs);
 	qhp = to_c4iw_qp(ibqp);
 	rhp = qhp->rhp;
 
@@ -2482,8 +2465,8 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 {
 	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
 
-	memset(attr, 0, sizeof *attr);
-	memset(init_attr, 0, sizeof *init_attr);
+	memset(attr, 0, sizeof(*attr));
+	memset(init_attr, 0, sizeof(*init_attr));
 	attr->qp_state = to_ib_qp_state(qhp->attr.state);
 	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
 	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
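The substantive part of this cxgb4 change is visible in the c4iw_qp_add_ref()/c4iw_qp_rem_ref() and c4iw_destroy_qp() hunks: the kref plus free_qp_work()/queue_work() teardown path is dropped, and instead the last reference holder signals a completion that c4iw_destroy_qp() waits on before tearing the QP down synchronously on the caller's context. Below is a minimal userspace sketch of that refcount-plus-completion pattern, using C11 atomics and a pthread condition variable in place of the kernel's refcount_t and struct completion; the qp_object/qp_get/qp_put/qp_destroy names are hypothetical illustrations of the shape of the teardown, not the driver's actual API.

/*
 * Userspace analogue of the teardown pattern adopted in the hunks above:
 * the last qp_put() signals "released", and qp_destroy() blocks until that
 * happens before freeing the object. Hypothetical names, illustration only.
 * Build: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct completion {                  /* stand-in for struct completion */
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void completion_init(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

struct qp_object {
	atomic_int refcnt;               /* plays the role of qp_refcnt   */
	struct completion rel_comp;      /* plays the role of qp_rel_comp */
};

static void qp_get(struct qp_object *qp)
{
	atomic_fetch_add(&qp->refcnt, 1);
}

static void qp_put(struct qp_object *qp)
{
	/* Last reference dropped: wake up whoever waits in qp_destroy(). */
	if (atomic_fetch_sub(&qp->refcnt, 1) == 1)
		complete(&qp->rel_comp);
}

static void qp_destroy(struct qp_object *qp)
{
	qp_put(qp);                         /* drop the creation reference  */
	wait_for_completion(&qp->rel_comp); /* wait for all other holders   */
	free(qp);                           /* now it is safe to free       */
}

int main(void)
{
	struct qp_object *qp = calloc(1, sizeof(*qp));

	atomic_init(&qp->refcnt, 1);        /* creation reference           */
	completion_init(&qp->rel_comp);

	qp_get(qp);                         /* another user takes a ref     */
	qp_put(qp);                         /* ...and drops it              */
	qp_destroy(qp);                     /* synchronous teardown         */
	printf("qp torn down synchronously\n");
	return 0;
}

Compared with the kref plus queue_work() scheme the diff removes, the final destroy_qp()/kfree() now runs on the destroying caller's context rather than a workqueue, which is why c4iw_destroy_qp() can read qhp->ucontext directly instead of deferring that to free_qp_work().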