author     Linus Torvalds <torvalds@linux-foundation.org>  2022-05-27 07:08:40 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-05-27 07:08:40 +0300
commit     780d8ce7162818cfe03e9a5e23b3af192a1d37bc
tree       7af6427428434175118ee79e9a08aa66e750efa0  /drivers/infiniband/hw/irdma/verbs.c
parent     090b39af591cdde897664dfa5d3d5b0c78a197fb
parent     9c477178a0a187c4718c228cc6e0692564811441
download   linux-780d8ce7162818cfe03e9a5e23b3af192a1d37bc.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"Small collection of incremental improvement patches:
- Minor code cleanup patches, comment improvements, etc. from static
  tools

- Clean up some of the kernel caps, reducing the historical stealth
  uAPI leftovers

- Bug fixes and minor changes for rdmavt, hns, rxe, irdma

- Remove unimplemented cruft from rxe

- Reorganize UMR QP code in mlx5 to avoid going through the IB verbs
  layer

- flush_workqueue(system_unbound_wq) removal

- Ensure rxe waits for objects to be unused before allowing the core
  to free them

- Several rc quality bug fixes for hfi1"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (67 commits)
RDMA/rtrs-clt: Fix one kernel-doc comment
RDMA/hfi1: Remove all traces of diagpkt support
RDMA/hfi1: Consolidate software versions
RDMA/hfi1: Remove pointless driver version
RDMA/hfi1: Fix potential integer multiplication overflow errors
RDMA/hfi1: Prevent panic when SDMA is disabled
RDMA/hfi1: Prevent use of lock before it is initialized
RDMA/rxe: Fix an error handling path in rxe_get_mcg()
IB/core: Fix typo in comment
RDMA/core: Fix typo in comment
IB/hf1: Fix typo in comment
IB/qib: Fix typo in comment
IB/iser: Fix typo in comment
RDMA/mlx4: Avoid flush_scheduled_work() usage
IB/isert: Avoid flush_scheduled_work() usage
RDMA/mlx5: Remove duplicate pointer assignment in mlx5_ib_alloc_implicit_mr()
RDMA/qedr: Remove unnecessary synchronize_irq() before free_irq()
RDMA/hns: Use hr_reg_read() instead of remaining roce_get_xxx()
RDMA/hns: Use hr_reg_xxx() instead of remaining roce_set_xxx()
RDMA/irdma: Add SW mechanism to generate completions on error
...
Diffstat (limited to 'drivers/infiniband/hw/irdma/verbs.c')
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 60
1 file changed, 36 insertions(+), 24 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 52f3e88f8569..c4412ece5a6d 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -25,7 +25,9 @@ static int irdma_query_device(struct ib_device *ibdev,
 			    iwdev->netdev->dev_addr);
 	props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
 			irdma_fw_minor_ver(&rf->sc_dev);
-	props->device_cap_flags = iwdev->device_cap_flags;
+	props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
+				  IB_DEVICE_MEM_MGT_EXTENSIONS;
+	props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
 	props->vendor_id = pcidev->vendor;
 	props->vendor_part_id = pcidev->device;
@@ -533,6 +535,9 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
 		irdma_modify_qp_to_err(&iwqp->sc_qp);
 
+	if (!iwqp->user_mode)
+		cancel_delayed_work_sync(&iwqp->dwork_flush);
+
 	irdma_qp_rem_ref(&iwqp->ibqp);
 	wait_for_completion(&iwqp->free_qp);
 	irdma_free_lsmm_rsrc(iwqp);
@@ -788,6 +793,14 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
 	return 0;
 }
 
+static void irdma_flush_worker(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
+
+	irdma_generate_flush_completions(iwqp);
+}
+
 /**
  * irdma_create_qp - create qp
  * @ibqp: ptr of qp
@@ -907,6 +920,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
 		irdma_setup_virt_qp(iwdev, iwqp, &init_info);
 	} else {
+		INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
 		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info,
 						init_attr);
 	}
@@ -1398,11 +1412,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
 	if (iwqp->ibqp_state > IB_QPS_RTS && !iwqp->flush_issued) {
-		iwqp->flush_issued = 1;
 		spin_unlock_irqrestore(&iwqp->lock, flags);
 		irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
				 IRDMA_FLUSH_WAIT);
+		iwqp->flush_issued = 1;
 	} else {
 		spin_unlock_irqrestore(&iwqp->lock, flags);
 	}
@@ -1755,6 +1769,8 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	unsigned long flags;
 
 	spin_lock_irqsave(&iwcq->lock, flags);
+	if (!list_empty(&iwcq->cmpl_generated))
+		irdma_remove_cmpls_list(iwcq);
 	if (!list_empty(&iwcq->resize_list))
 		irdma_process_resize_list(iwcq, iwdev, NULL);
 	spin_unlock_irqrestore(&iwcq->lock, flags);
@@ -1959,6 +1975,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	cq->back_cq = iwcq;
 	spin_lock_init(&iwcq->lock);
 	INIT_LIST_HEAD(&iwcq->resize_list);
+	INIT_LIST_HEAD(&iwcq->cmpl_generated);
 	info.dev = dev;
 	ukinfo->cq_size = max(entries, 4);
 	ukinfo->cq_id = cq_num;
@@ -3044,15 +3061,12 @@ static int irdma_post_send(struct ib_qp *ibqp,
 	unsigned long flags;
 	bool inv_stag;
 	struct irdma_ah *ah;
-	bool reflush = false;
 
 	iwqp = to_iwqp(ibqp);
 	ukqp = &iwqp->sc_qp.qp_uk;
 	dev = &iwqp->iwdev->rf->sc_dev;
 
 	spin_lock_irqsave(&iwqp->lock, flags);
-	if (iwqp->flush_issued && ukqp->sq_flush_complete)
-		reflush = true;
 	while (ib_wr) {
 		memset(&info, 0, sizeof(info));
 		inv_stag = false;
@@ -3202,15 +3216,14 @@ static int irdma_post_send(struct ib_qp *ibqp,
 		ib_wr = ib_wr->next;
 	}
 
-	if (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) {
-		irdma_uk_qp_post_wr(ukqp);
+	if (!iwqp->flush_issued) {
+		if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
+			irdma_uk_qp_post_wr(ukqp);
 		spin_unlock_irqrestore(&iwqp->lock, flags);
-	} else if (reflush) {
-		ukqp->sq_flush_complete = false;
-		spin_unlock_irqrestore(&iwqp->lock, flags);
-		irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_REFLUSH);
 	} else {
 		spin_unlock_irqrestore(&iwqp->lock, flags);
+		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 	}
 
 	if (err)
 		*bad_wr = ib_wr;
@@ -3233,14 +3246,11 @@ static int irdma_post_recv(struct ib_qp *ibqp,
 	struct irdma_post_rq_info post_recv = {};
 	unsigned long flags;
 	int err = 0;
-	bool reflush = false;
 
 	iwqp = to_iwqp(ibqp);
 	ukqp = &iwqp->sc_qp.qp_uk;
 
 	spin_lock_irqsave(&iwqp->lock, flags);
-	if (iwqp->flush_issued && ukqp->rq_flush_complete)
-		reflush = true;
 	while (ib_wr) {
 		post_recv.num_sges = ib_wr->num_sge;
 		post_recv.wr_id = ib_wr->wr_id;
@@ -3256,13 +3266,10 @@ static int irdma_post_recv(struct ib_qp *ibqp,
 	}
 
 out:
-	if (reflush) {
-		ukqp->rq_flush_complete = false;
-		spin_unlock_irqrestore(&iwqp->lock, flags);
-		irdma_flush_wqes(iwqp, IRDMA_FLUSH_RQ | IRDMA_REFLUSH);
-	} else {
-		spin_unlock_irqrestore(&iwqp->lock, flags);
-	}
+	spin_unlock_irqrestore(&iwqp->lock, flags);
+	if (iwqp->flush_issued)
+		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 
 	if (err)
 		*bad_wr = ib_wr;
@@ -3474,6 +3481,11 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc
 	/* check the current CQ for new cqes */
 	while (npolled < num_entries) {
 		ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
+		if (ret == -ENOENT) {
+			ret = irdma_generated_cmpls(iwcq, cur_cqe);
+			if (!ret)
+				irdma_process_cqe(entry + npolled, cur_cqe);
+		}
 		if (!ret) {
 			++npolled;
 			cq_new_cqe = true;
@@ -3555,13 +3567,13 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
 	if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED &&
 	    notify_flags != IB_CQ_SOLICITED)
 		promo_event = true;
 
-	if (!iwcq->armed || promo_event) {
-		iwcq->armed = true;
+	if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
 		iwcq->last_notify = cq_notify;
 		irdma_uk_cq_request_notification(ukcq, cq_notify);
 	}
 
-	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+	    (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
 		ret = 1;
 
 	spin_unlock_irqrestore(&iwcq->lock, flags);
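The central change in the irdma hunks above is replacing the inline "reflush" path with a delayed work item: kernel-mode QPs get a dwork_flush that is (re)armed from the post paths via mod_delayed_work() and drained with cancel_delayed_work_sync() before the QP is freed. The stand-alone sketch below shows that workqueue pattern in isolation; it is a minimal illustration, not the driver's code, and the demo_* names, DEMO_FLUSH_DELAY_MS value, and module scaffolding are invented for the example.

```c
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Illustrative delay; the real driver uses IRDMA_FLUSH_DELAY_MS. */
#define DEMO_FLUSH_DELAY_MS 100

struct demo_qp {
	struct delayed_work dwork_flush;	/* deferred flush generator */
};

static struct workqueue_struct *demo_cleanup_wq;
static struct demo_qp demo_qp;

static void demo_flush_worker(struct work_struct *work)
{
	/* Recover the delayed_work, then the QP that embeds it. */
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_qp *qp = container_of(dwork, struct demo_qp, dwork_flush);

	pr_info("demo: generating flush completions for qp %p\n", qp);
}

static int __init demo_init(void)
{
	demo_cleanup_wq = alloc_workqueue("demo_cleanup", WQ_UNBOUND, 0);
	if (!demo_cleanup_wq)
		return -ENOMEM;

	/* QP create path: bind the worker once. */
	INIT_DELAYED_WORK(&demo_qp.dwork_flush, demo_flush_worker);

	/* Post path on a flushed QP: (re)arm the deferred flush.
	 * mod_delayed_work() resets a pending timer, so a burst of
	 * posts coalesces into one worker run instead of queuing many. */
	mod_delayed_work(demo_cleanup_wq, &demo_qp.dwork_flush,
			 msecs_to_jiffies(DEMO_FLUSH_DELAY_MS));
	return 0;
}

static void __exit demo_exit(void)
{
	/* QP destroy path: ensure no worker still references the QP
	 * before its memory goes away. */
	cancel_delayed_work_sync(&demo_qp.dwork_flush);
	destroy_workqueue(demo_cleanup_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Delayed-work flush pattern sketch");
```

The same destroy-time ordering appears in the irdma_destroy_qp() hunk (cancel before free), and the mod_delayed_work() coalescing is what lets irdma_post_send()/irdma_post_recv() arm the flush on every post without piling up work items. The switch of iwcq->armed to atomic_cmpxchg() in irdma_req_notify_cq() is a related cleanup: the old read-then-set pair becomes a single atomic test-and-set, so only one caller wins the right to rearm the CQ.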