author    | Linus Torvalds <torvalds@linux-foundation.org> | 2020-01-10 08:03:54 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-01-10 08:03:54 +0300
commit    | 5e7c1b75bd2a75005313574dd6e5354907005f93 (patch)
tree      | 3fedaa89539e6c71bb60cb9a8dd952f48cdd7b99
parent    | 4a3033ef6e6bb4c566bd1d556de69b494d76976c (diff)
parent    | 9554de394b7eee01606e64c3806cd43893f3037e (diff)
download  | linux-5e7c1b75bd2a75005313574dd6e5354907005f93.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
"First RDMA subsystem updates for 5.5-rc. A very small set of fixes,
most people seem to still be recovering from December!
Five small driver fixes:
- Fix error flow with MR allocation in bnxt_re
- An errata workaround for bnxt_re
- Misuse of the workqueue API in hfi1
- Protocol error in hfi1
- Regression in 5.5 related to the mmap rework with i40iw"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
i40iw: Remove setting of VMA private data and use rdma_user_mmap_io
IB/hfi1: Adjust flow PSN with the correct resync_psn
IB/hfi1: Don't cancel unused work item
RDMA/bnxt_re: Fix Send Work Entry state check while polling completions
RDMA/bnxt_re: Avoid freeing MR resources if dereg fails
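A note on the two hfi1 work-item commits above: cancel_work_sync() is only
valid on a work_struct that has actually been set up with INIT_WORK();
calling it on a zeroed, never-initialized item is a bug, and the iowait
change below guards against exactly that by testing the work function
pointer before cancelling. A minimal sketch of that guard follows; the
names demo_wait and demo_cancel_work are made up for illustration and are
not the hfi1 identifiers:

#include <linux/workqueue.h>

struct demo_wait {
	struct work_struct ib_work;	/* always set up with INIT_WORK() */
	struct work_struct tid_work;	/* set up only if TID RDMA is supported */
};

static void demo_cancel_work(struct demo_wait *w)
{
	cancel_work_sync(&w->ib_work);
	/* INIT_WORK() sets .func; skip items that were never initialized */
	if (w->tid_work.func)
		cancel_work_sync(&w->tid_work);
}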
-rw-r--r-- | drivers/infiniband/hw/bnxt_re/ib_verbs.c  |  4
-rw-r--r-- | drivers/infiniband/hw/bnxt_re/qplib_fp.c  | 12
-rw-r--r-- | drivers/infiniband/hw/hfi1/iowait.c       |  4
-rw-r--r-- | drivers/infiniband/hw/hfi1/tid_rdma.c     |  9
-rw-r--r-- | drivers/infiniband/hw/i40iw/i40iw_verbs.c | 14
5 files changed, 27 insertions, 16 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 9b6ca15a183c..ad5112a2325f 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3305,8 +3305,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 	int rc;
 
 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-	if (rc)
+	if (rc) {
 		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+		return rc;
+	}
 
 	if (mr->pages) {
 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 958c1ff9c515..4d07d22bfa7b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 			/* Add qp to flush list of the CQ */
 			bnxt_qplib_add_flush_qp(qp);
 		} else {
+			/* Before we complete, do WA 9060 */
+			if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+				      cqe_sq_cons)) {
+				*lib_qp = qp;
+				goto out;
+			}
 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
-				/* Before we complete, do WA 9060 */
-				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
-					      cqe_sq_cons)) {
-					*lib_qp = qp;
-					goto out;
-				}
 				cqe->status = CQ_REQ_STATUS_OK;
 				cqe++;
 				(*budget)--;
diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
index adb4a1ba921b..5836fe7b2817 100644
--- a/drivers/infiniband/hw/hfi1/iowait.c
+++ b/drivers/infiniband/hw/hfi1/iowait.c
@@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
 void iowait_cancel_work(struct iowait *w)
 {
 	cancel_work_sync(&iowait_get_ib_work(w)->iowork);
-	cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+	/* Make sure that the iowork for TID RDMA is used */
+	if (iowait_get_tid_work(w)->iowork.func)
+		cancel_work_sync(&iowait_get_tid_work(w)->iowork);
 }
 
 /**
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index e53f542b60af..8a2e0d9351e9 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
 			 */
 			fpsn = full_flow_psn(flow, flow->flow_state.spsn);
 			req->r_ack_psn = psn;
+			/*
+			 * If resync_psn points to the last flow PSN for a
+			 * segment and the new segment (likely from a new
+			 * request) starts with a new generation number, we
+			 * need to adjust resync_psn accordingly.
+			 */
+			if (flow->flow_state.generation !=
+			    (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT))
+				resync_psn = mask_psn(fpsn - 1);
 			flow->resync_npkts +=
 				delta_psn(mask_psn(resync_psn + 1), fpsn);
 			/*
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 86375947bc67..dbd96d029d8b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	struct i40iw_ucontext *ucontext;
-	u64 db_addr_offset;
-	u64 push_offset;
+	u64 db_addr_offset, push_offset, pfn;
 
 	ucontext = to_ucontext(context);
 	if (ucontext->iwdev->sc_dev.is_pf) {
@@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 
 	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		vma->vm_private_data = ucontext;
 	} else {
 		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
 			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 	}
 
-	if (io_remap_pfn_range(vma, vma->vm_start,
-			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
-			       PAGE_SIZE, vma->vm_page_prot))
-		return -EAGAIN;
+	pfn = vma->vm_pgoff +
+	      (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
+	       PAGE_SHIFT);
 
-	return 0;
+	return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+				 vma->vm_page_prot, NULL);
 }
 
 /**
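A note on the i40iw change above: rdma_user_mmap_io() is the RDMA core's
replacement for open-coded io_remap_pfn_range() in driver mmap verbs.
Besides checking the requested range against the VMA, it records the
mapping so the core can zap it out of userspace if the device is
disassociated (for example on hot-unplug), which a raw
io_remap_pfn_range() mapping does not allow. A minimal sketch of the
pattern follows; demo_dev, to_demo_dev() and demo_mmap() are made-up
stand-ins for a driver's own structures, not i40iw code:

#include <linux/pci.h>
#include <rdma/ib_verbs.h>

struct demo_dev {
	struct ib_device ibdev;
	struct pci_dev *pcidev;
};

static struct demo_dev *to_demo_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct demo_dev, ibdev);
}

static int demo_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct demo_dev *dd = to_demo_dev(context->device);
	unsigned long pfn;

	/* Device registers must not be cached by the CPU */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Page of BAR 0 selected by the user's mmap offset */
	pfn = vma->vm_pgoff +
	      (pci_resource_start(dd->pcidev, 0) >> PAGE_SHIFT);

	/*
	 * rdma_user_mmap_io() validates the size, performs the remap,
	 * and tracks the mapping for zapping on device removal. The
	 * NULL argument means no rdma_user_mmap_entry bookkeeping,
	 * matching the i40iw conversion above.
	 */
	return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
				 vma->vm_page_prot, NULL);
}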