| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-23 01:48:44 +0300 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-23 01:48:44 +0300 |
| commit | b8ba4526832fcccba7f46e55ce9a8b79902bdcec | |
| tree | 5f2fc306e9909c9936efc017bf2c8fde49d8c9bb (drivers/infiniband/hw/qib/qib_rc.c) | |
| parent | 01cde1538e1dff4254e340f606177a870131a01f | |
| parent | 520a07bff6fbb23cac905007d74c67058b189acb | |
| download | linux-b8ba4526832fcccba7f46e55ce9a8b79902bdcec.tar.xz | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull more rdma updates from Doug Ledford:
"Round two of 4.6 merge window patches.
This is a monster pull request. I held off on the hfi1 driver updates
(the hfi1 driver is intimately tied to the qib driver and the new
rdmavt software library that was created to help both of them) in my
first pull request. The hfi1/qib/rdmavt update is probably 90% of
this pull request. The hfi1 driver is being left in staging so that
it can be fixed up with regard to the API that Al and yourself didn't
like. Intel has agreed to do the work, but in the meantime, this
clears out 300+ patches in the backlog queue and brings my tree and
their tree closer to sync.
This also includes about 10 patches to the core and a few to mlx5 to
create an infrastructure for configuring SRIOV ports on IB devices.
That series includes one patch to the net core that we sent to netdev@
and Dave Miller with each of the three revisions to the series. We
didn't get any response to the patch, so we took that as implicit
approval.
Finally, this series includes Intel's new iWARP driver for their x722
cards. It's not nearly the beast that the hfi1 driver is. It also has a
linux-next merge issue, but that has been resolved and it now passes
just fine.
Summary:
- A few minor core fixups needed for the next patch series
- The IB SRIOV series. This has bounced around for several versions.
Of note is the fact that the first patch in this series affects the
net core. It was directed to netdev and DaveM for each iteration
of the series (three versions total). Dave did not object, but did
not respond either. I've taken this as permission to move forward
with the series.
- The new Intel X722 iWARP driver
- A huge set of updates to the Intel hfi1 driver. Of particular
interest here is that we have left the driver in staging since it
still has an API that people object to. Intel is working on a fix,
but getting these patches in now helps keep me sane as the upstream
and Intel's trees were over 300 patches apart"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (362 commits)
IB/ipoib: Allow mcast packets from other VFs
IB/mlx5: Implement callbacks for manipulating VFs
net/mlx5_core: Implement modify HCA vport command
net/mlx5_core: Add VF param when querying vport counter
IB/ipoib: Add ndo operations for configuring VFs
IB/core: Add interfaces to control VF attributes
IB/core: Support accessing SA in virtualized environment
IB/core: Add subnet prefix to port info
IB/mlx5: Fix decision on using MAD_IFC
net/core: Add support for configuring VF GUIDs
IB/{core, ulp} Support above 32 possible device capability flags
IB/core: Replace setting the zero values in ib_uverbs_ex_query_device
net/mlx5_core: Introduce offload arithmetic hardware capabilities
net/mlx5_core: Refactor device capability function
net/mlx5_core: Fix caching ATOMIC endian mode capability
ib_srpt: fix a WARN_ON() message
i40iw: Replace the obsolete crypto hash interface with shash
IB/hfi1: Add SDMA cache eviction algorithm
IB/hfi1: Switch to using the pin query function
IB/hfi1: Specify mm when releasing pages
...
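Two of the series called out above meet in the middle here: "net/core: Add support for configuring VF GUIDs" extends the netdev VF-configuration path, while "IB/core: Add interfaces to control VF attributes" gives ULPs such as IPoIB verbs-level helpers that forward those requests to the HCA driver (mlx5 in this window). The fragment below is a minimal sketch of that plumbing; the example_* names and the exact helper signatures are assumptions for illustration, not copied from the merged headers.

```c
/*
 * Sketch of the VF-configuration plumbing added by the SRIOV series:
 * a netdev ndo_* callback on an IPoIB-style interface forwards the
 * request to an IB core helper, which dispatches to the HCA driver.
 * The example_* structures and the helper signatures are assumptions
 * for illustration; consult the merged headers for the real API.
 */
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <rdma/ib_verbs.h>

struct example_ipoib_priv {		/* hypothetical stand-in for ipoib_dev_priv */
	struct ib_device *ca;		/* underlying HCA */
	u8 port;			/* IB port this netdev is bound to */
};

static int example_set_vf_link_state(struct net_device *dev, int vf,
				     int link_state)
{
	struct example_ipoib_priv *priv = netdev_priv(dev);

	/* New IB core helper; expected to return an error such as
	 * -EOPNOTSUPP when the HCA driver has no matching callback. */
	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
}

static int example_get_vf_config(struct net_device *dev, int vf,
				 struct ifla_vf_info *ivf)
{
	struct example_ipoib_priv *priv = netdev_priv(dev);

	return ib_get_vf_config(priv->ca, vf, priv->port, ivf);
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_set_vf_link_state	= example_set_vf_link_state,
	.ndo_get_vf_config	= example_get_vf_config,
	/* GUID assignment flows through the new netlink attributes and a
	 * matching ndo/verbs pair in the same spirit. */
};
```

IPoIB's actual implementation ("IB/ipoib: Add ndo operations for configuring VFs") follows this shape, with mlx5 providing the device-side callbacks ("IB/mlx5: Implement callbacks for manipulating VFs").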
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_rc.c')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | drivers/infiniband/hw/qib/qib_rc.c | 409 |
1 file changed, 198 insertions, 211 deletions
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index e6b7556d5221..9088e26d3ac8 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -40,7 +40,7 @@ static void rc_timeout(unsigned long arg); -static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, +static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 psn, u32 pmtu) { u32 len; @@ -54,9 +54,9 @@ static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, return wqe->length - len; } -static void start_timer(struct qib_qp *qp) +static void start_timer(struct rvt_qp *qp) { - qp->s_flags |= QIB_S_TIMER; + qp->s_flags |= RVT_S_TIMER; qp->s_timer.function = rc_timeout; /* 4.096 usec. * (1 << qp->timeout) */ qp->s_timer.expires = jiffies + qp->timeout_jiffies; @@ -74,17 +74,17 @@ static void start_timer(struct qib_qp *qp) * Note that we are in the responder's side of the QP context. * Note the QP s_lock must be held. */ -static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, +static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, struct qib_other_headers *ohdr, u32 pmtu) { - struct qib_ack_entry *e; + struct rvt_ack_entry *e; u32 hwords; u32 len; u32 bth0; u32 bth2; /* Don't send an ACK if we aren't supposed to. */ - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) goto bail; /* header size in 32-bit words LRH+BTH = (8+12)/4. */ @@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, case OP(RDMA_READ_RESPONSE_ONLY): e = &qp->s_ack_queue[qp->s_tail_ack_queue]; if (e->rdma_sge.mr) { - qib_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } /* FALLTHROUGH */ @@ -112,7 +112,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. */ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { - if (qp->s_flags & QIB_S_ACK_PENDING) + if (qp->s_flags & RVT_S_ACK_PENDING) goto normal; goto bail; } @@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, /* Copy SGE state in case we need to resend */ qp->s_rdma_mr = e->rdma_sge.mr; if (qp->s_rdma_mr) - qib_get_mr(qp->s_rdma_mr); + rvt_get_mr(qp->s_rdma_mr); qp->s_ack_rdma_sge.sge = e->rdma_sge; qp->s_ack_rdma_sge.num_sge = 1; qp->s_cur_sge = &qp->s_ack_rdma_sge; @@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; if (qp->s_rdma_mr) - qib_get_mr(qp->s_rdma_mr); + rvt_get_mr(qp->s_rdma_mr); len = qp->s_ack_rdma_sge.sge.sge_length; if (len > pmtu) len = pmtu; @@ -196,7 +196,7 @@ normal: * (see above). */ qp->s_ack_state = OP(SEND_ONLY); - qp->s_flags &= ~QIB_S_ACK_PENDING; + qp->s_flags &= ~RVT_S_ACK_PENDING; qp->s_cur_sge = NULL; if (qp->s_nak_state) ohdr->u.aeth = @@ -218,7 +218,7 @@ normal: bail: qp->s_ack_state = OP(ACKNOWLEDGE); - qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING); + qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING); return 0; } @@ -226,63 +226,60 @@ bail: * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC) * @qp: a pointer to the QP * + * Assumes the s_lock is held. + * * Return 1 if constructed; otherwise, return 0. 
*/ -int qib_make_rc_req(struct qib_qp *qp) +int qib_make_rc_req(struct rvt_qp *qp) { + struct qib_qp_priv *priv = qp->priv; struct qib_ibdev *dev = to_idev(qp->ibqp.device); struct qib_other_headers *ohdr; - struct qib_sge_state *ss; - struct qib_swqe *wqe; + struct rvt_sge_state *ss; + struct rvt_swqe *wqe; u32 hwords; u32 len; u32 bth0; u32 bth2; u32 pmtu = qp->pmtu; char newreq; - unsigned long flags; int ret = 0; int delta; - ohdr = &qp->s_hdr->u.oth; + ohdr = &priv->s_hdr->u.oth; if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) - ohdr = &qp->s_hdr->u.l.oth; - - /* - * The lock is needed to synchronize between the sending tasklet, - * the receive interrupt handler, and timeout resends. - */ - spin_lock_irqsave(&qp->s_lock, flags); + ohdr = &priv->s_hdr->u.l.oth; /* Sending responses has higher priority over sending requests. */ - if ((qp->s_flags & QIB_S_RESP_PENDING) && + if ((qp->s_flags & RVT_S_RESP_PENDING) && qib_make_rc_ack(dev, qp, ohdr, pmtu)) goto done; - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { - if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { + if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ - if (qp->s_last == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ - if (atomic_read(&qp->s_dma_busy)) { - qp->s_flags |= QIB_S_WAIT_DMA; + if (atomic_read(&priv->s_dma_busy)) { + qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } - wqe = get_swqe_ptr(qp, qp->s_last); + wqe = rvt_get_swqe_ptr(qp, qp->s_last); qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ? IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); /* will get called again */ goto done; } - if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK)) + if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK)) goto bail; if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) { if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { - qp->s_flags |= QIB_S_WAIT_PSN; + qp->s_flags |= RVT_S_WAIT_PSN; goto bail; } qp->s_sending_psn = qp->s_psn; @@ -294,10 +291,10 @@ int qib_make_rc_req(struct qib_qp *qp) bth0 = 0; /* Send a request. */ - wqe = get_swqe_ptr(qp, qp->s_cur); + wqe = rvt_get_swqe_ptr(qp, qp->s_cur); switch (qp->s_state) { default: - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) goto bail; /* * Resend an old request or start a new one. @@ -317,11 +314,11 @@ int qib_make_rc_req(struct qib_qp *qp) */ if ((wqe->wr.send_flags & IB_SEND_FENCE) && qp->s_num_rd_atomic) { - qp->s_flags |= QIB_S_WAIT_FENCE; + qp->s_flags |= RVT_S_WAIT_FENCE; goto bail; } - wqe->psn = qp->s_next_psn; newreq = 1; + qp->s_psn = wqe->psn; } /* * Note that we have to be careful not to modify the @@ -335,14 +332,12 @@ int qib_make_rc_req(struct qib_qp *qp) case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: /* If no credit, return. 
*/ - if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { - qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; + qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; goto bail; } - wqe->lpsn = wqe->psn; if (len > pmtu) { - wqe->lpsn += (len - 1) / pmtu; qp->s_state = OP(SEND_FIRST); len = pmtu; break; @@ -363,14 +358,14 @@ int qib_make_rc_req(struct qib_qp *qp) break; case IB_WR_RDMA_WRITE: - if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) + if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; /* FALLTHROUGH */ case IB_WR_RDMA_WRITE_WITH_IMM: /* If no credit, return. */ - if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { - qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; + qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; goto bail; } @@ -380,9 +375,7 @@ int qib_make_rc_req(struct qib_qp *qp) cpu_to_be32(wqe->rdma_wr.rkey); ohdr->u.rc.reth.length = cpu_to_be32(len); hwords += sizeof(struct ib_reth) / sizeof(u32); - wqe->lpsn = wqe->psn; if (len > pmtu) { - wqe->lpsn += (len - 1) / pmtu; qp->s_state = OP(RDMA_WRITE_FIRST); len = pmtu; break; @@ -411,19 +404,12 @@ int qib_make_rc_req(struct qib_qp *qp) if (newreq) { if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { - qp->s_flags |= QIB_S_WAIT_RDMAR; + qp->s_flags |= RVT_S_WAIT_RDMAR; goto bail; } qp->s_num_rd_atomic++; - if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; - /* - * Adjust s_next_psn to count the - * expected number of responses. - */ - if (len > pmtu) - qp->s_next_psn += (len - 1) / pmtu; - wqe->lpsn = qp->s_next_psn++; } ohdr->u.rc.reth.vaddr = @@ -449,13 +435,12 @@ int qib_make_rc_req(struct qib_qp *qp) if (newreq) { if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { - qp->s_flags |= QIB_S_WAIT_RDMAR; + qp->s_flags |= RVT_S_WAIT_RDMAR; goto bail; } qp->s_num_rd_atomic++; - if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; - wqe->lpsn = wqe->psn; } if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { qp->s_state = OP(COMPARE_SWAP); @@ -498,11 +483,8 @@ int qib_make_rc_req(struct qib_qp *qp) } if (wqe->wr.opcode == IB_WR_RDMA_READ) qp->s_psn = wqe->lpsn + 1; - else { + else qp->s_psn++; - if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; - } break; case OP(RDMA_READ_RESPONSE_FIRST): @@ -522,8 +504,6 @@ int qib_make_rc_req(struct qib_qp *qp) /* FALLTHROUGH */ case OP(SEND_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; - if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { @@ -563,8 +543,6 @@ int qib_make_rc_req(struct qib_qp *qp) /* FALLTHROUGH */ case OP(RDMA_WRITE_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; - if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { @@ -618,9 +596,9 @@ int qib_make_rc_req(struct qib_qp *qp) delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8; if (delta && delta % QIB_PSN_CREDIT == 0) bth2 |= IB_BTH_REQ_ACK; - if (qp->s_flags & QIB_S_SEND_ONE) { - qp->s_flags &= ~QIB_S_SEND_ONE; - qp->s_flags |= QIB_S_WAIT_ACK; + if (qp->s_flags & RVT_S_SEND_ONE) { + qp->s_flags &= ~RVT_S_SEND_ONE; + qp->s_flags |= RVT_S_WAIT_ACK; bth2 |= IB_BTH_REQ_ACK; } qp->s_len -= len; @@ -629,13 +607,9 @@ int qib_make_rc_req(struct qib_qp *qp) qp->s_cur_size = len; qib_make_ruc_header(qp, ohdr, bth0 | 
(qp->s_state << 24), bth2); done: - ret = 1; - goto unlock; - + return 1; bail: - qp->s_flags &= ~QIB_S_BUSY; -unlock: - spin_unlock_irqrestore(&qp->s_lock, flags); + qp->s_flags &= ~RVT_S_BUSY; return ret; } @@ -647,7 +621,7 @@ unlock: * Note that RDMA reads and atomics are handled in the * send side QP state and tasklet. */ -void qib_send_rc_ack(struct qib_qp *qp) +void qib_send_rc_ack(struct rvt_qp *qp) { struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); @@ -665,11 +639,11 @@ void qib_send_rc_ack(struct qib_qp *qp) spin_lock_irqsave(&qp->s_lock, flags); - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) goto unlock; /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ - if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt) + if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt) goto queue_ack; /* Construct the header with s_lock held so APM doesn't change it. */ @@ -758,9 +732,9 @@ void qib_send_rc_ack(struct qib_qp *qp) goto done; queue_ack: - if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { - ibp->n_rc_qacks++; - qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING; + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { + this_cpu_inc(*ibp->rvp.rc_qacks); + qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING; qp->s_nak_state = qp->r_nak_state; qp->s_ack_psn = qp->r_ack_psn; @@ -782,10 +756,10 @@ done: * for the given QP. * Called at interrupt level with the QP s_lock held. */ -static void reset_psn(struct qib_qp *qp, u32 psn) +static void reset_psn(struct rvt_qp *qp, u32 psn) { u32 n = qp->s_acked; - struct qib_swqe *wqe = get_swqe_ptr(qp, n); + struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n); u32 opcode; qp->s_cur = n; @@ -808,7 +782,7 @@ static void reset_psn(struct qib_qp *qp, u32 psn) n = 0; if (n == qp->s_tail) break; - wqe = get_swqe_ptr(qp, n); + wqe = rvt_get_swqe_ptr(qp, n); diff = qib_cmp24(psn, wqe->psn); if (diff < 0) break; @@ -854,22 +828,22 @@ static void reset_psn(struct qib_qp *qp, u32 psn) done: qp->s_psn = psn; /* - * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer + * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer * asynchronously before the send tasklet can get scheduled. * Doing it in qib_make_rc_req() is too late. */ if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) && (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) - qp->s_flags |= QIB_S_WAIT_PSN; + qp->s_flags |= RVT_S_WAIT_PSN; } /* * Back up requester to resend the last un-ACKed request. * The QP r_lock and s_lock should be held and interrupts disabled. 
*/ -static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) +static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait) { - struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); + struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked); struct qib_ibport *ibp; if (qp->s_retry == 0) { @@ -878,7 +852,7 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) qp->s_retry = qp->s_retry_cnt; } else if (qp->s_last == qp->s_acked) { qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); - qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); + rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); return; } else /* XXX need to handle delayed completion */ return; @@ -887,15 +861,15 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) ibp = to_iport(qp->ibqp.device, qp->port_num); if (wqe->wr.opcode == IB_WR_RDMA_READ) - ibp->n_rc_resends++; + ibp->rvp.n_rc_resends++; else - ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; + ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; - qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | - QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN | - QIB_S_WAIT_ACK); + qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | + RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN | + RVT_S_WAIT_ACK); if (wait) - qp->s_flags |= QIB_S_SEND_ONE; + qp->s_flags |= RVT_S_SEND_ONE; reset_psn(qp, psn); } @@ -904,16 +878,16 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) */ static void rc_timeout(unsigned long arg) { - struct qib_qp *qp = (struct qib_qp *)arg; + struct rvt_qp *qp = (struct rvt_qp *)arg; struct qib_ibport *ibp; unsigned long flags; spin_lock_irqsave(&qp->r_lock, flags); spin_lock(&qp->s_lock); - if (qp->s_flags & QIB_S_TIMER) { + if (qp->s_flags & RVT_S_TIMER) { ibp = to_iport(qp->ibqp.device, qp->port_num); - ibp->n_rc_timeouts++; - qp->s_flags &= ~QIB_S_TIMER; + ibp->rvp.n_rc_timeouts++; + qp->s_flags &= ~RVT_S_TIMER; del_timer(&qp->s_timer); qib_restart_rc(qp, qp->s_last_psn + 1, 1); qib_schedule_send(qp); @@ -927,12 +901,12 @@ static void rc_timeout(unsigned long arg) */ void qib_rc_rnr_retry(unsigned long arg) { - struct qib_qp *qp = (struct qib_qp *)arg; + struct rvt_qp *qp = (struct rvt_qp *)arg; unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); - if (qp->s_flags & QIB_S_WAIT_RNR) { - qp->s_flags &= ~QIB_S_WAIT_RNR; + if (qp->s_flags & RVT_S_WAIT_RNR) { + qp->s_flags &= ~RVT_S_WAIT_RNR; del_timer(&qp->s_timer); qib_schedule_send(qp); } @@ -943,14 +917,14 @@ void qib_rc_rnr_retry(unsigned long arg) * Set qp->s_sending_psn to the next PSN after the given one. * This would be psn+1 except when RDMA reads are present. */ -static void reset_sending_psn(struct qib_qp *qp, u32 psn) +static void reset_sending_psn(struct rvt_qp *qp, u32 psn) { - struct qib_swqe *wqe; + struct rvt_swqe *wqe; u32 n = qp->s_last; /* Find the work request corresponding to the given PSN. */ for (;;) { - wqe = get_swqe_ptr(qp, n); + wqe = rvt_get_swqe_ptr(qp, n); if (qib_cmp24(psn, wqe->lpsn) <= 0) { if (wqe->wr.opcode == IB_WR_RDMA_READ) qp->s_sending_psn = wqe->lpsn + 1; @@ -968,16 +942,16 @@ static void reset_sending_psn(struct qib_qp *qp, u32 psn) /* * This should be called with the QP s_lock held and interrupts disabled. 
*/ -void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) +void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr) { struct qib_other_headers *ohdr; - struct qib_swqe *wqe; + struct rvt_swqe *wqe; struct ib_wc wc; unsigned i; u32 opcode; u32 psn; - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) return; /* Find out where the BTH is */ @@ -1002,22 +976,30 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) * there are still requests that haven't been acked. */ if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && - !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) && - (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) && + (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) start_timer(qp); while (qp->s_last != qp->s_acked) { - wqe = get_swqe_ptr(qp, qp->s_last); + u32 s_last; + + wqe = rvt_get_swqe_ptr(qp, qp->s_last); if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 && qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) break; + s_last = qp->s_last; + if (++s_last >= qp->s_size) + s_last = 0; + qp->s_last = s_last; + /* see post_send() */ + barrier(); for (i = 0; i < wqe->wr.num_sge; i++) { - struct qib_sge *sge = &wqe->sg_list[i]; + struct rvt_sge *sge = &wqe->sg_list[i]; - qib_put_mr(sge->mr); + rvt_put_mr(sge->mr); } /* Post a send completion queue entry if requested. */ - if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || + if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; @@ -1025,25 +1007,23 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; wc.byte_len = wqe->length; wc.qp = &qp->ibqp; - qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0); } - if (++qp->s_last >= qp->s_size) - qp->s_last = 0; } /* * If we were waiting for sends to complete before resending, * and they are now complete, restart sending. */ - if (qp->s_flags & QIB_S_WAIT_PSN && + if (qp->s_flags & RVT_S_WAIT_PSN && qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { - qp->s_flags &= ~QIB_S_WAIT_PSN; + qp->s_flags &= ~RVT_S_WAIT_PSN; qp->s_sending_psn = qp->s_psn; qp->s_sending_hpsn = qp->s_psn - 1; qib_schedule_send(qp); } } -static inline void update_last_psn(struct qib_qp *qp, u32 psn) +static inline void update_last_psn(struct rvt_qp *qp, u32 psn) { qp->s_last_psn = psn; } @@ -1053,8 +1033,8 @@ static inline void update_last_psn(struct qib_qp *qp, u32 psn) * This is similar to qib_send_complete but has to check to be sure * that the SGEs are not being referenced if the SWQE is being resent. 
*/ -static struct qib_swqe *do_rc_completion(struct qib_qp *qp, - struct qib_swqe *wqe, +static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, + struct rvt_swqe *wqe, struct qib_ibport *ibp) { struct ib_wc wc; @@ -1067,13 +1047,21 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, */ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 || qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { + u32 s_last; + for (i = 0; i < wqe->wr.num_sge; i++) { - struct qib_sge *sge = &wqe->sg_list[i]; + struct rvt_sge *sge = &wqe->sg_list[i]; - qib_put_mr(sge->mr); + rvt_put_mr(sge->mr); } + s_last = qp->s_last; + if (++s_last >= qp->s_size) + s_last = 0; + qp->s_last = s_last; + /* see post_send() */ + barrier(); /* Post a send completion queue entry if requested. */ - if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || + if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; @@ -1081,12 +1069,10 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; wc.byte_len = wqe->length; wc.qp = &qp->ibqp; - qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0); } - if (++qp->s_last >= qp->s_size) - qp->s_last = 0; } else - ibp->n_rc_delayed_comp++; + this_cpu_inc(*ibp->rvp.rc_delayed_comp); qp->s_retry = qp->s_retry_cnt; update_last_psn(qp, wqe->lpsn); @@ -1100,7 +1086,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; qp->s_acked = qp->s_cur; - wqe = get_swqe_ptr(qp, qp->s_cur); + wqe = rvt_get_swqe_ptr(qp, qp->s_cur); if (qp->s_acked != qp->s_tail) { qp->s_state = OP(SEND_LAST); qp->s_psn = wqe->psn; @@ -1110,7 +1096,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, qp->s_acked = 0; if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) qp->s_draining = 0; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); } return wqe; } @@ -1126,19 +1112,19 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, * Called at interrupt level with the QP s_lock held. * Returns 1 if OK, 0 if current operation should be aborted (NAK). */ -static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, +static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val, struct qib_ctxtdata *rcd) { struct qib_ibport *ibp; enum ib_wc_status status; - struct qib_swqe *wqe; + struct rvt_swqe *wqe; int ret = 0; u32 ack_psn; int diff; /* Remove QP from retry timer */ - if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { - qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); + if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { + qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); del_timer(&qp->s_timer); } @@ -1151,7 +1137,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, ack_psn = psn; if (aeth >> 29) ack_psn--; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); ibp = to_iport(qp->ibqp.device, qp->port_num); /* @@ -1186,11 +1172,11 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) { /* Retry this request. 
*/ - if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) { - qp->r_flags |= QIB_R_RDMAR_SEQ; + if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) { + qp->r_flags |= RVT_R_RDMAR_SEQ; qib_restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_SEND; + qp->r_flags |= RVT_R_RSP_SEND; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); @@ -1213,14 +1199,14 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { qp->s_num_rd_atomic--; /* Restart sending task if fence is complete */ - if ((qp->s_flags & QIB_S_WAIT_FENCE) && + if ((qp->s_flags & RVT_S_WAIT_FENCE) && !qp->s_num_rd_atomic) { - qp->s_flags &= ~(QIB_S_WAIT_FENCE | - QIB_S_WAIT_ACK); + qp->s_flags &= ~(RVT_S_WAIT_FENCE | + RVT_S_WAIT_ACK); qib_schedule_send(qp); - } else if (qp->s_flags & QIB_S_WAIT_RDMAR) { - qp->s_flags &= ~(QIB_S_WAIT_RDMAR | - QIB_S_WAIT_ACK); + } else if (qp->s_flags & RVT_S_WAIT_RDMAR) { + qp->s_flags &= ~(RVT_S_WAIT_RDMAR | + RVT_S_WAIT_ACK); qib_schedule_send(qp); } } @@ -1231,7 +1217,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, switch (aeth >> 29) { case 0: /* ACK */ - ibp->n_rc_acks++; + this_cpu_inc(*ibp->rvp.rc_acks); if (qp->s_acked != qp->s_tail) { /* * We are expecting more ACKs so @@ -1248,8 +1234,8 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, qp->s_state = OP(SEND_LAST); qp->s_psn = psn + 1; } - if (qp->s_flags & QIB_S_WAIT_ACK) { - qp->s_flags &= ~QIB_S_WAIT_ACK; + if (qp->s_flags & RVT_S_WAIT_ACK) { + qp->s_flags &= ~RVT_S_WAIT_ACK; qib_schedule_send(qp); } qib_get_credit(qp, aeth); @@ -1260,10 +1246,10 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, goto bail; case 1: /* RNR NAK */ - ibp->n_rnr_naks++; + ibp->rvp.n_rnr_naks++; if (qp->s_acked == qp->s_tail) goto bail; - if (qp->s_flags & QIB_S_WAIT_RNR) + if (qp->s_flags & RVT_S_WAIT_RNR) goto bail; if (qp->s_rnr_retry == 0) { status = IB_WC_RNR_RETRY_EXC_ERR; @@ -1275,12 +1261,12 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, /* The last valid PSN is the previous PSN. */ update_last_psn(qp, psn - 1); - ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; + ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; reset_psn(qp, psn); - qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK); - qp->s_flags |= QIB_S_WAIT_RNR; + qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK); + qp->s_flags |= RVT_S_WAIT_RNR; qp->s_timer.function = qib_rc_rnr_retry; qp->s_timer.expires = jiffies + usecs_to_jiffies( ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) & @@ -1296,7 +1282,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, switch ((aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK) { case 0: /* PSN sequence error */ - ibp->n_seq_naks++; + ibp->rvp.n_seq_naks++; /* * Back up to the responder's expected PSN. 
* Note that we might get a NAK in the middle of an @@ -1309,21 +1295,21 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, case 1: /* Invalid Request */ status = IB_WC_REM_INV_REQ_ERR; - ibp->n_other_naks++; + ibp->rvp.n_other_naks++; goto class_b; case 2: /* Remote Access Error */ status = IB_WC_REM_ACCESS_ERR; - ibp->n_other_naks++; + ibp->rvp.n_other_naks++; goto class_b; case 3: /* Remote Operation Error */ status = IB_WC_REM_OP_ERR; - ibp->n_other_naks++; + ibp->rvp.n_other_naks++; class_b: if (qp->s_last == qp->s_acked) { qib_send_complete(qp, wqe, status); - qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); + rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); } break; @@ -1349,18 +1335,18 @@ bail: * We have seen an out of sequence RDMA read middle or last packet. * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE. */ -static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, +static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn, struct qib_ctxtdata *rcd) { - struct qib_swqe *wqe; + struct rvt_swqe *wqe; /* Remove QP from retry timer */ - if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { - qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); + if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { + qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); del_timer(&qp->s_timer); } - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); while (qib_cmp24(psn, wqe->lpsn) > 0) { if (wqe->wr.opcode == IB_WR_RDMA_READ || @@ -1370,11 +1356,11 @@ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, wqe = do_rc_completion(qp, wqe, ibp); } - ibp->n_rdma_seq++; - qp->r_flags |= QIB_R_RDMAR_SEQ; + ibp->rvp.n_rdma_seq++; + qp->r_flags |= RVT_R_RDMAR_SEQ; qib_restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_SEND; + qp->r_flags |= RVT_R_RSP_SEND; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } @@ -1399,12 +1385,12 @@ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, static void qib_rc_rcv_resp(struct qib_ibport *ibp, struct qib_other_headers *ohdr, void *data, u32 tlen, - struct qib_qp *qp, + struct rvt_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct qib_ctxtdata *rcd) { - struct qib_swqe *wqe; + struct rvt_swqe *wqe; struct qib_pportdata *ppd = ppd_from_ibp(ibp); enum ib_wc_status status; unsigned long flags; @@ -1425,7 +1411,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, * If send tasklet not running attempt to progress * SDMA queue. */ - if (!(qp->s_flags & QIB_S_BUSY)) { + if (!(qp->s_flags & RVT_S_BUSY)) { /* Acquire SDMA Lock */ spin_lock_irqsave(&ppd->sdma_lock, flags); /* Invoke sdma make progress */ @@ -1437,11 +1423,12 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, } spin_lock_irqsave(&qp->s_lock, flags); - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) goto ack_done; /* Ignore invalid responses. */ - if (qib_cmp24(psn, qp->s_next_psn) >= 0) + smp_read_barrier_depends(); /* see post_one_send */ + if (qib_cmp24(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0) goto ack_done; /* Ignore duplicate responses. */ @@ -1460,15 +1447,15 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, * Skip everything other than the PSN we expect, if we are waiting * for a reply to a restarted RDMA read or atomic op. 
*/ - if (qp->r_flags & QIB_R_RDMAR_SEQ) { + if (qp->r_flags & RVT_R_RDMAR_SEQ) { if (qib_cmp24(psn, qp->s_last_psn + 1) != 0) goto ack_done; - qp->r_flags &= ~QIB_R_RDMAR_SEQ; + qp->r_flags &= ~RVT_R_RDMAR_SEQ; } if (unlikely(qp->s_acked == qp->s_tail)) goto ack_done; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); status = IB_WC_SUCCESS; switch (opcode) { @@ -1487,7 +1474,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, opcode != OP(RDMA_READ_RESPONSE_FIRST)) goto ack_done; hdrsize += 4; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) goto ack_op_err; /* @@ -1515,10 +1502,10 @@ read_middle: * We got a response so update the timeout. * 4.096 usec. * (1 << qp->timeout) */ - qp->s_flags |= QIB_S_TIMER; + qp->s_flags |= RVT_S_TIMER; mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); - if (qp->s_flags & QIB_S_WAIT_ACK) { - qp->s_flags &= ~QIB_S_WAIT_ACK; + if (qp->s_flags & RVT_S_WAIT_ACK) { + qp->s_flags &= ~RVT_S_WAIT_ACK; qib_schedule_send(qp); } @@ -1553,7 +1540,7 @@ read_middle: * have to be careful to copy the data to the right * location. */ - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, wqe, psn, pmtu); goto read_last; @@ -1598,7 +1585,7 @@ ack_len_err: ack_err: if (qp->s_last == qp->s_acked) { qib_send_complete(qp, wqe, status); - qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); + rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); } ack_done: spin_unlock_irqrestore(&qp->s_lock, flags); @@ -1623,14 +1610,14 @@ bail: */ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, void *data, - struct qib_qp *qp, + struct rvt_qp *qp, u32 opcode, u32 psn, int diff, struct qib_ctxtdata *rcd) { struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); - struct qib_ack_entry *e; + struct rvt_ack_entry *e; unsigned long flags; u8 i, prev; int old_req; @@ -1642,7 +1629,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, * Don't queue the NAK if we already sent one. */ if (!qp->r_nak_state) { - ibp->n_rc_seqnak++; + ibp->rvp.n_rc_seqnak++; qp->r_nak_state = IB_NAK_PSN_ERROR; /* Use the expected PSN. */ qp->r_ack_psn = qp->r_psn; @@ -1652,7 +1639,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, * Otherwise, we end up propagating congestion. */ if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_NAK; + qp->r_flags |= RVT_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } @@ -1678,7 +1665,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, */ e = NULL; old_req = 1; - ibp->n_rc_dupreq++; + ibp->rvp.n_rc_dupreq++; spin_lock_irqsave(&qp->s_lock, flags); @@ -1732,7 +1719,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, if (unlikely(offset + len != e->rdma_sge.sge_length)) goto unlock_done; if (e->rdma_sge.mr) { - qib_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } if (len != 0) { @@ -1740,7 +1727,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, u64 vaddr = be64_to_cpu(reth->vaddr); int ok; - ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, + ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto unlock_done; @@ -1791,7 +1778,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, * which doesn't accept a RDMA read response or atomic * response as an ACK for earlier SENDs or RDMA writes. 
*/ - if (!(qp->s_flags & QIB_S_RESP_PENDING)) { + if (!(qp->s_flags & RVT_S_RESP_PENDING)) { spin_unlock_irqrestore(&qp->s_lock, flags); qp->r_nak_state = 0; qp->r_ack_psn = qp->s_ack_queue[i].psn - 1; @@ -1805,7 +1792,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, break; } qp->s_ack_state = OP(ACKNOWLEDGE); - qp->s_flags |= QIB_S_RESP_PENDING; + qp->s_flags |= RVT_S_RESP_PENDING; qp->r_nak_state = 0; qib_schedule_send(qp); @@ -1818,13 +1805,13 @@ send_ack: return 0; } -void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err) +void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err) { unsigned long flags; int lastwqe; spin_lock_irqsave(&qp->s_lock, flags); - lastwqe = qib_error_qp(qp, err); + lastwqe = rvt_error_qp(qp, err); spin_unlock_irqrestore(&qp->s_lock, flags); if (lastwqe) { @@ -1837,7 +1824,7 @@ void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err) } } -static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n) +static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n) { unsigned next; @@ -1862,7 +1849,7 @@ static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n) * Called at interrupt level. */ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, - int has_grh, void *data, u32 tlen, struct qib_qp *qp) + int has_grh, void *data, u32 tlen, struct rvt_qp *qp) { struct qib_ibport *ibp = &rcd->ppd->ibport_data; struct qib_other_headers *ohdr; @@ -1948,8 +1935,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, break; } - if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { - qp->r_flags |= QIB_R_COMM_EST; + if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) { + qp->r_flags |= RVT_R_COMM_EST; if (qp->ibqp.event_handler) { struct ib_event ev; @@ -2026,9 +2013,9 @@ send_last: if (unlikely(wc.byte_len > qp->r_len)) goto nack_inv; qib_copy_sge(&qp->r_sge, data, tlen, 1); - qib_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); qp->r_msn++; - if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) + if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) break; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; @@ -2047,7 +2034,7 @@ send_last: wc.dlid_path_bits = 0; wc.port_num = 0; /* Signal completion event if the solicited bit is set. */ - qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED)) != 0); break; @@ -2069,7 +2056,7 @@ send_last: int ok; /* Check rkey & NAK */ - ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, + ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, rkey, IB_ACCESS_REMOTE_WRITE); if (unlikely(!ok)) goto nack_acc; @@ -2096,7 +2083,7 @@ send_last: goto send_last; case OP(RDMA_READ_REQUEST): { - struct qib_ack_entry *e; + struct rvt_ack_entry *e; u32 len; u8 next; @@ -2114,7 +2101,7 @@ send_last: } e = &qp->s_ack_queue[qp->r_head_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { - qib_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } reth = &ohdr->u.rc.reth; @@ -2125,7 +2112,7 @@ send_last: int ok; /* Check rkey & NAK */ - ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, + ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto nack_acc_unlck; @@ -2157,7 +2144,7 @@ send_last: qp->r_head_ack_queue = next; /* Schedule the send tasklet. 
*/ - qp->s_flags |= QIB_S_RESP_PENDING; + qp->s_flags |= RVT_S_RESP_PENDING; qib_schedule_send(qp); goto sunlock; @@ -2166,7 +2153,7 @@ send_last: case OP(COMPARE_SWAP): case OP(FETCH_ADD): { struct ib_atomic_eth *ateth; - struct qib_ack_entry *e; + struct rvt_ack_entry *e; u64 vaddr; atomic64_t *maddr; u64 sdata; @@ -2186,7 +2173,7 @@ send_last: } e = &qp->s_ack_queue[qp->r_head_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { - qib_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } ateth = &ohdr->u.atomic_eth; @@ -2196,7 +2183,7 @@ send_last: goto nack_inv_unlck; rkey = be32_to_cpu(ateth->rkey); /* Check rkey & NAK */ - if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), + if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), vaddr, rkey, IB_ACCESS_REMOTE_ATOMIC))) goto nack_acc_unlck; @@ -2208,7 +2195,7 @@ send_last: (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, be64_to_cpu(ateth->compare_data), sdata); - qib_put_mr(qp->r_sge.sge.mr); + rvt_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; e->opcode = opcode; e->sent = 0; @@ -2221,7 +2208,7 @@ send_last: qp->r_head_ack_queue = next; /* Schedule the send tasklet. */ - qp->s_flags |= QIB_S_RESP_PENDING; + qp->s_flags |= RVT_S_RESP_PENDING; qib_schedule_send(qp); goto sunlock; @@ -2245,7 +2232,7 @@ rnr_nak: qp->r_ack_psn = qp->r_psn; /* Queue RNR NAK for later */ if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_NAK; + qp->r_flags |= RVT_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } @@ -2257,7 +2244,7 @@ nack_op_err: qp->r_ack_psn = qp->r_psn; /* Queue NAK for later */ if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_NAK; + qp->r_flags |= RVT_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } @@ -2271,7 +2258,7 @@ nack_inv: qp->r_ack_psn = qp->r_psn; /* Queue NAK for later */ if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_NAK; + qp->r_flags |= RVT_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } |
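Taken together, the qib_rc.c hunks above are the mechanical side of the rdmavt conversion described in the pull message: qib's private types and flags (qib_qp, qib_swqe, QIB_S_*) become the shared rvt_qp, rvt_swqe and RVT_S_* equivalents, MR and CQ handling moves to rvt_put_mr()/rvt_cq_enter(), per-port counters such as n_rc_acks become per-CPU via this_cpu_inc(), and qib_make_rc_req() stops taking s_lock itself because the rdmavt send engine now calls it with the lock already held. The fragment below is a simplified illustration of that last convention; the demo_* layouts are invented stand-ins, not the real rdmavt or qib structures.

```c
/*
 * Simplified illustration of the post-rdmavt locking convention seen
 * in the diff: the shared QP core (stand-in for struct rvt_qp) owns
 * s_lock, the driver keeps its hardware-specific send state behind
 * qp->priv (stand-in for struct qib_qp_priv), and the request builder
 * assumes its caller already holds the lock.  Layouts are invented
 * for illustration only.
 */
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/bitops.h>

#define DEMO_S_WAIT_DMA	BIT(0)		/* stands in for RVT_S_WAIT_DMA */

struct demo_rvt_qp {
	spinlock_t s_lock;		/* protects the send-side state */
	u32 s_flags;			/* RVT_S_*-style flag word */
	void *priv;			/* driver-private part, set at QP init */
};

struct demo_qib_qp_priv {
	atomic_t s_dma_busy;		/* in-flight descriptors, still driver-specific */
};

/* Caller (the send engine) holds qp->s_lock, matching the new
 * "Assumes the s_lock is held" comment added to qib_make_rc_req(). */
static int demo_make_rc_req(struct demo_rvt_qp *qp)
{
	struct demo_qib_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);

	if (atomic_read(&priv->s_dma_busy)) {
		/* can't build or flush yet; record why we are waiting */
		qp->s_flags |= DEMO_S_WAIT_DMA;
		return 0;
	}

	/* ... build the next request packet via priv ... */
	return 1;
}
```

The same ownership change explains the spin_lock_irqsave()/spin_unlock_irqrestore() pair removed from qib_make_rc_req() in the diff above.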
