author | Alex Estrin <alex.estrin@intel.com> | 2016-03-07 22:35:51 +0300
committer | Doug Ledford <dledford@redhat.com> | 2016-03-17 22:55:22 +0300
commit | 000a830efd370bf93083c7af484ffd84ab7fb21f
tree | 55bb6a6758ef629fa8ba3e4fc30d002eef83c71c /drivers/infiniband
parent | d0e859c32801f6793790d71dc41a9330da0da371
IB/rdmavt: Post receive for QP in ERR state
According to the IB Specification, a work request posted to the receive queue
must complete with an error if the QP is in the Error state.
Please refer to C10-42 and C10-97.2.1.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Alex Estrin <alex.estrin@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
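
For context, here is a minimal sketch (not part of this patch) of the consumer-visible behaviour that C10-42 and C10-97.2.1 call for, written against the userspace libibverbs API. The `post_recv_on_err_qp()` helper name and the `qp`/`cq` parameters are illustrative assumptions; the QP is assumed to have already been transitioned to IBV_QPS_ERR by an existing verbs setup.

```c
#include <stdio.h>
#include <string.h>
#include <infiniband/verbs.h>

/*
 * Illustrative sketch: post a receive WR to a QP that is already in the
 * Error state and observe it complete with a flush error on the recv CQ.
 * qp and cq are assumed to come from an existing verbs setup.
 */
static void post_recv_on_err_qp(struct ibv_qp *qp, struct ibv_cq *cq)
{
	struct ibv_recv_wr wr, *bad_wr = NULL;
	struct ibv_wc wc;
	int n;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = 0x1234;	/* no SGEs are needed to observe the flush */

	/* Posting must succeed even though the QP is in the Error state. */
	if (ibv_post_recv(qp, &wr, &bad_wr)) {
		fprintf(stderr, "ibv_post_recv failed\n");
		return;
	}

	/* The WR is expected to come back as a flushed completion. */
	do {
		n = ibv_poll_cq(cq, 1, &wc);
	} while (n == 0);

	if (n > 0 && wc.status == IBV_WC_WR_FLUSH_ERR)
		printf("wr_id 0x%llx flushed, as the spec requires\n",
		       (unsigned long long)wc.wr_id);
}
```

With the change below, rvt_post_recv() completes such WRs immediately with IB_WC_WR_FLUSH_ERR instead of queueing them (QPs attached to an SRQ are left alone).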
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/sw/rdmavt/qp.c | 33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index de34474b0dfb..bd82a6948dc8 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1364,6 +1364,8 @@ int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
         struct rvt_rwq *wq = qp->r_rq.wq;
         unsigned long flags;
+        int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
+                                !qp->ibqp.srq;
 
         /* Check that state is OK to post receive. */
         if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
@@ -1390,15 +1392,28 @@ int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                         *bad_wr = wr;
                         return -ENOMEM;
                 }
-
-                wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
-                wqe->wr_id = wr->wr_id;
-                wqe->num_sge = wr->num_sge;
-                for (i = 0; i < wr->num_sge; i++)
-                        wqe->sg_list[i] = wr->sg_list[i];
-                /* Make sure queue entry is written before the head index. */
-                smp_wmb();
-                wq->head = next;
+                if (unlikely(qp_err_flush)) {
+                        struct ib_wc wc;
+
+                        memset(&wc, 0, sizeof(wc));
+                        wc.qp = &qp->ibqp;
+                        wc.opcode = IB_WC_RECV;
+                        wc.wr_id = wr->wr_id;
+                        wc.status = IB_WC_WR_FLUSH_ERR;
+                        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+                } else {
+                        wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
+                        wqe->wr_id = wr->wr_id;
+                        wqe->num_sge = wr->num_sge;
+                        for (i = 0; i < wr->num_sge; i++)
+                                wqe->sg_list[i] = wr->sg_list[i];
+                        /*
+                         * Make sure queue entry is written
+                         * before the head index.
+                         */
+                        smp_wmb();
+                        wq->head = next;
+                }
                 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
         }
         return 0;
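
Note the !qp->ibqp.srq test in the new qp_err_flush condition: receives posted to a shared receive queue are not flushed here, because an SRQ is shared across QPs and is not tied to any single QP's state. A condensed, illustrative restatement of that decision follows; the must_flush_recv() helper name is made up for this sketch and does not exist in rdmavt.

```c
#include <rdma/rdmavt_qp.h>

/*
 * Sketch only: the flush-on-post decision taken by the patched
 * rvt_post_recv().  A receive WR is turned straight into an
 * IB_WC_WR_FLUSH_ERR completion when the QP's current state asks for
 * receive flushing (RVT_FLUSH_RECV, set for the Error state) and the
 * QP owns its receive queue, i.e. it is not attached to an SRQ.
 */
static bool must_flush_recv(const struct rvt_qp *qp)
{
	return (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
	       !qp->ibqp.srq;
}
```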