author | Kaike Wan <kaike.wan@intel.com> | 2019-01-24 08:51:59 +0300 |
---|---|---|
committer | Doug Ledford <dledford@redhat.com> | 2019-02-06 02:07:44 +0300 |
commit | ad00889e7ca226a2bed2b210f17c93b7be1b1542 (patch) | |
tree | b2c3023fd630d8b0bab3c5a5b970a8618517b98d | |
parent | c6c231175ccdf188d443c27e5456b9e2f65e44d4 (diff) | |
IB/hfi1: Enable TID RDMA WRITE protocol
This patch enables the TID RDMA WRITE protocol by converting a qualified
RDMA WRITE request into a TID RDMA WRITE request internally:
(1) The TID RDMA capability must be enabled;
(2) The request must start on a 4K page boundary;
(3) The request length must be a multiple of 4K and must be larger than
or equal to 256K.
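
As a rough, self-contained illustration of criteria (2) and (3) above — not the driver's code, and ignoring the capability negotiation in (1) — the check below assumes 4 KiB pages and a 256 KiB minimum segment size; all names and constants are hypothetical:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed example values: 4 KiB pages, 256 KiB minimum segment size. */
#define EXAMPLE_PAGE_SIZE	4096ULL
#define EXAMPLE_PAGE_MASK	(~(EXAMPLE_PAGE_SIZE - 1))
#define EXAMPLE_MIN_SEGMENT	(256ULL * 1024)

/* Hypothetical helper mirroring criteria (2) and (3) above. */
static bool rdma_write_qualifies_for_tid(uint64_t remote_addr, uint64_t length)
{
	return !(remote_addr & ~EXAMPLE_PAGE_MASK) &&	/* starts on a 4K boundary */
	       !(length & ~EXAMPLE_PAGE_MASK) &&	/* length is a 4K multiple */
	       length >= EXAMPLE_MIN_SEGMENT;		/* and at least 256K long */
}

int main(void)
{
	printf("%d\n", rdma_write_qualifies_for_tid(0x10000, 256 * 1024)); /* 1 */
	printf("%d\n", rdma_write_qualifies_for_tid(0x10800, 256 * 1024)); /* 0: unaligned start */
	printf("%d\n", rdma_write_qualifies_for_tid(0x10000, 200 * 1024)); /* 0: shorter than 256K */
	return 0;
}
```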
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r-- | drivers/infiniband/hw/hfi1/tid_rdma.c | 22
-rw-r--r-- | drivers/infiniband/hw/hfi1/tid_rdma.h | 3
2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 286752011f25..db3188f66dba 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -3322,6 +3322,18 @@ void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
 			new_opcode = IB_WR_TID_RDMA_READ;
 			do_tid_rdma = true;
 		}
+	} else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
+		/*
+		 * TID RDMA is enabled for this RDMA WRITE request iff:
+		 * 1. The remote address is page-aligned,
+		 * 2. The length is larger than the minimum segment size,
+		 * 3. The length is page-multiple.
+		 */
+		if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) &&
+		    !(wqe->length & ~PAGE_MASK)) {
+			new_opcode = IB_WR_TID_RDMA_WRITE;
+			do_tid_rdma = true;
+		}
 	}
 
 	if (do_tid_rdma) {
@@ -3338,12 +3350,22 @@ void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
 			priv->tid_req.n_flows = remote->max_read;
 			qpriv->tid_r_reqs++;
 			wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
+		} else {
+			wqe->lpsn += priv->tid_req.total_segs - 1;
+			atomic_inc(&qpriv->n_requests);
 		}
 
 		priv->tid_req.cur_seg = 0;
 		priv->tid_req.comp_seg = 0;
 		priv->tid_req.ack_seg = 0;
 		priv->tid_req.state = TID_REQUEST_INACTIVE;
+		/*
+		 * Reset acked_tail.
+		 * TID RDMA READ does not have ACKs so it does not
+		 * update the pointer. We have to reset it so TID RDMA
+		 * WRITE does not get confused.
+		 */
+		priv->tid_req.acked_tail = priv->tid_req.setup_head;
 		trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
 						 wqe->psn, wqe->lpsn,
 						 &priv->tid_req);
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.h b/drivers/infiniband/hw/hfi1/tid_rdma.h
index 44468188a374..53ab24ef4f02 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.h
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.h
@@ -266,7 +266,8 @@ static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
 					   struct rvt_swqe *wqe)
 {
 	if (wqe->priv &&
-	    wqe->wr.opcode == IB_WR_RDMA_READ &&
+	    (wqe->wr.opcode == IB_WR_RDMA_READ ||
+	     wqe->wr.opcode == IB_WR_RDMA_WRITE) &&
 	    wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
 		setup_tid_rdma_wqe(qp, wqe);
 }
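
For intuition about the two wqe->lpsn updates above: the READ branch counts one PSN per MTU-sized packet (rvt_div_round_up_mtu), while the new WRITE branch counts one PSN per TID RDMA segment (priv->tid_req.total_segs). A minimal stand-alone sketch of that arithmetic, assuming a 4096-byte MTU and 256 KiB segments (both values are illustrative, not taken from the driver):

```c
#include <stdio.h>

/* Assumed example values; in the driver the packet size comes from the QP's
 * MTU and the segment size from the negotiated TID RDMA parameters. */
#define EXAMPLE_MTU		4096u
#define EXAMPLE_SEGMENT_SIZE	(256u * 1024)

static unsigned int div_round_up(unsigned int len, unsigned int unit)
{
	return (len + unit - 1) / unit;
}

int main(void)
{
	unsigned int length = 1024u * 1024;	/* a 1 MiB RDMA request */
	unsigned int psn = 100;			/* arbitrary starting PSN */

	/* READ-style accounting: the request spans one PSN per MTU packet. */
	unsigned int read_lpsn = psn + div_round_up(length, EXAMPLE_MTU) - 1;

	/* WRITE-style accounting: the request spans one PSN per segment. */
	unsigned int write_lpsn = psn + div_round_up(length, EXAMPLE_SEGMENT_SIZE) - 1;

	printf("read: %u packets, lpsn=%u\n",
	       div_round_up(length, EXAMPLE_MTU), read_lpsn);
	printf("write: %u segments, lpsn=%u\n",
	       div_round_up(length, EXAMPLE_SEGMENT_SIZE), write_lpsn);
	return 0;
}
```

With these assumed values, a 1 MiB request spans 256 request PSNs when accounted as a TID RDMA READ but only 4 request PSNs as a TID RDMA WRITE.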