author     Sagi Grimberg <sagig@mellanox.com>  2015-10-13 19:11:30 +0300
committer  Doug Ledford <dledford@redhat.com>  2015-10-29 05:27:18 +0300
commit     8376b86de7d35d43cf1a33a1f43bc015b5a095d9 (patch)
tree       c75cb4e7b6a80d6bc2a50017db91444a35564b95 /drivers/infiniband/hw/cxgb4/qp.c
parent     14fb4171ab1c298101697fe844dbf062ff6f4adf (diff)
download   linux-8376b86de7d35d43cf1a33a1f43bc015b5a095d9.tar.xz
iw_cxgb4: Support the new memory registration API
Support the new memory registration API by allocating a private page list array in c4iw_mr and populating it when c4iw_map_mr_sg is invoked. Also, support IB_WR_REG_MR by duplicating build_fastreg, just taking the needed information from different places:

- page_size, iova, length (ib_mr)
- page array (c4iw_mr)
- key, access flags (ib_reg_wr)

The IB_WR_FAST_REG_MR handlers will be removed later, once all the ULPs are converted.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
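The c4iw_map_mr_sg side of this series lives in drivers/infiniband/hw/cxgb4/mem.c and is outside the diff shown here (which is limited to qp.c). As context, a minimal sketch of that path: the mpl and mpl_len fields match the usage in build_memreg below, while the max_mpl_len capacity field is an assumption for illustration, and the exact mem.c code may differ.

#include <rdma/ib_verbs.h>

/*
 * Sketch only: the IB core's ib_sg_to_pages() walks the scatterlist
 * and hands the driver one device page address at a time; the driver
 * stashes each address in the private page list that build_memreg()
 * later writes into the fastreg work request.
 */
static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	/* Refuse more pages than the MR was allocated for
	 * (max_mpl_len is a hypothetical capacity field). */
	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;
	return 0;
}

static int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			  int sg_nents)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;	/* restart the page list for this mapping */
	return ib_sg_to_pages(ibmr, sg, sg_nents, c4iw_set_page);
}

Such a handler would be wired up as the device's map_mr_sg method, so that a ULP calling ib_map_mr_sg() on a cxgb4 MR ends up filling the page list consumed by the qp.c diff below.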
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/qp.c')
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c  74
1 file changed, 74 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 1dc9f11a4243..aac75a068768 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -605,10 +605,76 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 	return 0;
 }
 
+static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
+			struct ib_reg_wr *wr, u8 *len16, u8 t5dev)
+{
+	struct c4iw_mr *mhp = to_c4iw_mr(wr->mr);
+	struct fw_ri_immd *imdp;
+	__be64 *p;
+	int i;
+	int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
+	int rem;
+
+	if (mhp->mpl_len > t4_max_fr_depth(use_dsgl))
+		return -EINVAL;
+
+	wqe->fr.qpbinde_to_dcacpu = 0;
+	wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
+	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
+	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
+	wqe->fr.len_hi = 0;
+	wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
+	wqe->fr.stag = cpu_to_be32(wr->key);
+	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
+	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
+					0xffffffff);
+
+	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+		struct fw_ri_dsgl *sglp;
+
+		for (i = 0; i < mhp->mpl_len; i++)
+			mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);
+
+		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+		sglp->op = FW_RI_DATA_DSGL;
+		sglp->r1 = 0;
+		sglp->nsge = cpu_to_be16(1);
+		sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
+		sglp->len0 = cpu_to_be32(pbllen);
+
+		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
+	} else {
+		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+		imdp->op = FW_RI_DATA_IMMD;
+		imdp->r1 = 0;
+		imdp->r2 = 0;
+		imdp->immdlen = cpu_to_be32(pbllen);
+		p = (__be64 *)(imdp + 1);
+		rem = pbllen;
+		for (i = 0; i < mhp->mpl_len; i++) {
+			*p = cpu_to_be64((u64)mhp->mpl[i]);
+			rem -= sizeof(*p);
+			if (++p == (__be64 *)&sq->queue[sq->size])
+				p = (__be64 *)sq->queue;
+		}
+		BUG_ON(rem < 0);
+		while (rem) {
+			*p = 0;
+			rem -= sizeof(*p);
+			if (++p == (__be64 *)&sq->queue[sq->size])
+				p = (__be64 *)sq->queue;
+		}
+		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
+				      + pbllen, 16);
+	}
+	return 0;
+}
+
 static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
 			 struct ib_send_wr *send_wr, u8 *len16, u8 t5dev)
 {
 	struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
+
 	struct fw_ri_immd *imdp;
 	__be64 *p;
 	int i;
@@ -815,6 +881,14 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				   qhp->rhp->rdev.lldi.adapter_type) ?
 				   1 : 0);
 		break;
+	case IB_WR_REG_MR:
+		fw_opcode = FW_RI_FR_NSMR_WR;
+		swsqe->opcode = FW_RI_FAST_REGISTER;
+		err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16,
+				   is_t5(
+				   qhp->rhp->rdev.lldi.adapter_type) ?
+				   1 : 0);
+		break;
 	case IB_WR_LOCAL_INV:
 		if (wr->send_flags & IB_SEND_FENCE)
 			fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
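For reference, a ULP consumes the new path by first mapping a scatterlist into the MR and then posting an IB_WR_REG_MR work request, which the c4iw_post_send() hunk above routes to build_memreg(). A hedged sketch under the verbs API of this kernel series; the post_reg_mr helper and its qp/mr/sg arguments are illustrative, not part of this patch:

#include <rdma/ib_verbs.h>

/*
 * Hypothetical caller: map the pages behind a scatterlist into the MR
 * (for iw_cxgb4 this invokes c4iw_map_mr_sg), then post the fastreg WR.
 */
static int post_reg_mr(struct ib_qp *qp, struct ib_mr *mr,
		       struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr;
	struct ib_send_wr *bad_wr;
	int n;

	/* Populate the MR's private page list from the SG list. */
	n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;

	/* reg_wr(wr) in c4iw_post_send() recovers this container,
	 * giving build_memreg() the key, access flags and ib_mr. */
	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}

This split is the point of the new API: the page array travels in the driver's c4iw_mr, page_size/iova/length in the generic ib_mr, and key/access in the ib_reg_wr, exactly the three sources build_memreg pulls from.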