author	Roland Dreier <rolandd@cisco.com>	2008-04-17 08:09:32 +0400
committer	Roland Dreier <rolandd@cisco.com>	2008-04-17 08:09:32 +0400
commit	0f39cf3d54e67a705773fd0ec56ca3dcd3e9272f (patch)
tree	83f19f0014d0e880fb245906105e903dd6d733d5	/drivers/infiniband/hw/ipath/ipath_rc.c
parent	e7eacd36865ae0707f5efae8e4dda421ffcd1b66 (diff)
download	linux-0f39cf3d54e67a705773fd0ec56ca3dcd3e9272f.tar.xz
IB/core: Add support for "send with invalidate" work requests
Add a new IB_WR_SEND_WITH_INV send opcode that can be used to mark a "send with invalidate" work request as defined in the iWARP verbs and the InfiniBand base memory management extensions. Also put "imm_data" and a new "invalidate_rkey" member in a new "ex" union in struct ib_send_wr. The invalidate_rkey member can be used to pass in an R_Key/STag to be invalidated. Add this new union to struct ib_uverbs_send_wr, and add code to copy the invalidate_rkey field in ib_uverbs_post_send().

Fix up low-level drivers to deal with the change to struct ib_send_wr, and just remove the imm_data initialization from net/sunrpc/xprtrdma/, since that code never does any send with immediate operations.

Also, move the existing IB_DEVICE_SEND_W_INV flag to a new bit, since the iWARP drivers currently in the tree set the old bit. The amso1100 driver at least will silently fail to honor the IB_SEND_INVALIDATE bit if it is passed in as part of userspace send requests (since it does not implement kernel bypass work request queueing). Remove the flag from all existing drivers that set it until we know which ones are OK.

The value chosen for the new flag is deliberately not consecutive with the existing flags, to avoid clashing with flags defined in the XRC patches, which are not merged yet but which are already in use and are likely to be merged soon.

This resurrects a patch sent long ago by Mikkel Hagen <mhagen@iol.unh.edu>.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
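The commit message describes the new "ex" union, but the hunks below only show the ipath side of the change, so here is a minimal, hedged sketch of how a kernel consumer might fill in a send-with-invalidate work request after this patch. The helper name post_send_with_inv(), the wr_id cookie, and the single-SGE setup are illustrative assumptions and not part of the patch; only ib_post_send(), struct ib_send_wr, IB_WR_SEND_WITH_INV, and the ex.invalidate_rkey member come from the tree.

	#include <linux/string.h>
	#include <rdma/ib_verbs.h>

	/* Illustrative helper (not in the patch): post a SEND that asks the
	 * responder to invalidate the given remote R_Key/STag. */
	static int post_send_with_inv(struct ib_qp *qp, struct ib_sge *sge,
				      u32 remote_rkey)
	{
		struct ib_send_wr wr, *bad_wr;

		memset(&wr, 0, sizeof(wr));
		wr.wr_id      = 1;			/* placeholder completion cookie */
		wr.sg_list    = sge;
		wr.num_sge    = 1;
		wr.opcode     = IB_WR_SEND_WITH_INV;	/* opcode added by this patch */
		wr.send_flags = IB_SEND_SIGNALED;
		wr.ex.invalidate_rkey = remote_rkey;	/* lives in the new "ex" union */

		return ib_post_send(qp, &wr, &bad_wr);
	}

An ordinary send with immediate would instead set wr.opcode = IB_WR_SEND_WITH_IMM and fill in wr.ex.imm_data, which is exactly the field access the ipath hunks below switch wqe->wr.imm_data over to.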
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_rc.c')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_rc.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 467981905bbe..c405dfba5531 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -308,7 +308,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
else {
qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.imm_data;
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -346,7 +346,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
qp->s_state =
OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after RETH */
- ohdr->u.rc.imm_data = wqe->wr.imm_data;
+ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= 1 << 23;
@@ -490,7 +490,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
else {
qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.imm_data;
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -526,7 +526,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
else {
qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.imm_data;
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= 1 << 23;