commit    876f7a438e4247a948268ad77b67c494f709cc30
tree      c3fa2548657920df9d80822d08ff24666e62d37d
parent    86df4141869350edaa53fb994b3db2c2cca5065d
parent    53dbee4926d3706ca9e03f3928fa85b5ec3bc0cc
author    Joonas Lahtinen <joonas.lahtinen@linux.intel.com>  2022-02-03 10:53:49 +0300
committer Joonas Lahtinen <joonas.lahtinen@linux.intel.com>  2022-02-03 10:53:49 +0300
Merge drm/drm-next into drm-intel-gt-next
Backmerge to bring in 5.17-rc2 and establish a common baseline for merging the i915_regs changes from drm-intel-next.

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Diffstat (limited to 'drivers/infiniband/sw')
 drivers/infiniband/sw/rxe/Makefile     |   1
 drivers/infiniband/sw/rxe/rxe.c        |   4
 drivers/infiniband/sw/rxe/rxe.h        |   2
 drivers/infiniband/sw/rxe/rxe_comp.c   |   8
 drivers/infiniband/sw/rxe/rxe_cq.c     |  24
 drivers/infiniband/sw/rxe/rxe_loc.h    |  10
 drivers/infiniband/sw/rxe/rxe_mcast.c  |  11
 drivers/infiniband/sw/rxe/rxe_mr.c     |  22
 drivers/infiniband/sw/rxe/rxe_mw.c     |  21
 drivers/infiniband/sw/rxe/rxe_net.c    |   9
 drivers/infiniband/sw/rxe/rxe_opcode.c | 739
 drivers/infiniband/sw/rxe/rxe_pool.c   | 177
 drivers/infiniband/sw/rxe/rxe_pool.h   |  54
 drivers/infiniband/sw/rxe/rxe_qp.c     |   9
 drivers/infiniband/sw/rxe/rxe_queue.c  |   9
 drivers/infiniband/sw/rxe/rxe_req.c    |  16
 drivers/infiniband/sw/rxe/rxe_srq.c    |   2
 drivers/infiniband/sw/rxe/rxe_sysfs.c  | 119
 drivers/infiniband/sw/rxe/rxe_task.c   |  18
 drivers/infiniband/sw/rxe/rxe_verbs.c  |  34
 drivers/infiniband/sw/rxe/rxe_verbs.h  |  24
 drivers/infiniband/sw/siw/siw_main.c   |   7
 drivers/infiniband/sw/siw/siw_verbs.c  |   6
 23 files changed, 581 insertions(+), 745 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
index 1e24673e9318..5395a581f4bb 100644
--- a/drivers/infiniband/sw/rxe/Makefile
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -22,5 +22,4 @@ rdma_rxe-y := \
rxe_mcast.o \
rxe_task.o \
rxe_net.o \
- rxe_sysfs.o \
rxe_hw_counters.o
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 8e0f9c489cab..fab291245366 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -13,8 +13,6 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
-bool rxe_initialized;
-
/* free resources for a rxe device all objects created for this device must
* have been destroyed
*/
@@ -290,7 +288,6 @@ static int __init rxe_module_init(void)
return err;
rdma_link_register(&rxe_link_ops);
- rxe_initialized = true;
pr_info("loaded\n");
return 0;
}
@@ -301,7 +298,6 @@ static void __exit rxe_module_exit(void)
ib_unregister_driver(RDMA_DRIVER_RXE);
rxe_net_exit();
- rxe_initialized = false;
pr_info("unloaded\n");
}
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 1bb3fb618bf5..fb9066e6f5f0 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -39,8 +39,6 @@
#define RXE_ROCE_V2_SPORT (0xc000)
-extern bool rxe_initialized;
-
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index d771ba8449a1..f363fe3fa414 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -458,8 +458,6 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- unsigned long flags;
-
if (wqe->has_rd_atomic) {
wqe->has_rd_atomic = 0;
atomic_inc(&qp->req.rd_atomic);
@@ -472,11 +470,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
/* state_lock used by requester & completer */
- spin_lock_irqsave(&qp->state_lock, flags);
+ spin_lock_bh(&qp->state_lock);
if ((qp->req.state == QP_STATE_DRAIN) &&
(qp->comp.psn == qp->req.psn)) {
qp->req.state = QP_STATE_DRAINED;
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -488,7 +486,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
qp->ibqp.qp_context);
}
} else {
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
}
}
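
[The rxe_comp.c hunk above is part of a tree-wide conversion from spin_lock_irqsave() to spin_lock_bh(): the series assumes rxe's requester and completer only ever run in process or tasklet (softirq) context, never from a hard-IRQ handler, so disabling bottom halves is enough and the saved flags word becomes dead weight. A minimal kernel-style sketch of the pattern; demo_lock/demo_update are hypothetical names, not rxe code:]

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static int demo_counter;

    /* Called only from process or softirq (tasklet) context, never
     * from a hard-IRQ handler, so the _bh variants are sufficient.
     */
    static void demo_update(void)
    {
            spin_lock_bh(&demo_lock);       /* disables softirqs locally */
            demo_counter++;
            spin_unlock_bh(&demo_lock);
            /* No flags word to save/restore, unlike spin_lock_irqsave(). */
    }
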
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 6848426c074f..6baaaa34458e 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -42,14 +42,13 @@ err1:
static void rxe_send_complete(struct tasklet_struct *t)
{
struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
- unsigned long flags;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
if (cq->is_dying) {
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
return;
}
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
@@ -106,15 +105,14 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
struct ib_event ev;
- unsigned long flags;
int full;
void *addr;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
ev.element.cq = &cq->ibcq;
@@ -130,7 +128,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
if ((cq->notify == IB_CQ_NEXT_COMP) ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
@@ -143,16 +141,14 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
void rxe_cq_disable(struct rxe_cq *cq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
cq->is_dying = true;
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
}
-void rxe_cq_cleanup(struct rxe_pool_entry *arg)
+void rxe_cq_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
+ struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
if (cq->queue)
rxe_queue_cleanup(cq->queue);
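
[The pelem -> elem rename above goes with the switch from struct rxe_pool_entry to struct rxe_pool_elem; every cleanup callback recovers its enclosing object from the embedded member with container_of(). A standalone userspace sketch of that pattern, assuming illustrative cq/pool_elem types rather than the real rxe ones:]

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pool_elem {
            int index;
    };

    struct cq {
            int depth;
            struct pool_elem elem;  /* embedded, not a pointer */
    };

    static void cq_cleanup(struct pool_elem *elem)
    {
            /* Walk back from the member to the enclosing struct cq. */
            struct cq *cq = container_of(elem, struct cq, elem);

            printf("cleaning cq, depth %d\n", cq->depth);
    }

    int main(void)
    {
            struct cq cq = { .depth = 64, .elem = { .index = 1 } };

            cq_cleanup(&cq.elem);
            return 0;
    }
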
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 1ca43b859d80..b1e174afb1d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -37,7 +37,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
void rxe_cq_disable(struct rxe_cq *cq);
-void rxe_cq_cleanup(struct rxe_pool_entry *arg);
+void rxe_cq_cleanup(struct rxe_pool_elem *arg);
/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
@@ -51,7 +51,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *arg);
/* rxe_mmap.c */
struct rxe_mmap_info {
@@ -89,7 +89,7 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-void rxe_mr_cleanup(struct rxe_pool_entry *arg);
+void rxe_mr_cleanup(struct rxe_pool_elem *arg);
/* rxe_mw.c */
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
@@ -97,7 +97,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw);
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
-void rxe_mw_cleanup(struct rxe_pool_entry *arg);
+void rxe_mw_cleanup(struct rxe_pool_elem *arg);
/* rxe_net.c */
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
@@ -131,7 +131,7 @@ void rxe_qp_error(struct rxe_qp *qp);
void rxe_qp_destroy(struct rxe_qp *qp);
-void rxe_qp_cleanup(struct rxe_pool_entry *arg);
+void rxe_qp_cleanup(struct rxe_pool_elem *elem);
static inline int qp_num(struct rxe_qp *qp)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index 1c1d1b53312d..bd1ac88b8700 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -40,12 +40,11 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
int err;
struct rxe_mc_grp *grp;
struct rxe_pool *pool = &rxe->mc_grp_pool;
- unsigned long flags;
if (rxe->attr.max_mcast_qp_attach == 0)
return -EINVAL;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
grp = rxe_pool_get_key_locked(pool, mgid);
if (grp)
@@ -53,13 +52,13 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
grp = create_grp(rxe, pool, mgid);
if (IS_ERR(grp)) {
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
err = PTR_ERR(grp);
return err;
}
done:
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
*grp_p = grp;
return 0;
}
@@ -169,9 +168,9 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
}
}
-void rxe_mc_cleanup(struct rxe_pool_entry *arg)
+void rxe_mc_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
+ struct rxe_mc_grp *grp = container_of(elem, typeof(*grp), elem);
struct rxe_dev *rxe = grp->rxe;
rxe_drop_key(grp);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 53271df10e47..453ef3c9d535 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -50,7 +50,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
static void rxe_mr_init(int access, struct rxe_mr *mr)
{
- u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1);
+ u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1);
u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
/* set ibmr->l/rkey and also copy into private l/rkey
@@ -135,19 +135,19 @@ static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
if (ret)
- goto err_out;
+ return -ENOMEM;
if (both) {
ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
- if (ret) {
- rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
- goto err_out;
- }
+ if (ret)
+ goto err_free;
}
return 0;
-err_out:
+err_free:
+ rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+ mr->cur_map_set = NULL;
return -ENOMEM;
}
@@ -214,7 +214,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
pr_warn("%s: Unable to get virtual address\n",
__func__);
err = -ENOMEM;
- goto err_cleanup_map;
+ goto err_release_umem;
}
buf->addr = (uintptr_t)vaddr;
@@ -237,8 +237,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
return 0;
-err_cleanup_map:
- rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
err_release_umem:
ib_umem_release(umem);
err_out:
@@ -699,9 +697,9 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return 0;
}
-void rxe_mr_cleanup(struct rxe_pool_entry *arg)
+void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
+ struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
ib_umem_release(mr->umem);
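
[The rxe_mr.c error-path rework above moves the freeing of cur_map_set under a single err_free label instead of duplicating it at the call site. A hedged userspace sketch of that centralized-unwind style, mirroring the kernel's negative-errno convention; alloc_pair and its buffers are made up for illustration:]

    #include <errno.h>
    #include <stdlib.h>

    /* Allocate two buffers; on failure free only what was already
     * allocated, via one unwind label per resource.
     */
    static int alloc_pair(char **a, char **b, size_t len)
    {
            *a = malloc(len);
            if (!*a)
                    return -ENOMEM;

            *b = malloc(len);
            if (!*b)
                    goto err_free_a;

            return 0;

    err_free_a:
            free(*a);
            *a = NULL;      /* don't leave a dangling pointer behind */
            return -ENOMEM;
    }

    int main(void)
    {
            char *a, *b;

            if (alloc_pair(&a, &b, 64))
                    return 1;
            free(b);
            free(a);
            return 0;
    }
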
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 9534a7fe1a98..32dd8c0b8b9e 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -21,7 +21,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
}
rxe_add_index(mw);
- mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
+ mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);
@@ -56,11 +56,10 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
{
struct rxe_mw *mw = to_rmw(ibmw);
struct rxe_pd *pd = to_rpd(ibmw->pd);
- unsigned long flags;
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
rxe_do_dealloc_mw(mw);
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
rxe_drop_ref(mw);
rxe_drop_ref(pd);
@@ -197,7 +196,6 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
- unsigned long flags;
mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
if (unlikely(!mw)) {
@@ -225,7 +223,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
mr = NULL;
}
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
ret = rxe_check_bind_mw(qp, wqe, mw, mr);
if (ret)
@@ -233,7 +231,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
err_drop_mr:
if (mr)
rxe_drop_ref(mr);
@@ -280,7 +278,6 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw)
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- unsigned long flags;
struct rxe_mw *mw;
int ret;
@@ -295,7 +292,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
goto err_drop_ref;
}
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
ret = rxe_check_invalidate_mw(qp, mw);
if (ret)
@@ -303,7 +300,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
rxe_do_invalidate_mw(mw);
err_unlock:
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
err_drop_ref:
rxe_drop_ref(mw);
err:
@@ -333,9 +330,9 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
return mw;
}
-void rxe_mw_cleanup(struct rxe_pool_entry *elem)
+void rxe_mw_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);
+ struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
rxe_drop_index(mw);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 2cb810cb890a..be72bdbfb4ba 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -22,24 +22,20 @@ static struct rxe_recv_sockets recv_sockets;
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
- int err;
unsigned char ll_addr[ETH_ALEN];
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- err = dev_mc_add(rxe->ndev, ll_addr);
- return err;
+ return dev_mc_add(rxe->ndev, ll_addr);
}
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
- int err;
unsigned char ll_addr[ETH_ALEN];
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- err = dev_mc_del(rxe->ndev, ll_addr);
- return err;
+ return dev_mc_del(rxe->ndev, ll_addr);
}
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
@@ -444,7 +440,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
else
err = rxe_send(skb, pkt);
if (err) {
- rxe->xmit_errors++;
rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
return err;
}
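
[Two small cleanups in rxe_net.c above: the temporary err locals around dev_mc_add()/dev_mc_del() are dropped in favor of returning the call directly, and the ad-hoc xmit_errors counter goes away because rxe_counter_inc(rxe, RXE_CNT_SEND_ERR) already records the failure. A toy sketch of the first simplification, with a hypothetical transmit helper:]

    #include <stdio.h>

    static int hw_transmit(int fd)
    {
            return fd >= 0 ? 0 : -1;        /* pretend transmit */
    }

    /* Before: int err = hw_transmit(fd); return err;
     * After:  forward the result directly - same behavior, no local.
     */
    static int xmit(int fd)
    {
            return hw_transmit(fd);
    }

    int main(void)
    {
            printf("xmit(3) = %d\n", xmit(3));
            return 0;
    }
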
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 3ef5a10a6efd..df596ba7527d 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -108,8 +108,8 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
[IB_OPCODE_RC_SEND_FIRST] = {
.name = "IB_OPCODE_RC_SEND_FIRST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -117,9 +117,9 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
}
},
[IB_OPCODE_RC_SEND_MIDDLE] = {
- .name = "IB_OPCODE_RC_SEND_MIDDLE]",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .name = "IB_OPCODE_RC_SEND_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -128,8 +128,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_LAST] = {
.name = "IB_OPCODE_RC_SEND_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -138,21 +138,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_SEND_ONLY] = {
.name = "IB_OPCODE_RC_SEND_ONLY",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -161,33 +161,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_FIRST",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -196,8 +196,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -206,69 +206,69 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_ONLY",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_REQUEST] = {
.name = "IB_OPCODE_RC_RDMA_READ_REQUEST",
- .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = {
@@ -282,109 +282,110 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RC_ACKNOWLEDGE",
- .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE",
- .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_ATMACK] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMACK_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_COMPARE_SWAP] = {
.name = "IB_OPCODE_RC_COMPARE_SWAP",
- .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_ATMETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES,
}
},
[IB_OPCODE_RC_FETCH_ADD] = {
.name = "IB_OPCODE_RC_FETCH_ADD",
- .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_ATMETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES,
}
},
[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = {
.name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE",
- .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IETH_BYTES,
}
},
[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = {
.name = "IB_OPCODE_RC_SEND_ONLY_INV",
- .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_END_MASK | RXE_START_MASK,
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_END_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IETH_BYTES,
}
},
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = {
.name = "IB_OPCODE_UC_SEND_FIRST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -393,8 +394,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_MIDDLE] = {
.name = "IB_OPCODE_UC_SEND_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -403,8 +404,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_LAST] = {
.name = "IB_OPCODE_UC_SEND_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -413,21 +414,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_SEND_ONLY] = {
.name = "IB_OPCODE_UC_SEND_ONLY",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -436,33 +437,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_FIRST",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -471,8 +472,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -481,460 +482,460 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_ONLY",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
/* RD */
[IB_OPCODE_RD_SEND_FIRST] = {
.name = "IB_OPCODE_RD_SEND_FIRST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_MIDDLE] = {
.name = "IB_OPCODE_RD_SEND_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_LAST] = {
.name = "IB_OPCODE_RD_SEND_LAST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK
- | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_SEND_ONLY] = {
.name = "IB_OPCODE_RD_SEND_ONLY",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_FIRST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_LAST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_ONLY",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES
- + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES +
+ RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_REQUEST] = {
.name = "IB_OPCODE_RD_RDMA_READ_REQUEST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_REQ_MASK | RXE_READ_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_REQ_MASK | RXE_READ_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK
- | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
- | RXE_ACK_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_ACK_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
- | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RD_ACKNOWLEDGE",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK
- | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK |
+ RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMACK] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_COMPARE_SWAP] = {
.name = "RD_COMPARE_SWAP",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
- | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
+ RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
[RXE_PAYLOAD] = RXE_BTH_BYTES +
- + RXE_ATMETH_BYTES
- + RXE_DETH_BYTES +
- + RXE_RDETH_BYTES,
+ RXE_ATMETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_FETCH_ADD] = {
.name = "IB_OPCODE_RD_FETCH_ADD",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
- | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
+ RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
[RXE_PAYLOAD] = RXE_BTH_BYTES +
- + RXE_ATMETH_BYTES
- + RXE_DETH_BYTES +
- + RXE_RDETH_BYTES,
+ RXE_ATMETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
/* UD */
[IB_OPCODE_UD_SEND_ONLY] = {
.name = "IB_OPCODE_UD_SEND_ONLY",
- .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_DETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_DETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
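
[The large rxe_opcode.c delta above is almost entirely mechanical: continuation lines are rewritten so the | and + operators end a line instead of starting the next one, matching the prevailing kernel style, and the stray ']' in the IB_OPCODE_RC_SEND_MIDDLE name string is dropped. A compilable toy showing the target layout; the FLAG_* macros are placeholders:]

    #include <stdio.h>

    #define FLAG_A (1u << 0)
    #define FLAG_B (1u << 1)
    #define FLAG_C (1u << 2)

    int main(void)
    {
            /* Operators trail the line; the continuation aligns
             * under the first operand.
             */
            unsigned int mask = FLAG_A | FLAG_B |
                                FLAG_C;

            printf("mask = %#x\n", mask);
            return 0;
    }
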
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 2e80bb6aa957..4cb003885e00 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -5,13 +5,14 @@
*/
#include "rxe.h"
-#include "rxe_loc.h"
+
+#define RXE_POOL_ALIGN (16)
static const struct rxe_type_info {
const char *name;
size_t size;
size_t elem_offset;
- void (*cleanup)(struct rxe_pool_entry *obj);
+ void (*cleanup)(struct rxe_pool_elem *obj);
enum rxe_pool_flags flags;
u32 min_index;
u32 max_index;
@@ -21,19 +22,19 @@ static const struct rxe_type_info {
[RXE_TYPE_UC] = {
.name = "rxe-uc",
.size = sizeof(struct rxe_ucontext),
- .elem_offset = offsetof(struct rxe_ucontext, pelem),
+ .elem_offset = offsetof(struct rxe_ucontext, elem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_PD] = {
.name = "rxe-pd",
.size = sizeof(struct rxe_pd),
- .elem_offset = offsetof(struct rxe_pd, pelem),
+ .elem_offset = offsetof(struct rxe_pd, elem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_AH] = {
.name = "rxe-ah",
.size = sizeof(struct rxe_ah),
- .elem_offset = offsetof(struct rxe_ah, pelem),
+ .elem_offset = offsetof(struct rxe_ah, elem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_AH_INDEX,
.max_index = RXE_MAX_AH_INDEX,
@@ -41,7 +42,7 @@ static const struct rxe_type_info {
[RXE_TYPE_SRQ] = {
.name = "rxe-srq",
.size = sizeof(struct rxe_srq),
- .elem_offset = offsetof(struct rxe_srq, pelem),
+ .elem_offset = offsetof(struct rxe_srq, elem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_SRQ_INDEX,
.max_index = RXE_MAX_SRQ_INDEX,
@@ -49,7 +50,7 @@ static const struct rxe_type_info {
[RXE_TYPE_QP] = {
.name = "rxe-qp",
.size = sizeof(struct rxe_qp),
- .elem_offset = offsetof(struct rxe_qp, pelem),
+ .elem_offset = offsetof(struct rxe_qp, elem),
.cleanup = rxe_qp_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_QP_INDEX,
@@ -58,14 +59,14 @@ static const struct rxe_type_info {
[RXE_TYPE_CQ] = {
.name = "rxe-cq",
.size = sizeof(struct rxe_cq),
- .elem_offset = offsetof(struct rxe_cq, pelem),
+ .elem_offset = offsetof(struct rxe_cq, elem),
.flags = RXE_POOL_NO_ALLOC,
.cleanup = rxe_cq_cleanup,
},
[RXE_TYPE_MR] = {
.name = "rxe-mr",
.size = sizeof(struct rxe_mr),
- .elem_offset = offsetof(struct rxe_mr, pelem),
+ .elem_offset = offsetof(struct rxe_mr, elem),
.cleanup = rxe_mr_cleanup,
.flags = RXE_POOL_INDEX,
.min_index = RXE_MIN_MR_INDEX,
@@ -74,7 +75,7 @@ static const struct rxe_type_info {
[RXE_TYPE_MW] = {
.name = "rxe-mw",
.size = sizeof(struct rxe_mw),
- .elem_offset = offsetof(struct rxe_mw, pelem),
+ .elem_offset = offsetof(struct rxe_mw, elem),
.cleanup = rxe_mw_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_MW_INDEX,
@@ -83,7 +84,7 @@ static const struct rxe_type_info {
[RXE_TYPE_MC_GRP] = {
.name = "rxe-mc_grp",
.size = sizeof(struct rxe_mc_grp),
- .elem_offset = offsetof(struct rxe_mc_grp, pelem),
+ .elem_offset = offsetof(struct rxe_mc_grp, elem),
.cleanup = rxe_mc_cleanup,
.flags = RXE_POOL_KEY,
.key_offset = offsetof(struct rxe_mc_grp, mgid),
@@ -92,15 +93,10 @@ static const struct rxe_type_info {
[RXE_TYPE_MC_ELEM] = {
.name = "rxe-mc_elem",
.size = sizeof(struct rxe_mc_elem),
- .elem_offset = offsetof(struct rxe_mc_elem, pelem),
+ .elem_offset = offsetof(struct rxe_mc_elem, elem),
},
};
-static inline const char *pool_name(struct rxe_pool *pool)
-{
- return rxe_type_info[pool->type].name;
-}
-
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
int err = 0;
@@ -130,35 +126,36 @@ int rxe_pool_init(
enum rxe_elem_type type,
unsigned int max_elem)
{
+ const struct rxe_type_info *info = &rxe_type_info[type];
int err = 0;
- size_t size = rxe_type_info[type].size;
memset(pool, 0, sizeof(*pool));
pool->rxe = rxe;
+ pool->name = info->name;
pool->type = type;
pool->max_elem = max_elem;
- pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
- pool->flags = rxe_type_info[type].flags;
- pool->index.tree = RB_ROOT;
- pool->key.tree = RB_ROOT;
- pool->cleanup = rxe_type_info[type].cleanup;
+ pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN);
+ pool->elem_offset = info->elem_offset;
+ pool->flags = info->flags;
+ pool->cleanup = info->cleanup;
atomic_set(&pool->num_elem, 0);
rwlock_init(&pool->pool_lock);
- if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
- err = rxe_pool_init_index(pool,
- rxe_type_info[type].max_index,
- rxe_type_info[type].min_index);
+ if (pool->flags & RXE_POOL_INDEX) {
+ pool->index.tree = RB_ROOT;
+ err = rxe_pool_init_index(pool, info->max_index,
+ info->min_index);
if (err)
goto out;
}
- if (rxe_type_info[type].flags & RXE_POOL_KEY) {
- pool->key.key_offset = rxe_type_info[type].key_offset;
- pool->key.key_size = rxe_type_info[type].key_size;
+ if (pool->flags & RXE_POOL_KEY) {
+ pool->key.tree = RB_ROOT;
+ pool->key.key_offset = info->key_offset;
+ pool->key.key_size = info->key_size;
}
out:
@@ -169,9 +166,10 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
{
if (atomic_read(&pool->num_elem) > 0)
pr_warn("%s pool destroyed with unfree'd elem\n",
- pool_name(pool));
+ pool->name);
- bitmap_free(pool->index.table);
+ if (pool->flags & RXE_POOL_INDEX)
+ bitmap_free(pool->index.table);
}
static u32 alloc_index(struct rxe_pool *pool)
@@ -189,15 +187,15 @@ static u32 alloc_index(struct rxe_pool *pool)
return index + pool->index.min_index;
}
-static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new)
{
struct rb_node **link = &pool->index.tree.rb_node;
struct rb_node *parent = NULL;
- struct rxe_pool_entry *elem;
+ struct rxe_pool_elem *elem;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, index_node);
+ elem = rb_entry(parent, struct rxe_pool_elem, index_node);
if (elem->index == new->index) {
pr_warn("element already exists!\n");
@@ -216,19 +214,20 @@ static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
return 0;
}
-static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new)
{
struct rb_node **link = &pool->key.tree.rb_node;
struct rb_node *parent = NULL;
- struct rxe_pool_entry *elem;
+ struct rxe_pool_elem *elem;
int cmp;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, key_node);
+ elem = rb_entry(parent, struct rxe_pool_elem, key_node);
cmp = memcmp((u8 *)elem + pool->key.key_offset,
- (u8 *)new + pool->key.key_offset, pool->key.key_size);
+ (u8 *)new + pool->key.key_offset,
+ pool->key.key_size);
if (cmp == 0) {
pr_warn("key already exists!\n");
@@ -247,7 +246,7 @@ static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
return 0;
}
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
int err;
@@ -258,37 +257,35 @@ int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
return err;
}
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
int err;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
err = __rxe_add_key_locked(elem, key);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
return err;
}
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
rb_erase(&elem->key_node, &pool->key.tree);
}
-void __rxe_drop_key(struct rxe_pool_entry *elem)
+void __rxe_drop_key(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
__rxe_drop_key_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
}
-int __rxe_add_index_locked(struct rxe_pool_entry *elem)
+int __rxe_add_index_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
int err;
@@ -299,20 +296,19 @@ int __rxe_add_index_locked(struct rxe_pool_entry *elem)
return err;
}
-int __rxe_add_index(struct rxe_pool_entry *elem)
+int __rxe_add_index(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
int err;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
err = __rxe_add_index_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
return err;
}
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
@@ -320,32 +316,31 @@ void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
rb_erase(&elem->index_node, &pool->index.tree);
}
-void __rxe_drop_index(struct rxe_pool_entry *elem)
+void __rxe_drop_index(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
__rxe_drop_index_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
}
void *rxe_alloc_locked(struct rxe_pool *pool)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- obj = kzalloc(info->size, GFP_ATOMIC);
+ obj = kzalloc(pool->elem_size, GFP_ATOMIC);
if (!obj)
goto out_cnt;
- elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+ elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
elem->pool = pool;
+ elem->obj = obj;
kref_init(&elem->ref_cnt);
return obj;
@@ -357,20 +352,20 @@ out_cnt:
void *rxe_alloc(struct rxe_pool *pool)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- obj = kzalloc(info->size, GFP_KERNEL);
+ obj = kzalloc(pool->elem_size, GFP_KERNEL);
if (!obj)
goto out_cnt;
- elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+ elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
elem->pool = pool;
+ elem->obj = obj;
kref_init(&elem->ref_cnt);
return obj;
@@ -380,12 +375,13 @@ out_cnt:
return NULL;
}
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
elem->pool = pool;
+ elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
return 0;
@@ -397,17 +393,16 @@ out_cnt:
void rxe_elem_release(struct kref *kref)
{
- struct rxe_pool_entry *elem =
- container_of(kref, struct rxe_pool_entry, ref_cnt);
+ struct rxe_pool_elem *elem =
+ container_of(kref, struct rxe_pool_elem, ref_cnt);
struct rxe_pool *pool = elem->pool;
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- u8 *obj;
+ void *obj;
if (pool->cleanup)
pool->cleanup(elem);
if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
kfree(obj);
}
@@ -416,15 +411,14 @@ void rxe_elem_release(struct kref *kref)
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
node = pool->index.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, index_node);
+ elem = rb_entry(node, struct rxe_pool_elem, index_node);
if (elem->index > index)
node = node->rb_left;
@@ -436,7 +430,7 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
if (node) {
kref_get(&elem->ref_cnt);
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
} else {
obj = NULL;
}
@@ -446,28 +440,26 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
- u8 *obj;
- unsigned long flags;
+ void *obj;
- read_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_bh(&pool->pool_lock);
obj = rxe_pool_get_index_locked(pool, index);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_bh(&pool->pool_lock);
return obj;
}
void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
int cmp;
node = pool->key.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, key_node);
+ elem = rb_entry(node, struct rxe_pool_elem, key_node);
cmp = memcmp((u8 *)elem + pool->key.key_offset,
key, pool->key.key_size);
@@ -482,7 +474,7 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
if (node) {
kref_get(&elem->ref_cnt);
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
} else {
obj = NULL;
}
@@ -492,12 +484,11 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
- u8 *obj;
- unsigned long flags;
+ void *obj;
- read_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_bh(&pool->pool_lock);
obj = rxe_pool_get_key_locked(pool, key);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_bh(&pool->pool_lock);
return obj;
}
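
The rxe_pool.c refactor above caches elem_size and elem_offset in struct rxe_pool and stores a back-pointer in each element, so rxe_elem_release() and the index/key lookups convert between an object and its embedded element without consulting rxe_type_info. A runnable userspace sketch of that round trip, using nothing beyond offsetof(); the demo_* names are illustrative, not part of the driver:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_elem {
	void *obj;			/* back-pointer, as in rxe_pool_elem */
};

struct demo_obj {
	int payload;
	struct demo_elem elem;		/* embedded element, like rxe_qp.elem */
};

int main(void)
{
	size_t elem_offset = offsetof(struct demo_obj, elem);
	struct demo_obj *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;

	/* forward: object to element, as in rxe_alloc() */
	struct demo_elem *elem =
		(struct demo_elem *)((unsigned char *)obj + elem_offset);
	elem->obj = obj;

	/* reverse: element to object, as in __rxe_add_to_pool() */
	struct demo_obj *back =
		(struct demo_obj *)((unsigned char *)elem - elem_offset);

	printf("round trip ok: %d\n", back == obj && elem->obj == obj);
	free(obj);
	return 0;
}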
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 8ecd9f870aea..214279310f4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -7,9 +7,6 @@
#ifndef RXE_POOL_H
#define RXE_POOL_H
-#define RXE_POOL_ALIGN (16)
-#define RXE_POOL_CACHE_FLAGS (0)
-
enum rxe_pool_flags {
RXE_POOL_INDEX = BIT(1),
RXE_POOL_KEY = BIT(2),
@@ -30,10 +27,9 @@ enum rxe_elem_type {
RXE_NUM_TYPES, /* keep me last */
};
-struct rxe_pool_entry;
-
-struct rxe_pool_entry {
+struct rxe_pool_elem {
struct rxe_pool *pool;
+ void *obj;
struct kref ref_cnt;
struct list_head list;
@@ -47,14 +43,16 @@ struct rxe_pool_entry {
struct rxe_pool {
struct rxe_dev *rxe;
+ const char *name;
rwlock_t pool_lock; /* protects pool add/del/search */
- size_t elem_size;
- void (*cleanup)(struct rxe_pool_entry *obj);
+ void (*cleanup)(struct rxe_pool_elem *obj);
enum rxe_pool_flags flags;
enum rxe_elem_type type;
unsigned int max_elem;
atomic_t num_elem;
+ size_t elem_size;
+ size_t elem_offset;
/* only used if indexed */
struct {
@@ -89,51 +87,51 @@ void *rxe_alloc_locked(struct rxe_pool *pool);
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
-#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
/* assign an index to an indexed object and insert object into
* pool's rb tree holding and not holding the pool_lock
*/
-int __rxe_add_index_locked(struct rxe_pool_entry *elem);
+int __rxe_add_index_locked(struct rxe_pool_elem *elem);
-#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)
+#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem)
-int __rxe_add_index(struct rxe_pool_entry *elem);
+int __rxe_add_index(struct rxe_pool_elem *elem);
-#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
+#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem)
/* drop an index and remove object from rb tree
* holding and not holding the pool_lock
*/
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem);
-#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem)
+#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem)
-void __rxe_drop_index(struct rxe_pool_entry *elem);
+void __rxe_drop_index(struct rxe_pool_elem *elem);
-#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
+#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem)
/* assign a key to a keyed object and insert object into
* pool's rb tree holding and not holding pool_lock
*/
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key);
-#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
+#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->elem, key)
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key);
-#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
+#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->elem, key)
/* remove elem from rb tree holding and not holding the pool_lock */
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem);
-#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)
+#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->elem)
-void __rxe_drop_key(struct rxe_pool_entry *elem);
+void __rxe_drop_key(struct rxe_pool_elem *elem);
-#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
+#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->elem)
/* lookup an indexed object from index holding and not holding the pool_lock.
* takes a reference on object
@@ -153,9 +151,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
void rxe_elem_release(struct kref *kref);
/* take a reference on an object */
-#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt)
+#define rxe_add_ref(obj) kref_get(&(obj)->elem.ref_cnt)
/* drop a reference on an object */
-#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release)
+#define rxe_drop_ref(obj) kref_put(&(obj)->elem.ref_cnt, rxe_elem_release)
#endif /* RXE_POOL_H */
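
The pelem to elem rename keeps rxe_pool.h's wrapper convention intact: each macro takes the containing object and forwards its embedded struct rxe_pool_elem, so one set of __rxe_* helpers serves every object type. A runnable userspace sketch of the pattern (the demo_* names are illustrative):

#include <stdio.h>

struct demo_elem {
	int index;
};

struct demo_qp {
	int qpn;
	struct demo_elem elem;		/* embedded, like rxe_qp.elem */
};

/* one generic helper operating on the element... */
static int __demo_add_index(struct demo_elem *elem)
{
	elem->index = 42;		/* stand-in for the rb-tree insert */
	return 0;
}

/* ...fronted by a macro that digs the element out of the object,
 * as rxe_add_index() does above
 */
#define demo_add_index(obj) __demo_add_index(&(obj)->elem)

int main(void)
{
	struct demo_qp qp = { .qpn = 1 };

	demo_add_index(&qp);
	printf("qp %d got index %d\n", qp.qpn, qp.elem.index);
	return 0;
}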
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 54b8711321c1..5018b9387694 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -167,7 +167,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->attr.path_mtu = 1;
qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
- qpn = qp->pelem.index;
+ qpn = qp->elem.index;
port = &rxe->port;
switch (init->qp_type) {
@@ -217,8 +217,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
* the port number must be in the Dynamic Ports range
* (0xc000 - 0xffff).
*/
- qp->src_port = RXE_ROCE_V2_SPORT +
- (hash_32_generic(qp_num(qp), 14) & 0x3fff);
+ qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
qp->sq.max_wr = init->cap.max_send_wr;
/* These caps are limited by rxe_qp_chk_cap() done by the caller */
@@ -832,9 +831,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
}
/* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+ struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);
execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}
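
The rxe_qp.c hunk also swaps hash_32_generic() for hash_32(), the kernel's multiplicative 32-bit hash. Folding to 14 bits already bounds the result below 0x4000, and the 0x3fff mask makes that explicit, so qp->src_port always lands in the dynamic range 0xc000 to 0xffff. A runnable sketch of that computation; the GOLDEN_RATIO_32 constant is assumed from include/linux/hash.h and the loop values are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32	0x61C88647u	/* assumed, as in include/linux/hash.h */

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	/* multiplicative hash, keeping the high bits */
	return (uint32_t)(val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	uint32_t qpn;

	for (qpn = 17; qpn < 22; qpn++) {
		uint16_t sport = 0xc000 + (hash_32(qpn, 14) & 0x3fff);

		/* always inside the IANA dynamic range 0xc000..0xffff */
		printf("qpn %u -> udp sport 0x%04x\n", qpn, sport);
	}
	return 0;
}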
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 6e6e023c1b45..a1b283dd2d4c 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -151,7 +151,6 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
struct rxe_queue *new_q;
unsigned int num_elem = *num_elem_p;
int err;
- unsigned long flags = 0, flags1;
new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
if (!new_q)
@@ -165,17 +164,17 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
goto err1;
}
- spin_lock_irqsave(consumer_lock, flags1);
+ spin_lock_bh(consumer_lock);
if (producer_lock) {
- spin_lock_irqsave(producer_lock, flags);
+ spin_lock_bh(producer_lock);
err = resize_finish(q, new_q, num_elem);
- spin_unlock_irqrestore(producer_lock, flags);
+ spin_unlock_bh(producer_lock);
} else {
err = resize_finish(q, new_q, num_elem);
}
- spin_unlock_irqrestore(consumer_lock, flags1);
+ spin_unlock_bh(consumer_lock);
rxe_queue_cleanup(new_q); /* new/old dep on err */
if (err)
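
From rxe_pool.c through rxe_verbs.c this series replaces the irqsave/irqrestore lock variants with _bh ones, as in the rxe_queue_resize() hunk just above. The assumption behind the conversion is that these paths run only in process context and in tasklet (softirq) context, never from a hard interrupt handler, so disabling bottom halves suffices and there are no IRQ flags to save. A kernel-style fragment of the two sides of that pairing (illustrative names, not a standalone program):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_count;

/* softirq side: runs via tasklet_schedule(), BHs already disabled */
static void demo_tasklet_fn(struct tasklet_struct *t)
{
	spin_lock(&demo_lock);
	demo_count++;
	spin_unlock(&demo_lock);
}

/* process-context side: _bh keeps the tasklet from preempting us here */
static void demo_post_path(void)
{
	spin_lock_bh(&demo_lock);
	demo_count++;
	spin_unlock_bh(&demo_lock);
}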
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 0c9d2af15f3d..5eb89052dd66 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -110,7 +110,6 @@ void rnr_nak_timer(struct timer_list *t)
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
struct rxe_send_wqe *wqe;
- unsigned long flags;
struct rxe_queue *q = qp->sq.queue;
unsigned int index = qp->req.wqe_index;
unsigned int cons;
@@ -124,25 +123,23 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
/* check to see if we are drained;
* state_lock used by requester and completer
*/
- spin_lock_irqsave(&qp->state_lock, flags);
+ spin_lock_bh(&qp->state_lock);
do {
if (qp->req.state != QP_STATE_DRAIN) {
/* comp just finished */
- spin_unlock_irqrestore(&qp->state_lock,
- flags);
+ spin_unlock_bh(&qp->state_lock);
break;
}
if (wqe && ((index != cons) ||
(wqe->state != wqe_state_posted))) {
/* comp not done yet */
- spin_unlock_irqrestore(&qp->state_lock,
- flags);
+ spin_unlock_bh(&qp->state_lock);
break;
}
qp->req.state = QP_STATE_DRAINED;
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -372,7 +369,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
int pad = (-payload) & 0x3;
int paylen;
int solicited;
- u16 pkey;
u32 qp_num;
int ack_req;
@@ -404,8 +400,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
(RXE_WRITE_MASK | RXE_IMMDT_MASK));
- pkey = IB_DEFAULT_PKEY_FULL;
-
qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
qp->attr.dest_qp_num;
@@ -414,7 +408,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
if (ack_req)
qp->req.noack_pkts = 0;
- bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
+ bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
ack_req, pkt->psn);
/* init optional headers */
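
Besides dropping the pkey local (rxe always uses IB_DEFAULT_PKEY_FULL), init_req_packet() sizes the BTH pad with the context line int pad = (-payload) & 0x3; two's-complement negation modulo 4 yields exactly the bytes needed to round the payload up to a 4-byte boundary. A runnable check of that identity:

#include <stdio.h>

int main(void)
{
	int payload;

	for (payload = 0; payload < 8; payload++) {
		int pad = (-payload) & 0x3;

		/* payload + pad is always a multiple of 4 */
		printf("payload %d -> pad %d (total %d)\n",
		       payload, pad, payload + pad);
	}
	return 0;
}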
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index eb1c4c3b3a78..0c0721f04357 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -83,7 +83,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
srq->ibsrq.event_handler = init->event_handler;
srq->ibsrq.srq_context = init->srq_context;
srq->limit = init->attr.srq_limit;
- srq->srq_num = srq->pelem.index;
+ srq->srq_num = srq->elem.index;
srq->rq.max_wr = init->attr.max_wr;
srq->rq.max_sge = init->attr.max_sge;
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
deleted file mode 100644
index 666202ddff48..000000000000
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/*
- * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- */
-
-#include "rxe.h"
-#include "rxe_net.h"
-
-/* Copy argument and remove trailing CR. Return the new length. */
-static int sanitize_arg(const char *val, char *intf, int intf_len)
-{
- int len;
-
- if (!val)
- return 0;
-
- /* Remove newline. */
- for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++)
- intf[len] = val[len];
- intf[len] = 0;
-
- if (len == 0 || (val[len] != 0 && val[len] != '\n'))
- return 0;
-
- return len;
-}
-
-static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
-{
- int len;
- int err = 0;
- char intf[32];
- struct net_device *ndev;
- struct rxe_dev *exists;
-
- if (!rxe_initialized) {
- pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n");
- return -EAGAIN;
- }
-
- len = sanitize_arg(val, intf, sizeof(intf));
- if (!len) {
- pr_err("add: invalid interface name\n");
- return -EINVAL;
- }
-
- ndev = dev_get_by_name(&init_net, intf);
- if (!ndev) {
- pr_err("interface %s not found\n", intf);
- return -EINVAL;
- }
-
- if (is_vlan_dev(ndev)) {
- pr_err("rxe creation allowed on top of a real device only\n");
- err = -EPERM;
- goto err;
- }
-
- exists = rxe_get_dev_from_net(ndev);
- if (exists) {
- ib_device_put(&exists->ib_dev);
- pr_err("already configured on %s\n", intf);
- err = -EINVAL;
- goto err;
- }
-
- err = rxe_net_add("rxe%d", ndev);
- if (err) {
- pr_err("failed to add %s\n", intf);
- goto err;
- }
-
-err:
- dev_put(ndev);
- return err;
-}
-
-static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
-{
- int len;
- char intf[32];
- struct ib_device *ib_dev;
-
- len = sanitize_arg(val, intf, sizeof(intf));
- if (!len) {
- pr_err("add: invalid interface name\n");
- return -EINVAL;
- }
-
- if (strncmp("all", intf, len) == 0) {
- pr_info("rxe_sys: remove all");
- ib_unregister_driver(RDMA_DRIVER_RXE);
- return 0;
- }
-
- ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE);
- if (!ib_dev) {
- pr_err("not configured on %s\n", intf);
- return -EINVAL;
- }
-
- ib_unregister_device_and_put(ib_dev);
-
- return 0;
-}
-
-static const struct kernel_param_ops rxe_add_ops = {
- .set = rxe_param_set_add,
-};
-
-static const struct kernel_param_ops rxe_remove_ops = {
- .set = rxe_param_set_remove,
-};
-
-module_param_cb(add, &rxe_add_ops, NULL, 0200);
-MODULE_PARM_DESC(add, "DEPRECATED. Create RXE device over network interface");
-module_param_cb(remove, &rxe_remove_ops, NULL, 0200);
-MODULE_PARM_DESC(remove, "DEPRECATED. Remove RXE device over network interface");
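
With rxe_sysfs.c gone, the long-deprecated add/remove module parameters disappear with it, and device creation is left entirely to the rdma netlink interface, e.g. rdma link add rxe0 type rxe netdev eth0 and rdma link del rxe0 from iproute2 (the rxe0/eth0 names here are illustrative).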
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6951fdcb31bf..0c4db5bb17d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -32,25 +32,24 @@ void rxe_do_task(struct tasklet_struct *t)
{
int cont;
int ret;
- unsigned long flags;
struct rxe_task *task = from_tasklet(task, t, tasklet);
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_START:
task->state = TASK_STATE_BUSY;
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
break;
case TASK_STATE_BUSY:
task->state = TASK_STATE_ARMED;
fallthrough;
case TASK_STATE_ARMED:
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
return;
default:
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
pr_warn("%s failed with bad state %d\n", __func__, task->state);
return;
}
@@ -59,7 +58,7 @@ void rxe_do_task(struct tasklet_struct *t)
cont = 0;
ret = task->func(task->arg);
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_BUSY:
if (ret)
@@ -81,7 +80,7 @@ void rxe_do_task(struct tasklet_struct *t)
pr_warn("%s failed with bad state %d\n", __func__,
task->state);
}
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
} while (cont);
task->ret = ret;
@@ -106,7 +105,6 @@ int rxe_init_task(void *obj, struct rxe_task *task,
void rxe_cleanup_task(struct rxe_task *task)
{
- unsigned long flags;
bool idle;
/*
@@ -116,9 +114,9 @@ void rxe_cleanup_task(struct rxe_task *task)
task->destroyed = true;
do {
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
idle = (task->state == TASK_STATE_START);
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
} while (!idle);
tasklet_kill(&task->tasklet);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 0aa0d7e52773..915ad6664321 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -182,7 +182,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
/* create index > 0 */
rxe_add_index(ah);
- ah->ah_num = ah->pelem.index;
+ ah->ah_num = ah->elem.index;
if (uresp) {
/* only if new user provider */
@@ -383,10 +383,9 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
int err = 0;
- unsigned long flags;
struct rxe_srq *srq = to_rsrq(ibsrq);
- spin_lock_irqsave(&srq->rq.producer_lock, flags);
+ spin_lock_bh(&srq->rq.producer_lock);
while (wr) {
err = post_one_recv(&srq->rq, wr);
@@ -395,7 +394,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
wr = wr->next;
}
- spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
+ spin_unlock_bh(&srq->rq.producer_lock);
if (err)
*bad_wr = wr;
@@ -469,6 +468,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (err)
goto err1;
+ if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
+ qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ qp->ibqp.qp_num,
+ qp->attr.dest_qp_num);
+
return 0;
err1:
@@ -634,19 +638,18 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
int err;
struct rxe_sq *sq = &qp->sq;
struct rxe_send_wqe *send_wqe;
- unsigned long flags;
int full;
err = validate_send_wr(qp, ibwr, mask, length);
if (err)
return err;
- spin_lock_irqsave(&qp->sq.sq_lock, flags);
+ spin_lock_bh(&qp->sq.sq_lock);
full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
if (unlikely(full)) {
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ spin_unlock_bh(&qp->sq.sq_lock);
return -ENOMEM;
}
@@ -655,7 +658,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ spin_unlock_bh(&qp->sq.sq_lock);
return 0;
}
@@ -735,7 +738,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int err = 0;
struct rxe_qp *qp = to_rqp(ibqp);
struct rxe_rq *rq = &qp->rq;
- unsigned long flags;
if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
*bad_wr = wr;
@@ -749,7 +751,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
goto err1;
}
- spin_lock_irqsave(&rq->producer_lock, flags);
+ spin_lock_bh(&rq->producer_lock);
while (wr) {
err = post_one_recv(rq, wr);
@@ -760,7 +762,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
wr = wr->next;
}
- spin_unlock_irqrestore(&rq->producer_lock, flags);
+ spin_unlock_bh(&rq->producer_lock);
if (qp->resp.state == QP_STATE_ERROR)
rxe_run_task(&qp->resp.task, 1);
@@ -841,9 +843,8 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int i;
struct rxe_cq *cq = to_rcq(ibcq);
struct rxe_cqe *cqe;
- unsigned long flags;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
for (i = 0; i < num_entries; i++) {
cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
if (!cqe)
@@ -852,7 +853,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
memcpy(wc++, &cqe->ibwc, sizeof(*wc));
queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
}
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
return i;
}
@@ -870,11 +871,10 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct rxe_cq *cq = to_rcq(ibcq);
- unsigned long irq_flags;
int ret = 0;
int empty;
- spin_lock_irqsave(&cq->cq_lock, irq_flags);
+ spin_lock_bh(&cq->cq_lock);
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
@@ -883,7 +883,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
ret = 1;
- spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+ spin_unlock_bh(&cq->cq_lock);
return ret;
}
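
The new hunk in rxe_modify_qp() refreshes qp->src_port from the GRH flow label whenever the address vector changes, keeping the RoCEv2 UDP source port consistent with what the peer derives. A runnable sketch of the fold rdma_get_udp_sport() ends up applying, assuming the rdma_flow_label_to_udp_sport() scheme from rdma/ib_verbs.h:

#include <stdint.h>
#include <stdio.h>

/* fold a 20-bit flow label into the 0xc000..0xffff dynamic port range */
static uint16_t flow_label_to_udp_sport(uint32_t fl)
{
	uint32_t fl_low = fl & 0x03fff;
	uint32_t fl_high = fl & 0xfc000;

	fl_low ^= fl_high >> 14;	/* mix the top 6 bits into the low 14 */
	return (uint16_t)(fl_low | 0xc000);
}

int main(void)
{
	printf("fl 0x12345 -> sport 0x%04x\n",
	       flow_label_to_udp_sport(0x12345));
	return 0;
}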
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 35e041450090..e48969e8d4c8 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -35,17 +35,17 @@ static inline int psn_compare(u32 psn_a, u32 psn_b)
struct rxe_ucontext {
struct ib_ucontext ibuc;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
};
struct rxe_pd {
struct ib_pd ibpd;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
};
struct rxe_ah {
struct ib_ah ibah;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_av av;
bool is_user;
int ah_num;
@@ -60,7 +60,7 @@ struct rxe_cqe {
struct rxe_cq {
struct ib_cq ibcq;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_queue *queue;
spinlock_t cq_lock;
u8 notify;
@@ -95,7 +95,7 @@ struct rxe_rq {
struct rxe_srq {
struct ib_srq ibsrq;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_pd *pd;
struct rxe_rq rq;
u32 srq_num;
@@ -209,7 +209,7 @@ struct rxe_resp_info {
struct rxe_qp {
struct ib_qp ibqp;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct ib_qp_attr attr;
unsigned int valid;
unsigned int mtu;
@@ -309,7 +309,7 @@ static inline int rkey_is_mw(u32 rkey)
}
struct rxe_mr {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct ib_mr ibmr;
struct ib_umem *umem;
@@ -342,7 +342,7 @@ enum rxe_mw_state {
struct rxe_mw {
struct ib_mw ibmw;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
spinlock_t lock;
enum rxe_mw_state state;
struct rxe_qp *qp; /* Type 2 only */
@@ -354,7 +354,7 @@ struct rxe_mw {
};
struct rxe_mc_grp {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
spinlock_t mcg_lock; /* guard group */
struct rxe_dev *rxe;
struct list_head qp_list;
@@ -365,7 +365,7 @@ struct rxe_mc_grp {
};
struct rxe_mc_elem {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct list_head qp_list;
struct list_head grp_list;
struct rxe_qp *qp;
@@ -392,8 +392,6 @@ struct rxe_dev {
struct net_device *ndev;
- int xmit_errors;
-
struct rxe_pool uc_pool;
struct rxe_pool pd_pool;
struct rxe_pool ah_pool;
@@ -484,6 +482,6 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *elem);
#endif /* RXE_VERBS_H */
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 9093e6a80b26..e5c586913d0b 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -98,15 +98,14 @@ static int siw_create_tx_threads(void)
continue;
siw_tx_thread[cpu] =
- kthread_create(siw_run_sq, (unsigned long *)(long)cpu,
- "siw_tx/%d", cpu);
+ kthread_run_on_cpu(siw_run_sq,
+ (unsigned long *)(long)cpu,
+ cpu, "siw_tx/%u");
if (IS_ERR(siw_tx_thread[cpu])) {
siw_tx_thread[cpu] = NULL;
continue;
}
- kthread_bind(siw_tx_thread[cpu], cpu);
- wake_up_process(siw_tx_thread[cpu]);
assigned++;
}
return assigned;
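
siw_create_tx_threads() now uses kthread_run_on_cpu(), a helper added in the 5.17 cycle that wraps the create/bind/wake sequence removed above. A kernel-style sketch of the equivalence (illustrative names, not a standalone program):

#include <linux/kthread.h>

static int demo_fn(void *data)
{
	return 0;
}

static struct task_struct *demo_old_way(unsigned int cpu)
{
	struct task_struct *t;

	t = kthread_create(demo_fn, NULL, "demo/%u", cpu);
	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);	/* pin before the first wakeup */
		wake_up_process(t);
	}
	return t;
}

static struct task_struct *demo_new_way(unsigned int cpu)
{
	/* namefmt receives the cpu number, hence the single %u */
	return kthread_run_on_cpu(demo_fn, NULL, cpu, "demo/%u");
}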
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 1b36350601fa..a3dd2cb6d5c9 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -8,6 +8,7 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
+#include <net/addrconf.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
@@ -155,7 +156,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
attr->vendor_id = SIW_VENDOR_ID;
attr->vendor_part_id = sdev->vendor_part_id;
- memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ sdev->netdev->dev_addr);
return 0;
}
@@ -660,7 +662,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
kbuf += core_sge->length;
core_sge++;
}
- sqe->sge[0].length = bytes > 0 ? bytes : 0;
+ sqe->sge[0].length = max(bytes, 0);
sqe->num_sge = bytes > 0 ? 1 : 0;
return bytes;
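
siw_query_device() previously copied the 6-byte MAC into the 8-byte sys_image_guid and left the tail alone; addrconf_addr_eui48() builds a proper EUI-64 instead. A runnable sketch of that expansion, assuming the usual EUI-48 to EUI-64 mapping (FF:FE infix, universal/local bit flipped):

#include <stdint.h>
#include <stdio.h>

static void addr_eui48(uint8_t eui[8], const uint8_t addr[6])
{
	eui[0] = addr[0] ^ 2;	/* flip the U/L bit */
	eui[1] = addr[1];
	eui[2] = addr[2];
	eui[3] = 0xff;		/* FF:FE infix marks an expanded EUI-48 */
	eui[4] = 0xfe;
	eui[5] = addr[3];
	eui[6] = addr[4];
	eui[7] = addr[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	uint8_t eui[8];
	int i;

	addr_eui48(eui, mac);
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i < 7 ? ":" : "\n");
	return 0;
}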