Diffstat (limited to 'drivers/infiniband/hw/irdma')
-rw-r--r--  drivers/infiniband/hw/irdma/uk.c    | 170
-rw-r--r--  drivers/infiniband/hw/irdma/user.h  |  20
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c |   2
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 145
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.h |  53
5 files changed, 199 insertions, 191 deletions
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index a6e5d350a94c..16183e894da7 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -566,21 +566,37 @@ static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
/**
* irdma_copy_inline_data_gen_1 - Copy inline data to wqe
- * @dest: pointer to wqe
- * @src: pointer to inline data
- * @len: length of inline data to copy
+ * @wqe: pointer to wqe
+ * @sge_list: table of pointers to inline data
+ * @num_sges: number of SGEs
* @polarity: compatibility parameter
*/
-static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
- u8 polarity)
+static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity)
{
- if (len <= 16) {
- memcpy(dest, src, len);
- } else {
- memcpy(dest, src, 16);
- src += 16;
- dest = dest + 32;
- memcpy(dest, src, len - 16);
+ u32 quanta_bytes_remaining = 16;
+ int i;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ /* Remaining inline bytes reside after hdr */
+ wqe += 16;
+ quanta_bytes_remaining = 32;
+ }
+ }
}
}
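
The GEN_1 walk above is easiest to see with a stand-alone model. Below is a minimal user-space sketch (illustrative only, not the kernel routine; buffer sizes and names are assumptions): the first quantum holds 16 inline bytes, and whenever the budget hits zero the cursor jumps 16 bytes (data resumes after the next header area, per the comment above) and continues with a 32-byte budget.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sge_model { const uint8_t *addr; uint32_t length; };

/* Mirror of irdma_copy_inline_data_gen_1(), printing where bytes land. */
static void gen1_copy_model(uint8_t *wqe_base, const struct sge_model *sges,
			    uint32_t num_sges)
{
	uint8_t *wqe = wqe_base;
	uint32_t quanta_bytes_remaining = 16;	/* first quantum: 16 bytes */
	uint32_t i;

	for (i = 0; i < num_sges; i++) {
		const uint8_t *cur_sge = sges[i].addr;
		uint32_t sge_len = sges[i].length;

		while (sge_len) {
			uint32_t n = sge_len < quanta_bytes_remaining ?
				     sge_len : quanta_bytes_remaining;

			printf("copy %u bytes at wqe offset %td\n",
			       n, wqe - wqe_base);
			memcpy(wqe, cur_sge, n);
			wqe += n;
			cur_sge += n;
			quanta_bytes_remaining -= n;
			sge_len -= n;

			if (!quanta_bytes_remaining) {
				wqe += 16;	/* data resumes after hdr */
				quanta_bytes_remaining = 32;
			}
		}
	}
}

int main(void)
{
	uint8_t data[48] = { 0 }, wqe[96] = { 0 };
	struct sge_model sges[2] = {
		{ .addr = data,      .length = 20 },
		{ .addr = data + 20, .length = 28 },
	};

	gen1_copy_model(wqe, sges, 2);	/* offsets 0 (16B), 32 (4B), 36 (28B) */
	return 0;
}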
@@ -612,35 +628,51 @@ static void irdma_set_mw_bind_wqe(__le64 *wqe,
/**
* irdma_copy_inline_data - Copy inline data to wqe
- * @dest: pointer to wqe
- * @src: pointer to inline data
- * @len: length of inline data to copy
+ * @wqe: pointer to wqe
+ * @sge_list: table of pointers to inline data
+ * @num_sges: number of SGEs
* @polarity: polarity of wqe valid bit
*/
-static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
+static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity)
{
u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
- u32 copy_size;
-
- dest += 8;
- if (len <= 8) {
- memcpy(dest, src, len);
- return;
- }
-
- *((u64 *)dest) = *((u64 *)src);
- len -= 8;
- src += 8;
- dest += 24; /* point to additional 32 byte quanta */
-
- while (len) {
- copy_size = len < 31 ? len : 31;
- memcpy(dest, src, copy_size);
- *(dest + 31) = inline_valid;
- len -= copy_size;
- dest += 32;
- src += copy_size;
+ u32 quanta_bytes_remaining = 8;
+ bool first_quanta = true;
+ int i;
+
+ wqe += 8;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ quanta_bytes_remaining = 31;
+
+ /* Remaining inline bytes reside after hdr */
+ if (first_quanta) {
+ first_quanta = false;
+ wqe += 16;
+ } else {
+ *wqe = inline_valid;
+ wqe++;
+ }
+ }
+ }
}
+ if (!first_quanta && quanta_bytes_remaining < 31)
+ *(wqe + quanta_bytes_remaining) = inline_valid;
}
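
The GEN_2 layout above implies a simple capacity rule: the first quantum carries 8 data bytes (the copy starts at wqe + 8), and every subsequent quantum carries 31 data bytes plus the valid byte at offset 31. A hedged model of the resulting quanta count follows; it is an illustration of the math only, not the driver's actual iw_inline_data_size_to_quanta callback.

/* Model only: capacity is 8 + 31 * (quanta - 1) data bytes, so for
 * example a 101-byte inline limit corresponds to 4 quanta
 * (8 + 3 * 31 = 101).
 */
static inline unsigned int inline_quanta_model(unsigned int len)
{
	if (len <= 8)
		return 1;
	return 2 + (len - 9) / 31;	/* 1 + ceil((len - 8) / 31) */
}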
/**
@@ -679,20 +711,27 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
- struct irdma_inline_rdma_write *op_info;
+ struct irdma_rdma_write *op_info;
u64 hdr = 0;
u32 wqe_idx;
bool read_fence = false;
+ u32 i, total_size = 0;
u16 quanta;
info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.inline_rdma_write;
+ op_info = &info->op.rdma_write;
+
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].length;
- if (op_info->len > qp->max_inline_data)
+ if (unlikely(total_size > qp->max_inline_data))
return -EINVAL;
- quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
return -ENOMEM;
@@ -705,7 +744,7 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
- FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
@@ -719,7 +758,8 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
set_64bit_val(wqe, 0,
FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
- qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
+ op_info->num_lo_sges,
qp->swqe_polarity);
dma_wmb(); /* make sure WQE is populated before valid bit is set */
@@ -745,20 +785,27 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
- struct irdma_post_inline_send *op_info;
+ struct irdma_post_send *op_info;
u64 hdr;
u32 wqe_idx;
bool read_fence = false;
+ u32 i, total_size = 0;
u16 quanta;
info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.inline_send;
+ op_info = &info->op.send;
+
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
+ return -EINVAL;
- if (op_info->len > qp->max_inline_data)
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].length;
+
+ if (unlikely(total_size > qp->max_inline_data))
return -EINVAL;
- quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
return -ENOMEM;
@@ -773,7 +820,7 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
- FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
(info->imm_data_valid ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
@@ -789,8 +836,8 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
if (info->imm_data_valid)
set_64bit_val(wqe, 0,
FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
- qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
- qp->swqe_polarity);
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
+ op_info->num_sges, qp->swqe_polarity);
dma_wmb(); /* make sure WQE is populated before valid bit is set */
@@ -1002,11 +1049,10 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
__le64 *cqe;
struct irdma_qp_uk *qp;
struct irdma_ring *pring = NULL;
- u32 wqe_idx, q_type;
+ u32 wqe_idx;
int ret_code;
bool move_cq_head = true;
u8 polarity;
- u8 op_type;
bool ext_valid;
__le64 *ext_cqe;
@@ -1074,7 +1120,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->ud_vlan_valid = false;
}
- q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
@@ -1113,8 +1159,9 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
}
wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
+ info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
- if (q_type == IRDMA_CQE_QTYPE_RQ) {
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
u32 array_idx;
array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
@@ -1134,10 +1181,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
- if (info->imm_valid)
- info->op_type = IRDMA_OP_TYPE_REC_IMM;
- else
- info->op_type = IRDMA_OP_TYPE_REC;
if (qword3 & IRDMACQ_STAG) {
info->stag_invalid_set = true;
info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
@@ -1195,17 +1238,18 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
sw_wqe = qp->sq_base[tail].elem;
get_64bit_val(sw_wqe, 24,
&wqe_qword);
- op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
- info->op_type = op_type;
+ info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
+ wqe_qword);
IRDMA_RING_SET_TAIL(qp->sq_ring,
tail + qp->sq_wrtrk_array[tail].quanta);
- if (op_type != IRDMAQP_OP_NOP) {
+ if (info->op_type != IRDMAQP_OP_NOP) {
info->wr_id = qp->sq_wrtrk_array[tail].wrid;
info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
break;
}
} while (1);
- if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
+ if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
+ info->minor_err == FLUSH_PROT_ERR)
info->minor_err = FLUSH_MW_BIND_ERR;
qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
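
With the dedicated inline op structs removed (see the user.h hunks below), inline and non-inline posts are built from the same scatter list. A hypothetical caller sketch, assuming illustrative buffers and a valid ukqp; only fields visible in this diff are set:

/* Hypothetical helper; ukqp, hdr_buf, payload and wr_id are illustrative. */
static int post_two_sge_inline_send(struct irdma_qp_uk *ukqp, u64 wr_id,
				    void *hdr_buf, void *payload)
{
	struct ib_sge sges[2] = {
		{ .addr = (uintptr_t)hdr_buf, .length = 16 },
		{ .addr = (uintptr_t)payload, .length = 32 },
	};
	struct irdma_post_sq_info info = {
		.wr_id = wr_id,
		.op_type = IRDMA_OP_TYPE_SEND,
		.op.send = { .sg_list = sges, .num_sges = 2 },
	};

	/* SGE lengths are summed and checked against max_inline_data
	 * inside irdma_uk_inline_send(); lkey is ignored for inline data.
	 */
	return irdma_uk_inline_send(ukqp, &info, true);	/* post_sq = true */
}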
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index 2ef61923c926..d0cdf609f5e0 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -173,14 +173,6 @@ struct irdma_post_send {
u32 ah_id;
};
-struct irdma_post_inline_send {
- void *data;
- u32 len;
- u32 qkey;
- u32 dest_qp;
- u32 ah_id;
-};
-
struct irdma_post_rq_info {
u64 wr_id;
struct ib_sge *sg_list;
@@ -193,12 +185,6 @@ struct irdma_rdma_write {
struct ib_sge rem_addr;
};
-struct irdma_inline_rdma_write {
- void *data;
- u32 len;
- struct ib_sge rem_addr;
-};
-
struct irdma_rdma_read {
struct ib_sge *lo_sg_list;
u32 num_lo_sges;
@@ -241,8 +227,6 @@ struct irdma_post_sq_info {
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
- struct irdma_inline_rdma_write inline_rdma_write;
- struct irdma_post_inline_send inline_send;
} op;
};
@@ -261,6 +245,7 @@ struct irdma_cq_poll_info {
u16 ud_vlan;
u8 ud_smac[6];
u8 op_type;
+ u8 q_type;
bool stag_invalid_set:1; /* or L_R_Key set */
bool push_dropped:1;
bool error:1;
@@ -291,7 +276,8 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
bool post_sq);
struct irdma_wqe_uk_ops {
- void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
+ void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
u8 valid);
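
Both hardware generations sit behind this ops table. Below is a sketch of the wiring in uk.c, reconstructed from the signatures here; the inline-copy and MW-bind function names appear in the hunks above, while the fragment and quanta callback names are assumptions:

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};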
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 8dfc9e154d73..445e69e86409 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2591,6 +2591,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
sw_wqe = qp->sq_base[wqe_idx].elem;
get_64bit_val(sw_wqe, 24, &wqe_qword);
cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
+ cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
/* remove the SQ WR by moving SQ tail*/
IRDMA_RING_SET_TAIL(*sq_ring,
sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
@@ -2629,6 +2630,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
+ cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
/* remove the RQ WR by moving RQ tail */
IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
ibdev_dbg(iwqp->iwrcq->ibcq.device,
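
For context on the opcode extraction above (mirroring the poll path in uk.c): the opcode is pulled with FIELD_GET from the WQE qword read at byte offset 24. A stand-alone analogue, with an assumed 6-bit field at bit 32 (the real IRDMAQPSQ_OPCODE mask is defined in irdma_defs.h):

#include <stdint.h>
#include <stdio.h>

#define OPCODE_SHIFT 32
#define OPCODE_MASK  (0x3fULL << OPCODE_SHIFT)	/* assumed; see irdma_defs.h */

int main(void)
{
	uint64_t wqe_qword = (uint64_t)0x08 << OPCODE_SHIFT;
	uint8_t op_type = (wqe_qword & OPCODE_MASK) >> OPCODE_SHIFT;

	printf("op_type = %u\n", (unsigned)op_type);	/* prints 8 */
	return 0;
}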
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index a22afbb25bc5..f6973ea55eda 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -64,36 +64,6 @@ static int irdma_query_device(struct ib_device *ibdev,
}
/**
- * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
- * @link_speed: netdev phy link speed
- * @active_speed: IB port speed
- * @active_width: IB port width
- */
-static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed,
- u8 *active_width)
-{
- if (link_speed <= SPEED_1000) {
- *active_width = IB_WIDTH_1X;
- *active_speed = IB_SPEED_SDR;
- } else if (link_speed <= SPEED_10000) {
- *active_width = IB_WIDTH_1X;
- *active_speed = IB_SPEED_FDR10;
- } else if (link_speed <= SPEED_20000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_DDR;
- } else if (link_speed <= SPEED_25000) {
- *active_width = IB_WIDTH_1X;
- *active_speed = IB_SPEED_EDR;
- } else if (link_speed <= SPEED_40000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_FDR10;
- } else {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_EDR;
- }
-}
-
-/**
* irdma_query_port - get port attributes
* @ibdev: device pointer from stack
* @port: port number for query
@@ -120,8 +90,9 @@ static int irdma_query_port(struct ib_device *ibdev, u32 port,
props->state = IB_PORT_DOWN;
props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
- irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
- &props->active_width);
+
+ ib_get_eth_speed(ibdev, port, &props->active_speed,
+ &props->active_width);
if (rdma_protocol_roce(ibdev, 1)) {
props->gid_tbl_len = 32;
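
ib_get_eth_speed() is the RDMA core helper (declared in include/rdma/ib_verbs.h) that derives a speed/width pair from the underlying netdev's ethtool link settings, which is what makes the per-driver mapping table deleted above unnecessary. A minimal caller sketch:

static int example_query_speed(struct ib_device *ibdev, u32 port,
			       struct ib_port_attr *props)
{
	/* Returns an errno if no netdev is bound to the port; otherwise
	 * fills both fields, falling back to a default when the link
	 * speed is unknown.
	 */
	return ib_get_eth_speed(ibdev, port, &props->active_speed,
				&props->active_width);
}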
@@ -1242,6 +1213,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
av->attrs = attr->ah_attr;
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
+ av->net_type = rdma_gid_attr_network_type(sgid_attr);
if (av->net_type == RDMA_NETWORK_IPV6) {
__be32 *daddr =
av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
@@ -2358,9 +2330,10 @@ static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
* @rf: RDMA PCI function
* @iwmr: mr pointer for this memory registration
* @use_pbles: flag if to use pble's
+ * @lvl_1_only: request only level 1 pble if true
*/
static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
- bool use_pbles)
+ bool use_pbles, bool lvl_1_only)
{
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
@@ -2371,7 +2344,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
if (use_pbles) {
status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
- false);
+ lvl_1_only);
if (status)
return status;
@@ -2414,16 +2387,10 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
bool ret = true;
pg_size = iwmr->page_size;
- err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
+ err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true);
if (err)
return err;
- if (use_pbles && palloc->level != PBLE_LEVEL_1) {
- irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
- iwpbl->pbl_allocated = false;
- return -ENOMEM;
- }
-
if (use_pbles)
arr = palloc->level1.addr;
@@ -2899,7 +2866,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
case IRDMA_MEMREG_TYPE_MEM:
use_pbles = (iwmr->page_cnt != 1);
- err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
+ err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
if (err)
goto error;
@@ -3165,30 +3132,20 @@ static int irdma_post_send(struct ib_qp *ibqp,
info.stag_to_inv = ib_wr->ex.invalidate_rkey;
}
- if (ib_wr->send_flags & IB_SEND_INLINE) {
- info.op.inline_send.data = (void *)(unsigned long)
- ib_wr->sg_list[0].addr;
- info.op.inline_send.len = ib_wr->sg_list[0].length;
- if (iwqp->ibqp.qp_type == IB_QPT_UD ||
- iwqp->ibqp.qp_type == IB_QPT_GSI) {
- ah = to_iwah(ud_wr(ib_wr)->ah);
- info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;
- info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
- info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
- }
+ info.op.send.num_sges = ib_wr->num_sge;
+ info.op.send.sg_list = ib_wr->sg_list;
+ if (iwqp->ibqp.qp_type == IB_QPT_UD ||
+ iwqp->ibqp.qp_type == IB_QPT_GSI) {
+ ah = to_iwah(ud_wr(ib_wr)->ah);
+ info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
+ info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
+ info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
+ }
+
+ if (ib_wr->send_flags & IB_SEND_INLINE)
err = irdma_uk_inline_send(ukqp, &info, false);
- } else {
- info.op.send.num_sges = ib_wr->num_sge;
- info.op.send.sg_list = ib_wr->sg_list;
- if (iwqp->ibqp.qp_type == IB_QPT_UD ||
- iwqp->ibqp.qp_type == IB_QPT_GSI) {
- ah = to_iwah(ud_wr(ib_wr)->ah);
- info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
- info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
- info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
- }
+ else
err = irdma_uk_send(ukqp, &info, false);
- }
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
@@ -3205,22 +3162,15 @@ static int irdma_post_send(struct ib_qp *ibqp,
else
info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
- if (ib_wr->send_flags & IB_SEND_INLINE) {
- info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
- info.op.inline_rdma_write.len =
- ib_wr->sg_list[0].length;
- info.op.inline_rdma_write.rem_addr.addr =
- rdma_wr(ib_wr)->remote_addr;
- info.op.inline_rdma_write.rem_addr.lkey =
- rdma_wr(ib_wr)->rkey;
+ info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+ info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
+ info.op.rdma_write.rem_addr.addr =
+ rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
+ if (ib_wr->send_flags & IB_SEND_INLINE)
err = irdma_uk_inline_rdma_write(ukqp, &info, false);
- } else {
- info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
- info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
- info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
+ else
err = irdma_uk_rdma_write(ukqp, &info, false);
- }
break;
case IB_WR_RDMA_READ_WITH_INV:
inv_stag = true;
@@ -3380,7 +3330,6 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
static void irdma_process_cqe(struct ib_wc *entry,
struct irdma_cq_poll_info *cq_poll_info)
{
- struct irdma_qp *iwqp;
struct irdma_sc_qp *qp;
entry->wc_flags = 0;
@@ -3388,7 +3337,6 @@ static void irdma_process_cqe(struct ib_wc *entry,
entry->wr_id = cq_poll_info->wr_id;
qp = cq_poll_info->qp_handle;
- iwqp = qp->qp_uk.back_qp;
entry->qp = qp->qp_uk.back_qp;
if (cq_poll_info->error) {
@@ -3421,42 +3369,17 @@ static void irdma_process_cqe(struct ib_wc *entry,
}
}
- switch (cq_poll_info->op_type) {
- case IRDMA_OP_TYPE_RDMA_WRITE:
- case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
- entry->opcode = IB_WC_RDMA_WRITE;
- break;
- case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
- case IRDMA_OP_TYPE_RDMA_READ:
- entry->opcode = IB_WC_RDMA_READ;
- break;
- case IRDMA_OP_TYPE_SEND_INV:
- case IRDMA_OP_TYPE_SEND_SOL:
- case IRDMA_OP_TYPE_SEND_SOL_INV:
- case IRDMA_OP_TYPE_SEND:
- entry->opcode = IB_WC_SEND;
- break;
- case IRDMA_OP_TYPE_FAST_REG_NSMR:
- entry->opcode = IB_WC_REG_MR;
- break;
- case IRDMA_OP_TYPE_INV_STAG:
- entry->opcode = IB_WC_LOCAL_INV;
- break;
- case IRDMA_OP_TYPE_REC_IMM:
- case IRDMA_OP_TYPE_REC:
- entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ?
- IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
+ if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
+ set_ib_wc_op_sq(cq_poll_info, entry);
+ } else {
+ set_ib_wc_op_rq(cq_poll_info, entry,
+ qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
+ true : false);
if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
cq_poll_info->stag_invalid_set) {
entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
entry->wc_flags |= IB_WC_WITH_INVALIDATE;
}
- break;
- default:
- ibdev_err(&iwqp->iwdev->ibdev,
- "Invalid opcode = %d in CQE\n", cq_poll_info->op_type);
- entry->status = IB_WC_GENERAL_ERR;
- return;
}
if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index 4309b7159f42..a536e9fa85eb 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -232,6 +232,59 @@ static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
+static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
+ struct ib_wc *entry)
+{
+ switch (cq_poll_info->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
+ case IRDMA_OP_TYPE_RDMA_READ:
+ entry->opcode = IB_WC_RDMA_READ;
+ break;
+ case IRDMA_OP_TYPE_SEND_SOL:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND:
+ entry->opcode = IB_WC_SEND;
+ break;
+ case IRDMA_OP_TYPE_FAST_REG_NSMR:
+ entry->opcode = IB_WC_REG_MR;
+ break;
+ case IRDMA_OP_TYPE_INV_STAG:
+ entry->opcode = IB_WC_LOCAL_INV;
+ break;
+ default:
+ entry->status = IB_WC_GENERAL_ERR;
+ }
+}
+
+static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
+ struct ib_wc *entry, bool send_imm_support)
+{
+ /**
+ * iWARP does not support sendImm, so the presence of Imm data
+ * must be WriteImm.
+ */
+ if (!send_imm_support) {
+ entry->opcode = cq_poll_info->imm_valid ?
+ IB_WC_RECV_RDMA_WITH_IMM :
+ IB_WC_RECV;
+ return;
+ }
+
+ switch (cq_poll_info->op_type) {
+ case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
+ case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ }
+}
+
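
Taken together with the irdma_process_cqe() hunk above, the intended dispatch is by queue type rather than by guessing from the opcode; a condensed sketch (helper name illustrative):

static void fill_wc_opcode(struct irdma_sc_qp *qp,
			   struct irdma_cq_poll_info *cpi,
			   struct ib_wc *entry)
{
	if (cpi->q_type == IRDMA_CQE_QTYPE_SQ)
		set_ib_wc_op_sq(cpi, entry);
	else
		set_ib_wc_op_rq(cpi, entry,
				!!(qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM));
}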
void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);