Diffstat (limited to 'drivers/infiniband/hw/hns')
 drivers/infiniband/hw/hns/Kconfig           |   1
 drivers/infiniband/hw/hns/hns_roce_ah.c     |   6
 drivers/infiniband/hw/hns/hns_roce_device.h |  45
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |   4
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 629
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  96
 drivers/infiniband/hw/hns/hns_roce_main.c   | 123
 drivers/infiniband/hw/hns/hns_roce_mr.c     | 212
 drivers/infiniband/hw/hns/hns_roce_qp.c     |  41
 9 files changed, 791 insertions(+), 366 deletions(-)
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index fddb5fdf92de..21c2100b2ea9 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig @@ -1,6 +1,7 @@ config INFINIBAND_HNS tristate "HNS RoCE Driver" depends on NET_VENDOR_HISILICON + depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS depends on ARM64 || (COMPILE_TEST && 64BIT) ---help--- This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index 0d96c5bb38cd..9990dc9eb96a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -49,6 +49,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct hns_roce_ah *ah; u16 vlan_tag = 0xffff; const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); + bool vlan_en = false; ah = kzalloc(sizeof(*ah), GFP_ATOMIC); if (!ah) @@ -58,8 +59,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); gid_attr = ah_attr->grh.sgid_attr; - if (is_vlan_dev(gid_attr->ndev)) + if (is_vlan_dev(gid_attr->ndev)) { vlan_tag = vlan_dev_vlan_id(gid_attr->ndev); + vlan_en = true; + } if (vlan_tag < 0x1000) vlan_tag |= (rdma_ah_get_sl(ah_attr) & @@ -71,6 +74,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, HNS_ROCE_PORT_NUM_SHIFT)); ah->av.gid_index = grh->sgid_index; ah->av.vlan = cpu_to_le16(vlan_tag); + ah->av.vlan_en = vlan_en; dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index, ah->av.vlan); diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 9a24fd0ee3e7..d39bdfdb5de9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -88,8 +88,11 @@ #define BITMAP_RR 1 #define MR_TYPE_MR 0x00 +#define MR_TYPE_FRMR 0x01 #define MR_TYPE_DMA 0x03 +#define HNS_ROCE_FRMR_MAX_PA 512 + #define PKEY_ID 0xffff #define GUID_LEN 8 #define NODE_DESC_SIZE 64 @@ -193,6 +196,9 @@ enum { HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2), HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3), HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4), + HNS_ROCE_CAP_FLAG_MW = BIT(7), + HNS_ROCE_CAP_FLAG_FRMR = BIT(8), + HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10), }; enum hns_roce_mtt_type { @@ -219,19 +225,11 @@ struct hns_roce_uar { unsigned long logic_idx; }; -struct hns_roce_vma_data { - struct list_head list; - struct vm_area_struct *vma; - struct mutex *vma_list_mutex; -}; - struct hns_roce_ucontext { struct ib_ucontext ibucontext; struct hns_roce_uar uar; struct list_head page_list; struct mutex page_mutex; - struct list_head vma_list; - struct mutex vma_list_mutex; }; struct hns_roce_pd { @@ -293,6 +291,16 @@ struct hns_roce_mtt { enum hns_roce_mtt_type mtt_type; }; +struct hns_roce_mw { + struct ib_mw ibmw; + u32 pdn; + u32 rkey; + int enabled; /* MW's active status */ + u32 pbl_hop_num; + u32 pbl_ba_pg_sz; + u32 pbl_buf_pg_sz; +}; + /* Only support 4K page size for mr register */ #define MR_SIZE_4K 0 @@ -304,6 +312,7 @@ struct hns_roce_mr { u32 key; /* Key of MR */ u32 pd; /* PD num of MR */ u32 access;/* Access permission of MR */ + u32 npages; int enabled; /* MR's active status */ int type; /* MR's register type */ u64 *pbl_buf;/* MR's PBL space */ @@ -457,6 +466,7 @@ struct hns_roce_av { u8 dgid[HNS_ROCE_GID_SIZE]; u8 mac[6]; __le16 vlan; + bool vlan_en; }; struct hns_roce_ah { @@ -656,6 +666,7 @@ struct hns_roce_eq_table { }; struct hns_roce_caps { + u64 fw_ver; u8 
num_ports; int gid_table_len[HNS_ROCE_MAX_PORTS]; int pkey_table_len[HNS_ROCE_MAX_PORTS]; @@ -665,7 +676,9 @@ struct hns_roce_caps { u32 max_sq_sg; /* 2 */ u32 max_sq_inline; /* 32 */ u32 max_rq_sg; /* 2 */ + u32 max_extend_sg; int num_qps; /* 256k */ + int reserved_qps; u32 max_wqes; /* 16k */ u32 max_sq_desc_sz; /* 64 */ u32 max_rq_desc_sz; /* 64 */ @@ -738,6 +751,7 @@ struct hns_roce_work { struct hns_roce_dev *hr_dev; struct work_struct work; u32 qpn; + u32 cqn; int event_type; int sub_type; }; @@ -764,6 +778,8 @@ struct hns_roce_hw { struct hns_roce_mr *mr, int flags, u32 pdn, int mr_access_flags, u64 iova, u64 size, void *mb_buf); + int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr); + int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw); void (*write_cqc)(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, dma_addr_t dma_handle, int nent, u32 vector); @@ -863,6 +879,11 @@ static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr) return container_of(ibmr, struct hns_roce_mr, ibmr); } +static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw) +{ + return container_of(ibmw, struct hns_roce_mw, ibmw); +} + static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp) { return container_of(ibqp, struct hns_roce_qp, ibqp); @@ -968,12 +989,20 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_pd *pd, struct ib_udata *udata); +struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); +int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset); int hns_roce_dereg_mr(struct ib_mr *ibmr); int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev, struct hns_roce_cmd_mailbox *mailbox, unsigned long mpt_index); unsigned long key_to_hw_index(u32 key); +struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type, + struct ib_udata *udata); +int hns_roce_dealloc_mw(struct ib_mw *ibmw); + void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size, struct hns_roce_buf *buf); int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 081aa91fc162..ca05810c92dc 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -731,7 +731,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) cq_init_attr.comp_vector = 0; cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL); if (IS_ERR(cq)) { - dev_err(dev, "Create cq for reseved loop qp failed!"); + dev_err(dev, "Create cq for reserved loop qp failed!"); return -ENOMEM; } free_mr->mr_free_cq = to_hr_cq(cq); @@ -744,7 +744,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL); if (IS_ERR(pd)) { - dev_err(dev, "Create pd for reseved loop qp failed!"); + dev_err(dev, "Create pd for reserved loop qp failed!"); ret = -ENOMEM; goto alloc_pd_failed; } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 0218c0f8c2a7..a4c62ae23a9a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -54,6 +54,59 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, dseg->len = cpu_to_le32(sg->length); } 
+static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + struct hns_roce_wqe_frmr_seg *fseg, + const struct ib_reg_wr *wr) +{ + struct hns_roce_mr *mr = to_hr_mr(wr->mr); + + /* use ib_access_flags */ + roce_set_bit(rc_sq_wqe->byte_4, + V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S, + wr->access & IB_ACCESS_MW_BIND ? 1 : 0); + roce_set_bit(rc_sq_wqe->byte_4, + V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S, + wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); + roce_set_bit(rc_sq_wqe->byte_4, + V2_RC_FRMR_WQE_BYTE_4_RR_S, + wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0); + roce_set_bit(rc_sq_wqe->byte_4, + V2_RC_FRMR_WQE_BYTE_4_RW_S, + wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0); + roce_set_bit(rc_sq_wqe->byte_4, + V2_RC_FRMR_WQE_BYTE_4_LW_S, + wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0); + + /* Data structure reuse may lead to confusion */ + rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff); + rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32); + + rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff); + rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32); + rc_sq_wqe->rkey = cpu_to_le32(wr->key); + rc_sq_wqe->va = cpu_to_le64(wr->mr->iova); + + fseg->pbl_size = cpu_to_le32(mr->pbl_size); + roce_set_field(fseg->mode_buf_pg_sz, + V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M, + V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S, + mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + roce_set_bit(fseg->mode_buf_pg_sz, + V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0); +} + +static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg, + const struct ib_atomic_wr *wr) +{ + if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { + aseg->fetchadd_swap_data = cpu_to_le64(wr->swap); + aseg->cmp_data = cpu_to_le64(wr->compare_add); + } else { + aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add); + aseg->cmp_data = 0; + } +} + static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, unsigned int *sge_ind) { @@ -121,6 +174,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, } if (wr->opcode == IB_WR_RDMA_READ) { + *bad_wr = wr; dev_err(hr_dev->dev, "Not support inline data!\n"); return -EINVAL; } @@ -179,6 +233,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct hns_roce_v2_ud_send_wqe *ud_sq_wqe; struct hns_roce_v2_rc_send_wqe *rc_sq_wqe; struct hns_roce_qp *qp = to_hr_qp(ibqp); + struct hns_roce_wqe_frmr_seg *fseg; struct device *dev = hr_dev->dev; struct hns_roce_v2_db sq_db; struct ib_qp_attr attr; @@ -191,6 +246,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, int attr_mask; u32 tmp_len; int ret = 0; + u32 hr_op; u8 *smac; int nreq; int i; @@ -356,6 +412,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port); + roce_set_bit(ud_sq_wqe->byte_40, + V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S, + ah->av.vlan_en ? 
1 : 0); roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, @@ -406,99 +465,100 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit); + wqe += sizeof(struct hns_roce_v2_rc_send_wqe); switch (wr->opcode) { case IB_WR_RDMA_READ: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_RDMA_READ); + hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ; rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); break; case IB_WR_RDMA_WRITE: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_RDMA_WRITE); + hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE; rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); break; case IB_WR_RDMA_WRITE_WITH_IMM: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM); + hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM; rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); break; case IB_WR_SEND: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_SEND); + hr_op = HNS_ROCE_V2_WQE_OP_SEND; break; case IB_WR_SEND_WITH_INV: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_SEND_WITH_INV); + hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV; break; case IB_WR_SEND_WITH_IMM: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM); + hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM; break; case IB_WR_LOCAL_INV: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_LOCAL_INV); + hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV; + roce_set_bit(rc_sq_wqe->byte_4, + V2_RC_SEND_WQE_BYTE_4_SO_S, 1); + rc_sq_wqe->inv_key = + cpu_to_le32(wr->ex.invalidate_rkey); + break; + case IB_WR_REG_MR: + hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR; + fseg = wqe; + set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr)); break; case IB_WR_ATOMIC_CMP_AND_SWP: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP); + hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP; + rc_sq_wqe->rkey = + cpu_to_le32(atomic_wr(wr)->rkey); + rc_sq_wqe->va = + cpu_to_le64(atomic_wr(wr)->remote_addr); break; case IB_WR_ATOMIC_FETCH_AND_ADD: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD); + hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD; + rc_sq_wqe->rkey = + cpu_to_le32(atomic_wr(wr)->rkey); + rc_sq_wqe->va = + cpu_to_le64(atomic_wr(wr)->remote_addr); break; case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP); + hr_op = + HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP; break; case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD); + hr_op = + 
HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD; break; default: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_MASK); + hr_op = HNS_ROCE_V2_WQE_OP_MASK; break; } - wqe += sizeof(struct hns_roce_v2_rc_send_wqe); + roce_set_field(rc_sq_wqe->byte_4, + V2_RC_SEND_WQE_BYTE_4_OPCODE_M, + V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op); + + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || + wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { + struct hns_roce_v2_wqe_data_seg *dseg; + + dseg = wqe; + set_data_seg_v2(dseg, wr->sg_list); + wqe += sizeof(struct hns_roce_v2_wqe_data_seg); + set_atomic_seg(wqe, atomic_wr(wr)); + roce_set_field(rc_sq_wqe->byte_16, + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, + wr->num_sge); + } else if (wr->opcode != IB_WR_REG_MR) { + ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, + wqe, &sge_ind, bad_wr); + if (ret) + goto out; + } - ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe, - &sge_ind, bad_wr); - if (ret) - goto out; ind++; } else { dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type); @@ -935,7 +995,24 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) resp = (struct hns_roce_query_version *)desc.data; hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version); - hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id); + hr_dev->vendor_id = hr_dev->pci_dev->vendor; + + return 0; +} + +static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_query_fw_info *resp; + struct hns_roce_cmq_desc desc; + int ret; + + hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) + return ret; + + resp = (struct hns_roce_query_fw_info *)desc.data; + hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver)); return 0; } @@ -1158,6 +1235,13 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ret = hns_roce_cmq_query_hw_info(hr_dev); if (ret) { + dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n", + ret); + return ret; + } + + ret = hns_roce_query_fw_ver(hr_dev); + if (ret) { dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n", ret); return ret; @@ -1185,14 +1269,16 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) return ret; } - hr_dev->vendor_part_id = 0; - hr_dev->sys_image_guid = 0; + + hr_dev->vendor_part_id = hr_dev->pci_dev->device; + hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid); caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM; caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM; caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM; caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM; caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM; + caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM; caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM; caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE; caps->num_uars = HNS_ROCE_V2_UAR_NUM; @@ -1222,6 +1308,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) caps->reserved_mrws = 1; caps->reserved_uars = 0; caps->reserved_cqs = 0; + caps->reserved_qps = HNS_ROCE_V2_RSV_QPS; caps->qpc_ba_pg_sz = 0; caps->qpc_buf_pg_sz = 0; @@ -1255,6 +1342,11 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) HNS_ROCE_CAP_FLAG_RQ_INLINE | HNS_ROCE_CAP_FLAG_RECORD_DB | HNS_ROCE_CAP_FLAG_SQ_RECORD_DB; + + if (hr_dev->pci_dev->revision == 0x21) + caps->flags |= HNS_ROCE_CAP_FLAG_MW | + HNS_ROCE_CAP_FLAG_FRMR; + caps->pkey_table_len[0] = 1; caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM; @@ -1262,6 
+1354,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) caps->local_ca_ack_delay = 0; caps->max_mtu = IB_MTU_4096; + if (hr_dev->pci_dev->revision == 0x21) + caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC; + ret = hns_roce_v2_set_bt(hr_dev); if (ret) dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n", @@ -1690,10 +1785,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S, (mr->access & IB_ACCESS_MW_BIND ? 1 : 0)); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, + mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S, (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0)); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S, @@ -1817,6 +1913,88 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, return 0; } +static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr) +{ + struct hns_roce_v2_mpt_entry *mpt_entry; + + mpt_entry = mb_buf; + memset(mpt_entry, 0, sizeof(*mpt_entry)); + + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, + V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE); + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M, + V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1); + roce_set_field(mpt_entry->byte_4_pd_hop_st, + V2_MPT_BYTE_4_PBL_BA_PG_SZ_M, + V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, + mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET); + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, + V2_MPT_BYTE_4_PD_S, mr->pd); + + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); + + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1); + + mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); + + mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3)); + roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M, + V2_MPT_BYTE_48_PBL_BA_H_S, + upper_32_bits(mr->pbl_ba >> 3)); + + roce_set_field(mpt_entry->byte_64_buf_pa1, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, + mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + + return 0; +} + +static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw) +{ + struct hns_roce_v2_mpt_entry *mpt_entry; + + mpt_entry = mb_buf; + memset(mpt_entry, 0, sizeof(*mpt_entry)); + + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, + V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE); + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, + V2_MPT_BYTE_4_PD_S, mw->pdn); + roce_set_field(mpt_entry->byte_4_pd_hop_st, + V2_MPT_BYTE_4_PBL_HOP_NUM_M, + V2_MPT_BYTE_4_PBL_HOP_NUM_S, + mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 
+ 0 : mw->pbl_hop_num); + roce_set_field(mpt_entry->byte_4_pd_hop_st, + V2_MPT_BYTE_4_PBL_BA_PG_SZ_M, + V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, + mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET); + + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); + + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S, + mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1); + + roce_set_field(mpt_entry->byte_64_buf_pa1, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, + mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + + mpt_entry->lkey = cpu_to_le32(mw->rkey); + + return 0; +} + static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n) { return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf, @@ -2274,6 +2452,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, wc->src_qp = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_RMT_QPN_M, V2_CQE_BYTE_32_RMT_QPN_S); + wc->slid = 0; wc->wc_flags |= (roce_get_bit(cqe->byte_32, V2_CQE_BYTE_32_GRH_S) ? IB_WC_GRH : 0); @@ -2287,7 +2466,14 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, wc->smac[5] = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_SMAC_5_M, V2_CQE_BYTE_28_SMAC_5_S); - wc->vlan_id = 0xffff; + if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) { + wc->vlan_id = (u16)roce_get_field(cqe->byte_28, + V2_CQE_BYTE_28_VID_M, + V2_CQE_BYTE_28_VID_S); + } else { + wc->vlan_id = 0xffff; + } + wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); wc->network_hdr_type = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_PORT_TYPE_M, @@ -2589,21 +2775,16 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0); roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0); - roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M, - V2_QPC_BYTE_60_MAPID_S, 0); + roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M, + V2_QPC_BYTE_60_TEMPID_S, 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, - V2_QPC_BYTE_60_INNER_MAP_IND_S, 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S, - 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S, - 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S, - 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S, - 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S, - 0); + roce_set_field(qpc_mask->byte_60_qpst_tempid, + V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S, + 0); + roce_set_bit(qpc_mask->byte_60_qpst_tempid, + V2_QPC_BYTE_60_SQ_DB_DOING_S, 0); + roce_set_bit(qpc_mask->byte_60_qpst_tempid, + V2_QPC_BYTE_60_RQ_DB_DOING_S, 0); roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0); roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0); @@ -2685,7 +2866,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M, V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0); - roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0); + roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S, + 0); roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 
0); roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M, @@ -2694,8 +2876,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M, V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0); - roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S, - 0); roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M, V2_QPC_BYTE_144_RAQ_CREDIT_S, 0); roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0); @@ -2721,14 +2901,12 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M, V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0); - roce_set_field(context->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_M, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_M, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0); - + roce_set_bit(qpc_mask->byte_168_irrl_idx, + V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0); + roce_set_bit(qpc_mask->byte_168_irrl_idx, + V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0); + roce_set_bit(qpc_mask->byte_168_irrl_idx, + V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0); roce_set_bit(qpc_mask->byte_168_irrl_idx, V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0); roce_set_bit(qpc_mask->byte_168_irrl_idx, @@ -2746,6 +2924,9 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S, 0); + roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1); + roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0); + roce_set_field(qpc_mask->byte_176_msg_pktn, V2_QPC_BYTE_176_MSG_USE_PKTN_M, V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0); @@ -2790,6 +2971,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, V2_QPC_BYTE_232_IRRL_SGE_IDX_M, V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0); + roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S, + 0); + roce_set_bit(qpc_mask->byte_232_irrl_sge, + V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0); + roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S, + 0); + qpc_mask->irrl_cur_sge_offset = 0; roce_set_field(qpc_mask->byte_240_irrl_tail, @@ -2955,13 +3143,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0); } - roce_set_field(context->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_M, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_M, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0); } static int modify_qp_init_to_rtr(struct ib_qp *ibqp, @@ -3271,13 +3452,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, * we should set all bits of the relevant fields in context mask to * 0 at the same time, else set them to 0x1. 
*/ - roce_set_field(context->byte_60_qpst_mapid, - V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M, - V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt); - roce_set_field(qpc_mask->byte_60_qpst_mapid, - V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M, - V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0); - context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); roce_set_field(context->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M, @@ -3538,6 +3712,17 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN); } + if (is_vlan_dev(gid_attr->ndev)) { + roce_set_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0); + roce_set_bit(context->byte_168_irrl_idx, + V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1); + roce_set_bit(qpc_mask->byte_168_irrl_idx, + V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0); + } + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, V2_QPC_BYTE_24_VLAN_ID_S, vlan); @@ -3584,8 +3769,15 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, V2_QPC_BYTE_24_HOP_LIMIT_M, V2_QPC_BYTE_24_HOP_LIMIT_S, 0); - roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, - V2_QPC_BYTE_24_TC_S, grh->traffic_class); + if (hr_dev->pci_dev->revision == 0x21 && + gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) + roce_set_field(context->byte_24_mtu_tc, + V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, + grh->traffic_class >> 2); + else + roce_set_field(context->byte_24_mtu_tc, + V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, + grh->traffic_class); roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, 0); roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, @@ -3606,9 +3798,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask); /* Every status migrate must change state */ - roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M, + roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S, new_state); - roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M, + roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S, 0); /* SW pass context to HW */ @@ -3728,7 +3920,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, goto out; } - state = roce_get_field(context->byte_60_qpst_mapid, + state = roce_get_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S); tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); if (tmp_qp_state == -1) { @@ -3995,13 +4187,103 @@ static void hns_roce_irq_work_handle(struct work_struct *work) { struct hns_roce_work *irq_work = container_of(work, struct hns_roce_work, work); + struct device *dev = irq_work->hr_dev->dev; u32 qpn = irq_work->qpn; + u32 cqn = irq_work->cqn; switch (irq_work->event_type) { + case HNS_ROCE_EVENT_TYPE_PATH_MIG: + dev_info(dev, "Path migrated succeeded.\n"); + break; + case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: + dev_warn(dev, "Path migration failed.\n"); + break; + case HNS_ROCE_EVENT_TYPE_COMM_EST: + dev_info(dev, "Communication established.\n"); + break; + case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: + dev_warn(dev, "Send queue drained.\n"); + break; case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: + dev_err(dev, "Local work queue catastrophic error.\n"); + hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); + switch (irq_work->sub_type) { + case HNS_ROCE_LWQCE_QPC_ERROR: + dev_err(dev, "QP %d, QPC 
error.\n", qpn); + break; + case HNS_ROCE_LWQCE_MTU_ERROR: + dev_err(dev, "QP %d, MTU error.\n", qpn); + break; + case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR: + dev_err(dev, "QP %d, WQE BA addr error.\n", qpn); + break; + case HNS_ROCE_LWQCE_WQE_ADDR_ERROR: + dev_err(dev, "QP %d, WQE addr error.\n", qpn); + break; + case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR: + dev_err(dev, "QP %d, WQE shift error.\n", qpn); + break; + default: + dev_err(dev, "Unhandled sub_event type %d.\n", + irq_work->sub_type); + break; + } + break; case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: + dev_err(dev, "Invalid request local work queue error.\n"); + hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); + break; case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: + dev_err(dev, "Local access violation work queue error.\n"); hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); + switch (irq_work->sub_type) { + case HNS_ROCE_LAVWQE_R_KEY_VIOLATION: + dev_err(dev, "QP %d, R_key violation.\n", qpn); + break; + case HNS_ROCE_LAVWQE_LENGTH_ERROR: + dev_err(dev, "QP %d, length error.\n", qpn); + break; + case HNS_ROCE_LAVWQE_VA_ERROR: + dev_err(dev, "QP %d, VA error.\n", qpn); + break; + case HNS_ROCE_LAVWQE_PD_ERROR: + dev_err(dev, "QP %d, PD error.\n", qpn); + break; + case HNS_ROCE_LAVWQE_RW_ACC_ERROR: + dev_err(dev, "QP %d, rw acc error.\n", qpn); + break; + case HNS_ROCE_LAVWQE_KEY_STATE_ERROR: + dev_err(dev, "QP %d, key state error.\n", qpn); + break; + case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR: + dev_err(dev, "QP %d, MR operation error.\n", qpn); + break; + default: + dev_err(dev, "Unhandled sub_event type %d.\n", + irq_work->sub_type); + break; + } + break; + case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: + dev_warn(dev, "SRQ limit reach.\n"); + break; + case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: + dev_warn(dev, "SRQ last wqe reach.\n"); + break; + case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: + dev_err(dev, "SRQ catas error.\n"); + break; + case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: + dev_err(dev, "CQ 0x%x access err.\n", cqn); + break; + case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: + dev_warn(dev, "CQ 0x%x overflow\n", cqn); + break; + case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: + dev_warn(dev, "DB overflow.\n"); + break; + case HNS_ROCE_EVENT_TYPE_FLR: + dev_warn(dev, "Function level reset.\n"); break; default: break; @@ -4011,7 +4293,8 @@ static void hns_roce_irq_work_handle(struct work_struct *work) } static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq, u32 qpn) + struct hns_roce_eq *eq, + u32 qpn, u32 cqn) { struct hns_roce_work *irq_work; @@ -4022,6 +4305,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev, INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle); irq_work->hr_dev = hr_dev; irq_work->qpn = qpn; + irq_work->cqn = cqn; irq_work->event_type = eq->event_type; irq_work->sub_type = eq->sub_type; queue_work(hr_dev->irq_workq, &(irq_work->work)); @@ -4058,124 +4342,6 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq) hns_roce_write64_k(doorbell, eq->doorbell); } -static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - u32 qpn) -{ - struct device *dev = hr_dev->dev; - int sub_type; - - dev_warn(dev, "Local work queue catastrophic error.\n"); - sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M, - HNS_ROCE_V2_AEQE_SUB_TYPE_S); - switch (sub_type) { - case HNS_ROCE_LWQCE_QPC_ERROR: - dev_warn(dev, "QP %d, QPC error.\n", qpn); - break; - case HNS_ROCE_LWQCE_MTU_ERROR: - dev_warn(dev, "QP %d, MTU error.\n", qpn); - 
break; - case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR: - dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_ADDR_ERROR: - dev_warn(dev, "QP %d, WQE addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR: - dev_warn(dev, "QP %d, WQE shift error.\n", qpn); - break; - default: - dev_err(dev, "Unhandled sub_event type %d.\n", sub_type); - break; - } -} - -static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, u32 qpn) -{ - struct device *dev = hr_dev->dev; - int sub_type; - - dev_warn(dev, "Local access violation work queue error.\n"); - sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M, - HNS_ROCE_V2_AEQE_SUB_TYPE_S); - switch (sub_type) { - case HNS_ROCE_LAVWQE_R_KEY_VIOLATION: - dev_warn(dev, "QP %d, R_key violation.\n", qpn); - break; - case HNS_ROCE_LAVWQE_LENGTH_ERROR: - dev_warn(dev, "QP %d, length error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_VA_ERROR: - dev_warn(dev, "QP %d, VA error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_PD_ERROR: - dev_err(dev, "QP %d, PD error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_RW_ACC_ERROR: - dev_warn(dev, "QP %d, rw acc error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_KEY_STATE_ERROR: - dev_warn(dev, "QP %d, key state error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR: - dev_warn(dev, "QP %d, MR operation error.\n", qpn); - break; - default: - dev_err(dev, "Unhandled sub_event type %d.\n", sub_type); - break; - } -} - -static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int event_type, u32 qpn) -{ - struct device *dev = hr_dev->dev; - - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_COMM_EST: - dev_warn(dev, "Communication established.\n"); - break; - case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: - dev_warn(dev, "Send queue drained.\n"); - break; - case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn); - break; - case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - dev_warn(dev, "Invalid request local work queue error.\n"); - break; - case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn); - break; - default: - break; - } - - hns_roce_qp_event(hr_dev, qpn, event_type); -} - -static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int event_type, u32 cqn) -{ - struct device *dev = hr_dev->dev; - - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - dev_warn(dev, "CQ 0x%x access err.\n", cqn); - break; - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - dev_warn(dev, "CQ 0x%x overflow\n", cqn); - break; - default: - break; - } - - hns_roce_cq_event(hr_dev, cqn, event_type); -} - static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry) { u32 buf_chk_sz; @@ -4251,31 +4417,23 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, switch (event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: - dev_warn(dev, "Path migrated succeeded.\n"); - break; case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: - dev_warn(dev, "Path migration failed.\n"); - break; case HNS_ROCE_EVENT_TYPE_COMM_EST: case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type, - qpn); + hns_roce_qp_event(hr_dev, qpn, event_type); break; case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: case 
HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: - dev_warn(dev, "SRQ not support.\n"); break; case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type, - cqn); + hns_roce_cq_event(hr_dev, cqn, event_type); break; case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: - dev_warn(dev, "DB overflow.\n"); break; case HNS_ROCE_EVENT_TYPE_MB: hns_roce_cmd_event(hr_dev, @@ -4284,10 +4442,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, le64_to_cpu(aeqe->event.cmd.out_param)); break; case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW: - dev_warn(dev, "CEQ overflow.\n"); break; case HNS_ROCE_EVENT_TYPE_FLR: - dev_warn(dev, "Function level reset.\n"); break; default: dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n", @@ -4304,7 +4460,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, dev_warn(dev, "cons_index overflow, set back to 0.\n"); eq->cons_index = 0; } - hns_roce_v2_init_irq_work(hr_dev, eq, qpn); + hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn); } set_eq_cons_index_v2(eq); @@ -5125,6 +5281,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) create_singlethread_workqueue("hns_roce_irq_workqueue"); if (!hr_dev->irq_workq) { dev_err(dev, "Create irq workqueue failed!\n"); + ret = -ENOMEM; goto err_request_irq_fail; } @@ -5195,6 +5352,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = { .set_mac = hns_roce_v2_set_mac, .write_mtpt = hns_roce_v2_write_mtpt, .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt, + .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt, + .mw_write_mtpt = hns_roce_v2_mw_write_mtpt, .write_cqc = hns_roce_v2_write_cqc, .set_hem = hns_roce_v2_set_hem, .clear_hem = hns_roce_v2_clear_hem, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 14aa308befef..8bc820635bbd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -50,6 +50,7 @@ #define HNS_ROCE_V2_MAX_CQE_NUM 0x10000 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff +#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000 #define HNS_ROCE_V2_MAX_SQ_INLINE 0x20 #define HNS_ROCE_V2_UAR_NUM 256 #define HNS_ROCE_V2_PHY_UAR_NUM 1 @@ -78,6 +79,7 @@ #define HNS_ROCE_INVALID_LKEY 0x100 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 +#define HNS_ROCE_V2_RSV_QPS 8 #define HNS_ROCE_CONTEXT_HOP_NUM 1 #define HNS_ROCE_MTT_HOP_NUM 1 @@ -201,6 +203,7 @@ enum { /* CMQ command */ enum hns_roce_opcode_type { + HNS_QUERY_FW_VER = 0x0001, HNS_ROCE_OPC_QUERY_HW_VER = 0x8000, HNS_ROCE_OPC_CFG_GLOBAL_PARAM = 0x8001, HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004, @@ -324,6 +327,7 @@ struct hns_roce_v2_cq_context { enum{ V2_MPT_ST_VALID = 0x1, + V2_MPT_ST_FREE = 0x2, }; enum hns_roce_v2_qp_state { @@ -350,7 +354,7 @@ struct hns_roce_v2_qp_context { __le32 dmac; __le32 byte_52_udpspn_dmac; __le32 byte_56_dqpn_err; - __le32 byte_60_qpst_mapid; + __le32 byte_60_qpst_tempid; __le32 qkey_xrcd; __le32 byte_68_rq_db; __le32 rq_db_record_addr; @@ -492,26 +496,15 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_56_LP_PKTN_INI_S 28 #define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28) -#define V2_QPC_BYTE_60_MAPID_S 0 -#define V2_QPC_BYTE_60_MAPID_M GENMASK(12, 0) +#define V2_QPC_BYTE_60_TEMPID_S 0 +#define V2_QPC_BYTE_60_TEMPID_M GENMASK(7, 0) -#define V2_QPC_BYTE_60_INNER_MAP_IND_S 13 +#define V2_QPC_BYTE_60_SCC_TOKEN_S 8 +#define V2_QPC_BYTE_60_SCC_TOKEN_M 
GENMASK(26, 8) -#define V2_QPC_BYTE_60_SQ_MAP_IND_S 14 +#define V2_QPC_BYTE_60_SQ_DB_DOING_S 27 -#define V2_QPC_BYTE_60_RQ_MAP_IND_S 15 - -#define V2_QPC_BYTE_60_TEMPID_S 16 -#define V2_QPC_BYTE_60_TEMPID_M GENMASK(22, 16) - -#define V2_QPC_BYTE_60_EXT_MAP_IND_S 23 - -#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S 24 -#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M GENMASK(26, 24) - -#define V2_QPC_BYTE_60_SQ_RLS_IND_S 27 - -#define V2_QPC_BYTE_60_SQ_EXT_IND_S 28 +#define V2_QPC_BYTE_60_RQ_DB_DOING_S 28 #define V2_QPC_BYTE_60_QP_ST_S 29 #define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29) @@ -534,6 +527,7 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_76_RQIE_S 28 +#define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30 #define V2_QPC_BYTE_80_RX_CQN_S 0 #define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0) @@ -588,7 +582,7 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_140_RR_MAX_S 12 #define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12) -#define V2_QPC_BYTE_140_RSVD_RAQ_MAP_S 15 +#define V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S 15 #define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16 #define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16) @@ -599,8 +593,6 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0 #define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0) -#define V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S 24 - #define V2_QPC_BYTE_144_RAQ_CREDIT_S 25 #define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25) @@ -637,9 +629,10 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_168_LP_SGEN_INI_S 22 #define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 22) -#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_S 24 -#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_M GENMASK(27, 24) - +#define V2_QPC_BYTE_168_SQ_VLAN_EN_S 24 +#define V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S 25 +#define V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S 26 +#define V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S 27 #define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28 #define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28) @@ -725,6 +718,10 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20 #define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20) +#define V2_QPC_BYTE_232_SO_LP_VLD_S 29 +#define V2_QPC_BYTE_232_FENCE_LP_VLD_S 30 +#define V2_QPC_BYTE_232_IRRL_LP_VLD_S 31 + #define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0 #define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0) @@ -743,6 +740,9 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_244_RNR_CNT_S 27 #define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27) +#define V2_QPC_BYTE_244_LCL_OP_FLG_S 30 +#define V2_QPC_BYTE_244_IRRL_RD_FLG_S 31 + #define V2_QPC_BYTE_248_IRRL_PSN_S 0 #define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0) @@ -818,6 +818,11 @@ struct hns_roce_v2_cqe { #define V2_CQE_BYTE_28_PORT_TYPE_S 16 #define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16) +#define V2_CQE_BYTE_28_VID_S 18 +#define V2_CQE_BYTE_28_VID_M GENMASK(29, 18) + +#define V2_CQE_BYTE_28_VID_VLD_S 30 + #define V2_CQE_BYTE_32_RMT_QPN_S 0 #define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0) @@ -878,8 +883,19 @@ struct hns_roce_v2_mpt_entry { #define V2_MPT_BYTE_8_LW_EN_S 7 +#define V2_MPT_BYTE_8_MW_CNT_S 8 +#define V2_MPT_BYTE_8_MW_CNT_M GENMASK(31, 8) + +#define V2_MPT_BYTE_12_FRE_S 0 + #define V2_MPT_BYTE_12_PA_S 1 +#define V2_MPT_BYTE_12_MR_MW_S 4 + +#define V2_MPT_BYTE_12_BPD_S 5 + +#define V2_MPT_BYTE_12_BQP_S 6 + #define V2_MPT_BYTE_12_INNER_PA_VLD_S 7 #define V2_MPT_BYTE_12_MW_BIND_QPN_S 8 @@ -988,6 +1004,8 @@ struct hns_roce_v2_ud_send_wqe { #define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24 #define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24) +#define 
V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30 + #define V2_UD_SEND_WQE_BYTE_40_LBI_S 31 #define V2_UD_SEND_WQE_DMAC_0_S 0 @@ -1042,6 +1060,16 @@ struct hns_roce_v2_rc_send_wqe { #define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12 +#define V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S 19 + +#define V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S 20 + +#define V2_RC_FRMR_WQE_BYTE_4_RR_S 21 + +#define V2_RC_FRMR_WQE_BYTE_4_RW_S 22 + +#define V2_RC_FRMR_WQE_BYTE_4_LW_S 23 + #define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0 #define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0) @@ -1051,6 +1079,16 @@ struct hns_roce_v2_rc_send_wqe { #define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0 #define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0) +struct hns_roce_wqe_frmr_seg { + __le32 pbl_size; + __le32 mode_buf_pg_sz; +}; + +#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S 4 +#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M GENMASK(7, 4) + +#define V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S 8 + struct hns_roce_v2_wqe_data_seg { __le32 len; __le32 lkey; @@ -1068,6 +1106,11 @@ struct hns_roce_query_version { __le32 rsv[5]; }; +struct hns_roce_query_fw_info { + __le32 fw_ver; + __le32 rsv[5]; +}; + struct hns_roce_cfg_llm_a { __le32 base_addr_l; __le32 base_addr_h; @@ -1564,4 +1607,9 @@ struct hns_roce_eq_context { #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0 #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0) +struct hns_roce_wqe_atomic_seg { + __le64 fetchadd_swap_data; + __le64 cmp_data; +}; + #endif diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index c5cae9a38c04..1b3ee514f2ef 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -196,6 +196,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev, memset(props, 0, sizeof(*props)); + props->fw_ver = hr_dev->caps.fw_ver; props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid); props->max_mr_size = (u64)(~(0ULL)); props->page_size_cap = hr_dev->caps.page_size_cap; @@ -215,7 +216,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev, props->max_pd = hr_dev->caps.num_pds; props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma; props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma; - props->atomic_cap = IB_ATOMIC_NONE; + props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ? 
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE; props->max_pkeys = 1; props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay; @@ -344,8 +346,6 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev, if (ret) goto error_fail_uar_alloc; - INIT_LIST_HEAD(&context->vma_list); - mutex_init(&context->vma_list_mutex); if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { INIT_LIST_HEAD(&context->page_list); mutex_init(&context->page_mutex); @@ -376,76 +376,34 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext) return 0; } -static void hns_roce_vma_open(struct vm_area_struct *vma) -{ - vma->vm_ops = NULL; -} - -static void hns_roce_vma_close(struct vm_area_struct *vma) -{ - struct hns_roce_vma_data *vma_data; - - vma_data = (struct hns_roce_vma_data *)vma->vm_private_data; - vma_data->vma = NULL; - mutex_lock(vma_data->vma_list_mutex); - list_del(&vma_data->list); - mutex_unlock(vma_data->vma_list_mutex); - kfree(vma_data); -} - -static const struct vm_operations_struct hns_roce_vm_ops = { - .open = hns_roce_vma_open, - .close = hns_roce_vma_close, -}; - -static int hns_roce_set_vma_data(struct vm_area_struct *vma, - struct hns_roce_ucontext *context) -{ - struct list_head *vma_head = &context->vma_list; - struct hns_roce_vma_data *vma_data; - - vma_data = kzalloc(sizeof(*vma_data), GFP_KERNEL); - if (!vma_data) - return -ENOMEM; - - vma_data->vma = vma; - vma_data->vma_list_mutex = &context->vma_list_mutex; - vma->vm_private_data = vma_data; - vma->vm_ops = &hns_roce_vm_ops; - - mutex_lock(&context->vma_list_mutex); - list_add(&vma_data->list, vma_head); - mutex_unlock(&context->vma_list_mutex); - - return 0; -} - static int hns_roce_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct hns_roce_dev *hr_dev = to_hr_dev(context->device); - if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) - return -EINVAL; + switch (vma->vm_pgoff) { + case 0: + return rdma_user_mmap_io(context, vma, + to_hr_ucontext(context)->uar.pfn, + PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot)); + + /* vm_pgoff: 1 -- TPTR */ + case 1: + if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size) + return -EINVAL; + /* + * FIXME: using io_remap_pfn_range on the dma address returned + * by dma_alloc_coherent is totally wrong. 
+ */ + return rdma_user_mmap_io(context, vma, + hr_dev->tptr_dma_addr >> PAGE_SHIFT, + hr_dev->tptr_size, + vma->vm_page_prot); - if (vma->vm_pgoff == 0) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (io_remap_pfn_range(vma, vma->vm_start, - to_hr_ucontext(context)->uar.pfn, - PAGE_SIZE, vma->vm_page_prot)) - return -EAGAIN; - } else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr && - hr_dev->tptr_size) { - /* vm_pgoff: 1 -- TPTR */ - if (io_remap_pfn_range(vma, vma->vm_start, - hr_dev->tptr_dma_addr >> PAGE_SHIFT, - hr_dev->tptr_size, - vma->vm_page_prot)) - return -EAGAIN; - } else + default: return -EINVAL; - - return hns_roce_set_vma_data(vma, to_hr_ucontext(context)); + } } static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num, @@ -471,21 +429,6 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num, static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext) { - struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext); - struct hns_roce_vma_data *vma_data, *n; - struct vm_area_struct *vma; - - mutex_lock(&context->vma_list_mutex); - list_for_each_entry_safe(vma_data, n, &context->vma_list, list) { - vma = vma_data->vma; - zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE); - - vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); - vma->vm_ops = NULL; - list_del(&vma_data->list); - kfree(vma_data); - } - mutex_unlock(&context->vma_list_mutex); } static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev) @@ -508,7 +451,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) spin_lock_init(&iboe->lock); ib_dev = &hr_dev->ib_dev; - strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX); ib_dev->owner = THIS_MODULE; ib_dev->node_type = RDMA_NODE_IB_CA; @@ -584,12 +526,27 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR); } + /* MW */ + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) { + ib_dev->alloc_mw = hns_roce_alloc_mw; + ib_dev->dealloc_mw = hns_roce_dealloc_mw; + ib_dev->uverbs_cmd_mask |= + (1ULL << IB_USER_VERBS_CMD_ALLOC_MW) | + (1ULL << IB_USER_VERBS_CMD_DEALLOC_MW); + } + + /* FRMR */ + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) { + ib_dev->alloc_mr = hns_roce_alloc_mr; + ib_dev->map_mr_sg = hns_roce_map_mr_sg; + } + /* OTHERS */ ib_dev->get_port_immutable = hns_roce_port_immutable; ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext; ib_dev->driver_id = RDMA_DRIVER_HNS; - ret = ib_register_device(ib_dev, NULL); + ret = ib_register_device(ib_dev, "hns_%d", NULL); if (ret) { dev_err(dev, "ib_register_device failed!\n"); return ret; diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index eb26a5f6fc58..521ad2aa3a4e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -329,7 +329,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, u64 bt_idx; u64 size; - mhop_num = hr_dev->caps.pbl_hop_num; + mhop_num = (mr->type == MR_TYPE_FRMR ? 
1 : hr_dev->caps.pbl_hop_num); pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); @@ -351,7 +351,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, mr->pbl_size = npages; mr->pbl_ba = mr->pbl_dma_addr; - mr->pbl_hop_num = hr_dev->caps.pbl_hop_num; + mr->pbl_hop_num = mhop_num; mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; return 0; @@ -511,7 +511,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, mr->key = hw_index_to_key(index); /* MR key */ if (size == ~0ull) { - mr->type = MR_TYPE_DMA; mr->pbl_buf = NULL; mr->pbl_dma_addr = 0; /* PBL multi-hop addressing parameters */ @@ -522,7 +521,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, mr->pbl_l1_dma_addr = NULL; mr->pbl_l0_dma_addr = 0; } else { - mr->type = MR_TYPE_MR; if (!hr_dev->caps.pbl_hop_num) { mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, &(mr->pbl_dma_addr), @@ -548,9 +546,9 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev, u32 mhop_num; u64 bt_idx; - npages = ib_umem_page_count(mr->umem); + npages = mr->pbl_size; pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); - mhop_num = hr_dev->caps.pbl_hop_num; + mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num; if (mhop_num == HNS_ROCE_HOP_NUM_0) return; @@ -636,7 +634,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, } if (mr->size != ~0ULL) { - npages = ib_umem_page_count(mr->umem); + if (mr->type == MR_TYPE_MR) + npages = ib_umem_page_count(mr->umem); if (!hr_dev->caps.pbl_hop_num) dma_free_coherent(dev, (unsigned int)(npages * 8), @@ -674,7 +673,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, goto err_table; } - ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx); + if (mr->type != MR_TYPE_FRMR) + ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx); + else + ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr); if (ret) { dev_err(dev, "Write mtpt fail!\n"); goto err_page; @@ -855,6 +857,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) if (mr == NULL) return ERR_PTR(-ENOMEM); + mr->type = MR_TYPE_DMA; + /* Allocate memory region key */ ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0, ~0ULL, acc, 0, mr); @@ -1031,6 +1035,8 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, } } + mr->type = MR_TYPE_MR; + ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length, access_flags, n, mr); if (ret) @@ -1201,3 +1207,193 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr) return ret; } + +struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); + struct device *dev = hr_dev->dev; + struct hns_roce_mr *mr; + u64 length; + u32 page_size; + int ret; + + page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT); + length = max_num_sg * page_size; + + if (mr_type != IB_MR_TYPE_MEM_REG) + return ERR_PTR(-EINVAL); + + if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) { + dev_err(dev, "max_num_sg larger than %d\n", + HNS_ROCE_FRMR_MAX_PA); + return ERR_PTR(-EINVAL); + } + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->type = MR_TYPE_FRMR; + + /* Allocate memory region key */ + ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length, + 0, max_num_sg, mr); + if (ret) + goto err_free; + + ret = hns_roce_mr_enable(hr_dev, 
mr); + if (ret) + goto err_mr; + + mr->ibmr.rkey = mr->ibmr.lkey = mr->key; + mr->umem = NULL; + + return &mr->ibmr; + +err_mr: + hns_roce_mr_free(to_hr_dev(pd->device), mr); + +err_free: + kfree(mr); + return ERR_PTR(ret); +} + +static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr) +{ + struct hns_roce_mr *mr = to_hr_mr(ibmr); + + mr->pbl_buf[mr->npages++] = cpu_to_le64(addr); + + return 0; +} + +int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset) +{ + struct hns_roce_mr *mr = to_hr_mr(ibmr); + + mr->npages = 0; + + return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); +} + +static void hns_roce_mw_free(struct hns_roce_dev *hr_dev, + struct hns_roce_mw *mw) +{ + struct device *dev = hr_dev->dev; + int ret; + + if (mw->enabled) { + ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey) + & (hr_dev->caps.num_mtpts - 1)); + if (ret) + dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret); + + hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, + key_to_hw_index(mw->rkey)); + } + + hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, + key_to_hw_index(mw->rkey), BITMAP_NO_RR); +} + +static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev, + struct hns_roce_mw *mw) +{ + struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; + struct hns_roce_cmd_mailbox *mailbox; + struct device *dev = hr_dev->dev; + unsigned long mtpt_idx = key_to_hw_index(mw->rkey); + int ret; + + /* prepare HEM entry memory */ + ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx); + if (ret) + return ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) { + ret = PTR_ERR(mailbox); + goto err_table; + } + + ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw); + if (ret) { + dev_err(dev, "MW write mtpt fail!\n"); + goto err_page; + } + + ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, + mtpt_idx & (hr_dev->caps.num_mtpts - 1)); + if (ret) { + dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret); + goto err_page; + } + + mw->enabled = 1; + + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return 0; + +err_page: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + +err_table: + hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx); + + return ret; +} + +struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, + struct ib_udata *udata) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device); + struct hns_roce_mw *mw; + unsigned long index = 0; + int ret; + + mw = kmalloc(sizeof(*mw), GFP_KERNEL); + if (!mw) + return ERR_PTR(-ENOMEM); + + /* Allocate a key for mw from bitmap */ + ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index); + if (ret) + goto err_bitmap; + + mw->rkey = hw_index_to_key(index); + + mw->ibmw.rkey = mw->rkey; + mw->ibmw.type = type; + mw->pdn = to_hr_pd(ib_pd)->pdn; + mw->pbl_hop_num = hr_dev->caps.pbl_hop_num; + mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; + mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; + + ret = hns_roce_mw_enable(hr_dev, mw); + if (ret) + goto err_mw; + + return &mw->ibmw; + +err_mw: + hns_roce_mw_free(hr_dev, mw); + +err_bitmap: + kfree(mw); + + return ERR_PTR(ret); +} + +int hns_roce_dealloc_mw(struct ib_mw *ibmw) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device); + struct hns_roce_mw *mw = to_hr_mw(ibmw); + + hns_roce_mw_free(hr_dev, mw); + kfree(mw); + + return 0; +} diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index efb7e961ca65..5ebf481a39d9 100644 --- 
a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -31,6 +31,7 @@ * SOFTWARE. */ +#include <linux/pci.h> #include <linux/platform_device.h> #include <rdma/ib_addr.h> #include <rdma/ib_umem.h> @@ -343,6 +344,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, { u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); u8 max_sq_stride = ilog2(roundup_sq_stride); + u32 ex_sge_num; u32 page_size; u32 max_cnt; @@ -372,7 +374,18 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, if (hr_qp->sq.max_gs > 2) hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * (hr_qp->sq.max_gs - 2)); + + if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) { + if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) { + dev_err(hr_dev->dev, + "The extended sge cnt error! sge_cnt=%d\n", + hr_qp->sge.sge_cnt); + return -EINVAL; + } + } + hr_qp->sge.sge_shift = 4; + ex_sge_num = hr_qp->sge.sge_cnt; /* Get buf size, SQ and RQ are aligned to page_szie */ if (hr_dev->caps.max_sq_sg <= 2) { @@ -386,6 +399,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, hr_qp->sq.wqe_shift), PAGE_SIZE); } else { page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); + hr_qp->sge.sge_cnt = + max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num); hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size) + HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt << @@ -394,7 +409,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, hr_qp->sq.wqe_shift), page_size); hr_qp->sq.offset = 0; - if (hr_qp->sge.sge_cnt) { + if (ex_sge_num) { hr_qp->sge.offset = HNS_ROCE_ALOGN_UP( (hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift), @@ -465,6 +480,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, hr_qp->sge.sge_shift = 4; } + if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) { + if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) { + dev_err(dev, "The extended sge cnt error! 
sge_cnt=%d\n", + hr_qp->sge.sge_cnt); + return -EINVAL; + } + } + /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); hr_qp->sq.offset = 0; @@ -472,6 +495,8 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, page_size); if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) { + hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift), + (u32)hr_qp->sge.sge_cnt); hr_qp->sge.offset = size; size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift, page_size); @@ -952,8 +977,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, } } - if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, - IB_LINK_LAYER_ETHERNET)) { + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, + attr_mask)) { dev_err(dev, "ib_modify_qp_is_ok failed\n"); goto out; } @@ -1106,14 +1131,20 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; int reserved_from_top = 0; + int reserved_from_bot; int ret; spin_lock_init(&qp_table->lock); INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC); - /* A port include two SQP, six port total 12 */ + /* In hw v1, a port include two SQP, six ports total 12 */ + if (hr_dev->caps.max_sq_sg <= 2) + reserved_from_bot = SQP_NUM; + else + reserved_from_bot = hr_dev->caps.reserved_qps; + ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps, - hr_dev->caps.num_qps - 1, SQP_NUM, + hr_dev->caps.num_qps - 1, reserved_from_bot, reserved_from_top); if (ret) { dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n", |