Diffstat (limited to 'drivers/net/ethernet/qlogic/qed')
43 files changed, 12730 insertions, 7837 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 974929dcc74e..82dd47068e18 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,6 +5,6 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
 	 qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
 qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
 qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_QED_RDMA) += qed_roce.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
 qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
 qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 2ab1aab7c3fe..91003bc6f00b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -54,7 +54,7 @@ extern const struct qed_common_ops qed_common_ops_pass;
 #define QED_MAJOR_VERSION 8
 #define QED_MINOR_VERSION 10
-#define QED_REVISION_VERSION 10
+#define QED_REVISION_VERSION 11
 #define QED_ENGINEERING_VERSION 21
 #define QED_VERSION \
@@ -92,7 +92,7 @@ enum qed_mcp_protocol_type;
 #define QED_MFW_SET_FIELD(name, field, value) \
 	do { \
-		(name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
+		(name) &= ~(field ## _MASK); \
 		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
 	} while (0)
@@ -210,14 +210,16 @@ struct qed_tunn_update_params {
 /* The PCI personality is not quite synonymous to protocol ID:
  * 1. All personalities need CORE connections
- * 2. The Ethernet personality may support also the RoCE protocol
+ * 2. The Ethernet personality may support also the RoCE/iWARP protocol
  */
 enum qed_pci_personality {
 	QED_PCI_ETH,
 	QED_PCI_FCOE,
 	QED_PCI_ISCSI,
 	QED_PCI_ETH_ROCE,
-	QED_PCI_DEFAULT /* default in shmem */
+	QED_PCI_ETH_IWARP,
+	QED_PCI_ETH_RDMA,
+	QED_PCI_DEFAULT, /* default in shmem */
 };
 /* All VFs are symmetric, all counters are PF + all VFs */
@@ -277,6 +279,7 @@ enum qed_dev_cap {
 	QED_DEV_CAP_FCOE,
 	QED_DEV_CAP_ISCSI,
 	QED_DEV_CAP_ROCE,
+	QED_DEV_CAP_IWARP,
 };
 enum qed_wol_support {
@@ -286,7 +289,24 @@ enum qed_wol_support {
 struct qed_hw_info {
 	/* PCI personality */
-	enum qed_pci_personality personality;
+	enum qed_pci_personality personality;
+#define QED_IS_RDMA_PERSONALITY(dev)			    \
+	((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||  \
+	 (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_ROCE_PERSONALITY(dev)			   \
+	((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
+	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_IWARP_PERSONALITY(dev)			    \
+	((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_L2_PERSONALITY(dev)		      \
+	((dev)->hw_info.personality == QED_PCI_ETH || \
+	 QED_IS_RDMA_PERSONALITY(dev))
+#define QED_IS_FCOE_PERSONALITY(dev) \
+	((dev)->hw_info.personality == QED_PCI_FCOE)
+#define QED_IS_ISCSI_PERSONALITY(dev) \
+	((dev)->hw_info.personality == QED_PCI_ISCSI)
 	/* Resource Allocation scheme results */
 	u32 resc_start[QED_MAX_RESC];
@@ -412,6 +432,11 @@ struct qed_fw_data {
 	u32 init_ops_size;
 };
+enum BAR_ID {
+	BAR_ID_0,	/* used for GRC */
+	BAR_ID_1	/* Used for doorbells */
+};
+
 #define DRV_MODULE_VERSION \
 	__stringify(QED_MAJOR_VERSION) "." \
 	__stringify(QED_MINOR_VERSION) "." \
@@ -495,10 +520,6 @@ struct qed_hwfn {
 	bool b_rdma_enabled_in_prs;
 	u32 rdma_prs_search_reg;
-	/* Array of sb_info of all status blocks */
-	struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
-	u16 num_sbs;
-
 	struct qed_cxt_mngr *p_cxt_mngr;
 	/* Flag indicating whether interrupts are enabled or not*/
@@ -537,6 +558,9 @@ struct qed_hwfn {
 	u8 dcbx_no_edpm;
 	u8 db_bar_no_edpm;
+	/* L2-related */
+	struct qed_l2_info *p_l2_info;
+
 	struct qed_ptt *p_arfs_ptt;
 	struct qed_simd_fp_handler simd_proto_handler[64];
@@ -548,7 +572,6 @@ struct qed_hwfn {
 #endif
 	struct z_stream_s *stream;
-	struct qed_roce_ll2_info *ll2;
 };
 struct pci_params {
@@ -598,16 +621,11 @@ struct qed_dev {
 	enum qed_dev_type type;
 /* Translate type/revision combo into the proper conditions */
 #define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
-#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
-			   CHIP_REV_IS_A0(dev))
 #define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
 			   CHIP_REV_IS_B0(dev))
 #define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
 #define QED_IS_K2(dev) QED_IS_AH(dev)
-#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
-			   QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
-
 	u16 vendor_id;
 	u16 device_id;
 #define QED_DEV_ID_MASK 0xff00
@@ -621,7 +639,6 @@ struct qed_dev {
 	u16 chip_rev;
 #define CHIP_REV_MASK 0xf
 #define CHIP_REV_SHIFT 12
-#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev)
 #define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
 	u16 chip_metal;
@@ -633,7 +650,7 @@ struct qed_dev {
 #define CHIP_BOND_ID_SHIFT 0
 	u8 num_engines;
-	u8 num_ports_in_engines;
+	u8 num_ports_in_engine;
 	u8 num_funcs_in_port;
 	u8 path_id;
@@ -644,7 +661,6 @@ struct qed_dev {
 	int pcie_width;
 	int pcie_speed;
-	u8 ver_str[VER_SIZE];
 	/* Add MF related configuration */
 	u8 mcp_rev;
@@ -763,7 +779,7 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 }
 #define PURE_LB_TC 8
-#define OOO_LB_TC 9
+#define PKT_LB_TC 9
 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
@@ -773,6 +789,8 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 int qed_device_num_engines(struct qed_dev *cdev);
 int qed_device_get_port_id(struct qed_dev *cdev);
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
 #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 694845793af2..af106be8cc08 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -135,7 +135,6 @@ struct qed_tid_seg {
 struct qed_conn_type_cfg {
 	u32 cid_count;
-	u32 cid_start;
 	u32 cids_per_vf;
 	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
 };
@@ -222,6 +221,9 @@ struct qed_cxt_mngr {
 	/* Acquired CIDs */
 	struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+	struct qed_cid_acquired_map
+		acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
+
 	/* ILT shadow table */
 	struct qed_dma_mem *ilt_shadow;
 	u32 pf_start_line;
@@ -244,14 +246,16 @@ struct qed_cxt_mngr {
 static bool src_proto(enum protocol_type type)
 {
 	return type == PROTOCOLID_ISCSI ||
-	       type == PROTOCOLID_FCOE;
+	       type == PROTOCOLID_FCOE ||
+	       type == PROTOCOLID_IWARP;
 }
 static bool tm_cid_proto(enum protocol_type type)
 {
 	return type == PROTOCOLID_ISCSI ||
 	       type == PROTOCOLID_FCOE ||
-	       type == PROTOCOLID_ROCE;
+	       type == PROTOCOLID_ROCE ||
+	       type == PROTOCOLID_IWARP;
 }
 static bool tm_tid_proto(enum protocol_type type)
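Aside on the QED_MFW_SET_FIELD fix in qed.h above: the MFW field masks in this driver are defined pre-shifted (each *_MASK already incorporates its *_SHIFT), so the old macro's extra shift of the mask cleared the wrong bits before OR-ing in the new value. A minimal stand-alone sketch of the corrected macro; FOO_SHIFT/FOO_MASK are illustrative names, not identifiers from the driver headers:

    #include <stdio.h>

    /* Illustrative pre-shifted field: bits 4..7 of a control word */
    #define FOO_SHIFT 4
    #define FOO_MASK  (0xf << FOO_SHIFT)

    #define QED_MFW_SET_FIELD(name, field, value)                          \
        do {                                                               \
            (name) &= ~(field ## _MASK);                                   \
            (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
        } while (0)

    int main(void)
    {
        unsigned int reg = 0xffffffff;

        QED_MFW_SET_FIELD(reg, FOO, 0x5);
        /* Bits 4..7 now hold 0x5 and all other bits are preserved;
         * the old macro would have cleared bits 8..11 instead.
         */
        printf("reg = 0x%08x\n", reg); /* prints 0xffffff5f */
        return 0;
    }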
@@ -851,7 +855,7 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
 	if (!excess_lines)
 		return 0;
-	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
 		return 0;
 	p_mngr = p_hwfn->p_cxt_mngr;
@@ -1031,7 +1035,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 	u32 lines, line, sz_left, lines_to_skip = 0;
 	/* Special handling for RoCE that supports dynamic allocation */
-	if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
+	if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
 	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
 		return 0;
@@ -1121,45 +1125,76 @@ ilt_shadow_fail:
 static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	u32 type;
+	u32 type, vf;
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
 		kfree(p_mngr->acquired[type].cid_map);
 		p_mngr->acquired[type].max_count = 0;
 		p_mngr->acquired[type].start_cid = 0;
+
+		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+			kfree(p_mngr->acquired_vf[type][vf].cid_map);
+			p_mngr->acquired_vf[type][vf].max_count = 0;
+			p_mngr->acquired_vf[type][vf].start_cid = 0;
+		}
 	}
 }
+static int
+qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
+			 u32 type,
+			 u32 cid_start,
+			 u32 cid_count, struct qed_cid_acquired_map *p_map)
+{
+	u32 size;
+
+	if (!cid_count)
+		return 0;
+
+	size = DIV_ROUND_UP(cid_count,
+			    sizeof(unsigned long) * BITS_PER_BYTE) *
+	       sizeof(unsigned long);
+	p_map->cid_map = kzalloc(size, GFP_KERNEL);
+	if (!p_map->cid_map)
+		return -ENOMEM;
+
+	p_map->max_count = cid_count;
+	p_map->start_cid = cid_start;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+		   "Type %08x start: %08x count %08x\n",
+		   type, p_map->start_cid, p_map->max_count);
+
+	return 0;
+}
+
 static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	u32 start_cid = 0;
-	u32 type;
+	u32 start_cid = 0, vf_start_cid = 0;
+	u32 type, vf;
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
-		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
-		u32 size;
-
-		if (cid_cnt == 0)
-			continue;
+		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+		struct qed_cid_acquired_map *p_map;
-		size = DIV_ROUND_UP(cid_cnt,
-				    sizeof(unsigned long) * BITS_PER_BYTE) *
-		       sizeof(unsigned long);
-		p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
-		if (!p_mngr->acquired[type].cid_map)
+		/* Handle PF maps */
+		p_map = &p_mngr->acquired[type];
+		if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
+					     p_cfg->cid_count, p_map))
 			goto cid_map_fail;
-		p_mngr->acquired[type].max_count = cid_cnt;
-		p_mngr->acquired[type].start_cid = start_cid;
-
-		p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+		/* Handle VF maps */
+		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+			p_map = &p_mngr->acquired_vf[type][vf];
+			if (qed_cid_map_alloc_single(p_hwfn, type,
+						     vf_start_cid,
+						     p_cfg->cids_per_vf, p_map))
+				goto cid_map_fail;
+		}
-		DP_VERBOSE(p_hwfn, QED_MSG_CXT,
-			   "Type %08x start: %08x count %08x\n",
-			   type, p_mngr->acquired[type].start_cid,
-			   p_mngr->acquired[type].max_count);
-		start_cid += cid_cnt;
+		start_cid += p_cfg->cid_count;
+		vf_start_cid += p_cfg->cids_per_vf;
 	}
 	return 0;
@@ -1265,19 +1300,36 @@ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
 void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_cid_acquired_map *p_map;
+	struct qed_conn_type_cfg *p_cfg;
 	int type;
+	u32 len;
 	/* Reset acquired cids */
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
-		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+		u32 vf;
+
+		p_cfg = &p_mngr->conn_cfg[type];
+		if (p_cfg->cid_count) {
+			p_map = &p_mngr->acquired[type];
+			len = DIV_ROUND_UP(p_map->max_count,
+					   sizeof(unsigned long) *
+					   BITS_PER_BYTE) *
+			      sizeof(unsigned long);
+			memset(p_map->cid_map, 0, len);
+		}
-		if (cid_cnt == 0)
+		if (!p_cfg->cids_per_vf)
 			continue;
-		memset(p_mngr->acquired[type].cid_map, 0,
-		       DIV_ROUND_UP(cid_cnt,
-				    sizeof(unsigned long) * BITS_PER_BYTE) *
-		       sizeof(unsigned long));
+		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+			p_map = &p_mngr->acquired_vf[type][vf];
+			len = DIV_ROUND_UP(p_map->max_count,
+					   sizeof(unsigned long) *
+					   BITS_PER_BYTE) *
+			      sizeof(unsigned long);
+			memset(p_map->cid_map, 0, len);
+		}
 	}
 }
@@ -1783,7 +1835,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
 		tm_offset += tm_iids.pf_tids[i];
 	}
-	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
+	if (QED_IS_RDMA_PERSONALITY(p_hwfn))
 		active_seg_mask = 0;
 	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
@@ -1841,91 +1893,145 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	qed_prs_init_pf(p_hwfn);
 }
-int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
-			enum protocol_type type, u32 *p_cid)
+int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+			 enum protocol_type type, u32 *p_cid, u8 vfid)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_cid_acquired_map *p_map;
 	u32 rel_cid;
-	if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+	if (type >= MAX_CONN_TYPES) {
 		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
 		return -EINVAL;
 	}
-	rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
-				      p_mngr->acquired[type].max_count);
+	if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
+		DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
+		return -EINVAL;
+	}
-	if (rel_cid >= p_mngr->acquired[type].max_count) {
+	/* Determine the right map to take this CID from */
+	if (vfid == QED_CXT_PF_CID)
+		p_map = &p_mngr->acquired[type];
+	else
+		p_map = &p_mngr->acquired_vf[type][vfid];
+
+	if (!p_map->cid_map) {
+		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+		return -EINVAL;
+	}
+
+	rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
+
+	if (rel_cid >= p_map->max_count) {
 		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
 		return -EINVAL;
 	}
-	__set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+	__set_bit(rel_cid, p_map->cid_map);
-	*p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+	*p_cid = rel_cid + p_map->start_cid;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+		   "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+		   *p_cid, rel_cid, vfid, type);
 	return 0;
 }
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+			enum protocol_type type, u32 *p_cid)
+{
+	return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
+}
+
 static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
-				      u32 cid, enum protocol_type *p_type)
+				      u32 cid,
+				      u8 vfid,
+				      enum protocol_type *p_type,
+				      struct qed_cid_acquired_map **pp_map)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	struct qed_cid_acquired_map *p_map;
-	enum protocol_type p;
 	u32 rel_cid;
 	/* Iterate over protocols and find matching cid range */
-	for (p = 0; p < MAX_CONN_TYPES; p++) {
-		p_map = &p_mngr->acquired[p];
+	for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+		if (vfid == QED_CXT_PF_CID)
+			*pp_map = &p_mngr->acquired[*p_type];
+		else
+			*pp_map = &p_mngr->acquired_vf[*p_type][vfid];
-		if (!p_map->cid_map)
+		if (!((*pp_map)->cid_map))
 			continue;
-		if (cid >= p_map->start_cid &&
-		    cid < p_map->start_cid + p_map->max_count)
+		if (cid >= (*pp_map)->start_cid &&
+		    cid < (*pp_map)->start_cid + (*pp_map)->max_count)
 			break;
 	}
-	*p_type = p;
-	if (p == MAX_CONN_TYPES) {
-		DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
-		return false;
+	if (*p_type == MAX_CONN_TYPES) {
+		DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
+		goto fail;
 	}
-	rel_cid = cid - p_map->start_cid;
-	if (!test_bit(rel_cid, p_map->cid_map)) {
-		DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
-		return false;
+	rel_cid = cid - (*pp_map)->start_cid;
+	if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
+		DP_NOTICE(p_hwfn, "CID %d [vifd %02x] not acquired",
+			  cid, vfid);
+		goto fail;
 	}
+
 	return true;
+fail:
+	*p_type = MAX_CONN_TYPES;
+	*pp_map = NULL;
+	return false;
 }
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
+void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
 {
-	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_cid_acquired_map *p_map = NULL;
 	enum protocol_type type;
 	bool b_acquired;
 	u32 rel_cid;
+	if (vfid != QED_CXT_PF_CID && vfid > MAX_NUM_VFS) {
+		DP_NOTICE(p_hwfn,
+			  "Trying to return incorrect CID belonging to VF %02x\n",
+			  vfid);
+		return;
+	}
+
 	/* Test acquired and find matching per-protocol map */
-	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+					       &type, &p_map);
 	if (!b_acquired)
 		return;
-	rel_cid = cid - p_mngr->acquired[type].start_cid;
-	__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+	rel_cid = cid - p_map->start_cid;
+	clear_bit(rel_cid, p_map->cid_map);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+		   "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+		   cid, rel_cid, vfid, type);
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+	_qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
 }
 int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_cid_acquired_map *p_map = NULL;
 	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
 	enum protocol_type type;
 	bool b_acquired;
 	/* Test acquired and find matching per-protocol map */
-	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+					       QED_CXT_PF_CID, &type, &p_map);
 	if (!b_acquired)
 		return -EINVAL;
@@ -1964,6 +2070,11 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
 	num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
 	switch (p_hwfn->hw_info.personality) {
+	case QED_PCI_ETH_IWARP:
+		/* Each QP requires one connection */
+		num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
+		proto = PROTOCOLID_IWARP;
+		break;
 	case QED_PCI_ETH_ROCE:
 		num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
 		num_cons = num_qps * 2; /* each QP requires two connections */
@@ -1999,6 +2110,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
 	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
 	switch (p_hwfn->hw_info.personality) {
+	case QED_PCI_ETH_RDMA:
+	case QED_PCI_ETH_IWARP:
 	case QED_PCI_ETH_ROCE:
 	{
 		qed_rdma_set_pf_params(p_hwfn,
@@ -2012,8 +2125,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
 		struct qed_eth_pf_params *p_params =
 		    &p_hwfn->pf_params.eth_pf_params;
-		qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
-					    p_params->num_cons, 1);
+		if (!p_params->num_vf_cons)
+			p_params->num_vf_cons =
+			    ETH_PF_PARAMS_VF_CONS_DEFAULT;
+		qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+					    p_params->num_cons,
+					    p_params->num_vf_cons);
 		p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
 		break;
 	}
@@ -2236,7 +2353,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 			  last_cid_allocated - 1);
 		if (!p_hwfn->b_rdma_enabled_in_prs) {
-			/* Enable RoCE search */
+			/* Enable RDMA search */
 			qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
 			p_hwfn->b_rdma_enabled_in_prs = true;
 		}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 53ad532dc212..17836349a274 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -54,19 +54,6 @@ struct qed_tid_mem {
 };
 /**
- * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
- *
- * @param p_hwfn
- * @param type
- * @param p_cid
- *
- * @return int
- */
-int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
-			enum protocol_type type,
-			u32 *p_cid);
-
-/**
  * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid
  *
  *
@@ -195,14 +182,51 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  */
 int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#define QED_CXT_PF_CID (0xff)
+
 /**
  * @brief qed_cxt_release - Release a cid
  *
  * @param p_hwfn
  * @param cid
  */
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
-			 u32 cid);
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief qed_cxt_release - Release a cid belonging to a vf-queue
+ *
+ * @param p_hwfn
+ * @param cid
+ * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ */
+void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
+
+/**
+ * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+			enum protocol_type type, u32 *p_cid);
+
+/**
+ * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type
+ *                           for a vf-queue
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ *
+ * @return int
+ */
+int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+			 enum protocol_type type, u32 *p_cid, u8 vfid);
+
 int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 			      enum qed_cxt_elem_type elem_type, u32 iid);
 u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index d883ad5bec6d..eaca4578435d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -44,6 +44,7 @@
 #include "qed_hsi.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
+#include "qed_rdma.h"
 #ifdef CONFIG_DCB
 #include <linux/qed/qed_eth_if.h>
 #endif
@@ -191,17 +192,19 @@ static void
 qed_dcbx_set_params(struct qed_dcbx_results *p_data,
 		    struct qed_hw_info *p_info,
 		    bool enable,
-		    bool update,
 		    u8 prio,
 		    u8 tc,
 		    enum dcbx_protocol_type type,
 		    enum qed_pci_personality personality)
 {
 	/* PF update ramrod data */
-	p_data->arr[type].update = update;
 	p_data->arr[type].enable = enable;
 	p_data->arr[type].priority = prio;
 	p_data->arr[type].tc = tc;
+	if (enable)
+		p_data->arr[type].update = UPDATE_DCB;
+	else
+		p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
 	/* QM reconf data */
 	if (p_info->personality == personality)
@@ -213,7 +216,6 @@ static void
 qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 			 struct qed_hwfn *p_hwfn,
 			 bool enable,
-			 bool update,
 			 u8 prio, u8 tc, enum dcbx_protocol_type type)
 {
 	struct qed_hw_info *p_info = &p_hwfn->hw_info;
@@ -231,7 +233,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 		personality = qed_dcbx_app_update[i].personality;
 		name = qed_dcbx_app_update[i].name;
-		qed_dcbx_set_params(p_data, p_info, enable, update,
+		qed_dcbx_set_params(p_data, p_info, enable,
 				    prio, tc, type, personality);
 	}
 }
@@ -304,22 +306,11 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 		 */
 		enable = !(type == DCBX_PROTOCOL_ETH);
-		qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+		qed_dcbx_update_app_info(p_data, p_hwfn, enable,
 					 priority, tc, type);
 		}
 	}
-	/* If RoCE-V2 TLV is not detected, driver need to use RoCE app
-	 * data for RoCE-v2 not the default app data.
-	 */
-	if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update &&
-	    p_data->arr[DCBX_PROTOCOL_ROCE].update) {
-		tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc;
-		priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority;
-		qed_dcbx_update_app_info(p_data, p_hwfn, true, true,
-					 priority, tc, DCBX_PROTOCOL_ROCE_V2);
-	}
-
 	/* Update ramrod protocol data and hw_info fields
 	 * with default info when corresponding APP TLV's are not detected.
 	 * The enabled field has a different logic for ethernet as only for
@@ -332,8 +323,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 		if (p_data->arr[type].update)
 			continue;
-		enable = !(type == DCBX_PROTOCOL_ETH);
-		qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+		enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
+		qed_dcbx_update_app_info(p_data, p_hwfn, enable,
 					 priority, tc, type);
 	}
@@ -902,10 +893,33 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
 			/* update storm FW with negotiation results */
 			qed_sp_pf_update(p_hwfn);
+
+			/* for roce PFs, we may want to enable/disable DPM
+			 * when DCBx change occurs
+			 */
+			if (p_hwfn->hw_info.personality ==
+			    QED_PCI_ETH_ROCE)
+				qed_roce_dpm_dcbx(p_hwfn, p_ptt);
 		}
 	}
 	qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
+
+	if (type == QED_DCBX_OPERATIONAL_MIB) {
+		struct qed_dcbx_results *p_data;
+		u16 val;
+
+		/* Configure in NIG which protocols support EDPM and should
+		 * honor PFC.
+		 */
+		p_data = &p_hwfn->p_dcbx_info->results;
+		val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) |
+		      (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc);
+		val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT;
+		val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;
+		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val);
+	}
+
 	qed_dcbx_aen(p_hwfn, type);
 	return rc;
@@ -923,6 +937,7 @@ int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
 void qed_dcbx_info_free(struct qed_hwfn *p_hwfn)
 {
 	kfree(p_hwfn->p_dcbx_info);
+	p_hwfn->p_dcbx_info = NULL;
 }
 static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
@@ -944,17 +959,18 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
 	p_dest->pf_id = p_src->pf_id;
 	update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
-	p_dest->update_fcoe_dcb_data_flag = update_flag;
+	p_dest->update_fcoe_dcb_data_mode = update_flag;
 	update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
-	p_dest->update_roce_dcb_data_flag = update_flag;
+	p_dest->update_roce_dcb_data_mode = update_flag;
+
 	update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
-	p_dest->update_roce_dcb_data_flag = update_flag;
+	p_dest->update_rroce_dcb_data_mode = update_flag;
 	update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
-	p_dest->update_iscsi_dcb_data_flag = update_flag;
+	p_dest->update_iscsi_dcb_data_mode = update_flag;
 	update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
-	p_dest->update_eth_dcb_data_flag = update_flag;
+	p_dest->update_eth_dcb_data_mode = update_flag;
 	p_dcb_data = &p_dest->fcoe_dcb_data;
 	qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
@@ -1458,7 +1474,7 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
 		break;
 	case DCB_CAP_ATTR_DCBX:
 		*cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
-			DCB_CAP_DCBX_VER_IEEE);
+			DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
 		break;
 	default:
 		*cap = false;
@@ -1532,6 +1548,8 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
 		mode |= DCB_CAP_DCBX_VER_IEEE;
 	if (dcbx_info->operational.cee)
 		mode |= DCB_CAP_DCBX_VER_CEE;
+	if (dcbx_info->operational.local)
+		mode |= DCB_CAP_DCBX_STATIC;
 	DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode);
 	kfree(dcbx_info);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 414e26268f3a..5feb90e049e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -52,7 +52,7 @@ enum qed_mib_read_type {
 struct qed_dcbx_app_data {
 	bool enable;		/* DCB enabled */
-	bool update;		/* Update indication */
+	u8 update;		/* Update indication */
 	u8 priority;		/* Priority */
 	u8 tc;			/* Traffic Class */
 };
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index a672f6a860dc..03c3cf77aaff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -15,13 +15,6 @@
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
-/* Chip IDs enum */
-enum chip_ids {
-	CHIP_BB_B0,
-	CHIP_K2,
-	MAX_CHIP_IDS
-};
-
 /* Memory groups enum */
 enum mem_groups {
 	MEM_GROUP_PXP_MEM,
@@ -33,7 +26,6 @@ enum mem_groups {
 	MEM_GROUP_BRB_MEM,
 	MEM_GROUP_PRS_MEM,
 	MEM_GROUP_SDM_MEM,
-	MEM_GROUP_PBUF,
 	MEM_GROUP_IOR,
 	MEM_GROUP_RAM,
 	MEM_GROUP_BTB_RAM,
@@ -45,6 +37,7 @@ enum mem_groups {
 	MEM_GROUP_CAU_PI,
 	MEM_GROUP_CAU_MEM,
 	MEM_GROUP_PXP_ILT,
+	MEM_GROUP_PBUF,
 	MEM_GROUP_MULD_MEM,
 	MEM_GROUP_BTB_MEM,
 	MEM_GROUP_IGU_MEM,
@@ -66,7 +59,6 @@ static const char * const s_mem_group_names[] = {
 	"BRB_MEM",
 	"PRS_MEM",
 	"SDM_MEM",
-	"PBUF",
 	"IOR",
 	"RAM",
 	"BTB_RAM",
@@ -78,6 +70,7 @@ static const char * const s_mem_group_names[] = {
 	"CAU_PI",
 	"CAU_MEM",
 	"PXP_ILT",
+	"PBUF",
 	"MULD_MEM",
 	"BTB_MEM",
 	"IGU_MEM",
@@ -88,48 +81,59 @@ static const char * const s_mem_group_names[] = {
 };
 /* Idle check conditions */
-static u32 cond4(const u32 *r, const u32 *imm)
+
+static u32 cond5(const u32 *r, const u32 *imm)
 {
 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
 }
-static u32 cond6(const u32 *r, const u32 *imm)
+static u32 cond7(const u32 *r, const u32 *imm)
 {
 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
 }
-static u32 cond5(const u32 *r, const u32 *imm)
+static u32 cond14(const u32 *r, const u32 *imm)
+{
+	return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
+}
+
+static u32 cond6(const u32 *r, const u32 *imm)
 {
 	return (r[0] & imm[0]) != imm[1];
 }
-static u32 cond8(const u32 *r, const u32 *imm)
+static u32 cond9(const u32 *r, const u32 *imm)
 {
 	return ((r[0] & imm[0]) >> imm[1]) !=
 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
 }
-static u32 cond9(const u32 *r, const u32 *imm)
+static u32 cond10(const u32 *r, const u32 *imm)
 {
 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
 }
-static u32 cond1(const u32 *r, const u32 *imm)
+static u32 cond4(const u32 *r, const u32 *imm)
 {
 	return (r[0] & ~imm[0]) != imm[1];
 }
 static u32 cond0(const u32 *r, const u32 *imm)
 {
+	return (r[0] & ~r[1]) != imm[0];
+}
+
+static u32 cond1(const u32 *r, const u32 *imm)
+{
 	return r[0] != imm[0];
 }
-static u32 cond10(const u32 *r, const u32 *imm)
+static u32 cond11(const u32 *r, const u32 *imm)
 {
 	return r[0] != r[1] && r[2] == imm[0];
 }
-static u32 cond11(const u32 *r, const u32 *imm)
+static u32 cond12(const u32 *r, const u32 *imm)
 {
 	return r[0] != r[1] && r[2] > imm[0];
 }
@@ -139,12 +143,12 @@ static u32 cond3(const u32 *r, const u32 *imm)
 	return r[0] != r[1];
 }
-static u32 cond12(const u32 *r, const u32 *imm)
+static u32 cond13(const u32 *r, const u32 *imm)
 {
 	return r[0] & imm[0];
 }
-static u32 cond7(const u32 *r, const u32 *imm)
+static u32 cond8(const u32 *r, const u32 *imm)
 {
 	return r[0] < (r[1] - imm[0]);
 }
@@ -169,6 +173,8 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
 	cond10,
 	cond11,
 	cond12,
+	cond13,
+	cond14,
 };
 /******************************* Data Types **********************************/
@@ -181,11 +187,6 @@ enum platform_ids {
 	MAX_PLATFORM_IDS
 };
-struct dbg_array {
-	const u32 *ptr;
-	u32 size_in_dwords;
-};
-
 struct chip_platform_defs {
 	u8 num_ports;
 	u8 num_pfs;
@@ -204,7 +205,9 @@ struct platform_defs {
 	u32 delay_factor;
 };
-/* Storm constant definitions */
+/* Storm constant definitions.
+ * Addresses are in bytes, sizes are in quad-regs.
+ */
 struct storm_defs {
 	char letter;
 	enum block_id block_id;
@@ -218,13 +221,13 @@ struct storm_defs {
 	u32 sem_sync_dbg_empty_addr;
 	u32 sem_slow_dbg_empty_addr;
 	u32 cm_ctx_wr_addr;
-	u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
+	u32 cm_conn_ag_ctx_lid_size;
 	u32 cm_conn_ag_ctx_rd_addr;
-	u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
+	u32 cm_conn_st_ctx_lid_size;
 	u32 cm_conn_st_ctx_rd_addr;
-	u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
+	u32 cm_task_ag_ctx_lid_size;
 	u32 cm_task_ag_ctx_rd_addr;
-	u32 cm_task_st_ctx_lid_size; /* In quad-regs */
+	u32 cm_task_st_ctx_lid_size;
 	u32 cm_task_st_ctx_rd_addr;
 };
@@ -233,17 +236,23 @@ struct block_defs {
 	const char *name;
 	bool has_dbg_bus[MAX_CHIP_IDS];
 	bool associated_to_storm;
-	u32 storm_id; /* Valid only if associated_to_storm is true */
+
+	/* Valid only if associated_to_storm is true */
+	u32 storm_id;
 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
 	u32 dbg_select_addr;
-	u32 dbg_cycle_enable_addr;
+	u32 dbg_enable_addr;
 	u32 dbg_shift_addr;
 	u32 dbg_force_valid_addr;
 	u32 dbg_force_frame_addr;
 	bool has_reset_bit;
-	bool unreset; /* If true, the block is taken out of reset before dump */
+
+	/* If true, block is taken out of reset before dump */
+	bool unreset;
 	enum dbg_reset_regs reset_reg;
-	u8 reset_bit_offset; /* Bit offset in reset register */
+
+	/* Bit offset in reset register */
+	u8 reset_bit_offset;
 };
 /* Reset register definitions */
@@ -262,12 +271,13 @@ struct grc_param_defs {
 	u32 crash_preset_val;
 };
+/* Address is in 128b units. Width is in bits. */
 struct rss_mem_defs {
 	const char *mem_name;
 	const char *type_name;
-	u32 addr; /* In 128b units */
+	u32 addr;
 	u32 num_entries[MAX_CHIP_IDS];
-	u32 entry_width[MAX_CHIP_IDS]; /* In bits */
+	u32 entry_width[MAX_CHIP_IDS];
 };
 struct vfc_ram_defs {
@@ -289,10 +299,20 @@ struct big_ram_defs {
 struct phy_defs {
 	const char *phy_name;
+
+	/* PHY base GRC address */
 	u32 base_addr;
+
+	/* Relative address of indirect TBUS address register (bits 0..7) */
 	u32 tbus_addr_lo_addr;
+
+	/* Relative address of indirect TBUS address register (bits 8..10) */
 	u32 tbus_addr_hi_addr;
+
+	/* Relative address of indirect TBUS data register (bits 0..7) */
 	u32 tbus_data_lo_addr;
+
+	/* Relative address of indirect TBUS data register (bits 8..11) */
 	u32 tbus_data_hi_addr;
 };
@@ -300,9 +320,11 @@ struct phy_defs {
 #define MAX_LCIDS			320
 #define MAX_LTIDS			320
+
 #define NUM_IOR_SETS			2
 #define IORS_PER_SET			176
 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
+
 #define BYTES_IN_DWORD			sizeof(u32)
 /* In the macros below, size and offset are specified in bits */
@@ -315,6 +337,7 @@ struct phy_defs {
 #define FIELD_BIT_MASK(type, field) \
 	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
 	 FIELD_DWORD_SHIFT(type, field))
+
 #define SET_VAR_FIELD(var, type, field, val) \
 	do { \
 		var[FIELD_DWORD_OFFSET(type, field)] &= \
@@ -322,31 +345,51 @@ struct phy_defs {
 		var[FIELD_DWORD_OFFSET(type, field)] |= \
 			(val) << FIELD_DWORD_SHIFT(type, field); \
 	} while (0)
+
 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
 	do { \
 		for (i = 0; i < (arr_size); i++) \
 			qed_wr(dev, ptt, addr, (arr)[i]); \
 	} while (0)
+
 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
 	do { \
 		for (i = 0; i < (arr_size); i++) \
 			(arr)[i] = qed_rd(dev, ptt, addr); \
 	} while (0)
+#ifndef DWORDS_TO_BYTES
 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
+#endif
+#ifndef BYTES_TO_DWORDS
 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
+#endif
+
+/* extra lines include a signature line + optional latency events line */
+#ifndef NUM_DBG_LINES
+#define NUM_EXTRA_DBG_LINES(block_desc) \
+	(1 + ((block_desc)->has_latency_events ? 1 : 0))
+#define NUM_DBG_LINES(block_desc) \
+	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
+#endif
+
 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
 #define RAM_LINES_TO_BYTES(lines) \
 	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
+
 #define REG_DUMP_LEN_SHIFT		24
 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
 	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
+
 #define IDLE_CHK_RULE_SIZE_DWORDS \
 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
+
 #define IDLE_CHK_RESULT_HDR_DWORDS \
 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
+
 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+
 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
 /* The sizes and offsets below are specified in bits */
@@ -363,62 +406,92 @@ struct phy_defs {
 #define VFC_RAM_ADDR_ROW_OFFSET		2
 #define VFC_RAM_ADDR_ROW_SIZE		10
 #define VFC_RAM_RESP_STRUCT_SIZE	256
+
 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
+
 #define NUM_VFC_RAM_TYPES		4
+
 #define VFC_CAM_NUM_ROWS		512
+
 #define VFC_OPCODE_CAM_RD		14
 #define VFC_OPCODE_RAM_RD		0
+
 #define NUM_RSS_MEM_TYPES		5
+
 #define NUM_BIG_RAM_TYPES		3
 #define BIG_RAM_BLOCK_SIZE_BYTES	128
 #define BIG_RAM_BLOCK_SIZE_DWORDS \
	BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
+
 #define NUM_PHY_TBUS_ADDRESSES		2048
 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
+
 #define RESET_REG_UNRESET_OFFSET	4
+
 #define STALL_DELAY_MS			500
+
 #define STATIC_DEBUG_LINE_DWORDS	9
-#define NUM_DBG_BUS_LINES		256
+
 #define NUM_COMMON_GLOBAL_PARAMS	8
+
 #define FW_IMG_MAIN			1
-#define REG_FIFO_DEPTH_ELEMENTS		32
+
+#ifndef REG_FIFO_ELEMENT_DWORDS
 #define REG_FIFO_ELEMENT_DWORDS		2
+#endif
+#define REG_FIFO_DEPTH_ELEMENTS		32
 #define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
-#define IGU_FIFO_DEPTH_ELEMENTS		64
+
+#ifndef IGU_FIFO_ELEMENT_DWORDS
 #define IGU_FIFO_ELEMENT_DWORDS		4
+#endif
+#define IGU_FIFO_DEPTH_ELEMENTS		64
 #define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
-#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
+
+#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
+#endif
+#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+
 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + \
	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
-#define MCP_TRACE_META_IMAGE_SIGNATURE  0x669955aa
+
 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
 #define EMPTY_FW_IMAGE_STR		"???????????????"
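Aside on the idle-check helpers renumbered in qed_debug.c above: the cond*() functions are not called by name; each idle-check rule produced by the firmware tools stores a condition index that is dispatched through the cond_arr function-pointer table, which is why cond13/cond14 are appended and the existing entries renumbered in lock-step. A simplified sketch of that dispatch pattern, with two sample conditions mirroring the shape of the driver's cond1/cond6 (not the full table):

    #include <stdio.h>

    typedef unsigned int u32;

    /* Each condition takes register values r[] and immediates imm[]
     * and returns non-zero when the idle-check rule fails.
     */
    static u32 cond1(const u32 *r, const u32 *imm)
    {
        return r[0] != imm[0];
    }

    static u32 cond6(const u32 *r, const u32 *imm)
    {
        return (r[0] & imm[0]) != imm[1];
    }

    /* Dispatch table: rules reference conditions by index, so the
     * table order must match the indices emitted by the FW tools.
     */
    static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
        cond1,
        cond6,
    };

    int main(void)
    {
        u32 regs[] = { 0x12345678 };
        u32 imms[] = { 0xff, 0x78 };

        /* Condition index 1: (reg & 0xff) != 0x78 -> 0 (rule passes) */
        printf("failed = %u\n", cond_arr[1](regs, imms));
        return 0;
    }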
/***************************** Constant Arrays *******************************/
+struct dbg_array {
+	const u32 *ptr;
+	u32 size_in_dwords;
+};
+
 /* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
 /* Chip constant definitions array */
 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
-	{ "bb_b0",
-	  { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
-	    {0, 0, 0}, {0, 0, 0} } },
-	{ "k2",
-	  { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
-	    {0, 0, 0}, {0, 0, 0} } }
+	{ "bb",
+	  {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
+	   {0, 0, 0},
+	   {0, 0, 0},
+	   {0, 0, 0} } },
+	{ "ah",
+	  {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
+	   {0, 0, 0},
+	   {0, 0, 0},
+	   {0, 0, 0} } }
 };
 /* Storm constant definitions array */
@@ -427,69 +500,74 @@ static struct storm_defs s_storm_defs[] = {
 	{'T', BLOCK_TSEM,
	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
	 TSEM_REG_FAST_MEMORY,
-	 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
-	 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
-	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
	 TCM_REG_CTX_RBC_ACCS,
	 4, TCM_REG_AGG_CON_CTX,
	 16, TCM_REG_SM_CON_CTX,
	 2, TCM_REG_AGG_TASK_CTX,
	 4, TCM_REG_SM_TASK_CTX},
+	/* Mstorm */
 	{'M', BLOCK_MSEM,
	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
	 MSEM_REG_FAST_MEMORY,
-	 MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
-	 MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
-	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
+	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
+	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
	 MCM_REG_CTX_RBC_ACCS,
	 1, MCM_REG_AGG_CON_CTX,
	 10, MCM_REG_SM_CON_CTX,
	 2, MCM_REG_AGG_TASK_CTX,
	 7, MCM_REG_SM_TASK_CTX},
+	/* Ustorm */
 	{'U', BLOCK_USEM,
	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
	 USEM_REG_FAST_MEMORY,
-	 USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
-	 USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
-	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
+	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
+	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
	 UCM_REG_CTX_RBC_ACCS,
	 2, UCM_REG_AGG_CON_CTX,
	 13, UCM_REG_SM_CON_CTX,
	 3, UCM_REG_AGG_TASK_CTX,
	 3, UCM_REG_SM_TASK_CTX},
+	/* Xstorm */
 	{'X', BLOCK_XSEM,
	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
	 XSEM_REG_FAST_MEMORY,
-	 XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
-	 XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
-	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
+	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
+	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
	 XCM_REG_CTX_RBC_ACCS,
	 9, XCM_REG_AGG_CON_CTX,
	 15, XCM_REG_SM_CON_CTX,
	 0, 0,
	 0, 0},
+	/* Ystorm */
 	{'Y', BLOCK_YSEM,
	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
	 YSEM_REG_FAST_MEMORY,
-	 YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
-	 YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
-	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
+	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
	 YCM_REG_CTX_RBC_ACCS,
	 2, YCM_REG_AGG_CON_CTX,
	 3, YCM_REG_SM_CON_CTX,
	 2, YCM_REG_AGG_TASK_CTX,
	 12, YCM_REG_SM_TASK_CTX},
+	/* Pstorm */
 	{'P', BLOCK_PSEM,
	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
	 PSEM_REG_FAST_MEMORY,
-	 PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
-	 PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
-	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
+	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
+	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
	 PCM_REG_CTX_RBC_ACCS,
	 0, 0,
	 10, PCM_REG_SM_CON_CTX,
@@ -498,6 +576,7 @@ static struct storm_defs s_storm_defs[] = {
 };
 /* Block definitions array */
+
 static struct block_defs block_grc_defs = {
 	"grc", {true, true}, false, 0,
@@ -587,9 +666,11 @@ static struct block_defs block_pcie_defs = {
 	"pcie", {false, true}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
-	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
-	PCIE_REG_DBG_COMMON_FORCE_FRAME,
+	PCIE_REG_DBG_COMMON_SELECT_K2,
+	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
+	PCIE_REG_DBG_COMMON_SHIFT_K2,
+	PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
+	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -691,9 +772,9 @@ static struct block_defs block_pglcs_defs = {
 	"pglcs", {false, true}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-	PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
-	PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
-	PGLCS_REG_DBG_FORCE_FRAME,
+	PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
+	PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
+	PGLCS_REG_DBG_FORCE_FRAME_K2,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
 };
@@ -991,10 +1072,11 @@ static struct block_defs block_yuld_defs = {
 	"yuld", {true, true}, false, 0,
 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
-	YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
-	YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
-	YULD_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
+	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
+	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
+	YULD_REG_DBG_FORCE_FRAME_BB_K2,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+	15
 };
 static struct block_defs block_xyld_defs = {
@@ -1143,9 +1225,9 @@ static struct block_defs block_umac_defs = {
 	"umac", {false, true}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-	UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
-	UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
-	UMAC_REG_DBG_FORCE_FRAME,
+	UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
+	UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
+	UMAC_REG_DBG_FORCE_FRAME_K2,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
 };
@@ -1177,9 +1259,9 @@ static struct block_defs block_wol_defs = {
 	"wol", {false, true}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-	WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
-	WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
-	WOL_REG_DBG_FORCE_FRAME,
+	WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
+	WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
+	WOL_REG_DBG_FORCE_FRAME_K2,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
 };
@@ -1187,9 +1269,9 @@ static struct block_defs block_bmbn_defs = {
 	"bmbn", {false, true}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
-	BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
-	BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
-	BMBN_REG_DBG_FORCE_FRAME,
+	BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
+	BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
+	BMBN_REG_DBG_FORCE_FRAME_K2,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -1204,9 +1286,9 @@ static struct block_defs block_nwm_defs = {
 	"nwm", {false, true}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-	NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
-	NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
-	NWM_REG_DBG_FORCE_FRAME,
+	NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
+	NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
+	NWM_REG_DBG_FORCE_FRAME_K2,
 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
 };
@@ -1214,9 +1296,9 @@ static struct block_defs block_nws_defs = {
 	"nws", {false, true}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-	NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
-	NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
-	NWS_REG_DBG_FORCE_FRAME,
+	NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
+	NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
+	NWS_REG_DBG_FORCE_FRAME_K2,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
 };
@@ -1224,9 +1306,9 @@ static struct block_defs block_ms_defs = {
 	"ms", {false, true}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-	MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
-	MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
-	MS_REG_DBG_FORCE_FRAME,
+	MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
+	MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
+	MS_REG_DBG_FORCE_FRAME_K2,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
 };
@@ -1234,9 +1316,11 @@ static struct block_defs block_phy_pcie_defs = {
 	"phy_pcie", {false, true}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
-	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
-	PCIE_REG_DBG_COMMON_FORCE_FRAME,
+	PCIE_REG_DBG_COMMON_SELECT_K2,
+	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
+	PCIE_REG_DBG_COMMON_SHIFT_K2,
+	PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
+	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -1261,6 +1345,13 @@ static struct block_defs block_rgfs_defs = {
 	false, false, MAX_DBG_RESET_REGS, 0
 };
+static struct block_defs block_rgsrc_defs = {
+	"rgsrc", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	false, false, MAX_DBG_RESET_REGS, 0
+};
+
 static struct block_defs block_tgfs_defs = {
 	"tgfs", {false, false}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
@@ -1268,6 +1359,13 @@ static struct block_defs block_tgfs_defs = {
 	false, false, MAX_DBG_RESET_REGS, 0
 };
+static struct block_defs block_tgsrc_defs = {
+	"tgsrc", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	false, false, MAX_DBG_RESET_REGS, 0
+};
+
 static struct block_defs block_ptld_defs = {
 	"ptld", {false, false}, false, 0,
 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
@@ -1350,6 +1448,8 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
 	&block_muld_defs,
 	&block_yuld_defs,
 	&block_xyld_defs,
+	&block_ptld_defs,
+	&block_ypld_defs,
 	&block_prm_defs,
 	&block_pbf_pb1_defs,
 	&block_pbf_pb2_defs,
@@ -1363,6 +1463,10 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
 	&block_tcfc_defs,
 	&block_igu_defs,
 	&block_cau_defs,
+	&block_rgfs_defs,
+	&block_rgsrc_defs,
+	&block_tgfs_defs,
+	&block_tgsrc_defs,
 	&block_umac_defs,
 	&block_xmac_defs,
 	&block_dbg_defs,
@@ -1376,10 +1480,6 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
 	&block_phy_pcie_defs,
 	&block_led_defs,
 	&block_avs_wrap_defs,
-	&block_rgfs_defs,
-	&block_tgfs_defs,
-	&block_ptld_defs,
-	&block_ypld_defs,
 	&block_misc_aeu_defs,
 	&block_bar0_map_defs,
 };
@@ -1392,66 +1492,151 @@ static struct platform_defs s_platform_defs[] = {
 };
 static struct grc_param_defs s_grc_param_defs[] = {
-	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_TSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_MSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_USTORM */
-	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_XSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_YSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_PSTORM */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_REGS */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RAM */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PBUF */
-	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IOR */
-	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_VFC */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM_CTX */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_ILT */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RSS */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CAU */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_QM */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MCP */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_RESERVED */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CFC */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IGU */
-	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BRB */
-	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BTB */
-	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BMB */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_NIG */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MULD */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PRS */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DMAE */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_TM */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_SDM */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DIF */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_STATIC */
-	{{0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_UNSTALL */
+	/* DBG_GRC_PARAM_DUMP_TSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},
+
+	/* DBG_GRC_PARAM_DUMP_MSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},
+
+	/* DBG_GRC_PARAM_DUMP_USTORM */
+	{{1, 1}, 0, 1, false, 1, 1},
+
+	/* DBG_GRC_PARAM_DUMP_XSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},
+
+	/* DBG_GRC_PARAM_DUMP_YSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},
+
+	/* DBG_GRC_PARAM_DUMP_PSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},
+
+	/* DBG_GRC_PARAM_DUMP_REGS */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_RAM */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_PBUF */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_IOR */
+	{{0, 0}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_VFC */
+	{{0, 0}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_CM_CTX */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_ILT */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_RSS */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_CAU */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_QM */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_MCP */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_RESERVED */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_CFC */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_IGU */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_BRB */
+	{{0, 0}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_BTB */
+	{{0, 0}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_BMB */
+	{{0, 0}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_NIG */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_MULD */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_PRS */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_DMAE */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_TM */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_SDM */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_DIF */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_STATIC */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_UNSTALL */
+	{{0, 0}, 0, 1, false, 0, 0},
+
+	/* DBG_GRC_PARAM_NUM_LCIDS */
 	{{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
-	 MAX_LCIDS},			/* DBG_GRC_PARAM_NUM_LCIDS */
+	 MAX_LCIDS},
+
+	/* DBG_GRC_PARAM_NUM_LTIDS */
 	{{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
-	 MAX_LTIDS},			/* DBG_GRC_PARAM_NUM_LTIDS */
-	{{0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_EXCLUDE_ALL */
-	{{0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_CRASH */
-	{{0, 0}, 0, 1, false, 1, 0},	/* DBG_GRC_PARAM_PARITY_SAFE */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM */
-	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PHY */
-	{{0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_NO_MCP */
-	{{0, 0}, 0, 1, false, 0, 0}	/* DBG_GRC_PARAM_NO_FW_VER */
+	 MAX_LTIDS},
+
+	/* DBG_GRC_PARAM_EXCLUDE_ALL */
+	{{0, 0}, 0, 1, true, 0, 0},
+
+	/* DBG_GRC_PARAM_CRASH */
+	{{0, 0}, 0, 1, true, 0, 0},
+
+	/* DBG_GRC_PARAM_PARITY_SAFE */
+	{{0, 0}, 0, 1, false, 1, 0},
+
+	/* DBG_GRC_PARAM_DUMP_CM */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_DUMP_PHY */
+	{{1, 1}, 0, 1, false, 0, 1},
+
+	/* DBG_GRC_PARAM_NO_MCP */
+	{{0, 0}, 0, 1, false, 0, 0},
+
+	/* DBG_GRC_PARAM_NO_FW_VER */
+	{{0, 0}, 0, 1, false, 0, 0}
 };
 static struct rss_mem_defs s_rss_mem_defs[] = {
 	{ "rss_mem_cid", "rss_cid", 0,
 	  {256, 320},
 	  {32, 32} },
+
 	{ "rss_mem_key_msb", "rss_key", 1024,
 	  {128, 208},
 	  {256, 256} },
+
 	{ "rss_mem_key_lsb", "rss_key", 2048,
 	  {128, 208},
 	  {64, 64} },
+
 	{ "rss_mem_info", "rss_info", 3072,
 	  {128, 208},
 	  {16, 16} },
+
 	{ "rss_mem_ind", "rss_ind", 4096,
-	  {(128 * 128), (128 * 208)},
+	  {16384, 26624},
 	  {16, 16} }
 };
@@ -1466,50 +1651,71 @@ static struct big_ram_defs s_big_ram_defs[] = {
 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
 	  {4800, 5632} },
+
 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
 	  {2880, 3680} },
+
 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
 	  {1152, 1152} }
 };
 static struct reset_reg_defs s_reset_regs_defs[] = {
+	/* DBG_RESET_REG_MISCS_PL_UA */
 	{ MISCS_REG_RESET_PL_UA, 0x0,
-	  {true, true} },		/* DBG_RESET_REG_MISCS_PL_UA */
+	  {true, true} },
+
+	/* DBG_RESET_REG_MISCS_PL_HV */
 	{ MISCS_REG_RESET_PL_HV, 0x0,
-	  {true, true} },		/* DBG_RESET_REG_MISCS_PL_HV */
-	{ MISCS_REG_RESET_PL_HV_2, 0x0,
-	  {false, true} },		/* DBG_RESET_REG_MISCS_PL_HV_2 */
+	  {true, true} },
+
+	/* DBG_RESET_REG_MISCS_PL_HV_2 */
+	{ MISCS_REG_RESET_PL_HV_2_K2, 0x0,
+	  {false, true} },
+
+	/* DBG_RESET_REG_MISC_PL_UA */
 	{ MISC_REG_RESET_PL_UA, 0x0,
-	  {true, true} },		/* DBG_RESET_REG_MISC_PL_UA */
+	  {true, true} },
+
+	/* DBG_RESET_REG_MISC_PL_HV */
 	{ MISC_REG_RESET_PL_HV, 0x0,
-	  {true, true} },		/* DBG_RESET_REG_MISC_PL_HV */
+	  {true, true} },
+
+	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
 	{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
-	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+	  {true, true} },
+
+	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
 	{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
-	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+	  {true, true} },
+
+	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
 	{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
-	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+	  {true, true} },
 };
 static struct phy_defs s_phy_defs[] = {
-	{"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
-	{"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
-	{"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
-	{"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+	{"nw_phy", NWS_REG_NWS_CMU_K2,
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
+	{"sgmii_phy", MS_REG_MS_CMU_K2,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
+	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
 };
 /**************************** Private Functions ******************************/
@@ -1556,7 +1762,7 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
 		dev_data->chip_id = CHIP_K2;
 		dev_data->mode_enable[MODE_K2] = 1;
 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
-		dev_data->chip_id = CHIP_BB_B0;
+		dev_data->chip_id = CHIP_BB;
 		dev_data->mode_enable[MODE_BB] = 1;
 	} else {
 		return DBG_STATUS_UNKNOWN_CHIP;
@@ -1569,9 +1775,20 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
 	qed_dbg_grc_init_params(p_hwfn);
 	dev_data->initialized = true;
+
 	return DBG_STATUS_OK;
 }
+static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
+						    enum block_id block_id)
+{
+	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
+						       MAX_CHIP_IDS +
+						       dev_data->chip_id];
+}
+
 /* Reads the FW info structure for the specified Storm from the chip,
  * and writes it to the specified fw_info pointer.
  */
@@ -1579,25 +1796,28 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
 			     struct qed_ptt *p_ptt,
 			     u8 storm_id, struct fw_info *fw_info)
 {
-	/* Read first the address that points to fw_info location.
-	 * The address is located in the last line of the Storm RAM.
- */ - u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_INT_RAM + - DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - - sizeof(struct fw_info_location); + struct storm_defs *storm = &s_storm_defs[storm_id]; struct fw_info_location fw_info_location; - u32 *dest = (u32 *)&fw_info_location; - u32 i; + u32 addr, i, *dest; memset(&fw_info_location, 0, sizeof(fw_info_location)); memset(fw_info, 0, sizeof(*fw_info)); + + /* Read first the address that points to fw_info location. + * The address is located in the last line of the Storm RAM. + */ + addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - + sizeof(fw_info_location); + dest = (u32 *)&fw_info_location; + for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); + + /* Read FW version info from Storm RAM */ if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) { - /* Read FW version info from Storm RAM */ addr = fw_info_location.grc_addr; dest = (u32 *)fw_info; for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); @@ -1606,27 +1826,30 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn, } } -/* Dumps the specified string to the specified buffer. Returns the dumped size - * in bytes (actual length + 1 for the null character termination). +/* Dumps the specified string to the specified buffer. + * Returns the dumped size in bytes. */ static u32 qed_dump_str(char *dump_buf, bool dump, const char *str) { if (dump) strcpy(dump_buf, str); + return (u32)strlen(str) + 1; } -/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size - * in bytes. +/* Dumps zeros to align the specified buffer to dwords. + * Returns the dumped size in bytes. */ static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset) { - u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size; + u8 offset_in_dword, align_size; + offset_in_dword = (u8)(byte_offset & 0x3); align_size = offset_in_dword ? 
BYTES_IN_DWORD - offset_in_dword : 0; if (dump && align_size) memset(dump_buf, 0, align_size); + return align_size; } @@ -1653,6 +1876,7 @@ static u32 qed_dump_str_param(u32 *dump_buf, /* Align buffer to next dword */ offset += qed_dump_align(char_buf + offset, dump, offset); + return BYTES_TO_DWORDS(offset); } @@ -1681,6 +1905,7 @@ static u32 qed_dump_num_param(u32 *dump_buf, if (dump) *(dump_buf + offset) = param_val; offset++; + return offset; } @@ -1695,7 +1920,6 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, char fw_ver_str[16] = EMPTY_FW_VERSION_STR; char fw_img_str[16] = EMPTY_FW_IMAGE_STR; struct fw_info fw_info = { {0}, {0} }; - int printed_chars; u32 offset = 0; if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { @@ -1705,37 +1929,32 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) { - /* Read FW version/image */ - if (!dev_data->block_in_reset - [s_storm_defs[storm_id].block_id]) { - /* read FW info for the current Storm */ - qed_read_fw_info(p_hwfn, - p_ptt, storm_id, &fw_info); - - /* Create FW version/image strings */ - printed_chars = - snprintf(fw_ver_str, - sizeof(fw_ver_str), - "%d_%d_%d_%d", - fw_info.ver.num.major, - fw_info.ver.num.minor, - fw_info.ver.num.rev, - fw_info.ver.num.eng); - if (printed_chars < 0 || printed_chars >= - sizeof(fw_ver_str)) - DP_NOTICE(p_hwfn, - "Unexpected debug error: invalid FW version string\n"); - switch (fw_info.ver.image_id) { - case FW_IMG_MAIN: - strcpy(fw_img_str, "main"); - break; - default: - strcpy(fw_img_str, "unknown"); - break; - } + struct storm_defs *storm = &s_storm_defs[storm_id]; - found = true; + /* Read FW version/image */ + if (dev_data->block_in_reset[storm->block_id]) + continue; + + /* Read FW info for the current Storm */ + qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); + + /* Create FW version/image strings */ + if (snprintf(fw_ver_str, sizeof(fw_ver_str), + "%d_%d_%d_%d", fw_info.ver.num.major, + fw_info.ver.num.minor, fw_info.ver.num.rev, + fw_info.ver.num.eng) < 0) + DP_NOTICE(p_hwfn, + "Unexpected debug error: invalid FW version string\n"); + switch (fw_info.ver.image_id) { + case FW_IMG_MAIN: + strcpy(fw_img_str, "main"); + break; + default: + strcpy(fw_img_str, "unknown"); + break; } + + found = true; } } @@ -1747,6 +1966,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp); + return offset; } @@ -1759,17 +1979,18 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn, { char mfw_ver_str[16] = EMPTY_FW_VERSION_STR; - if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { + if (dump && + !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { u32 global_section_offsize, global_section_addr, mfw_ver; u32 public_data_addr, global_section_offsize_addr; - int printed_chars; - /* Find MCP public data GRC address. - * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug. + /* Find MCP public data GRC address. Needs to be ORed with + * MCP_REG_SCRATCH due to a HW bug. 
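
The comment interrupted here ends just below; the OR with MCP_REG_SCRATCH works around a HW quirk in how the shared-memory pointer reads back. The rest of the lookup decodes an "offsize" word into a GRC address. A rough model, where both constants are assumptions rather than values taken from the qed headers:

#include <stdint.h>
#include <stdio.h>

#define MCP_REG_SCRATCH     0xe20000   /* hypothetical */
#define OFFSIZE_OFFSET_MASK 0x0000ffff /* hypothetical */

/* The offset field counts dwords from the MCP scratchpad base. */
static uint32_t section_grc_addr(uint32_t offsize)
{
	return MCP_REG_SCRATCH + (offsize & OFFSIZE_OFFSET_MASK) * 4;
}

int main(void)
{
	printf("0x%08x\n", section_grc_addr(0x00200123));
	return 0;
}
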
*/ - public_data_addr = qed_rd(p_hwfn, p_ptt, + public_data_addr = qed_rd(p_hwfn, + p_ptt, MISC_REG_SHARED_MEM_ADDR) | - MCP_REG_SCRATCH; + MCP_REG_SCRATCH; /* Find MCP public global section offset */ global_section_offsize_addr = public_data_addr + @@ -1778,9 +1999,9 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn, sizeof(offsize_t) * PUBLIC_GLOBAL; global_section_offsize = qed_rd(p_hwfn, p_ptt, global_section_offsize_addr); - global_section_addr = MCP_REG_SCRATCH + - (global_section_offsize & - OFFSIZE_OFFSET_MASK) * 4; + global_section_addr = + MCP_REG_SCRATCH + + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4; /* Read MFW version from MCP public global section */ mfw_ver = qed_rd(p_hwfn, p_ptt, @@ -1788,13 +2009,9 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn, offsetof(struct public_global, mfw_ver)); /* Dump MFW version param */ - printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str), - "%d_%d_%d_%d", - (u8) (mfw_ver >> 24), - (u8) (mfw_ver >> 16), - (u8) (mfw_ver >> 8), - (u8) mfw_ver); - if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str)) + if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", + (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), + (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0) DP_NOTICE(p_hwfn, "Unexpected debug error: invalid MFW version string\n"); } @@ -1820,11 +2037,12 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, bool dump, u8 num_specific_global_params) { - u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 offset = 0; + u8 num_params; - /* Find platform string and dump global params section header */ + /* Dump global params section header */ + num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params; offset += qed_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params); @@ -1846,25 +2064,29 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, offset += qed_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id); + return offset; } -/* Writes the last section to the specified buffer at the given offset. - * Returns the dumped size in dwords. +/* Writes the "last" section (including CRC) to the specified buffer at the + * given offset. Returns the dumped size in dwords. */ -static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump) +static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn, + u32 *dump_buf, u32 offset, bool dump) { - u32 start_offset = offset, crc = ~0; + u32 start_offset = offset; /* Dump CRC section header */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0); - /* Calculate CRC32 and add it to the dword following the "last" section. 
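
The removed comment continues just below; its replacement in this hunk makes the trailer explicit: the dword written after the "last" section header is the bitwise NOT of crc32(0xffffffff, ...) over everything dumped so far. Assuming that convention matches the standard reflected CRC-32 (zlib's crc32(0, ...)), a dump consumer could verify the trailer like this; a sketch, not driver code:

#include <stdint.h>
#include <zlib.h> /* crc32(); link with -lz */

/* Returns 1 if the last dword of the dump equals the CRC-32 of all
 * preceding bytes. Assumes the kernel's ~crc32(0xffffffff, ...)
 * equals zlib's crc32(0, ...).
 */
static int dump_crc_ok(const uint32_t *dump_buf, uint32_t num_dwords)
{
	uint32_t stored = dump_buf[num_dwords - 1];
	uint32_t calc = (uint32_t)crc32(0L,
					(const unsigned char *)dump_buf,
					(num_dwords - 1) * 4);

	return stored == calc;
}
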
- */ + /* Calculate CRC32 and add it to the dword after the "last" section */ if (dump) - *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf, + *(dump_buf + offset) = ~crc32(0xffffffff, + (u8 *)dump_buf, DWORDS_TO_BYTES(offset)); + offset++; + return offset - start_offset; } @@ -1883,11 +2105,12 @@ static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn, p_ptt, s_reset_regs_defs[i].addr); /* Check if blocks are in reset */ - for (i = 0; i < MAX_BLOCK_ID; i++) - dev_data->block_in_reset[i] = - s_block_defs[i]->has_reset_bit && - !(reg_val[s_block_defs[i]->reset_reg] & - BIT(s_block_defs[i]->reset_bit_offset)); + for (i = 0; i < MAX_BLOCK_ID; i++) { + struct block_defs *block = s_block_defs[i]; + + dev_data->block_in_reset[i] = block->has_reset_bit && + !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset)); + } } /* Enable / disable the Debug block */ @@ -1902,12 +2125,12 @@ static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val; + struct block_defs *dbg_block = s_block_defs[BLOCK_DBG]; - dbg_reset_reg_addr = - s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr; + dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr; old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr); - new_reset_reg_val = old_reset_reg_val & - ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset); + new_reset_reg_val = + old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset); qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val); qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val); @@ -1920,8 +2143,8 @@ static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode); } -/* Enable / disable Debug Bus clients according to the specified mask. - * (1 = enable, 0 = disable) +/* Enable / disable Debug Bus clients according to the specified mask + * (1 = enable, 0 = disable). 
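
The reworded comment closes just below: the client-enable register takes one bit per Debug Bus client, 1 to enable and 0 to disable. The static-debug code later in this patch builds the mask as BIT(dbg_client_id); for example (BIT() as in the kernel):

#include <stdint.h>

#define BIT(n) (1U << (n))

/* Enable only the clients of interest, e.g. IDs 3 and 7; writing 0
 * disables them all, as qed_bus_enable_clients(p_hwfn, p_ptt, 0) does.
 */
static uint32_t example_client_mask(void)
{
	return BIT(3) | BIT(7);
}
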
*/ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 client_mask) @@ -1931,10 +2154,14 @@ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn, static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset) { - const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++]; bool arg1, arg2; + const u32 *ptr; + u8 tree_val; + + /* Get next element from modes tree buffer */ + ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr; + tree_val = ((u8 *)ptr)[(*modes_buf_offset)++]; switch (tree_val) { case INIT_MODE_OP_NOT: @@ -1974,75 +2201,81 @@ static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn, static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn, enum block_id block_id, u8 mem_group_id) { + struct block_defs *block = s_block_defs[block_id]; u8 i; /* Check Storm match */ - if (s_block_defs[block_id]->associated_to_storm && + if (block->associated_to_storm && !qed_grc_is_storm_included(p_hwfn, - (enum dbg_storms)s_block_defs[block_id]->storm_id)) + (enum dbg_storms)block->storm_id)) return false; - for (i = 0; i < NUM_BIG_RAM_TYPES; i++) - if (mem_group_id == s_big_ram_defs[i].mem_group_id || - mem_group_id == s_big_ram_defs[i].ram_mem_group_id) - return qed_grc_is_included(p_hwfn, - s_big_ram_defs[i].grc_param); - if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id == - MEM_GROUP_PXP_MEM) + for (i = 0; i < NUM_BIG_RAM_TYPES; i++) { + struct big_ram_defs *big_ram = &s_big_ram_defs[i]; + + if (mem_group_id == big_ram->mem_group_id || + mem_group_id == big_ram->ram_mem_group_id) + return qed_grc_is_included(p_hwfn, big_ram->grc_param); + } + + switch (mem_group_id) { + case MEM_GROUP_PXP_ILT: + case MEM_GROUP_PXP_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP); - if (mem_group_id == MEM_GROUP_RAM) + case MEM_GROUP_RAM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM); - if (mem_group_id == MEM_GROUP_PBUF) + case MEM_GROUP_PBUF: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF); - if (mem_group_id == MEM_GROUP_CAU_MEM || - mem_group_id == MEM_GROUP_CAU_SB || - mem_group_id == MEM_GROUP_CAU_PI) + case MEM_GROUP_CAU_MEM: + case MEM_GROUP_CAU_SB: + case MEM_GROUP_CAU_PI: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU); - if (mem_group_id == MEM_GROUP_QM_MEM) + case MEM_GROUP_QM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM); - if (mem_group_id == MEM_GROUP_CONN_CFC_MEM || - mem_group_id == MEM_GROUP_TASK_CFC_MEM) + case MEM_GROUP_CFC_MEM: + case MEM_GROUP_CONN_CFC_MEM: + case MEM_GROUP_TASK_CFC_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC); - if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id == - MEM_GROUP_IGU_MSIX) + case MEM_GROUP_IGU_MEM: + case MEM_GROUP_IGU_MSIX: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU); - if (mem_group_id == MEM_GROUP_MULD_MEM) + case MEM_GROUP_MULD_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD); - if (mem_group_id == MEM_GROUP_PRS_MEM) + case MEM_GROUP_PRS_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS); - if (mem_group_id == MEM_GROUP_DMAE_MEM) + case MEM_GROUP_DMAE_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE); - if (mem_group_id == MEM_GROUP_TM_MEM) + case MEM_GROUP_TM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM); - if (mem_group_id == MEM_GROUP_SDM_MEM) + case MEM_GROUP_SDM_MEM: return qed_grc_is_included(p_hwfn, 
DBG_GRC_PARAM_DUMP_SDM); - if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id == - MEM_GROUP_RDIF_CTX) + case MEM_GROUP_TDIF_CTX: + case MEM_GROUP_RDIF_CTX: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF); - if (mem_group_id == MEM_GROUP_CM_MEM) + case MEM_GROUP_CM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM); - if (mem_group_id == MEM_GROUP_IOR) + case MEM_GROUP_IOR: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR); - - return true; + default: + return true; + } } /* Stalls all Storms */ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool stall) { - u8 reg_val = stall ? 1 : 0; + u32 reg_addr; u8 storm_id; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { - if (qed_grc_is_storm_included(p_hwfn, - (enum dbg_storms)storm_id)) { - u32 reg_addr = - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_STALL_0; + if (!qed_grc_is_storm_included(p_hwfn, + (enum dbg_storms)storm_id)) + continue; - qed_wr(p_hwfn, p_ptt, reg_addr, reg_val); - } + reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + + SEM_FAST_REG_STALL_0_BB_K2; + qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0); } msleep(STALL_DELAY_MS); @@ -2054,24 +2287,29 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 reg_val[MAX_DBG_RESET_REGS] = { 0 }; - u32 i; + u32 block_id, i; /* Fill reset regs values */ - for (i = 0; i < MAX_BLOCK_ID; i++) - if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset) - reg_val[s_block_defs[i]->reset_reg] |= - BIT(s_block_defs[i]->reset_bit_offset); + for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { + struct block_defs *block = s_block_defs[block_id]; + + if (block->has_reset_bit && block->unreset) + reg_val[block->reset_reg] |= + BIT(block->reset_bit_offset); + } /* Write reset registers */ for (i = 0; i < MAX_DBG_RESET_REGS; i++) { - if (s_reset_regs_defs[i].exists[dev_data->chip_id]) { - reg_val[i] |= s_reset_regs_defs[i].unreset_val; - if (reg_val[i]) - qed_wr(p_hwfn, - p_ptt, - s_reset_regs_defs[i].addr + - RESET_REG_UNRESET_OFFSET, reg_val[i]); - } + if (!s_reset_regs_defs[i].exists[dev_data->chip_id]) + continue; + + reg_val[i] |= s_reset_regs_defs[i].unreset_val; + + if (reg_val[i]) + qed_wr(p_hwfn, + p_ptt, + s_reset_regs_defs[i].addr + + RESET_REG_UNRESET_OFFSET, reg_val[i]); } } @@ -2095,6 +2333,7 @@ qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type, qed_get_block_attn_data(block_id, attn_type); *num_attn_regs = block_type_data->num_regs; + return &((const struct dbg_attn_reg *) s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data-> regs_offset]; @@ -2105,34 +2344,34 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + const struct dbg_attn_reg *attn_reg_arr; u8 reg_idx, num_attn_regs; u32 block_id; for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - const struct dbg_attn_reg *attn_reg_arr; - if (dev_data->block_in_reset[block_id]) continue; attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs); + for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx]; + u16 modes_buf_offset; + bool eval_mode; /* Check mode */ - bool eval_mode = GET_FIELD(reg_data->mode.data, - DBG_MODE_HDR_EVAL_MODE) > 0; - u16 modes_buf_offset = + eval_mode = GET_FIELD(reg_data->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + 
modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); + /* If Mode match: clear parity status */ if (!eval_mode || qed_is_mode_match(p_hwfn, &modes_buf_offset)) - /* Mode match - read parity status read-clear - * register. - */ qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data-> sts_clr_address)); @@ -2142,11 +2381,11 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, /* Dumps GRC registers section header. Returns the dumped size in dwords. * The following parameters are dumped: - * - 'count' = num_dumped_entries - * - 'split' = split_type - * - 'id' = split_id (dumped only if split_id >= 0) - * - 'param_name' = param_val (user param, dumped only if param_name != NULL and - * param_val != NULL) + * - count: no. of dumped entries + * - split: split type + * - id: split ID (dumped only if split_id >= 0) + * - param_name: user parameter value (dumped only if param_name != NULL + * and param_val != NULL). */ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, bool dump, @@ -2170,84 +2409,100 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, if (param_name && param_val) offset += qed_dump_str_param(dump_buf + offset, dump, param_name, param_val); + return offset; } /* Dumps the GRC registers in the specified address range. * Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. */ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len) + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 addr, u32 len, bool wide_bus) { u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i; - if (dump) - for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++) - *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr); - else - offset += len; + if (!dump) + return len; + + for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++) + *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr); + return offset; } -/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */ -static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr, - u32 len) +/* Dumps GRC registers sequence header. Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. + */ +static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, + bool dump, u32 addr, u32 len) { if (dump) *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT); + return 1; } -/* Dumps GRC registers sequence. Returns the dumped size in dwords. */ +/* Dumps GRC registers sequence. Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. + */ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len) + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 addr, u32 len, bool wide_bus) { u32 offset = 0; offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, - dump_buf + offset, dump, addr, len); + dump_buf + offset, + dump, addr, len, wide_bus); + return offset; } /* Dumps GRC registers sequence with skip cycle. * Returns the dumped size in dwords. + * - addr: start GRC address in dwords + * - total_len: total no. of dwords to dump + * - read_len: no. consecutive dwords to read + * - skip_len: no. 
of dwords to skip (and fill with zeros) */ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 total_len, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u32 addr, + u32 total_len, u32 read_len, u32 skip_len) { u32 offset = 0, reg_offset = 0; offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len); - if (dump) { - while (reg_offset < total_len) { - u32 curr_len = min_t(u32, - read_len, - total_len - reg_offset); - offset += qed_grc_dump_addr_range(p_hwfn, - p_ptt, - dump_buf + offset, - dump, addr, curr_len); + + if (!dump) + return offset + total_len; + + while (reg_offset < total_len) { + u32 curr_len = min_t(u32, read_len, total_len - reg_offset); + + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, addr, curr_len, false); + reg_offset += curr_len; + addr += curr_len; + + if (reg_offset < total_len) { + curr_len = min_t(u32, skip_len, total_len - skip_len); + memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len)); + offset += curr_len; reg_offset += curr_len; addr += curr_len; - if (reg_offset < total_len) { - curr_len = min_t(u32, - skip_len, - total_len - skip_len); - memset(dump_buf + offset, 0, - DWORDS_TO_BYTES(curr_len)); - offset += curr_len; - reg_offset += curr_len; - addr += curr_len; - } } - } else { - offset += total_len; } return offset; @@ -2266,43 +2521,48 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, bool mode_match = true; *num_dumped_reg_entries = 0; + while (input_offset < input_regs_arr.size_in_dwords) { const struct dbg_dump_cond_hdr *cond_hdr = (const struct dbg_dump_cond_hdr *) &input_regs_arr.ptr[input_offset++]; - bool eval_mode = GET_FIELD(cond_hdr->mode.data, - DBG_MODE_HDR_EVAL_MODE) > 0; + u16 modes_buf_offset; + bool eval_mode; /* Check mode/block */ + eval_mode = GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { - u16 modes_buf_offset = + modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } - if (mode_match && block_enable[cond_hdr->block_id]) { - for (i = 0; i < cond_hdr->data_size; - i++, input_offset++) { - const struct dbg_dump_reg *reg = - (const struct dbg_dump_reg *) - &input_regs_arr.ptr[input_offset]; - u32 addr, len; - - addr = GET_FIELD(reg->data, - DBG_DUMP_REG_ADDRESS); - len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH); - offset += - qed_grc_dump_reg_entry(p_hwfn, p_ptt, - dump_buf + offset, - dump, - addr, - len); - (*num_dumped_reg_entries)++; - } - } else { + if (!mode_match || !block_enable[cond_hdr->block_id]) { input_offset += cond_hdr->data_size; + continue; + } + + for (i = 0; i < cond_hdr->data_size; i++, input_offset++) { + const struct dbg_dump_reg *reg = + (const struct dbg_dump_reg *) + &input_regs_arr.ptr[input_offset]; + u32 addr, len; + bool wide_bus; + + addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS); + len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH); + wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS); + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + len, + wide_bus); + (*num_dumped_reg_entries)++; } } @@ -2350,8 +2610,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, return num_dumped_reg_entries > 0 ? offset : 0; } -/* Dumps registers according to the input registers array. - * Returns the dumped size in dwords. +/* Dumps registers according to the input registers array. 
Returns the dumped + * size in dwords. */ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -2361,29 +2621,37 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, const char *param_name, const char *param_val) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - struct chip_platform_defs *p_platform_defs; + struct chip_platform_defs *chip_platform; u32 offset = 0, input_offset = 0; - struct chip_defs *p_chip_defs; + struct chip_defs *chip; u8 port_id, pf_id, vf_id; u16 fid; - p_chip_defs = &s_chip_defs[dev_data->chip_id]; - p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id]; + chip = &s_chip_defs[dev_data->chip_id]; + chip_platform = &chip->per_platform[dev_data->platform_id]; if (dump) DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n"); + while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) { - const struct dbg_dump_split_hdr *split_hdr = + const struct dbg_dump_split_hdr *split_hdr; + struct dbg_array curr_input_regs_arr; + u32 split_data_size; + u8 split_type_id; + + split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++]; - u8 split_type_id = GET_FIELD(split_hdr->hdr, - DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); - u32 split_data_size = GET_FIELD(split_hdr->hdr, - DBG_DUMP_SPLIT_HDR_DATA_SIZE); - struct dbg_array curr_input_regs_arr = { - &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset], - split_data_size}; + split_type_id = + GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); + split_data_size = + GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_DATA_SIZE); + curr_input_regs_arr.ptr = + &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset]; + curr_input_regs_arr.size_in_dwords = split_data_size; switch (split_type_id) { case SPLIT_TYPE_NONE: @@ -2398,8 +2666,9 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, param_name, param_val); break; + case SPLIT_TYPE_PORT: - for (port_id = 0; port_id < p_platform_defs->num_ports; + for (port_id = 0; port_id < chip_platform->num_ports; port_id++) { if (dump) qed_port_pretend(p_hwfn, p_ptt, @@ -2414,9 +2683,10 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, param_val); } break; + case SPLIT_TYPE_PF: case SPLIT_TYPE_PORT_PF: - for (pf_id = 0; pf_id < p_platform_defs->num_pfs; + for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) { u8 pfid_shift = PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; @@ -2427,17 +2697,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, } offset += - qed_grc_dump_split_data(p_hwfn, p_ptt, + qed_grc_dump_split_data(p_hwfn, + p_ptt, curr_input_regs_arr, dump_buf + offset, - dump, block_enable, - "pf", pf_id, + dump, + block_enable, + "pf", + pf_id, param_name, param_val); } break; + case SPLIT_TYPE_VF: - for (vf_id = 0; vf_id < p_platform_defs->num_vfs; + for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) { u8 vfvalid_shift = PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT; @@ -2460,6 +2734,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, param_val); } break; + default: break; } @@ -2490,35 +2765,37 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, /* Write reset registers */ for (i = 0; i < MAX_DBG_RESET_REGS; i++) { - if (s_reset_regs_defs[i].exists[dev_data->chip_id]) { - u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr); + if (!s_reset_regs_defs[i].exists[dev_data->chip_id]) + continue; - offset += qed_grc_dump_reg_entry(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - addr, - 1); - num_regs++; - } + offset += 
qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + BYTES_TO_DWORDS + (s_reset_regs_defs[i].addr), 1, + false); + num_regs++; } /* Write header */ if (dump) qed_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, NULL, NULL); + return offset; } -/* Dump registers that are modified during GRC Dump and therefore must be dumped - * first. Returns the dumped size in dwords. +/* Dump registers that are modified during GRC Dump and therefore must be + * dumped first. Returns the dumped size in dwords. */ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 offset = 0, num_reg_entries = 0, block_id; + u32 block_id, offset = 0, num_reg_entries = 0; + const struct dbg_attn_reg *attn_reg_arr; u8 storm_id, reg_idx, num_attn_regs; /* Calculate header size */ @@ -2527,14 +2804,13 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, /* Write parity registers */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - const struct dbg_attn_reg *attn_reg_arr; - if (dev_data->block_in_reset[block_id] && dump) continue; attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs); + for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx]; @@ -2548,37 +2824,36 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); - if (!eval_mode || - qed_is_mode_match(p_hwfn, &modes_buf_offset)) { - /* Mode match - read and dump registers */ - addr = reg_data->mask_address; - offset += - qed_grc_dump_reg_entry(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - addr, - 1); - addr = GET_FIELD(reg_data->data, - DBG_ATTN_REG_STS_ADDRESS); - offset += - qed_grc_dump_reg_entry(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - addr, - 1); - num_reg_entries += 2; - } + if (eval_mode && + !qed_is_mode_match(p_hwfn, &modes_buf_offset)) + continue; + + /* Mode match: read & dump registers */ + addr = reg_data->mask_address; + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + 1, false); + addr = GET_FIELD(reg_data->data, + DBG_ATTN_REG_STS_ADDRESS); + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + 1, false); + num_reg_entries += 2; } } - /* Write storm stall status registers */ + /* Write Storm stall status registers */ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + struct storm_defs *storm = &s_storm_defs[storm_id]; u32 addr; - if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] && - dump) + if (dev_data->block_in_reset[storm->block_id] && dump) continue; addr = @@ -2589,7 +2864,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1); + 1, + false); num_reg_entries++; } @@ -2598,6 +2874,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, qed_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, NULL, NULL); + return offset; } @@ -2637,17 +2914,17 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn, return offset; } -/* Dumps a GRC memory header (section and params). - * The following parameters are dumped: - * name - name is dumped only if it's not NULL. - * addr - addr is dumped only if name is NULL. - * len - len is always dumped. - * width - bit_width is dumped if it's not zero. 
- * packed - packed=1 is dumped if it's not false. - * mem_group - mem_group is always dumped. - * is_storm - true only if the memory is related to a Storm. - * storm_letter - storm letter (valid only if is_storm is true). - * Returns the dumped size in dwords. +/* Dumps a GRC memory header (section and params). Returns the dumped size in + * dwords. The following parameters are dumped: + * - name: dumped only if it's not NULL. + * - addr: in dwords, dumped only if name is NULL. + * - len: in dwords, always dumped. + * - width: dumped if it's not zero. + * - packed: dumped only if it's not false. + * - mem_group: always dumped. + * - is_storm: true only if the memory is related to a Storm. + * - storm_letter: valid only if is_storm is true. + * */ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -2667,6 +2944,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, if (!len) DP_NOTICE(p_hwfn, "Unexpected GRC Dump error: dumped memory size must be non-zero\n"); + if (bit_width) num_params++; if (packed) @@ -2675,6 +2953,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, /* Dump section header */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params); + if (name) { /* Dump name */ if (is_storm) { @@ -2694,14 +2973,15 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, len, buf); } else { /* Dump address */ + u32 addr_in_bytes = DWORDS_TO_BYTES(addr); + offset += qed_dump_num_param(dump_buf + offset, - dump, "addr", - DWORDS_TO_BYTES(addr)); + dump, "addr", addr_in_bytes); if (dump && len > 64) DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping %d registers from address 0x%x...\n", - len, (u32)DWORDS_TO_BYTES(addr)); + len, addr_in_bytes); } /* Dump len */ @@ -2727,11 +3007,13 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, } offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf); + return offset; } /* Dumps a single GRC memory. If name is NULL, the memory is stored by address. * Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. 
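
The dwords convention called out in this comment (closed just below) runs through all of these helpers: GRC addresses and lengths travel in dwords and are converted only at the register-access boundary. The conversions are the usual factor-of-four macros; assumed definitions shown here, the real ones live in the qed headers:

#define BYTES_IN_DWORD     4
#define DWORDS_TO_BYTES(n) ((n) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(n) ((n) / BYTES_IN_DWORD)

/* e.g. a scratchpad at byte address 0xe20000 is passed around as
 * BYTES_TO_DWORDS(0xe20000) = 0x388000 dwords.
 */
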
*/ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -2740,6 +3022,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, const char *name, u32 addr, u32 len, + bool wide_bus, u32 bit_width, bool packed, const char *mem_group, @@ -2758,7 +3041,9 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, mem_group, is_storm, storm_letter); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, - dump_buf + offset, dump, addr, len); + dump_buf + offset, + dump, addr, len, wide_bus); + return offset; } @@ -2773,20 +3058,21 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, while (input_offset < input_mems_arr.size_in_dwords) { const struct dbg_dump_cond_hdr *cond_hdr; + u16 modes_buf_offset; u32 num_entries; bool eval_mode; cond_hdr = (const struct dbg_dump_cond_hdr *) &input_mems_arr.ptr[input_offset++]; - eval_mode = GET_FIELD(cond_hdr->mode.data, - DBG_MODE_HDR_EVAL_MODE) > 0; + num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS; /* Check required mode */ + eval_mode = GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { - u16 modes_buf_offset = + modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); - mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } @@ -2796,81 +3082,87 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, continue; } - num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS; for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) { const struct dbg_dump_mem *mem = (const struct dbg_dump_mem *) &input_mems_arr.ptr[input_offset]; - u8 mem_group_id; + u8 mem_group_id = GET_FIELD(mem->dword0, + DBG_DUMP_MEM_MEM_GROUP_ID); + bool is_storm = false, mem_wide_bus; + enum dbg_grc_params grc_param; + char storm_letter = 'a'; + enum block_id block_id; + u32 mem_addr, mem_len; - mem_group_id = GET_FIELD(mem->dword0, - DBG_DUMP_MEM_MEM_GROUP_ID); if (mem_group_id >= MEM_GROUPS_NUM) { DP_NOTICE(p_hwfn, "Invalid mem_group_id\n"); return 0; } - if (qed_grc_is_mem_included(p_hwfn, - (enum block_id)cond_hdr->block_id, - mem_group_id)) { - u32 mem_addr = GET_FIELD(mem->dword0, - DBG_DUMP_MEM_ADDRESS); - u32 mem_len = GET_FIELD(mem->dword1, - DBG_DUMP_MEM_LENGTH); - enum dbg_grc_params grc_param; - char storm_letter = 'a'; - bool is_storm = false; - - /* Update memory length for CCFC/TCFC memories - * according to number of LCIDs/LTIDs. - */ - if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) { - if (mem_len % MAX_LCIDS != 0) { - DP_NOTICE(p_hwfn, - "Invalid CCFC connection memory size\n"); - return 0; - } - - grc_param = DBG_GRC_PARAM_NUM_LCIDS; - mem_len = qed_grc_get_param(p_hwfn, - grc_param) * - (mem_len / MAX_LCIDS); - } else if (mem_group_id == - MEM_GROUP_TASK_CFC_MEM) { - if (mem_len % MAX_LTIDS != 0) { - DP_NOTICE(p_hwfn, - "Invalid TCFC task memory size\n"); - return 0; - } - - grc_param = DBG_GRC_PARAM_NUM_LTIDS; - mem_len = qed_grc_get_param(p_hwfn, - grc_param) * - (mem_len / MAX_LTIDS); + block_id = (enum block_id)cond_hdr->block_id; + if (!qed_grc_is_mem_included(p_hwfn, + block_id, + mem_group_id)) + continue; + + mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS); + mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH); + mem_wide_bus = GET_FIELD(mem->dword1, + DBG_DUMP_MEM_WIDE_BUS); + + /* Update memory length for CCFC/TCFC memories + * according to number of LCIDs/LTIDs. 
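
The CCFC/TCFC adjustment described in the comment just above (and implemented right after it) rescales the dumped length: the raw mem_len covers MAX_LCIDS or MAX_LTIDS logical IDs, but only the configured number is dumped. In isolation, a minimal sketch:

#include <stdint.h>

/* Returns 0 on a size that is not a whole multiple of max_ids,
 * matching the error handling in the hunk above.
 */
static uint32_t scale_cfc_mem_len(uint32_t mem_len, uint32_t max_ids,
				  uint32_t configured_ids)
{
	if (mem_len % max_ids)
		return 0;

	return configured_ids * (mem_len / max_ids);
}
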
+ */ + if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) { + if (mem_len % MAX_LCIDS) { + DP_NOTICE(p_hwfn, + "Invalid CCFC connection memory size\n"); + return 0; } - /* If memory is associated with Storm, update - * Storm details. - */ - if (s_block_defs[cond_hdr->block_id]-> - associated_to_storm) { - is_storm = true; - storm_letter = - s_storm_defs[s_block_defs[ - cond_hdr->block_id]-> - storm_id].letter; + grc_param = DBG_GRC_PARAM_NUM_LCIDS; + mem_len = qed_grc_get_param(p_hwfn, grc_param) * + (mem_len / MAX_LCIDS); + } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) { + if (mem_len % MAX_LTIDS) { + DP_NOTICE(p_hwfn, + "Invalid TCFC task memory size\n"); + return 0; } - /* Dump memory */ - offset += qed_grc_dump_mem(p_hwfn, p_ptt, - dump_buf + offset, dump, NULL, - mem_addr, mem_len, 0, + grc_param = DBG_GRC_PARAM_NUM_LTIDS; + mem_len = qed_grc_get_param(p_hwfn, grc_param) * + (mem_len / MAX_LTIDS); + } + + /* If memory is associated with Storm, update Storm + * details. + */ + if (s_block_defs + [cond_hdr->block_id]->associated_to_storm) { + is_storm = true; + storm_letter = + s_storm_defs[s_block_defs + [cond_hdr->block_id]-> + storm_id].letter; + } + + /* Dump memory */ + offset += qed_grc_dump_mem(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + NULL, + mem_addr, + mem_len, + mem_wide_bus, + 0, false, s_mem_group_names[mem_group_id], - is_storm, storm_letter); - } - } + is_storm, + storm_letter); + } } return offset; @@ -2887,16 +3179,22 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) { - const struct dbg_dump_split_hdr *split_hdr = - (const struct dbg_dump_split_hdr *) + const struct dbg_dump_split_hdr *split_hdr; + struct dbg_array curr_input_mems_arr; + u32 split_data_size; + u8 split_type_id; + + split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++]; - u8 split_type_id = GET_FIELD(split_hdr->hdr, - DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); - u32 split_data_size = GET_FIELD(split_hdr->hdr, - DBG_DUMP_SPLIT_HDR_DATA_SIZE); - struct dbg_array curr_input_mems_arr = { - &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset], - split_data_size}; + split_type_id = + GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); + split_data_size = + GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_DATA_SIZE); + curr_input_mems_arr.ptr = + &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset]; + curr_input_mems_arr.size_in_dwords = split_data_size; switch (split_type_id) { case SPLIT_TYPE_NONE: @@ -2906,6 +3204,7 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, dump_buf + offset, dump); break; + default: DP_NOTICE(p_hwfn, "Dumping split memories is currently not supported\n"); @@ -2920,6 +3219,7 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, /* Dumps GRC context data for the specified Storm. * Returns the dumped size in dwords. + * The lid_size argument is specified in quad-regs. 
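
qed_grc_dump_ctx_data(), which begins just below, reads context through an indirect window: dword i of logical ID lid is selected by writing (i << 9) | lid to the Storm's CM context-write register, then read back from rd_reg_addr. A stubbed model of the access pattern, with mmio_read32()/mmio_write32() as hypothetical stand-ins for qed_rd()/qed_wr():

#include <stdint.h>

/* Hypothetical MMIO stubs standing in for qed_rd()/qed_wr(). */
static uint32_t regs[0x10000];
static uint32_t mmio_read32(uint32_t addr) { return regs[addr & 0xffff]; }
static void mmio_write32(uint32_t addr, uint32_t val)
{
	regs[addr & 0xffff] = val;
}

/* Read one logical ID's context, one dword at a time. */
static void read_ctx_lid(uint32_t cm_ctx_wr_addr, uint32_t rd_reg_addr,
			 uint32_t lid, uint32_t lid_size_dwords,
			 uint32_t *out)
{
	uint32_t i;

	for (i = 0; i < lid_size_dwords; i++) {
		mmio_write32(cm_ctx_wr_addr, (i << 9) | lid);
		out[i] = mmio_read32(rd_reg_addr);
	}
}
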
*/ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -2931,13 +3231,15 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, u32 rd_reg_addr, u8 storm_id) { - u32 i, lid, total_size; - u32 offset = 0; + struct storm_defs *storm = &s_storm_defs[storm_id]; + u32 i, lid, total_size, offset = 0; if (!lid_size) return 0; + lid_size *= BYTES_IN_DWORD; total_size = num_lids * lid_size; + offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, @@ -2945,25 +3247,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, 0, total_size, lid_size * 32, - false, - name, - true, s_storm_defs[storm_id].letter); + false, name, true, storm->letter); + + if (!dump) + return offset + total_size; /* Dump context data */ - if (dump) { - for (lid = 0; lid < num_lids; lid++) { - for (i = 0; i < lid_size; i++, offset++) { - qed_wr(p_hwfn, - p_ptt, - s_storm_defs[storm_id].cm_ctx_wr_addr, - (i << 9) | lid); - *(dump_buf + offset) = qed_rd(p_hwfn, - p_ptt, - rd_reg_addr); - } + for (lid = 0; lid < num_lids; lid++) { + for (i = 0; i < lid_size; i++, offset++) { + qed_wr(p_hwfn, + p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid); + *(dump_buf + offset) = qed_rd(p_hwfn, + p_ptt, rd_reg_addr); } - } else { - offset += total_size; } return offset; @@ -2973,15 +3269,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { + enum dbg_grc_params grc_param; u32 offset = 0; u8 storm_id; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + struct storm_defs *storm = &s_storm_defs[storm_id]; + if (!qed_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id)) continue; /* Dump Conn AG context size */ + grc_param = DBG_GRC_PARAM_NUM_LCIDS; offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, @@ -2989,14 +3289,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, dump, "CONN_AG_CTX", qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LCIDS), - s_storm_defs[storm_id]. - cm_conn_ag_ctx_lid_size, - s_storm_defs[storm_id]. - cm_conn_ag_ctx_rd_addr, + grc_param), + storm->cm_conn_ag_ctx_lid_size, + storm->cm_conn_ag_ctx_rd_addr, storm_id); /* Dump Conn ST context size */ + grc_param = DBG_GRC_PARAM_NUM_LCIDS; offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, @@ -3004,14 +3303,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, dump, "CONN_ST_CTX", qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LCIDS), - s_storm_defs[storm_id]. - cm_conn_st_ctx_lid_size, - s_storm_defs[storm_id]. - cm_conn_st_ctx_rd_addr, + grc_param), + storm->cm_conn_st_ctx_lid_size, + storm->cm_conn_st_ctx_rd_addr, storm_id); /* Dump Task AG context size */ + grc_param = DBG_GRC_PARAM_NUM_LTIDS; offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, @@ -3019,14 +3317,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, dump, "TASK_AG_CTX", qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LTIDS), - s_storm_defs[storm_id]. - cm_task_ag_ctx_lid_size, - s_storm_defs[storm_id]. - cm_task_ag_ctx_rd_addr, + grc_param), + storm->cm_task_ag_ctx_lid_size, + storm->cm_task_ag_ctx_rd_addr, storm_id); /* Dump Task ST context size */ + grc_param = DBG_GRC_PARAM_NUM_LTIDS; offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, @@ -3034,11 +3331,9 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, dump, "TASK_ST_CTX", qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LTIDS), - s_storm_defs[storm_id]. - cm_task_st_ctx_lid_size, - s_storm_defs[storm_id]. 
- cm_task_st_ctx_rd_addr, + grc_param), + storm->cm_task_st_ctx_lid_size, + storm->cm_task_st_ctx_rd_addr, storm_id); } @@ -3050,8 +3345,8 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { char buf[10] = "IOR_SET_?"; + u32 addr, offset = 0; u8 storm_id, set_id; - u32 offset = 0; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { struct storm_defs *storm = &s_storm_defs[storm_id]; @@ -3061,11 +3356,9 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, continue; for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) { - u32 dwords, addr; - - dwords = storm->sem_fast_mem_addr + - SEM_FAST_REG_STORM_REG_FILE; - addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id); + addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + + SEM_FAST_REG_STORM_REG_FILE) + + IOR_SET_OFFSET(set_id); buf[strlen(buf) - 1] = '0' + set_id; offset += qed_grc_dump_mem(p_hwfn, p_ptt, @@ -3074,6 +3367,7 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, buf, addr, IORS_PER_SET, + false, 32, false, "ior", @@ -3091,10 +3385,10 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u8 storm_id) { u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS; + struct storm_defs *storm = &s_storm_defs[storm_id]; u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 }; u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 }; - u32 offset = 0; - u32 row, i; + u32 row, i, offset = 0; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, @@ -3103,38 +3397,34 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn, 0, total_size, 256, - false, - "vfc_cam", - true, s_storm_defs[storm_id].letter); - if (dump) { - /* Prepare CAM address */ - SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD); - for (row = 0; row < VFC_CAM_NUM_ROWS; - row++, offset += VFC_CAM_RESP_DWORDS) { - /* Write VFC CAM command */ - SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row); - ARR_REG_WR(p_hwfn, - p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_DATA_WR, - cam_cmd, VFC_CAM_CMD_DWORDS); + false, "vfc_cam", true, storm->letter); - /* Write VFC CAM address */ - ARR_REG_WR(p_hwfn, - p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_ADDR, - cam_addr, VFC_CAM_ADDR_DWORDS); + if (!dump) + return offset + total_size; - /* Read VFC CAM read response */ - ARR_REG_RD(p_hwfn, - p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_DATA_RD, - dump_buf + offset, VFC_CAM_RESP_DWORDS); - } - } else { - offset += total_size; + /* Prepare CAM address */ + SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD); + + for (row = 0; row < VFC_CAM_NUM_ROWS; + row++, offset += VFC_CAM_RESP_DWORDS) { + /* Write VFC CAM command */ + SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row); + ARR_REG_WR(p_hwfn, + p_ptt, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, + cam_cmd, VFC_CAM_CMD_DWORDS); + + /* Write VFC CAM address */ + ARR_REG_WR(p_hwfn, + p_ptt, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, + cam_addr, VFC_CAM_ADDR_DWORDS); + + /* Read VFC CAM read response */ + ARR_REG_RD(p_hwfn, + p_ptt, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, + dump_buf + offset, VFC_CAM_RESP_DWORDS); } return offset; @@ -3148,10 +3438,10 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, u8 storm_id, struct vfc_ram_defs *ram_defs) { u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS; + struct storm_defs *storm = &s_storm_defs[storm_id]; u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 }; u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 }; - 
u32 offset = 0; - u32 row, i; + u32 row, i, offset = 0; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, @@ -3162,7 +3452,7 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, 256, false, ram_defs->type_name, - true, s_storm_defs[storm_id].letter); + true, storm->letter); /* Prepare RAM address */ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD); @@ -3176,23 +3466,20 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, /* Write VFC RAM command */ ARR_REG_WR(p_hwfn, p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_DATA_WR, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS); /* Write VFC RAM address */ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row); ARR_REG_WR(p_hwfn, p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_ADDR, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS); /* Read VFC RAM read response */ ARR_REG_RD(p_hwfn, p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_DATA_RD, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS); } @@ -3208,28 +3495,27 @@ static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn, u32 offset = 0; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { - if (qed_grc_is_storm_included(p_hwfn, - (enum dbg_storms)storm_id) && - s_storm_defs[storm_id].has_vfc && - (storm_id != DBG_PSTORM_ID || - dev_data->platform_id == PLATFORM_ASIC)) { - /* Read CAM */ - offset += qed_grc_dump_vfc_cam(p_hwfn, + if (!qed_grc_is_storm_included(p_hwfn, + (enum dbg_storms)storm_id) || + !s_storm_defs[storm_id].has_vfc || + (storm_id == DBG_PSTORM_ID && dev_data->platform_id != + PLATFORM_ASIC)) + continue; + + /* Read CAM */ + offset += qed_grc_dump_vfc_cam(p_hwfn, + p_ptt, + dump_buf + offset, + dump, storm_id); + + /* Read RAM */ + for (i = 0; i < NUM_VFC_RAM_TYPES; i++) + offset += qed_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, - dump, storm_id); - - /* Read RAM */ - for (i = 0; i < NUM_VFC_RAM_TYPES; i++) - offset += qed_grc_dump_vfc_ram(p_hwfn, - p_ptt, - dump_buf + - offset, - dump, - storm_id, - &s_vfc_ram_defs - [i]); - } + dump, + storm_id, + &s_vfc_ram_defs[i]); } return offset; @@ -3244,14 +3530,17 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, u8 rss_mem_id; for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) { - struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id]; - u32 num_entries = rss_defs->num_entries[dev_data->chip_id]; - u32 entry_width = rss_defs->entry_width[dev_data->chip_id]; - u32 total_dwords = (num_entries * entry_width) / 32; - u32 size = RSS_REG_RSS_RAM_DATA_SIZE; - bool packed = (entry_width == 16); - u32 rss_addr = rss_defs->addr; - u32 i, addr; + u32 rss_addr, num_entries, entry_width, total_dwords, i; + struct rss_mem_defs *rss_defs; + u32 addr, size; + bool packed; + + rss_defs = &s_rss_mem_defs[rss_mem_id]; + rss_addr = rss_defs->addr; + num_entries = rss_defs->num_entries[dev_data->chip_id]; + entry_width = rss_defs->entry_width[dev_data->chip_id]; + total_dwords = (num_entries * entry_width) / 32; + packed = (entry_width == 16); offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, @@ -3263,23 +3552,23 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, packed, rss_defs->type_name, false, 0); + /* Dump RSS data */ if (!dump) { offset += total_dwords; continue; } - /* Dump RSS data */ - for (i = 0; i < total_dwords; - i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) { - addr = 
BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA); + addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA); + size = RSS_REG_RSS_RAM_DATA_SIZE; + for (i = 0; i < total_dwords; i += size, rss_addr++) { qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr); - offset += qed_grc_dump_addr_range(p_hwfn, - p_ptt, - dump_buf + - offset, - dump, - addr, - size); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + size, + false); } } @@ -3316,10 +3605,11 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, BIG_RAM_BLOCK_SIZE_BYTES * 8, false, type_name, false, 0); + /* Read and dump Big RAM data */ if (!dump) return offset + ram_size; - /* Read and dump Big RAM data */ + /* Dump Big RAM */ for (i = 0; i < total_blocks / 2; i++) { u32 addr, len; @@ -3331,7 +3621,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - len); + len, + false); } return offset; @@ -3359,7 +3650,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), MCP_REG_SCRATCH_SIZE, - 0, false, "MCP", false, 0); + false, 0, false, "MCP", false, 0); /* Dump MCP cpu_reg_file */ offset += qed_grc_dump_mem(p_hwfn, @@ -3369,7 +3660,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), MCP_REG_CPU_REG_FILE_SIZE, - 0, false, "MCP", false, 0); + false, 0, false, "MCP", false, 0); /* Dump MCP registers */ block_enable[BLOCK_MCP] = true; @@ -3387,11 +3678,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1); + 1, + false); /* Release MCP */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n"); + return offset; } @@ -3404,14 +3697,26 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, u8 phy_id; for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) { - struct phy_defs *phy_defs = &s_phy_defs[phy_id]; - int printed_chars; - - printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s", - phy_defs->phy_name); - if (printed_chars < 0 || printed_chars >= sizeof(mem_name)) + u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr; + struct phy_defs *phy_defs; + u8 *bytes_buf; + + phy_defs = &s_phy_defs[phy_id]; + addr_lo_addr = phy_defs->base_addr + + phy_defs->tbus_addr_lo_addr; + addr_hi_addr = phy_defs->base_addr + + phy_defs->tbus_addr_hi_addr; + data_lo_addr = phy_defs->base_addr + + phy_defs->tbus_data_lo_addr; + data_hi_addr = phy_defs->base_addr + + phy_defs->tbus_data_hi_addr; + bytes_buf = (u8 *)(dump_buf + offset); + + if (snprintf(mem_name, sizeof(mem_name), "tbus_%s", + phy_defs->phy_name) < 0) DP_NOTICE(p_hwfn, "Unexpected debug error: invalid PHY memory name\n"); + offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, @@ -3419,34 +3724,26 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0); - if (dump) { - u32 addr_lo_addr = phy_defs->base_addr + - phy_defs->tbus_addr_lo_addr; - u32 addr_hi_addr = phy_defs->base_addr + - phy_defs->tbus_addr_hi_addr; - u32 data_lo_addr = phy_defs->base_addr + - phy_defs->tbus_data_lo_addr; - u32 data_hi_addr = phy_defs->base_addr + - phy_defs->tbus_data_hi_addr; - u8 *bytes_buf = (u8 *)(dump_buf + offset); - - for (tbus_hi_offset = 0; - tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); - tbus_hi_offset++) { + + if (!dump) { + offset += PHY_DUMP_SIZE_DWORDS; + continue; + } + + for (tbus_hi_offset = 0; + tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); + tbus_hi_offset++) { + 
qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset); + for (tbus_lo_offset = 0; tbus_lo_offset < 256; + tbus_lo_offset++) { qed_wr(p_hwfn, - p_ptt, addr_hi_addr, tbus_hi_offset); - for (tbus_lo_offset = 0; tbus_lo_offset < 256; - tbus_lo_offset++) { - qed_wr(p_hwfn, - p_ptt, - addr_lo_addr, tbus_lo_offset); - *(bytes_buf++) = - (u8)qed_rd(p_hwfn, p_ptt, - data_lo_addr); - *(bytes_buf++) = - (u8)qed_rd(p_hwfn, p_ptt, - data_hi_addr); - } + p_ptt, addr_lo_addr, tbus_lo_offset); + *(bytes_buf++) = (u8)qed_rd(p_hwfn, + p_ptt, + data_lo_addr); + *(bytes_buf++) = (u8)qed_rd(p_hwfn, + p_ptt, + data_hi_addr); } } @@ -3460,16 +3757,17 @@ static void qed_config_dbg_line(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum block_id block_id, u8 line_id, - u8 cycle_en, - u8 right_shift, u8 force_valid, u8 force_frame) + u8 enable_mask, + u8 right_shift, + u8 force_valid_mask, u8 force_frame_mask) { - struct block_defs *p_block_defs = s_block_defs[block_id]; + struct block_defs *block = s_block_defs[block_id]; - qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id); - qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en); - qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift); - qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid); - qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame); + qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id); + qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask); + qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift); + qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask); + qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask); } /* Dumps Static Debug data. Returns the dumped size in dwords. */ @@ -3477,10 +3775,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 offset = 0, block_id, line_id; - struct block_defs *p_block_defs; + u32 block_id, line_id, offset = 0; + + /* Skip static debug if a debug bus recording is in progress */ + if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON)) + return 0; if (dump) { DP_VERBOSE(p_hwfn, @@ -3488,11 +3788,11 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, /* Disable all blocks debug output */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - p_block_defs = s_block_defs[block_id]; + struct block_defs *block = s_block_defs[block_id]; - if (p_block_defs->has_dbg_bus[dev_data->chip_id]) - qed_wr(p_hwfn, p_ptt, - p_block_defs->dbg_cycle_enable_addr, 0); + if (block->has_dbg_bus[dev_data->chip_id]) + qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, + 0); } qed_bus_reset_dbg_block(p_hwfn, p_ptt); @@ -3506,59 +3806,71 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, /* Dump all static debug lines for each relevant block */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - p_block_defs = s_block_defs[block_id]; + struct block_defs *block = s_block_defs[block_id]; + struct dbg_bus_block *block_desc; + u32 block_dwords, addr, len; + u8 dbg_client_id; - if (!p_block_defs->has_dbg_bus[dev_data->chip_id]) + if (!block->has_dbg_bus[dev_data->chip_id]) continue; + block_desc = + get_dbg_bus_block_desc(p_hwfn, + (enum block_id)block_id); + block_dwords = NUM_DBG_LINES(block_desc) * + STATIC_DEBUG_LINE_DWORDS; + /* Dump static section params */ offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + 
offset, dump, - p_block_defs->name, 0, - block_dwords, 32, false, - "STATIC", false, 0); - - if (dump && !dev_data->block_in_reset[block_id]) { - u8 dbg_client_id = - p_block_defs->dbg_client_id[dev_data->chip_id]; - u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA); - u32 len = STATIC_DEBUG_LINE_DWORDS; - - /* Enable block's client */ - qed_bus_enable_clients(p_hwfn, p_ptt, - BIT(dbg_client_id)); - - for (line_id = 0; line_id < NUM_DBG_BUS_LINES; - line_id++) { - /* Configure debug line ID */ - qed_config_dbg_line(p_hwfn, - p_ptt, - (enum block_id)block_id, - (u8)line_id, - 0xf, 0, 0, 0); + block->name, + 0, + block_dwords, + 32, false, "STATIC", false, 0); - /* Read debug line info */ - offset += - qed_grc_dump_addr_range(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - addr, - len); - } + if (!dump) { + offset += block_dwords; + continue; + } - /* Disable block's client and debug output */ - qed_bus_enable_clients(p_hwfn, p_ptt, 0); - qed_wr(p_hwfn, p_ptt, - p_block_defs->dbg_cycle_enable_addr, 0); - } else { - /* All lines are invalid - dump zeros */ - if (dump) - memset(dump_buf + offset, 0, - DWORDS_TO_BYTES(block_dwords)); + /* If all lines are invalid - dump zeros */ + if (dev_data->block_in_reset[block_id]) { + memset(dump_buf + offset, 0, + DWORDS_TO_BYTES(block_dwords)); offset += block_dwords; + continue; } + + /* Enable block's client */ + dbg_client_id = block->dbg_client_id[dev_data->chip_id]; + qed_bus_enable_clients(p_hwfn, + p_ptt, + BIT(dbg_client_id)); + + addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA); + len = STATIC_DEBUG_LINE_DWORDS; + for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); + line_id++) { + /* Configure debug line ID */ + qed_config_dbg_line(p_hwfn, + p_ptt, + (enum block_id)block_id, + (u8)line_id, 0xf, 0, 0, 0); + + /* Read debug line info */ + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + len, + true); + } + + /* Disable block's client and debug output */ + qed_bus_enable_clients(p_hwfn, p_ptt, 0); + qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0); } if (dump) { @@ -3584,8 +3896,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, *num_dumped_dwords = 0; - /* Find port mode */ if (dump) { + /* Find port mode */ switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { case 0: port_mode = 1; @@ -3597,11 +3909,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, port_mode = 4; break; } - } - /* Update reset state */ - if (dump) + /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); + } /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, @@ -3635,7 +3946,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, } /* Disable all parities using MFW command */ - if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) { + if (dump && + !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) { parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1); if (!parities_masked) { DP_NOTICE(p_hwfn, @@ -3661,9 +3973,9 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, /* Dump all regs */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) { - /* Dump all blocks except MCP */ bool block_enable[MAX_BLOCK_ID]; + /* Dump all blocks except MCP */ for (i = 0; i < MAX_BLOCK_ID; i++) block_enable[i] = true; block_enable[BLOCK_MCP] = false; @@ -3732,7 +4044,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump); /* Dump last section */ - offset += qed_dump_last_section(dump_buf, offset, dump); 
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + if (dump) { /* Unstall storms */ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL)) @@ -3763,19 +4076,20 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, const struct dbg_idle_chk_rule *rule, u16 fail_entry_id, u32 *cond_reg_values) { - const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *) - s_dbg_arrays - [BIN_BUF_DBG_IDLE_CHK_REGS]. - ptr)[rule->reg_offset]; - const struct dbg_idle_chk_cond_reg *cond_regs = ®s[0].cond_reg; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - struct dbg_idle_chk_result_hdr *hdr = - (struct dbg_idle_chk_result_hdr *)dump_buf; - const struct dbg_idle_chk_info_reg *info_regs = - ®s[rule->num_cond_regs].info_reg; - u32 next_reg_offset = 0, i, offset = 0; + const struct dbg_idle_chk_cond_reg *cond_regs; + const struct dbg_idle_chk_info_reg *info_regs; + u32 i, next_reg_offset = 0, offset = 0; + struct dbg_idle_chk_result_hdr *hdr; + const union dbg_idle_chk_reg *regs; u8 reg_id; + hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; + regs = &((const union dbg_idle_chk_reg *) + s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset]; + cond_regs = ®s[0].cond_reg; + info_regs = ®s[rule->num_cond_regs].info_reg; + /* Dump rule data */ if (dump) { memset(hdr, 0, sizeof(*hdr)); @@ -3790,33 +4104,31 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, /* Dump condition register values */ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) { const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id]; + struct dbg_idle_chk_result_reg_hdr *reg_hdr; - /* Write register header */ - if (dump) { - struct dbg_idle_chk_result_reg_hdr *reg_hdr = - (struct dbg_idle_chk_result_reg_hdr *)(dump_buf - + offset); - offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; - memset(reg_hdr, 0, - sizeof(struct dbg_idle_chk_result_reg_hdr)); - reg_hdr->start_entry = reg->start_entry; - reg_hdr->size = reg->entry_size; - SET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, - reg->num_entries > 1 || reg->start_entry > 0 - ? 1 : 0); - SET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id); + reg_hdr = (struct dbg_idle_chk_result_reg_hdr *) + (dump_buf + offset); - /* Write register values */ - for (i = 0; i < reg_hdr->size; - i++, next_reg_offset++, offset++) - dump_buf[offset] = - cond_reg_values[next_reg_offset]; - } else { + /* Write register header */ + if (!dump) { offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size; + continue; } + + offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; + memset(reg_hdr, 0, sizeof(*reg_hdr)); + reg_hdr->start_entry = reg->start_entry; + reg_hdr->size = reg->entry_size; + SET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, + reg->num_entries > 1 || reg->start_entry > 0 ? 
1 : 0); + SET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id); + + /* Write register values */ + for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++) + dump_buf[offset] = cond_reg_values[next_reg_offset]; } /* Dump info register values */ @@ -3824,12 +4136,12 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id]; u32 block_id; + /* Check if register's block is in reset */ if (!dump) { offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size; continue; } - /* Check if register's block is in reset */ block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID); if (block_id >= MAX_BLOCK_ID) { DP_NOTICE(p_hwfn, "Invalid block_id\n"); @@ -3837,47 +4149,50 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, } if (!dev_data->block_in_reset[block_id]) { - bool eval_mode = GET_FIELD(reg->mode.data, - DBG_MODE_HDR_EVAL_MODE) > 0; - bool mode_match = true; + struct dbg_idle_chk_result_reg_hdr *reg_hdr; + bool wide_bus, eval_mode, mode_match = true; + u16 modes_buf_offset; + u32 addr; + + reg_hdr = (struct dbg_idle_chk_result_reg_hdr *) + (dump_buf + offset); /* Check mode */ + eval_mode = GET_FIELD(reg->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { - u16 modes_buf_offset = - GET_FIELD(reg->mode.data, - DBG_MODE_HDR_MODES_BUF_OFFSET); + modes_buf_offset = + GET_FIELD(reg->mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } - if (mode_match) { - u32 addr = - GET_FIELD(reg->data, - DBG_IDLE_CHK_INFO_REG_ADDRESS); - - /* Write register header */ - struct dbg_idle_chk_result_reg_hdr *reg_hdr = - (struct dbg_idle_chk_result_reg_hdr *) - (dump_buf + offset); - - offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; - hdr->num_dumped_info_regs++; - memset(reg_hdr, 0, sizeof(*reg_hdr)); - reg_hdr->size = reg->size; - SET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, - rule->num_cond_regs + reg_id); - - /* Write register values */ - offset += - qed_grc_dump_addr_range(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - addr, - reg->size); - } + if (!mode_match) + continue; + + addr = GET_FIELD(reg->data, + DBG_IDLE_CHK_INFO_REG_ADDRESS); + wide_bus = GET_FIELD(reg->data, + DBG_IDLE_CHK_INFO_REG_WIDE_BUS); + + /* Write register header */ + offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; + hdr->num_dumped_info_regs++; + memset(reg_hdr, 0, sizeof(*reg_hdr)); + reg_hdr->size = reg->size; + SET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, + rule->num_cond_regs + reg_id); + + /* Write register values */ + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + reg->size, wide_bus); } } @@ -3898,6 +4213,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 reg_id; *num_failing_rules = 0; + for (i = 0; i < num_input_rules; i++) { const struct dbg_idle_chk_cond_reg *cond_regs; const struct dbg_idle_chk_rule *rule; @@ -3920,8 +4236,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, */ for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) { - u32 block_id = GET_FIELD(cond_regs[reg_id].data, - DBG_IDLE_CHK_COND_REG_BLOCK_ID); + u32 block_id = + GET_FIELD(cond_regs[reg_id].data, + DBG_IDLE_CHK_COND_REG_BLOCK_ID); if (block_id >= MAX_BLOCK_ID) { DP_NOTICE(p_hwfn, "Invalid block_id\n"); @@ -3936,48 +4253,47 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, if (!check_rule && dump) continue; - 
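A note on the pattern that runs through all of these hunks: every dump routine is two-pass. Called with dump == false it only accumulates a size in dwords so the caller can allocate, and called again with dump == true it actually writes; both passes must advance offset identically. A minimal standalone C sketch of that contract (illustrative names only, not driver code):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t u32;

/* Returns the section size in dwords; writes data only when dump is set.
 * Callers run a sizing pass (dump == false), allocate, then run the
 * write pass (dump == true); the returned offset must match in both.
 */
static u32 sketch_dump_section(u32 *dump_buf, bool dump)
{
    u32 offset = 0, i;

    for (i = 0; i < 16; i++, offset++)
        if (dump)
            dump_buf[offset] = i; /* the real code reads HW here */

    return offset;
}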
if (!dump) { - u32 entry_dump_size = - qed_idle_chk_dump_failure(p_hwfn, - p_ptt, - dump_buf + offset, - false, - rule->rule_id, - rule, - 0, - NULL); - - offset += num_reg_entries * entry_dump_size; - (*num_failing_rules) += num_reg_entries; - continue; - } - /* Go over all register entries (number of entries is the same * for all condition registers). */ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) { - /* Read current entry of all condition registers */ u32 next_reg_offset = 0; + if (!dump) { + offset += qed_idle_chk_dump_failure(p_hwfn, + p_ptt, + dump_buf + offset, + false, + rule->rule_id, + rule, + entry_id, + NULL); + (*num_failing_rules)++; + break; + } + + /* Read current entry of all condition registers */ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) { const struct dbg_idle_chk_cond_reg *reg = - &cond_regs[reg_id]; + &cond_regs[reg_id]; + u32 padded_entry_size, addr; + bool wide_bus; - /* Find GRC address (if it's a memory,the + /* Find GRC address (if it's a memory, the * address of the specific entry is calculated). */ - u32 addr = + addr = GET_FIELD(reg->data, + DBG_IDLE_CHK_COND_REG_ADDRESS); + wide_bus = GET_FIELD(reg->data, - DBG_IDLE_CHK_COND_REG_ADDRESS); - + DBG_IDLE_CHK_COND_REG_WIDE_BUS); if (reg->num_entries > 1 || reg->start_entry > 0) { - u32 padded_entry_size = - reg->entry_size > 1 ? - roundup_pow_of_two(reg->entry_size) : - 1; - + padded_entry_size = + reg->entry_size > 1 ? + roundup_pow_of_two(reg->entry_size) + : 1; addr += (reg->start_entry + entry_id) * padded_entry_size; } @@ -3991,28 +4307,27 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } next_reg_offset += - qed_grc_dump_addr_range(p_hwfn, - p_ptt, + qed_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, - reg->entry_size); + reg->entry_size, + wide_bus); } - /* Call rule's condition function - a return value of - * true indicates failure. + /* Call rule condition function. + * If returns true, it's a failure. 
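The address math just above is worth a worked example: entries of a multi-entry condition register are laid out with a power-of-two stride, so padded_entry_size rounds entry_size up (a 3-dword entry occupies a 4-dword slot) before the (start_entry + entry_id) multiply. A standalone sketch, open-coding roundup_pow_of_two() rather than using the kernel's <linux/log2.h> helper:

#include <stdint.h>

typedef uint32_t u32;

/* Open-coded stand-in for the kernel's roundup_pow_of_two() */
static u32 roundup_pow2(u32 x)
{
    u32 p = 1;

    while (p < x)
        p <<= 1;
    return p;
}

/* GRC address of one entry: e.g. base 0x100, start_entry 0, entry_id 2,
 * entry_size 3 -> stride 4 -> address 0x108.
 */
static u32 entry_addr(u32 base, u32 start_entry, u32 entry_id, u32 entry_size)
{
    u32 stride = entry_size > 1 ? roundup_pow2(entry_size) : 1;

    return base + (start_entry + entry_id) * stride;
}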
*/ - if ((*cond_arr[rule->cond_id])(cond_reg_values, - imm_values)) { - offset += - qed_idle_chk_dump_failure(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - rule->rule_id, - rule, - entry_id, - cond_reg_values); + if ((*cond_arr[rule->cond_id])(cond_reg_values, + imm_values)) { + offset += qed_idle_chk_dump_failure(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + rule->rule_id, + rule, + entry_id, + cond_reg_values); (*num_failing_rules)++; break; } @@ -4028,8 +4343,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - u32 offset = 0, input_offset = 0, num_failing_rules = 0; - u32 num_failing_rules_offset; + u32 num_failing_rules_offset, offset = 0, input_offset = 0; + u32 num_failing_rules = 0; /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, @@ -4042,29 +4357,29 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1); num_failing_rules_offset = offset; offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0); + while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) { const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr *) &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr [input_offset++]; - bool eval_mode = GET_FIELD(cond_hdr->mode.data, - DBG_MODE_HDR_EVAL_MODE) > 0; - bool mode_match = true; + bool eval_mode, mode_match = true; + u32 curr_failing_rules; + u16 modes_buf_offset; /* Check mode */ + eval_mode = GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { - u16 modes_buf_offset = + modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); - mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } if (mode_match) { - u32 curr_failing_rules; - offset += qed_idle_chk_dump_rule_entries(p_hwfn, p_ptt, @@ -4086,10 +4401,13 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, qed_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules); + /* Dump last section */ + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + return offset; } -/* Finds the meta data image in NVRAM. 
*/ +/* Finds the meta data image in NVRAM */ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 image_type, @@ -4098,16 +4416,16 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, { u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; struct mcp_file_att file_att; + int nvm_result; /* Call NVRAM get file command */ - int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, - p_ptt, - DRV_MSG_CODE_NVM_GET_FILE_ATT, - image_type, - &ret_mcp_resp, - &ret_mcp_param, - &ret_txn_size, - (u32 *)&file_att); + nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, + p_ptt, + DRV_MSG_CODE_NVM_GET_FILE_ATT, + image_type, + &ret_mcp_resp, + &ret_mcp_param, + &ret_txn_size, (u32 *)&file_att); /* Check response */ if (nvm_result || @@ -4117,6 +4435,7 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, /* Update return values */ *nvram_offset_bytes = file_att.nvm_start_addr; *nvram_size_bytes = file_att.len; + DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", @@ -4125,22 +4444,25 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, /* Check alignment */ if (*nvram_size_bytes & 0x3) return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; + return DBG_STATUS_OK; } +/* Reads data from NVRAM */ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 nvram_offset_bytes, u32 nvram_size_bytes, u32 *ret_buf) { - u32 ret_mcp_resp, ret_mcp_param, ret_read_size; - u32 bytes_to_copy, read_offset = 0; + u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; s32 bytes_left = nvram_size_bytes; + u32 read_offset = 0; DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes); + do { bytes_to_copy = (bytes_left > @@ -4155,8 +4477,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, DRV_MB_PARAM_NVM_LEN_SHIFT), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, - (u32 *)((u8 *)ret_buf + - read_offset)) != 0) + (u32 *)((u8 *)ret_buf + read_offset))) return DBG_STATUS_NVRAM_READ_FAILED; /* Check response */ @@ -4172,24 +4493,20 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, } /* Get info on the MCP Trace data in the scratchpad: - * - trace_data_grc_addr - the GRC address of the trace data - * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without - * the header) + * - trace_data_grc_addr (OUT): trace data GRC address in bytes + * - trace_data_size (OUT): trace data size in bytes (without the header) */ static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *trace_data_grc_addr, - u32 *trace_data_size_bytes) + u32 *trace_data_size) { - /* Read MCP trace section offsize structure from MCP scratchpad */ - u32 spad_trace_offsize = qed_rd(p_hwfn, - p_ptt, - MCP_SPAD_TRACE_OFFSIZE_ADDR); - u32 signature; + u32 spad_trace_offsize, signature; - /* Extract MCP trace section GRC address from offsize structure (within - * scratchpad). 
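qed_nvram_read() above drains the image in bounded chunks because one MCP mailbox transaction can only carry a limited payload (the exact limit is elided by the hunk context). A sketch of the loop shape; CHUNK_LEN and read_chunk() are stand-ins for the driver's constant and its qed_mcp_nvm_rd_cmd() call:

#include <stdint.h>
#include <string.h>

typedef uint8_t u8;
typedef uint32_t u32;

#define CHUNK_LEN 128 /* assumed per-transaction limit, not the real value */

/* Stand-in for the MCP mailbox read; returns 0 on success */
static int read_chunk(u32 nvram_offset, u32 len, u8 *dst)
{
    memset(dst, 0, len); /* a real implementation reads NVRAM here */
    return 0;
}

static int nvram_read(u32 nvram_offset, u32 size, u8 *buf)
{
    u32 read_offset = 0, bytes_to_copy;
    int32_t bytes_left = size;

    while (bytes_left > 0) {
        bytes_to_copy = bytes_left > CHUNK_LEN ? CHUNK_LEN : (u32)bytes_left;
        if (read_chunk(nvram_offset + read_offset, bytes_to_copy,
                       buf + read_offset))
            return -1; /* maps to DBG_STATUS_NVRAM_READ_FAILED */
        read_offset += bytes_to_copy;
        bytes_left -= bytes_to_copy;
    }

    return 0;
}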
- */ + /* Read trace section offsize structure from MCP scratchpad */ + spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR); + + /* Extract trace section address from offsize (in scratchpad) */ *trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize); @@ -4197,42 +4514,41 @@ static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn, signature = qed_rd(p_hwfn, p_ptt, *trace_data_grc_addr + offsetof(struct mcp_trace, signature)); + if (signature != MFW_TRACE_SIGNATURE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; /* Read trace size from MCP trace section */ - *trace_data_size_bytes = qed_rd(p_hwfn, - p_ptt, - *trace_data_grc_addr + - offsetof(struct mcp_trace, size)); + *trace_data_size = qed_rd(p_hwfn, + p_ptt, + *trace_data_grc_addr + + offsetof(struct mcp_trace, size)); + return DBG_STATUS_OK; } -/* Reads MCP trace meta data image from NVRAM. - * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from - * file) - * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP - * Trace meta data starts (invalid when loaded from file) - * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data +/* Reads MCP trace meta data image from NVRAM + * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file) + * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when + * loaded from file). + * - trace_meta_size (OUT): size in bytes of the trace meta data. */ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 trace_data_size_bytes, u32 *running_bundle_id, - u32 *trace_meta_offset_bytes, - u32 *trace_meta_size_bytes) + u32 *trace_meta_offset, + u32 *trace_meta_size) { + u32 spad_trace_offsize, nvram_image_type, running_mfw_addr; + /* Read MCP trace section offsize structure from MCP scratchpad */ - u32 spad_trace_offsize = qed_rd(p_hwfn, - p_ptt, - MCP_SPAD_TRACE_OFFSIZE_ADDR); + spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR); /* Find running bundle ID */ - u32 running_mfw_addr = + running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes; - u32 nvram_image_type; - *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr); if (*running_bundle_id > 1) return DBG_STATUS_INVALID_NVRAM_BUNDLE; @@ -4241,40 +4557,33 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn, nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2; - return qed_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, - trace_meta_offset_bytes, - trace_meta_size_bytes); + trace_meta_offset, trace_meta_size); } -/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified - * buffer. 
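The 'offsize' dword read from MCP_SPAD_TRACE_OFFSIZE_ADDR above packs a section's offset and size into one value; SECTION_OFFSET()/QED_SECTION_SIZE() unpack it, and the running bundle ID lives immediately after the trace data. A sketch of that layout, assuming the offset sits in the low 16 bits and the size in the high 16 bits, both counted in dwords; the driver's own macros are authoritative:

#include <stdint.h>

typedef uint32_t u32;

/* Assumed packing; the real SECTION_OFFSET()/QED_SECTION_SIZE() macros
 * define the actual field widths and units.
 */
#define SKETCH_OFFSET(offsize) (((offsize) & 0xffff) * 4)
#define SKETCH_SIZE(offsize)   ((((offsize) >> 16) & 0xffff) * 4)

static u32 trace_data_addr(u32 scratch_base, u32 offsize)
{
    return scratch_base + SKETCH_OFFSET(offsize);
}

/* Mirrors the running_mfw_addr computation above: the bundle ID word
 * follows the trace data section.
 */
static u32 running_bundle_addr(u32 scratch_base, u32 offsize,
                               u32 trace_data_size_bytes)
{
    return scratch_base + SKETCH_OFFSET(offsize) +
           SKETCH_SIZE(offsize) + trace_data_size_bytes;
}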
- */ +/* Reads the MCP Trace meta data from NVRAM into the specified buffer */ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 nvram_offset_in_bytes, u32 size_in_bytes, u32 *buf) { - u8 *byte_buf = (u8 *)buf; - u8 modules_num, i; + u8 modules_num, module_len, i, *byte_buf = (u8 *)buf; + enum dbg_status status; u32 signature; /* Read meta data from NVRAM */ - enum dbg_status status = qed_nvram_read(p_hwfn, - p_ptt, - nvram_offset_in_bytes, - size_in_bytes, - buf); - + status = qed_nvram_read(p_hwfn, + p_ptt, + nvram_offset_in_bytes, size_in_bytes, buf); if (status != DBG_STATUS_OK) return status; /* Extract and check first signature */ signature = qed_read_unaligned_dword(byte_buf); - byte_buf += sizeof(u32); - if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) + byte_buf += sizeof(signature); + if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; /* Extract number of modules */ @@ -4282,16 +4591,16 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, /* Skip all modules */ for (i = 0; i < modules_num; i++) { - u8 module_len = *(byte_buf++); - + module_len = *(byte_buf++); byte_buf += module_len; } /* Extract and check second signature */ signature = qed_read_unaligned_dword(byte_buf); - byte_buf += sizeof(u32); - if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) + byte_buf += sizeof(signature); + if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; + return DBG_STATUS_OK; } @@ -4308,10 +4617,10 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, bool mcp_access; int halted = 0; - mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP); - *num_dumped_dwords = 0; + + mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP); + /* Get trace data info */ status = qed_mcp_trace_get_data_info(p_hwfn, p_ptt, @@ -4328,7 +4637,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, dump, "dump-type", "mcp-trace"); /* Halt MCP while reading from scratchpad so the read data will be - * consistent if halt fails, MCP trace is taken anyway, with a small + * consistent. If halt fails, MCP trace is taken anyway, with a small * risk that it may be corrupt. 
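The halt around the scratchpad read is best effort: when the halt fails the trace is still taken, just with a small risk of tearing, and resume runs only if the halt actually took effect. The control flow as a standalone sketch with placeholder helpers:

#include <stdbool.h>
#include <stdio.h>

/* Placeholders for qed_mcp_halt()/qed_mcp_resume() and the GRC read;
 * both return 0 on success, matching the driver's convention.
 */
static int mcp_halt(void)   { return 0; }
static int mcp_resume(void) { return 0; }
static void read_scratchpad(void) { }

static void dump_trace_data(bool mcp_access)
{
    int halted = 0;

    if (mcp_access) {
        halted = !mcp_halt();
        if (!halted) /* keep going; the trace may be torn */
            fprintf(stderr, "MCP halt failed!\n");
    }

    read_scratchpad();

    /* Resume only if the halt succeeded */
    if (halted && mcp_resume())
        fprintf(stderr, "Failed to resume MCP after halt!\n");
}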
*/ if (dump && mcp_access) { @@ -4339,8 +4648,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Find trace data size */ trace_data_size_dwords = - DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), - BYTES_IN_DWORD); + DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), + BYTES_IN_DWORD); /* Dump trace data section header and param */ offset += qed_dump_section_hdr(dump_buf + offset, @@ -4354,17 +4663,17 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), - trace_data_size_dwords); + trace_data_size_dwords, false); /* Resume MCP (only if halt succeeded) */ - if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0) + if (halted && qed_mcp_resume(p_hwfn, p_ptt)) DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n"); /* Dump trace meta section header */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1); - /* Read trace meta info */ + /* Read trace meta info (trace_meta_size_bytes is dword-aligned) */ if (mcp_access) { status = qed_mcp_trace_get_meta_info(p_hwfn, p_ptt, @@ -4391,6 +4700,9 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, if (status == DBG_STATUS_OK) offset += trace_meta_size_dwords; + /* Dump last section */ + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + *num_dumped_dwords = offset; /* If no mcp access, indicate that the dump doesn't contain the meta @@ -4405,7 +4717,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { - u32 offset = 0, dwords_read, size_param_offset; + u32 dwords_read, size_param_offset, offset = 0; bool fifo_has_data; *num_dumped_dwords = 0; @@ -4417,8 +4729,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo"); - /* Dump fifo data section header and param. The size param is 0 for now, - * and is overwritten after reading the FIFO. + /* Dump fifo data section header and param. The size param is 0 for + * now, and is overwritten after reading the FIFO. */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1); @@ -4430,8 +4742,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, * test how much data is available, except for reading it. */ offset += REG_FIFO_DEPTH_DWORDS; - *num_dumped_dwords = offset; - return DBG_STATUS_OK; + goto out; } fifo_has_data = qed_rd(p_hwfn, p_ptt, @@ -4456,8 +4767,12 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, qed_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read); +out: + /* Dump last section */ + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); *num_dumped_dwords = offset; + return DBG_STATUS_OK; } @@ -4467,7 +4782,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { - u32 offset = 0, dwords_read, size_param_offset; + u32 dwords_read, size_param_offset, offset = 0; bool fifo_has_data; *num_dumped_dwords = 0; @@ -4479,8 +4794,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo"); - /* Dump fifo data section header and param. The size param is 0 for now, - * and is overwritten after reading the FIFO. + /* Dump fifo data section header and param. 
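This reg-fifo dump, and the igu-fifo and protection-override dumps that follow, all defer their 'size' parameter: a FIFO's fill level cannot be known without draining it, so a 0 is written first, its buffer offset remembered, and the real element count patched over it afterwards (the sizing pass instead reserves the worst-case depth). A minimal sketch; FIFO_DEPTH_DWORDS and emit_num_param() are illustrative:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t u32;

#define FIFO_DEPTH_DWORDS 4 /* stand-in depth, not the real constant */

static u32 emit_num_param(u32 *buf, u32 off, bool dump, u32 val)
{
    if (dump)
        buf[off] = val; /* the real helper also emits the param name */
    return 1;           /* dwords consumed by the param */
}

static u32 dump_fifo(u32 *buf, bool dump)
{
    u32 offset = 0, size_param_offset, dwords_read = 0;

    /* Emit a placeholder size of 0; patched after the drain */
    size_param_offset = offset;
    offset += emit_num_param(buf, offset, dump, 0);

    if (!dump) /* sizing pass: reserve the worst case */
        return offset + FIFO_DEPTH_DWORDS;

    while (dwords_read < FIFO_DEPTH_DWORDS) { /* stand-in drain loop */
        buf[offset++] = 0;
        dwords_read++;
    }

    /* Overwrite the placeholder with the real element count */
    emit_num_param(buf, size_param_offset, dump, dwords_read);

    return offset;
}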
The size param is 0 for + * now, and is overwritten after reading the FIFO. */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1); @@ -4492,8 +4807,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, * test how much data is available, except for reading it. */ offset += IGU_FIFO_DEPTH_DWORDS; - *num_dumped_dwords = offset; - return DBG_STATUS_OK; + goto out; } fifo_has_data = qed_rd(p_hwfn, p_ptt, @@ -4519,8 +4833,12 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, qed_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read); +out: + /* Dump last section */ + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); *num_dumped_dwords = offset; + return DBG_STATUS_OK; } @@ -4531,7 +4849,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, bool dump, u32 *num_dumped_dwords) { - u32 offset = 0, size_param_offset, override_window_dwords; + u32 size_param_offset, override_window_dwords, offset = 0; *num_dumped_dwords = 0; @@ -4542,8 +4860,8 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override"); - /* Dump data section header and param. The size param is 0 for now, and - * is overwritten after reading the data. + /* Dump data section header and param. The size param is 0 for now, + * and is overwritten after reading the data. */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1); @@ -4552,8 +4870,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, if (!dump) { offset += PROTECTION_OVERRIDE_DEPTH_DWORDS; - *num_dumped_dwords = offset; - return DBG_STATUS_OK; + goto out; } /* Add override window info to buffer */ @@ -4569,8 +4886,12 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, offset += override_window_dwords; qed_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords); +out: + /* Dump last section */ + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); *num_dumped_dwords = offset; + return DBG_STATUS_OK; } @@ -4593,11 +4914,14 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, 1); offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts"); + + /* Find Storm dump size */ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx; + struct storm_defs *storm = &s_storm_defs[storm_id]; u32 last_list_idx, addr; - if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id]) + if (dev_data->block_in_reset[storm->block_id]) continue; /* Read FW info for the current Storm */ @@ -4606,26 +4930,26 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, asserts = &fw_info.fw_asserts_section; /* Dump FW Asserts section header and params */ - storm_letter_str[0] = s_storm_defs[storm_id].letter; - offset += qed_dump_section_hdr(dump_buf + offset, dump, - "fw_asserts", 2); - offset += qed_dump_str_param(dump_buf + offset, dump, "storm", - storm_letter_str); - offset += qed_dump_num_param(dump_buf + offset, dump, "size", + storm_letter_str[0] = storm->letter; + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "fw_asserts", 2); + offset += qed_dump_str_param(dump_buf + offset, + dump, "storm", storm_letter_str); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "size", asserts->list_element_dword_size); + /* 
Read and dump FW Asserts data */ if (!dump) { offset += asserts->list_element_dword_size; continue; } - /* Read and dump FW Asserts data */ - fw_asserts_section_addr = - s_storm_defs[storm_id].sem_fast_mem_addr + + fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + RAM_LINES_TO_BYTES(asserts->section_ram_line_offset); - next_list_idx_addr = - fw_asserts_section_addr + + next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset); next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); last_list_idx = (next_list_idx > 0 @@ -4638,11 +4962,13 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, - asserts->list_element_dword_size); + asserts->list_element_dword_size, + false); } /* Dump last section */ - offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0); + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + return offset; } @@ -4650,10 +4976,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) { - /* Convert binary data to debug arrays */ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr; u8 buf_id; + /* convert binary data to debug arrays */ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) { s_dbg_arrays[buf_id].ptr = (u32 *)(bin_ptr + buf_array[buf_id].offset); @@ -4682,14 +5008,17 @@ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; + if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; + return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size); } @@ -4702,12 +5031,14 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = qed_dbg_grc_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4724,25 +5055,31 @@ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { - enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + struct idle_chk_data *idle_chk; + enum dbg_status status; + idle_chk = &dev_data->idle_chk; *buf_size = 0; + + status = qed_dbg_dev_init(p_hwfn, p_ptt); if (status != DBG_STATUS_OK) return status; + if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; - if (!dev_data->idle_chk.buf_size_set) { - dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn, - p_ptt, - NULL, false); - dev_data->idle_chk.buf_size_set = true; + + if (!idle_chk->buf_size_set) { + idle_chk->buf_size = qed_idle_chk_dump(p_hwfn, + p_ptt, NULL, false); + idle_chk->buf_size_set = true; } - *buf_size = dev_data->idle_chk.buf_size; + *buf_size = idle_chk->buf_size; + return 
DBG_STATUS_OK; } @@ -4755,12 +5092,14 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4783,8 +5122,10 @@ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; + return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size); } @@ -4797,13 +5138,12 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - /* validate buffer size */ status = - qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - - if (status != DBG_STATUS_OK && - status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED) + qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); + if (status != DBG_STATUS_OK && status != + DBG_STATUS_NVRAM_GET_IMAGE_FAILED) return status; if (buf_size_in_dwords < needed_buf_size_in_dwords) @@ -4829,8 +5169,10 @@ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; + return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size); } @@ -4843,12 +5185,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4871,8 +5215,10 @@ enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; + return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size); } @@ -4885,12 +5231,14 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4913,8 +5261,10 @@ qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; + return qed_protection_override_dump(p_hwfn, p_ptt, NULL, false, buf_size); } @@ -4925,15 +5275,18 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { - u32 needed_buf_size_in_dwords; + u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords; enum dbg_status status; - status = 
qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = + qed_dbg_protection_override_get_dump_buf_size(p_hwfn, + p_ptt, + p_size); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4958,12 +5311,15 @@ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); + *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false); + return DBG_STATUS_OK; } @@ -4973,24 +5329,108 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { - u32 needed_buf_size_in_dwords; + u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = + qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, + p_ptt, + p_size); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return DBG_STATUS_OK; +} + +enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum block_id block_id, + enum dbg_attn_type attn_type, + bool clear_status, + struct dbg_attn_block_result *results) +{ + enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); + u8 reg_idx, num_attn_regs, num_result_regs = 0; + const struct dbg_attn_reg *attn_reg_arr; + + if (status != DBG_STATUS_OK) + return status; + + if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || + !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || + !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) + return DBG_STATUS_DBG_ARRAY_NOT_SET; + + attn_reg_arr = qed_get_block_attn_regs(block_id, + attn_type, &num_attn_regs); + + for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { + const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx]; + struct dbg_attn_reg_result *reg_result; + u32 sts_addr, sts_val; + u16 modes_buf_offset; + bool eval_mode; + + /* Check mode */ + eval_mode = GET_FIELD(reg_data->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + modes_buf_offset = GET_FIELD(reg_data->mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset)) + continue; + + /* Mode match - read attention status register */ + sts_addr = DWORDS_TO_BYTES(clear_status ? 
+ reg_data->sts_clr_address : + GET_FIELD(reg_data->data, + DBG_ATTN_REG_STS_ADDRESS)); + sts_val = qed_rd(p_hwfn, p_ptt, sts_addr); + if (!sts_val) + continue; + + /* Non-zero attention status - add to results */ + reg_result = &results->reg_results[num_result_regs]; + SET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr); + SET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_NUM_REG_ATTN, + GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN)); + reg_result->block_attn_offset = reg_data->block_attn_offset; + reg_result->sts_val = sts_val; + reg_result->mask_val = qed_rd(p_hwfn, + p_ptt, + DWORDS_TO_BYTES + (reg_data->mask_address)); + num_result_regs++; + } + + results->block_id = (u8)block_id; + results->names_offset = + qed_get_block_attn_data(block_id, attn_type)->names_offset; + SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type); + SET_FIELD(results->data, + DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs); + return DBG_STATUS_OK; } /******************************* Data Types **********************************/ +struct block_info { + const char *name; + enum block_id id; +}; + struct mcp_trace_format { u32 data; #define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff @@ -5005,9 +5445,14 @@ struct mcp_trace_format { #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 #define MCP_TRACE_FORMAT_LEN_SHIFT 24 + char *format_str; }; +/* Meta data structure, generated by a perl script during MFW build. therefore, + * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl + * script. + */ struct mcp_trace_meta { u32 modules_num; char **modules; @@ -5015,7 +5460,7 @@ struct mcp_trace_meta { struct mcp_trace_format *formats; }; -/* Reg fifo element */ +/* REG fifo element */ struct reg_fifo_element { u64 data; #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0 @@ -5140,12 +5585,15 @@ struct igu_fifo_addr_data { /******************************** Constants **********************************/ #define MAX_MSG_LEN 1024 + #define MCP_TRACE_MAX_MODULE_LEN 8 #define MCP_TRACE_FORMAT_MAX_PARAMS 3 #define MCP_TRACE_FORMAT_PARAM_WIDTH \ (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT) + #define REG_FIFO_ELEMENT_ADDR_FACTOR 4 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127 + #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4 /********************************* Macros ************************************/ @@ -5154,59 +5602,269 @@ struct igu_fifo_addr_data { /***************************** Constant Arrays *******************************/ +struct user_dbg_array { + const u32 *ptr; + u32 size_in_dwords; +}; + +/* Debug arrays */ +static struct user_dbg_array +s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; + +/* Block names array */ +static struct block_info s_block_info_arr[] = { + {"grc", BLOCK_GRC}, + {"miscs", BLOCK_MISCS}, + {"misc", BLOCK_MISC}, + {"dbu", BLOCK_DBU}, + {"pglue_b", BLOCK_PGLUE_B}, + {"cnig", BLOCK_CNIG}, + {"cpmu", BLOCK_CPMU}, + {"ncsi", BLOCK_NCSI}, + {"opte", BLOCK_OPTE}, + {"bmb", BLOCK_BMB}, + {"pcie", BLOCK_PCIE}, + {"mcp", BLOCK_MCP}, + {"mcp2", BLOCK_MCP2}, + {"pswhst", BLOCK_PSWHST}, + {"pswhst2", BLOCK_PSWHST2}, + {"pswrd", BLOCK_PSWRD}, + {"pswrd2", BLOCK_PSWRD2}, + {"pswwr", BLOCK_PSWWR}, + {"pswwr2", BLOCK_PSWWR2}, + {"pswrq", BLOCK_PSWRQ}, + {"pswrq2", BLOCK_PSWRQ2}, + {"pglcs", BLOCK_PGLCS}, + {"ptu", BLOCK_PTU}, + {"dmae", BLOCK_DMAE}, + {"tcm", BLOCK_TCM}, + {"mcm", BLOCK_MCM}, + {"ucm", BLOCK_UCM}, + {"xcm", BLOCK_XCM}, + {"ycm", BLOCK_YCM}, + {"pcm", BLOCK_PCM}, + {"qm", BLOCK_QM}, 
+ {"tm", BLOCK_TM}, + {"dorq", BLOCK_DORQ}, + {"brb", BLOCK_BRB}, + {"src", BLOCK_SRC}, + {"prs", BLOCK_PRS}, + {"tsdm", BLOCK_TSDM}, + {"msdm", BLOCK_MSDM}, + {"usdm", BLOCK_USDM}, + {"xsdm", BLOCK_XSDM}, + {"ysdm", BLOCK_YSDM}, + {"psdm", BLOCK_PSDM}, + {"tsem", BLOCK_TSEM}, + {"msem", BLOCK_MSEM}, + {"usem", BLOCK_USEM}, + {"xsem", BLOCK_XSEM}, + {"ysem", BLOCK_YSEM}, + {"psem", BLOCK_PSEM}, + {"rss", BLOCK_RSS}, + {"tmld", BLOCK_TMLD}, + {"muld", BLOCK_MULD}, + {"yuld", BLOCK_YULD}, + {"xyld", BLOCK_XYLD}, + {"ptld", BLOCK_PTLD}, + {"ypld", BLOCK_YPLD}, + {"prm", BLOCK_PRM}, + {"pbf_pb1", BLOCK_PBF_PB1}, + {"pbf_pb2", BLOCK_PBF_PB2}, + {"rpb", BLOCK_RPB}, + {"btb", BLOCK_BTB}, + {"pbf", BLOCK_PBF}, + {"rdif", BLOCK_RDIF}, + {"tdif", BLOCK_TDIF}, + {"cdu", BLOCK_CDU}, + {"ccfc", BLOCK_CCFC}, + {"tcfc", BLOCK_TCFC}, + {"igu", BLOCK_IGU}, + {"cau", BLOCK_CAU}, + {"rgfs", BLOCK_RGFS}, + {"rgsrc", BLOCK_RGSRC}, + {"tgfs", BLOCK_TGFS}, + {"tgsrc", BLOCK_TGSRC}, + {"umac", BLOCK_UMAC}, + {"xmac", BLOCK_XMAC}, + {"dbg", BLOCK_DBG}, + {"nig", BLOCK_NIG}, + {"wol", BLOCK_WOL}, + {"bmbn", BLOCK_BMBN}, + {"ipc", BLOCK_IPC}, + {"nwm", BLOCK_NWM}, + {"nws", BLOCK_NWS}, + {"ms", BLOCK_MS}, + {"phy_pcie", BLOCK_PHY_PCIE}, + {"led", BLOCK_LED}, + {"avs_wrap", BLOCK_AVS_WRAP}, + {"misc_aeu", BLOCK_MISC_AEU}, + {"bar0_map", BLOCK_BAR0_MAP} +}; + /* Status string array */ static const char * const s_status_str[] = { + /* DBG_STATUS_OK */ "Operation completed successfully", + + /* DBG_STATUS_APP_VERSION_NOT_SET */ "Debug application version wasn't set", + + /* DBG_STATUS_UNSUPPORTED_APP_VERSION */ "Unsupported debug application version", + + /* DBG_STATUS_DBG_BLOCK_NOT_RESET */ "The debug block wasn't reset since the last recording", + + /* DBG_STATUS_INVALID_ARGS */ "Invalid arguments", + + /* DBG_STATUS_OUTPUT_ALREADY_SET */ "The debug output was already set", + + /* DBG_STATUS_INVALID_PCI_BUF_SIZE */ "Invalid PCI buffer size", + + /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */ "PCI buffer allocation failed", + + /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */ "A PCI buffer wasn't allocated", + + /* DBG_STATUS_TOO_MANY_INPUTS */ "Too many inputs were enabled. 
Enabled less inputs, or set 'unifyInputs' to true", - "GRC/Timestamp input overlap in cycle dword 0", + + /* DBG_STATUS_INPUT_OVERLAP */ + "Overlapping debug bus inputs", + + /* DBG_STATUS_HW_ONLY_RECORDING */ "Cannot record Storm data since the entire recording cycle is used by HW", + + /* DBG_STATUS_STORM_ALREADY_ENABLED */ "The Storm was already enabled", + + /* DBG_STATUS_STORM_NOT_ENABLED */ "The specified Storm wasn't enabled", + + /* DBG_STATUS_BLOCK_ALREADY_ENABLED */ "The block was already enabled", + + /* DBG_STATUS_BLOCK_NOT_ENABLED */ "The specified block wasn't enabled", + + /* DBG_STATUS_NO_INPUT_ENABLED */ "No input was enabled for recording", + + /* DBG_STATUS_NO_FILTER_TRIGGER_64B */ "Filters and triggers are not allowed when recording in 64b units", + + /* DBG_STATUS_FILTER_ALREADY_ENABLED */ "The filter was already enabled", + + /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */ "The trigger was already enabled", + + /* DBG_STATUS_TRIGGER_NOT_ENABLED */ "The trigger wasn't enabled", + + /* DBG_STATUS_CANT_ADD_CONSTRAINT */ "A constraint can be added only after a filter was enabled or a trigger state was added", + + /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */ "Cannot add more than 3 trigger states", + + /* DBG_STATUS_TOO_MANY_CONSTRAINTS */ "Cannot add more than 4 constraints per filter or trigger state", + + /* DBG_STATUS_RECORDING_NOT_STARTED */ "The recording wasn't started", + + /* DBG_STATUS_DATA_DIDNT_TRIGGER */ "A trigger was configured, but it didn't trigger", + + /* DBG_STATUS_NO_DATA_RECORDED */ "No data was recorded", + + /* DBG_STATUS_DUMP_BUF_TOO_SMALL */ "Dump buffer is too small", + + /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */ "Dumped data is not aligned to chunks", + + /* DBG_STATUS_UNKNOWN_CHIP */ "Unknown chip", + + /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */ "Failed allocating virtual memory", + + /* DBG_STATUS_BLOCK_IN_RESET */ "The input block is in reset", + + /* DBG_STATUS_INVALID_TRACE_SIGNATURE */ "Invalid MCP trace signature found in NVRAM", + + /* DBG_STATUS_INVALID_NVRAM_BUNDLE */ "Invalid bundle ID found in NVRAM", + + /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */ "Failed getting NVRAM image", + + /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */ "NVRAM image is not dword-aligned", + + /* DBG_STATUS_NVRAM_READ_FAILED */ "Failed reading from NVRAM", + + /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */ "Idle check parsing failed", + + /* DBG_STATUS_MCP_TRACE_BAD_DATA */ "MCP Trace data is corrupt", - "Dump doesn't contain meta data - it must be provided in an image file", + + /* DBG_STATUS_MCP_TRACE_NO_META */ + "Dump doesn't contain meta data - it must be provided in image file", + + /* DBG_STATUS_MCP_COULD_NOT_HALT */ "Failed to halt MCP", + + /* DBG_STATUS_MCP_COULD_NOT_RESUME */ "Failed to resume MCP after halt", + + /* DBG_STATUS_DMAE_FAILED */ "DMAE transaction failed", + + /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */ "Failed to empty SEMI sync FIFO", + + /* DBG_STATUS_IGU_FIFO_BAD_DATA */ "IGU FIFO data is corrupt", + + /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */ "MCP failed to mask parities", + + /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */ "FW Asserts parsing failed", + + /* DBG_STATUS_REG_FIFO_BAD_DATA */ "GRC FIFO data is corrupt", + + /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */ "Protection Override data is corrupt", + + /* DBG_STATUS_DBG_ARRAY_NOT_SET */ "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)", - "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)" + + /* DBG_STATUS_FILTER_BUG */ + "Debug Bus 
filtering requires the -unifyInputs option (due to a HW bug)", + + /* DBG_STATUS_NON_MATCHING_LINES */ + "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)", + + /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */ + "The selected trigger dword offset wasn't enabled in the recorded HW block", + + /* DBG_STATUS_DBG_BUS_IN_USE */ + "The debug bus is in use" }; /* Idle check severity names array */ @@ -5223,12 +5881,13 @@ static const char * const s_mcp_trace_level_str[] = { "DEBUG" }; -/* Parsing strings */ +/* Access type names array */ static const char * const s_access_strs[] = { "read", "write" }; +/* Privilege type names array */ static const char * const s_privilege_strs[] = { "VF", "PDA", @@ -5236,6 +5895,7 @@ static const char * const s_privilege_strs[] = { "UA" }; +/* Protection type names array */ static const char * const s_protection_strs[] = { "(default)", "(default)", @@ -5247,6 +5907,7 @@ static const char * const s_protection_strs[] = { "override UA" }; +/* Master type names array */ static const char * const s_master_strs[] = { "???", "pxp", @@ -5266,6 +5927,7 @@ static const char * const s_master_strs[] = { "???" }; +/* REG FIFO error messages array */ static const char * const s_reg_fifo_error_strs[] = { "grc timeout", "address doesn't belong to any block", @@ -5274,6 +5936,7 @@ static const char * const s_reg_fifo_error_strs[] = { "path isolation error" }; +/* IGU FIFO sources array */ static const char * const s_igu_fifo_source_strs[] = { "TSTORM", "MSTORM", @@ -5288,6 +5951,7 @@ static const char * const s_igu_fifo_source_strs[] = { "GRC", }; +/* IGU FIFO error messages */ static const char * const s_igu_fifo_error_strs[] = { "no error", "length error", @@ -5308,13 +5972,18 @@ static const char * const s_igu_fifo_error_strs[] = { /* IGU FIFO address data */ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { - {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM}, - {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, - {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA}, + {0x0, 0x101, "MSI-X Memory", NULL, + IGU_ADDR_TYPE_MSIX_MEM}, + {0x102, 0x1ff, "reserved", NULL, + IGU_ADDR_TYPE_RESERVED}, + {0x200, 0x200, "Write PBA[0:63]", NULL, + IGU_ADDR_TYPE_WRITE_PBA}, {0x201, 0x201, "Write PBA[64:127]", "reserved", IGU_ADDR_TYPE_WRITE_PBA}, - {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA}, - {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, + {0x202, 0x202, "Write PBA[128]", "reserved", + IGU_ADDR_TYPE_WRITE_PBA}, + {0x203, 0x3ff, "reserved", NULL, + IGU_ADDR_TYPE_RESERVED}, {0x400, 0x5ef, "Write interrupt acknowledgment", NULL, IGU_ADDR_TYPE_WRITE_INT_ACK}, {0x5f0, 0x5f0, "Attention bits update", NULL, @@ -5331,8 +6000,10 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { IGU_ADDR_TYPE_READ_INT}, {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL, IGU_ADDR_TYPE_READ_INT}, - {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, - {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE} + {0x5f7, 0x5ff, "reserved", NULL, + IGU_ADDR_TYPE_RESERVED}, + {0x600, 0x7ff, "Producer update", NULL, + IGU_ADDR_TYPE_WRITE_PROD_UPDATE} }; /******************************** Variables **********************************/ @@ -5340,28 +6011,12 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { /* MCP Trace meta data - used in case the dump doesn't contain the meta data * (e.g. due to no NVRAM access). 
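The per-entry /* DBG_STATUS_... */ comments added throughout the table above exist because the array is positional: each string must sit at the index of its enum value, and lookup is nothing more than a bounds-checked array access (qed_dbg_get_status_str(), visible just below, does exactly that). A self-contained sketch of the idiom:

#include <stdio.h>

enum sketch_status {
    SKETCH_OK,           /* index 0 */
    SKETCH_INVALID_ARGS, /* index 1 */
    MAX_SKETCH_STATUS
};

/* Positional table: entry order must match the enum above */
static const char * const sketch_status_str[] = {
    /* SKETCH_OK */
    "Operation completed successfully",

    /* SKETCH_INVALID_ARGS */
    "Invalid arguments",
};

static const char *status_str(enum sketch_status status)
{
    return status < MAX_SKETCH_STATUS ? sketch_status_str[status]
                                      : "Invalid debug status";
}

int main(void)
{
    printf("%s\n", status_str(SKETCH_INVALID_ARGS));
    return 0;
}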
*/ -static struct dbg_array s_mcp_trace_meta = { NULL, 0 }; +static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 }; /* Temporary buffer, used for print size calculations */ static char s_temp_buf[MAX_MSG_LEN]; -/***************************** Public Functions *******************************/ - -enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) -{ - /* Convert binary data to debug arrays */ - struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr; - u8 buf_id; - - for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) { - s_dbg_arrays[buf_id].ptr = - (u32 *)(bin_ptr + buf_array[buf_id].offset); - s_dbg_arrays[buf_id].size_in_dwords = - BYTES_TO_DWORDS(buf_array[buf_id].length); - } - - return DBG_STATUS_OK; -} +/**************************** Private Functions ******************************/ static u32 qed_cyclic_add(u32 a, u32 b, u32 size) { @@ -5381,10 +6036,8 @@ static u32 qed_read_from_cyclic_buf(void *buf, u32 *offset, u32 buf_size, u8 num_bytes_to_read) { - u8 *bytes_buf = (u8 *)buf; - u8 *val_ptr; + u8 i, *val_ptr, *bytes_buf = (u8 *)buf; u32 val = 0; - u8 i; val_ptr = (u8 *)&val; @@ -5412,6 +6065,7 @@ static u32 qed_read_dword_from_buf(void *buf, u32 *offset) u32 dword_val = *(u32 *)&((u8 *)buf)[*offset]; *offset += 4; + return dword_val; } @@ -5445,7 +6099,7 @@ static u32 qed_read_param(u32 *dump_buf, const char **param_str_val, u32 *param_num_val) { char *char_buf = (char *)dump_buf; - u32 offset = 0; /* In bytes */ + size_t offset = 0; /* Extract param name */ *param_name = char_buf; @@ -5493,37 +6147,31 @@ static u32 qed_print_section_params(u32 *dump_buf, u32 i, dump_offset = 0, results_offset = 0; for (i = 0; i < num_section_params; i++) { - const char *param_name; - const char *param_str_val; + const char *param_name, *param_str_val; u32 param_num_val = 0; dump_offset += qed_read_param(dump_buf + dump_offset, ¶m_name, ¶m_str_val, ¶m_num_val); + if (param_str_val) - /* String param */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "%s: %s\n", param_name, param_str_val); else if (strcmp(param_name, "fw-timestamp")) - /* Numeric param */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "%s: %d\n", param_name, param_num_val); } - results_offset += - sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n"); + results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), + "\n"); + *num_chars_printed = results_offset; - return dump_offset; -} -const char *qed_dbg_get_status_str(enum dbg_status status) -{ - return (status < - MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status"; + return dump_offset; } /* Parses the idle check rules and returns the number of characters printed. 
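qed_cyclic_add() and qed_read_from_cyclic_buf() here walk the MCP trace ring. Their modular-arithmetic bodies are mostly elided by the hunks, so the versions below are inferred from the names and from how qed_cyclic_sub() is applied to trace_prod/trace_oldest later on; treat this as a sketch rather than the driver's exact code:

#include <stdint.h>

typedef uint8_t u8;
typedef uint32_t u32;

static u32 cyclic_add(u32 a, u32 b, u32 size)
{
    return (a + b) % size;
}

/* Distance from b forward to a on a ring of 'size' bytes */
static u32 cyclic_sub(u32 a, u32 b, u32 size)
{
    return (a + size - b) % size;
}

/* Read up to 4 bytes that may wrap past the end of the ring, assembling
 * them into a dword in host byte order, as the driver's byte loop does.
 */
static u32 read_from_cyclic_buf(const u8 *buf, u32 *offset, u32 buf_size,
                                u8 num_bytes)
{
    u32 val = 0;
    u8 i, *val_ptr = (u8 *)&val;

    for (i = 0; i < num_bytes; i++) {
        val_ptr[i] = buf[*offset];
        *offset = cyclic_add(*offset, 1, buf_size);
    }

    return val;
}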
@@ -5537,7 +6185,10 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, char *results_buf, u32 *num_errors, u32 *num_warnings) { - u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */ + /* Offset in results_buf in bytes */ + u32 results_offset = 0; + + u32 rule_idx; u16 i, j; *num_errors = 0; @@ -5548,16 +6199,15 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, rule_idx++) { const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data; struct dbg_idle_chk_result_hdr *hdr; - const char *parsing_str; + const char *parsing_str, *lsi_msg; u32 parsing_str_offset; - const char *lsi_msg; - u8 curr_reg_id = 0; bool has_fw_msg; + u8 curr_reg_id; hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; rule_parsing_data = (const struct dbg_idle_chk_rule_parsing_data *) - &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA]. + &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA]. ptr[hdr->rule_id]; parsing_str_offset = GET_FIELD(rule_parsing_data->data, @@ -5565,16 +6215,18 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, has_fw_msg = GET_FIELD(rule_parsing_data->data, DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0; - parsing_str = &((const char *) - s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) - [parsing_str_offset]; + parsing_str = + &((const char *) + s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) + [parsing_str_offset]; lsi_msg = parsing_str; + curr_reg_id = 0; if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES) return 0; /* Skip rule header */ - dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4); + dump_buf += BYTES_TO_DWORDS(sizeof(*hdr)); /* Update errors/warnings count */ if (hdr->severity == IDLE_CHK_SEVERITY_ERROR || @@ -5606,19 +6258,19 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, for (i = 0; i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs; i++) { - struct dbg_idle_chk_result_reg_hdr *reg_hdr - = (struct dbg_idle_chk_result_reg_hdr *) - dump_buf; - bool is_mem = - GET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM); - u8 reg_id = - GET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID); + struct dbg_idle_chk_result_reg_hdr *reg_hdr; + bool is_mem; + u8 reg_id; + + reg_hdr = + (struct dbg_idle_chk_result_reg_hdr *)dump_buf; + is_mem = GET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM); + reg_id = GET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID); /* Skip reg header */ - dump_buf += - (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4); + dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr)); /* Skip register names until the required reg_id is * reached. 
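The parser above consumes the dump as a stream of dword-counted records: one rule header, then a header-plus-payload pair per dumped register, advancing dump_buf by BYTES_TO_DWORDS(sizeof(*hdr)) each time. A simplified traversal sketch (the real headers are packed bitfields; these structs are illustrative):

#include <stdint.h>

typedef uint32_t u32;

#define BYTES_TO_DWORDS(n) ((n) / 4)

struct sketch_rule_hdr {
    u32 rule_id;
    u32 num_regs;
};

struct sketch_reg_hdr {
    u32 size; /* payload dwords that follow this header */
};

/* Returns a pointer just past one rule record, i.e. the start of the
 * next rule in the dump.
 */
static u32 *skip_rule(u32 *dump_buf)
{
    struct sketch_rule_hdr *hdr = (struct sketch_rule_hdr *)dump_buf;
    u32 i;

    dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
    for (i = 0; i < hdr->num_regs; i++) {
        struct sketch_reg_hdr *reg = (struct sketch_reg_hdr *)dump_buf;

        dump_buf += BYTES_TO_DWORDS(sizeof(*reg)) + reg->size;
    }

    return dump_buf;
}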
@@ -5660,6 +6312,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, /* Check if end of dump buffer was exceeded */ if (dump_buf > dump_buf_end) return 0; + return results_offset; } @@ -5680,13 +6333,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, const char *section_name, *param_name, *param_str_val; u32 *dump_buf_end = dump_buf + num_dumped_dwords; u32 num_section_params = 0, num_rules; - u32 results_offset = 0; /* Offset in results_buf in bytes */ + + /* Offset in results_buf in bytes */ + u32 results_offset = 0; *parsed_results_bytes = 0; *num_errors = 0; *num_warnings = 0; + - if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr || - !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr) + if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr || + !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; /* Read global_params section */ @@ -5705,10 +6361,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, &section_name, &num_section_params); if (strcmp(section_name, "idle_chk") || num_section_params != 1) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; - dump_buf += qed_read_param(dump_buf, &param_name, &param_str_val, &num_rules); - if (strcmp(param_name, "num_rules") != 0) + if (strcmp(param_name, "num_rules")) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; if (num_rules) { @@ -5728,7 +6383,7 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset : NULL, num_errors, num_warnings); results_offset += rules_print_size; - if (rules_print_size == 0) + if (!rules_print_size) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; /* Print LSI output */
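For orientation, qed_parse_idle_chk_dump() walks a dump image laid out roughly as below. This is reconstructed from the parsing order visible in these hunks, not from the dumper itself:

/* idle-chk dump image, as consumed by qed_parse_idle_chk_dump():
 *
 *   "global_params" section - name/value parameters
 *   "idle_chk" section      - one "num_rules" param = N
 *   N rule results          - dbg_idle_chk_result_hdr + reg headers
 *   "idle_chk" section      - one "num_rules" param = M
 *   M LSI rule results      - same format, printed as LSI output
 */
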
@@ -5745,64 +6400,33 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset : NULL, num_errors, num_warnings); results_offset += rules_print_size; - if (rules_print_size == 0) + if (!rules_print_size) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; } /* Print errors/warnings count */ - if (*num_errors) { + if (*num_errors) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "\nIdle Check failed!!! (with %d errors and %d warnings)\n", *num_errors, *num_warnings); - } else if (*num_warnings) { + else if (*num_warnings) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), - "\nIdle Check completed successfuly (with %d warnings)\n", + "\nIdle Check completed successfully (with %d warnings)\n", *num_warnings); - } else { + else results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), - "\nIdle Check completed successfuly\n"); - } + "\nIdle Check completed successfully\n"); /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; - return DBG_STATUS_OK; -} -enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) -{ - u32 num_errors, num_warnings; - - return qed_parse_idle_chk_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, - results_buf_size, - &num_errors, &num_warnings); -} - -enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf, - u32 *num_errors, u32 *num_warnings) -{ - u32 parsed_buf_size; - - return qed_parse_idle_chk_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, - &parsed_buf_size, - num_errors, num_warnings); + return DBG_STATUS_OK; } /* Frees the specified MCP Trace meta data */ @@ -5841,12 +6465,10 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, /* Read first signature */ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset); - if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) + if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; - /* Read number of modules and allocate memory for all the modules - * pointers. - */ + /* Read no. of modules and allocate memory for their pointers */ meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset); meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL); if (!meta->modules) @@ -5871,7 +6493,7 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, /* Read second signature */ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset); - if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) + if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; /* Read number of formats and allocate memory for all formats */ @@ -5919,10 +6541,10 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, char *results_buf, u32 *parsed_results_bytes) { - u32 results_offset = 0, param_mask, param_shift, param_num_val; - u32 num_section_params, offset, end_offset, bytes_left; + u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords; + u32 param_mask, param_shift, param_num_val, num_section_params; const char *section_name, *param_name, *param_str_val; - u32 trace_data_dwords, trace_meta_dwords; + u32 offset, results_offset = 0; struct mcp_trace_meta meta; struct mcp_trace *trace; enum dbg_status status; @@ -5955,7 +6577,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Prepare trace info */ trace = (struct mcp_trace *)dump_buf; - trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace); + trace_buf = (u8 *)dump_buf + sizeof(*trace); offset = trace->trace_oldest; end_offset = trace->trace_prod; bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
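trace_oldest and trace_prod above are offsets into a cyclic buffer, so bytes_left comes from modular arithmetic. qed_cyclic_add() is declared near the top of this file; assuming both helpers have the obvious implementation:

static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
	return (a + b) % size;
}

static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
{
	return (a + size - b) % size;
}
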
@@ -5968,7 +6590,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, return DBG_STATUS_MCP_TRACE_BAD_DATA; dump_buf += qed_read_param(dump_buf, &param_name, &param_str_val, &param_num_val); - if (strcmp(param_name, "size") != 0) + if (strcmp(param_name, "size")) return DBG_STATUS_MCP_TRACE_BAD_DATA; trace_meta_dwords = param_num_val; @@ -6028,6 +6650,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, } format_ptr = &meta.formats[format_idx]; + for (i = 0, param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT; @@ -6050,6 +6673,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, */ if (param_size == 3) param_size = 4; + if (bytes_left < param_size) { status = DBG_STATUS_MCP_TRACE_BAD_DATA; goto free_mem; @@ -6059,13 +6683,14 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, &offset, trace->size, param_size); + bytes_left -= param_size; } format_level = (u8)((format_ptr->data & MCP_TRACE_FORMAT_LEVEL_MASK) >> - MCP_TRACE_FORMAT_LEVEL_SHIFT); + MCP_TRACE_FORMAT_LEVEL_SHIFT); format_module = (u8)((format_ptr->data & MCP_TRACE_FORMAT_MODULE_MASK) >> @@ -6094,30 +6719,6 @@ free_mem: return status; } -enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) -{ - return qed_parse_mcp_trace_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); -} - -enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf) -{ - u32 parsed_buf_size; - - return qed_parse_mcp_trace_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, &parsed_buf_size); -} - /* Parses a Reg FIFO dump buffer. * If result_buf is not NULL, the Reg FIFO results are printed to it. * In any case, the required results buffer size is assigned to @@ -6130,10 +6731,11 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, char *results_buf, u32 *parsed_results_bytes) { - u32 results_offset = 0, param_num_val, num_section_params, num_elements; const char *section_name, *param_name, *param_str_val; + u32 param_num_val, num_section_params, num_elements; struct reg_fifo_element *elements; u8 i, j, err_val, vf_val; + u32 results_offset = 0; char vf_str[4]; /* Read global_params section */ @@ -6179,17 +6781,17 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ", elements[i].data, (u32)GET_FIELD(elements[i].data, - REG_FIFO_ELEMENT_ADDRESS) * - REG_FIFO_ELEMENT_ADDR_FACTOR, - s_access_strs[GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_ADDRESS) * + REG_FIFO_ELEMENT_ADDR_FACTOR, + s_access_strs[GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ACCESS)], (u32)GET_FIELD(elements[i].data, - REG_FIFO_ELEMENT_PF), vf_str, + REG_FIFO_ELEMENT_PF), + vf_str, (u32)GET_FIELD(elements[i].data, - REG_FIFO_ELEMENT_PORT), - s_privilege_strs[GET_FIELD(elements[i].
- data, - REG_FIFO_ELEMENT_PRIVILEGE)], + REG_FIFO_ELEMENT_PORT), + s_privilege_strs[GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_PRIVILEGE)], s_protection_strs[GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_PROTECTION)], s_master_strs[GET_FIELD(elements[i].data, @@ -6201,18 +6803,18 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, REG_FIFO_ELEMENT_ERROR); j < ARRAY_SIZE(s_reg_fifo_error_strs); j++, err_val >>= 1) { - if (!(err_val & 0x1)) - continue; - if (err_printed) + if (err_val & 0x1) { + if (err_printed) + results_offset += + sprintf(qed_get_buf_ptr + (results_buf, + results_offset), ", "); results_offset += - sprintf(qed_get_buf_ptr(results_buf, - results_offset), - ", "); - results_offset += - sprintf(qed_get_buf_ptr(results_buf, - results_offset), "%s", - s_reg_fifo_error_strs[j]); - err_printed = true; + sprintf(qed_get_buf_ptr + (results_buf, results_offset), "%s", + s_reg_fifo_error_strs[j]); + err_printed = true; + } } results_offset += @@ -6225,31 +6827,140 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; + return DBG_STATUS_OK; } -enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) +static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element + *element, char + *results_buf, + u32 *results_offset, + u32 *parsed_results_bytes) { - return qed_parse_reg_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); -} + const struct igu_fifo_addr_data *found_addr = NULL; + u8 source, err_type, i, is_cleanup; + char parsed_addr_data[32]; + char parsed_wr_data[256]; + u32 wr_data, prod_cons; + bool is_wr_cmd, is_pf; + u16 cmd_addr; + u64 dword12; -enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf) -{ - u32 parsed_buf_size; + /* Dword12 (dword index 1 and 2) contains bits 32..95 of the + * FIFO element. 
+ */ + dword12 = ((u64)element->dword2 << 32) | element->dword1; + is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD); + is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF); + cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR); + source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE); + err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE); + + if (source >= ARRAY_SIZE(s_igu_fifo_source_strs)) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs)) + return DBG_STATUS_IGU_FIFO_BAD_DATA; - return qed_parse_reg_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, &parsed_buf_size); + /* Find address data */ + for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) { + const struct igu_fifo_addr_data *curr_addr = + &s_igu_fifo_addr_data[i]; + + if (cmd_addr >= curr_addr->start_addr && cmd_addr <= + curr_addr->end_addr) + found_addr = curr_addr; + } + + if (!found_addr) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + + /* Prepare parsed address data */ + switch (found_addr->type) { + case IGU_ADDR_TYPE_MSIX_MEM: + sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2); + break; + case IGU_ADDR_TYPE_WRITE_INT_ACK: + case IGU_ADDR_TYPE_WRITE_PROD_UPDATE: + sprintf(parsed_addr_data, + " SB = 0x%x", cmd_addr - found_addr->start_addr); + break; + default: + parsed_addr_data[0] = '\0'; + } + + if (!is_wr_cmd) { + parsed_wr_data[0] = '\0'; + goto out; + } + + /* Prepare parsed write data */ + wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA); + prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS); + is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE); + + if (source == IGU_SRC_ATTN) { + sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons); + } else { + if (is_cleanup) { + u8 cleanup_val, cleanup_type; + + cleanup_val = + GET_FIELD(wr_data, + IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL); + cleanup_type = + GET_FIELD(wr_data, + IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE); + + sprintf(parsed_wr_data, + "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ", + cleanup_val ? "set" : "clear", + cleanup_type); + } else { + u8 update_flag, en_dis_int_for_sb, segment; + u8 timer_mask; + + update_flag = GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_UPDATE_FLAG); + en_dis_int_for_sb = + GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB); + segment = GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_SEGMENT); + timer_mask = GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_TIMER_MASK); + + sprintf(parsed_wr_data, + "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ", + prod_cons, + update_flag ? "update" : "nop", + en_dis_int_for_sb + ? (en_dis_int_for_sb == 1 ? "disable" : "nop") + : "enable", + segment ? "attn" : "regular", + timer_mask); + } + } +out: + /* Add parsed element to parsed buffer */ + *results_offset += sprintf(qed_get_buf_ptr(results_buf, + *results_offset), + "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n", + element->dword2, element->dword1, + element->dword0, + is_pf ? "pf" : "vf", + GET_FIELD(element->dword0, + IGU_FIFO_ELEMENT_DWORD0_FID), + s_igu_fifo_source_strs[source], + is_wr_cmd ? "wr" : "rd", + cmd_addr, + (!is_pf && found_addr->vf_desc) + ? found_addr->vf_desc + : found_addr->desc, + parsed_addr_data, + parsed_wr_data, + s_igu_fifo_error_strs[err_type]); + + return DBG_STATUS_OK; } /* Parses an IGU FIFO dump buffer. 
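qed_parse_igu_fifo_element() leans entirely on GET_FIELD() to crack the element dwords. The *_MASK values paired with it in qed (see the TSTORM_* defines later in this patch) are right-aligned rather than pre-shifted, so the helper presumably shifts first and masks second, along these lines:

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))
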
@@ -6264,12 +6975,12 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, char *results_buf, u32 *parsed_results_bytes) { - u32 results_offset = 0, param_num_val, num_section_params, num_elements; const char *section_name, *param_name, *param_str_val; + u32 param_num_val, num_section_params, num_elements; struct igu_fifo_element *elements; - char parsed_addr_data[32]; - char parsed_wr_data[256]; - u8 i, j; + enum dbg_status status; + u32 results_offset = 0; + u8 i; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, @@ -6298,118 +7009,12 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, /* Decode elements */ for (i = 0; i < num_elements; i++) { - /* dword12 (dword index 1 and 2) contains bits 32..95 of the - * FIFO element. - */ - u64 dword12 = - ((u64)elements[i].dword2 << 32) | elements[i].dword1; - bool is_wr_cmd = GET_FIELD(dword12, - IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD); - bool is_pf = GET_FIELD(elements[i].dword0, - IGU_FIFO_ELEMENT_DWORD0_IS_PF); - u16 cmd_addr = GET_FIELD(elements[i].dword0, - IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR); - u8 source = GET_FIELD(elements[i].dword0, - IGU_FIFO_ELEMENT_DWORD0_SOURCE); - u8 err_type = GET_FIELD(elements[i].dword0, - IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE); - const struct igu_fifo_addr_data *addr_data = NULL; - - if (source >= ARRAY_SIZE(s_igu_fifo_source_strs)) - return DBG_STATUS_IGU_FIFO_BAD_DATA; - if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs)) - return DBG_STATUS_IGU_FIFO_BAD_DATA; - - /* Find address data */ - for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data; - j++) - if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr && - cmd_addr <= s_igu_fifo_addr_data[j].end_addr) - addr_data = &s_igu_fifo_addr_data[j]; - if (!addr_data) - return DBG_STATUS_IGU_FIFO_BAD_DATA; - - /* Prepare parsed address data */ - switch (addr_data->type) { - case IGU_ADDR_TYPE_MSIX_MEM: - sprintf(parsed_addr_data, - " vector_num=0x%x", cmd_addr / 2); - break; - case IGU_ADDR_TYPE_WRITE_INT_ACK: - case IGU_ADDR_TYPE_WRITE_PROD_UPDATE: - sprintf(parsed_addr_data, - " SB=0x%x", cmd_addr - addr_data->start_addr); - break; - default: - parsed_addr_data[0] = '\0'; - } - - /* Prepare parsed write data */ - if (is_wr_cmd) { - u32 wr_data = GET_FIELD(dword12, - IGU_FIFO_ELEMENT_DWORD12_WR_DATA); - u32 prod_cons = GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_PROD_CONS); - u8 is_cleanup = GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_CMD_TYPE); - - if (source == IGU_SRC_ATTN) { - sprintf(parsed_wr_data, - "prod: 0x%x, ", prod_cons); - } else { - if (is_cleanup) { - u8 cleanup_val = GET_FIELD(wr_data, - IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL); - u8 cleanup_type = GET_FIELD(wr_data, - IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE); - - sprintf(parsed_wr_data, - "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ", - cleanup_val ? "set" : "clear", - cleanup_type); - } else { - u8 update_flag = GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_UPDATE_FLAG); - u8 en_dis_int_for_sb = - GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB); - u8 segment = GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_SEGMENT); - u8 timer_mask = GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_TIMER_MASK); - - sprintf(parsed_wr_data, - "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ", - prod_cons, - update_flag ? "update" : "nop", - en_dis_int_for_sb - ? (en_dis_int_for_sb == - 1 ? "disable" : "nop") : - "enable", - segment ? 
"attn" : "regular", - timer_mask); - } - } - } else { - parsed_wr_data[0] = '\0'; - } - - /* Add parsed element to parsed buffer */ - results_offset += - sprintf(qed_get_buf_ptr(results_buf, - results_offset), - "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n", - elements[i].dword2, elements[i].dword1, - elements[i].dword0, - is_pf ? "pf" : "vf", - GET_FIELD(elements[i].dword0, - IGU_FIFO_ELEMENT_DWORD0_FID), - s_igu_fifo_source_strs[source], - is_wr_cmd ? "wr" : "rd", cmd_addr, - (!is_pf && addr_data->vf_desc) - ? addr_data->vf_desc : addr_data->desc, - parsed_addr_data, parsed_wr_data, - s_igu_fifo_error_strs[err_type]); + status = qed_parse_igu_fifo_element(&elements[i], + results_buf, + &results_offset, + parsed_results_bytes); + if (status != DBG_STATUS_OK) + return status; } results_offset += sprintf(qed_get_buf_ptr(results_buf, @@ -6418,31 +7023,8 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; - return DBG_STATUS_OK; -} - -enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) -{ - return qed_parse_igu_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); -} - -enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf) -{ - u32 parsed_buf_size; - return qed_parse_igu_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, &parsed_buf_size); + return DBG_STATUS_OK; } static enum dbg_status @@ -6452,9 +7034,10 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, char *results_buf, u32 *parsed_results_bytes) { - u32 results_offset = 0, param_num_val, num_section_params, num_elements; const char *section_name, *param_name, *param_str_val; + u32 param_num_val, num_section_params, num_elements; struct protection_override_element *elements; + u32 results_offset = 0; u8 i; /* Read global_params section */ @@ -6477,7 +7060,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, ¶m_name, ¶m_str_val, ¶m_num_val); if (strcmp(param_name, "size")) return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; - if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0) + if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS) return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS; elements = (struct protection_override_element *)dump_buf; @@ -6486,7 +7069,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, for (i = 0; i < num_elements; i++) { u32 address = GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_ADDRESS) * - PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR; + PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR; results_offset += sprintf(qed_get_buf_ptr(results_buf, @@ -6512,33 +7095,8 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; - return DBG_STATUS_OK; -} -enum dbg_status -qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) -{ - return qed_parse_protection_override_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); -} - -enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf) -{ - u32 
@@ -6512,33 +7095,8 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; - return DBG_STATUS_OK; -} -enum dbg_status -qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) -{ - return qed_parse_protection_override_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); -} - -enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf) -{ - u32 parsed_buf_size; - - return qed_parse_protection_override_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, - &parsed_buf_size); + return DBG_STATUS_OK; } /* Parses a FW Asserts dump buffer. @@ -6553,7 +7111,7 @@ static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn, char *results_buf, u32 *parsed_results_bytes) { - u32 results_offset = 0, num_section_params, param_num_val, i; + u32 num_section_params, param_num_val, i, results_offset = 0; const char *param_name, *param_str_val, *section_name; bool last_section_found = false; @@ -6569,54 +7127,216 @@ static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn, dump_buf += qed_print_section_params(dump_buf, num_section_params, results_buf, &results_offset); - while (!last_section_found) { - const char *storm_letter = NULL; - u32 storm_dump_size = 0; + while (!last_section_found) { dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); - if (!strcmp(section_name, "last")) { - last_section_found = true; - continue; - } else if (strcmp(section_name, "fw_asserts")) { - return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; - } + if (!strcmp(section_name, "fw_asserts")) { + /* Extract params */ + const char *storm_letter = NULL; + u32 storm_dump_size = 0; + + for (i = 0; i < num_section_params; i++) { + dump_buf += qed_read_param(dump_buf, + &param_name, + &param_str_val, + &param_num_val); + if (!strcmp(param_name, "storm")) + storm_letter = param_str_val; + else if (!strcmp(param_name, "size")) + storm_dump_size = param_num_val; + else + return + DBG_STATUS_FW_ASSERTS_PARSE_FAILED; + } - /* Extract params */ - for (i = 0; i < num_section_params; i++) { - dump_buf += qed_read_param(dump_buf, - &param_name, - &param_str_val, - &param_num_val); - if (!strcmp(param_name, "storm")) - storm_letter = param_str_val; - else if (!strcmp(param_name, "size")) - storm_dump_size = param_num_val; - else + if (!storm_letter || !storm_dump_size) return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; - } - if (!storm_letter || !storm_dump_size) - return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; - - /* Print data */ - results_offset += sprintf(qed_get_buf_ptr(results_buf, - results_offset), - "\n%sSTORM_ASSERT: size=%d\n", - storm_letter, storm_dump_size); - for (i = 0; i < storm_dump_size; i++, dump_buf++) + /* Print data */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), - "%08x\n", *dump_buf); + "\n%sSTORM_ASSERT: size=%d\n", + storm_letter, storm_dump_size); + for (i = 0; i < storm_dump_size; i++, dump_buf++) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "%08x\n", *dump_buf); + } else if (!strcmp(section_name, "last")) { + last_section_found = true; + } else { + return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; + } } /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; + + return DBG_STATUS_OK; +} + +/***************************** Public Functions *******************************/ + +enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) +{ + struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr; + u8 buf_id; + + /* Convert binary data to debug arrays */ + for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) { + s_user_dbg_arrays[buf_id].ptr = + (u32 *)(bin_ptr + buf_array[buf_id].offset); + s_user_dbg_arrays[buf_id].size_in_dwords = + BYTES_TO_DWORDS(buf_array[buf_id].length); + } + + return DBG_STATUS_OK; +} + +const char *qed_dbg_get_status_str(enum dbg_status status) +{ + return (status <
s_status_str[status] : "Invalid debug status"; +} + +enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + u32 num_errors, num_warnings; + + return qed_parse_idle_chk_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, + results_buf_size, + &num_errors, &num_warnings); +} + +enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf, + u32 *num_errors, u32 *num_warnings) +{ + u32 parsed_buf_size; + + return qed_parse_idle_chk_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, + &parsed_buf_size, + num_errors, num_warnings); +} + +void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size) +{ + s_mcp_trace_meta.ptr = data; + s_mcp_trace_meta.size_in_dwords = size; +} + +enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_mcp_trace_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, results_buf_size); +} + +enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_mcp_trace_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, &parsed_buf_size); +} + +enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_reg_fifo_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, results_buf_size); +} + +enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_reg_fifo_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, &parsed_buf_size); +} + +enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_igu_fifo_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, results_buf_size); +} + +enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_igu_fifo_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, &parsed_buf_size); +} + +enum dbg_status +qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_protection_override_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, results_buf_size); +} + +enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_protection_override_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, + &parsed_buf_size); +} + enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, @@ -6641,6 +7361,88 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, results_buf, &parsed_buf_size); } +enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results) +{ + struct user_dbg_array *block_attn, *pstrings; + const u32 *block_attn_name_offsets; + enum dbg_attn_type attn_type; + const char *block_name; + u8 num_regs, i, j; + + num_regs = GET_FIELD(results->data, 
DBG_ATTN_BLOCK_RESULT_NUM_REGS); + attn_type = (enum dbg_attn_type) + GET_FIELD(results->data, + DBG_ATTN_BLOCK_RESULT_ATTN_TYPE); + block_name = s_block_info_arr[results->block_id].name; + + if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr || + !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr || + !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) + return DBG_STATUS_DBG_ARRAY_NOT_SET; + + block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS]; + block_attn_name_offsets = &block_attn->ptr[results->names_offset]; + + /* Go over registers with a non-zero attention status */ + for (i = 0; i < num_regs; i++) { + struct dbg_attn_reg_result *reg_result; + struct dbg_attn_bit_mapping *mapping; + u8 num_reg_attn, bit_idx = 0; + + reg_result = &results->reg_results[i]; + num_reg_attn = GET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_NUM_REG_ATTN); + block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES]; + mapping = &((struct dbg_attn_bit_mapping *) + block_attn->ptr)[reg_result->block_attn_offset]; + + pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS]; + + /* Go over attention status bits */ + for (j = 0; j < num_reg_attn; j++) { + u16 attn_idx_val = GET_FIELD(mapping[j].data, + DBG_ATTN_BIT_MAPPING_VAL); + const char *attn_name, *attn_type_str, *masked_str; + u32 name_offset, sts_addr; + + /* Check if bit mask should be advanced (due to unused + * bits). + */ + if (GET_FIELD(mapping[j].data, + DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) { + bit_idx += (u8)attn_idx_val; + continue; + } + + /* Check current bit index */ + if (!(reg_result->sts_val & BIT(bit_idx))) { + bit_idx++; + continue; + } + + /* Find attention name */ + name_offset = block_attn_name_offsets[attn_idx_val]; + attn_name = &((const char *) + pstrings->ptr)[name_offset]; + attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ? + "Interrupt" : "Parity"; + masked_str = reg_result->mask_val & BIT(bit_idx) ? 
+ " [masked]" : ""; + sts_addr = GET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_STS_ADDRESS); + DP_NOTICE(p_hwfn, + "%s (%s) : %s [address 0x%08x, bit %d]%s\n", + block_name, attn_type_str, attn_name, + sts_addr, bit_idx, masked_str); + + bit_idx++; + } + } + + return DBG_STATUS_OK; +} + /* Wrapper for unifying the idle_chk and mcp_trace api */ static enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h index f872d7324814..ea1cc8eaa125 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.h +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -20,6 +20,9 @@ enum qed_dbg_features { DBG_FEATURE_NUM }; +/* Forward Declaration */ +struct qed_dev; + int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); int qed_dbg_grc_size(struct qed_dev *cdev); int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 463927f17032..6c87bed13bd2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -62,19 +62,13 @@ #include "qed_sp.h" #include "qed_sriov.h" #include "qed_vf.h" -#include "qed_roce.h" +#include "qed_rdma.h" static DEFINE_SPINLOCK(qm_lock); #define QED_MIN_DPIS (4) #define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) -/* API common to all protocols */ -enum BAR_ID { - BAR_ID_0, /* used for GRC */ - BAR_ID_1 /* Used for doorbells */ -}; - static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum BAR_ID bar_id) { @@ -83,7 +77,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, u32 val; if (IS_VF(p_hwfn->cdev)) - return 1 << 17; + return qed_vf_hw_bar_size(p_hwfn, bar_id); val = qed_rd(p_hwfn, p_ptt, bar_reg); if (val) @@ -154,13 +148,17 @@ void qed_resc_free(struct qed_dev *cdev) { int i; - if (IS_VF(cdev)) + if (IS_VF(cdev)) { + for_each_hwfn(cdev, i) + qed_l2_free(&cdev->hwfns[i]); return; + } kfree(cdev->fw_data); cdev->fw_data = NULL; kfree(cdev->reset_stats); + cdev->reset_stats = NULL; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -168,20 +166,21 @@ void qed_resc_free(struct qed_dev *cdev) qed_cxt_mngr_free(p_hwfn); qed_qm_info_free(p_hwfn); qed_spq_free(p_hwfn); - qed_eq_free(p_hwfn, p_hwfn->p_eq); - qed_consq_free(p_hwfn, p_hwfn->p_consq); + qed_eq_free(p_hwfn); + qed_consq_free(p_hwfn); qed_int_free(p_hwfn); #ifdef CONFIG_QED_LL2 - qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info); + qed_ll2_free(p_hwfn); #endif if (p_hwfn->hw_info.personality == QED_PCI_FCOE) - qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info); + qed_fcoe_free(p_hwfn); if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { - qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info); - qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info); + qed_iscsi_free(p_hwfn); + qed_ooo_free(p_hwfn); } qed_iov_free(p_hwfn); + qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); } @@ -217,6 +216,10 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) case QED_PCI_ETH_ROCE: flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; break; + case QED_PCI_ETH_IWARP: + flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | + PQ_FLAGS_OFLD; + break; default: DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality); @@ -299,7 +302,7 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn) qm_info->vport_wfq_en = 1; /* TC config is different for AH 4 port */ - four_port = 
p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2; + four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2; /* in AH 4 port we have fewer TCs per port */ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : @@ -328,7 +331,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn) static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn) { /* Initialize qm port parameters */ - u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines; + u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine; /* indicate how ooo and high pri traffic is dealt with */ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? @@ -692,7 +695,7 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn)); /* port table */ - for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) { + for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) { port = &(qm_info->qm_port_params[i]); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, @@ -822,7 +825,7 @@ static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn) goto alloc_err; qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * - p_hwfn->cdev->num_ports_in_engines, + p_hwfn->cdev->num_ports_in_engine, GFP_KERNEL); if (!qm_info->qm_port_params) goto alloc_err; @@ -843,20 +846,18 @@ alloc_err: int qed_resc_alloc(struct qed_dev *cdev) { - struct qed_iscsi_info *p_iscsi_info; - struct qed_fcoe_info *p_fcoe_info; - struct qed_ooo_info *p_ooo_info; -#ifdef CONFIG_QED_LL2 - struct qed_ll2_info *p_ll2_info; -#endif u32 rdma_tasks, excess_tasks; - struct qed_consq *p_consq; - struct qed_eq *p_eq; u32 line_count; int i, rc = 0; - if (IS_VF(cdev)) + if (IS_VF(cdev)) { + for_each_hwfn(cdev, i) { + rc = qed_l2_alloc(&cdev->hwfns[i]); + if (rc) + return rc; + } return rc; + } cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); if (!cdev->fw_data) @@ -939,9 +940,16 @@ int qed_resc_alloc(struct qed_dev *cdev) /* EQ */ n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); - if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { + enum protocol_type rdma_proto; + + if (QED_IS_ROCE_PERSONALITY(p_hwfn)) + rdma_proto = PROTOCOLID_ROCE; + else + rdma_proto = PROTOCOLID_IWARP; + num_cons = qed_cxt_get_proto_cid_count(p_hwfn, - PROTOCOLID_ROCE, + rdma_proto, NULL) * 2; n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { @@ -956,45 +964,42 @@ int qed_resc_alloc(struct qed_dev *cdev) DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements. 
The maximum of a u16 chain is 0x%x\n", n_eqes, 0xFFFF); - rc = -EINVAL; - goto alloc_err; + goto alloc_no_mem; } - p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes); - if (!p_eq) - goto alloc_no_mem; - p_hwfn->p_eq = p_eq; + rc = qed_eq_alloc(p_hwfn, (u16) n_eqes); + if (rc) + goto alloc_err; - p_consq = qed_consq_alloc(p_hwfn); - if (!p_consq) - goto alloc_no_mem; - p_hwfn->p_consq = p_consq; + rc = qed_consq_alloc(p_hwfn); + if (rc) + goto alloc_err; + + rc = qed_l2_alloc(p_hwfn); + if (rc) + goto alloc_err; #ifdef CONFIG_QED_LL2 if (p_hwfn->using_ll2) { - p_ll2_info = qed_ll2_alloc(p_hwfn); - if (!p_ll2_info) - goto alloc_no_mem; - p_hwfn->p_ll2_info = p_ll2_info; + rc = qed_ll2_alloc(p_hwfn); + if (rc) + goto alloc_err; } #endif if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { - p_fcoe_info = qed_fcoe_alloc(p_hwfn); - if (!p_fcoe_info) - goto alloc_no_mem; - p_hwfn->p_fcoe_info = p_fcoe_info; + rc = qed_fcoe_alloc(p_hwfn); + if (rc) + goto alloc_err; } if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { - p_iscsi_info = qed_iscsi_alloc(p_hwfn); - if (!p_iscsi_info) - goto alloc_no_mem; - p_hwfn->p_iscsi_info = p_iscsi_info; - p_ooo_info = qed_ooo_alloc(p_hwfn); - if (!p_ooo_info) - goto alloc_no_mem; - p_hwfn->p_ooo_info = p_ooo_info; + rc = qed_iscsi_alloc(p_hwfn); + if (rc) + goto alloc_err; + rc = qed_ooo_alloc(p_hwfn); + if (rc) + goto alloc_err; } /* DMA info initialization */ @@ -1025,16 +1030,19 @@ void qed_resc_setup(struct qed_dev *cdev) { int i; - if (IS_VF(cdev)) + if (IS_VF(cdev)) { + for_each_hwfn(cdev, i) + qed_l2_setup(&cdev->hwfns[i]); return; + } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; qed_cxt_mngr_setup(p_hwfn); qed_spq_setup(p_hwfn); - qed_eq_setup(p_hwfn, p_hwfn->p_eq); - qed_consq_setup(p_hwfn, p_hwfn->p_consq); + qed_eq_setup(p_hwfn); + qed_consq_setup(p_hwfn); /* Read shadow of current MFW mailbox */ qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); @@ -1044,17 +1052,18 @@ void qed_resc_setup(struct qed_dev *cdev) qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); - qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt); + qed_l2_setup(p_hwfn); + qed_iov_setup(p_hwfn); #ifdef CONFIG_QED_LL2 if (p_hwfn->using_ll2) - qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info); + qed_ll2_setup(p_hwfn); #endif if (p_hwfn->hw_info.personality == QED_PCI_FCOE) - qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info); + qed_fcoe_setup(p_hwfn); if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { - qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info); - qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info); + qed_iscsi_setup(p_hwfn); + qed_ooo_setup(p_hwfn); } } } @@ -1122,7 +1131,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) return -EINVAL; } - switch (p_hwfn->cdev->num_ports_in_engines) { + switch (p_hwfn->cdev->num_ports_in_engine) { case 1: hw_mode |= 1 << MODE_PORTS_PER_ENG_1; break; @@ -1134,7 +1143,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) break; default: DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", - p_hwfn->cdev->num_ports_in_engines); + p_hwfn->cdev->num_ports_in_engine); return -EINVAL; } @@ -1169,7 +1178,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) static void qed_init_cau_rt_data(struct qed_dev *cdev) { u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; - int i, sb_id; + int i, igu_sb_id; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -1179,15 +1188,17 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev) p_igu_info = p_hwfn->hw_info.p_igu_info; - for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev); - sb_id++) { - p_block = 
&p_igu_info->igu_map.igu_blocks[sb_id]; + for (igu_sb_id = 0; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) { + p_block = &p_igu_info->entry[igu_sb_id]; + if (!p_block->is_pf) continue; qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_block->function_id, 0, 0); - STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry); + STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, + sb_entry); } } } @@ -1241,6 +1252,10 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, L1_CACHE_BYTES, wr_mbs); STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); + if (val > 0) { + STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val); + STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val); + } } static int qed_hw_init_common(struct qed_hwfn *p_hwfn, @@ -1267,7 +1282,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, } memset(¶ms, 0, sizeof(params)); - params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines; + params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; params.pf_rl_en = qm_info->pf_rl_en; params.pf_wfq_en = qm_info->pf_wfq_en; @@ -1447,8 +1462,15 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) static int qed_hw_init_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int hw_mode) { - return qed_init_run(p_hwfn, p_ptt, PHASE_PORT, - p_hwfn->port_id, hw_mode); + int rc = 0; + + rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode); + if (rc) + return rc; + + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0); + + return 0; } static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, @@ -1527,7 +1549,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, qed_int_igu_enable(p_hwfn, p_ptt, int_mode); /* send function start command */ - rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode, + rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, + p_hwfn->cdev->mf_mode, allow_npar_tx_switch); if (rc) { DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); @@ -1711,6 +1734,11 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) return mfw_rc; } + /* Check if there is a DID mismatch between nvm-cfg/efuse */ + if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) + DP_NOTICE(p_hwfn, + "warning: device configuration is not supported on this board type. The device may not function as expected.\n"); + /* send DCBX attention request command */ DP_VERBOSE(p_hwfn, QED_MSG_DCB, @@ -1956,6 +1984,13 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) if (!p_ptt) return -EAGAIN; + /* If roce info is allocated it means roce is initialized and should + * be enabled in searcher. 
+ */ + if (p_hwfn->p_rdma_info && + p_hwfn->b_rdma_enabled_in_prs) + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); + /* Re-open incoming traffic */ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); qed_ptt_release(p_hwfn, p_ptt); @@ -1968,6 +2003,7 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) { qed_ptt_pool_free(p_hwfn); kfree(p_hwfn->hw_info.p_igu_info); + p_hwfn->hw_info.p_igu_info = NULL; } /* Setup bar access */ @@ -2025,51 +2061,55 @@ static void get_function_id(struct qed_hwfn *p_hwfn) static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) { u32 *feat_num = p_hwfn->hw_info.feat_num; - struct qed_sb_cnt_info sb_cnt_info; + struct qed_sb_cnt_info sb_cnt; u32 non_l2_sbs = 0; + memset(&sb_cnt, 0, sizeof(sb_cnt)); + qed_int_get_num_sbs(p_hwfn, &sb_cnt); + if (IS_ENABLED(CONFIG_QED_RDMA) && - p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { + QED_IS_RDMA_PERSONALITY(p_hwfn)) { /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide * the status blocks equally between L2 / RoCE but with * consideration as to how many l2 queues / cnqs we have. */ feat_num[QED_RDMA_CNQ] = - min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2, + min_t(u32, sb_cnt.cnt / 2, RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); non_l2_sbs = feat_num[QED_RDMA_CNQ]; } - - if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE || - p_hwfn->hw_info.personality == QED_PCI_ETH) { + if (QED_IS_L2_PERSONALITY(p_hwfn)) { /* Start by allocating VF queues, then PF's */ - memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); - qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); feat_num[QED_VF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_L2_QUEUE), - sb_cnt_info.sb_iov_cnt); + sb_cnt.iov_cnt); feat_num[QED_PF_L2_QUE] = min_t(u32, - RESC_NUM(p_hwfn, QED_SB) - - non_l2_sbs, + sb_cnt.cnt - non_l2_sbs, RESC_NUM(p_hwfn, QED_L2_QUEUE) - FEAT_NUM(p_hwfn, QED_VF_L2_QUE)); } - if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) - feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB), + if (QED_IS_FCOE_PERSONALITY(p_hwfn)) + feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt, + RESC_NUM(p_hwfn, + QED_CMDQS_CQS)); + + if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) + feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, QED_CMDQS_CQS)); DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n", + "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n", (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), + (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ), (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), - RESC_NUM(p_hwfn, QED_SB)); + (int)sb_cnt.cnt); } const char *qed_hw_get_resc_name(enum qed_resources res_id) @@ -2188,7 +2228,6 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, { u8 num_funcs = p_hwfn->num_funcs_on_engine; bool b_ah = QED_IS_AH(p_hwfn->cdev); - struct qed_sb_cnt_info sb_cnt_info; switch (res_id) { case QED_L2_QUEUE: @@ -2240,9 +2279,10 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, *p_resc_num = 1; break; case QED_SB: - memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); - qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); - *p_resc_num = sb_cnt_info.sb_cnt; + /* Since we want its value to reflect whether MFW supports + * the new scheme, have a default of 0. 
+ */ + *p_resc_num = 0; break; default: return -EINVAL; @@ -2252,7 +2292,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, case QED_BDQ: if (!*p_resc_num) *p_resc_start = 0; - else if (p_hwfn->cdev->num_ports_in_engines == 4) + else if (p_hwfn->cdev->num_ports_in_engine == 4) *p_resc_start = p_hwfn->port_id; else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) *p_resc_start = p_hwfn->port_id; @@ -2311,11 +2351,6 @@ static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, goto out; } - /* Special handling for status blocks; Would be revised in future */ - if (res_id == QED_SB) { - *p_resc_num -= 1; - *p_resc_start -= p_hwfn->enabled_func_idx; - } out: /* PQs have to divide by 8 [that's the HW granularity]. * Reduce number so it would fit. @@ -2413,6 +2448,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return -EINVAL; } + /* This will also learn the number of SBs from MFW */ + if (qed_int_igu_reset_cam(p_hwfn, p_ptt)) + return -EINVAL; + qed_hw_set_feat(p_hwfn); for (res_id = 0; res_id < QED_MAX_RESC; res_id++) @@ -2669,15 +2708,15 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); if (port_mode < 3) { - p_hwfn->cdev->num_ports_in_engines = 1; + p_hwfn->cdev->num_ports_in_engine = 1; } else if (port_mode <= 5) { - p_hwfn->cdev->num_ports_in_engines = 2; + p_hwfn->cdev->num_ports_in_engine = 2; } else { DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", - p_hwfn->cdev->num_ports_in_engines); + p_hwfn->cdev->num_ports_in_engine); - /* Default num_ports_in_engines to something */ - p_hwfn->cdev->num_ports_in_engines = 1; + /* Default num_ports_in_engine to something */ + p_hwfn->cdev->num_ports_in_engine = 1; } } @@ -2687,20 +2726,20 @@ static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn, u32 port; int i; - p_hwfn->cdev->num_ports_in_engines = 0; + p_hwfn->cdev->num_ports_in_engine = 0; for (i = 0; i < MAX_NUM_PORTS_K2; i++) { port = qed_rd(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4)); if (port & 1) - p_hwfn->cdev->num_ports_in_engines++; + p_hwfn->cdev->num_ports_in_engine++; } - if (!p_hwfn->cdev->num_ports_in_engines) { + if (!p_hwfn->cdev->num_ports_in_engine) { DP_NOTICE(p_hwfn, "All NIG ports are inactive\n"); /* Default num_ports_in_engine to something */ - p_hwfn->cdev->num_ports_in_engines = 1; + p_hwfn->cdev->num_ports_in_engine = 1; } } @@ -2819,12 +2858,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) cdev->chip_num, cdev->chip_rev, cdev->chip_bond_id, cdev->chip_metal); - if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) { - DP_NOTICE(cdev->hwfns, - "The chip type/rev (BB A0) is not supported!\n"); - return -EINVAL; - } - return 0; } @@ -3051,12 +3084,15 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) } pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - dma_free_coherent(&cdev->pdev->dev, - pbl_size, - p_chain->pbl_sp.p_virt_table, - p_chain->pbl_sp.p_phys_table); + + if (!p_chain->b_external_pbl) + dma_free_coherent(&cdev->pdev->dev, + pbl_size, + p_chain->pbl_sp.p_virt_table, + p_chain->pbl_sp.p_phys_table); out: vfree(p_chain->pbl.pp_virt_addr_tbl); + p_chain->pbl.pp_virt_addr_tbl = NULL; } void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) @@ -3150,7 +3186,10 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain) return 0; } -static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) +static int +qed_chain_alloc_pbl(struct 
qed_dev *cdev, + struct qed_chain *p_chain, + struct qed_chain_ext_pbl *ext_pbl) { u32 page_cnt = p_chain->page_cnt, size, i; dma_addr_t p_phys = 0, p_pbl_phys = 0; @@ -3170,8 +3209,16 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) * should be saved to allow its freeing during the error flow. */ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, - size, &p_pbl_phys, GFP_KERNEL); + + if (!ext_pbl) { + p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, + size, &p_pbl_phys, GFP_KERNEL); + } else { + p_pbl_virt = ext_pbl->p_pbl_virt; + p_pbl_phys = ext_pbl->p_pbl_phys; + p_chain->b_external_pbl = true; + } + qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_virt_addr_tbl); if (!p_pbl_virt) @@ -3204,7 +3251,10 @@ int qed_chain_alloc(struct qed_dev *cdev, enum qed_chain_use_mode intended_use, enum qed_chain_mode mode, enum qed_chain_cnt_type cnt_type, - u32 num_elems, size_t elem_size, struct qed_chain *p_chain) + u32 num_elems, + size_t elem_size, + struct qed_chain *p_chain, + struct qed_chain_ext_pbl *ext_pbl) { u32 page_cnt; int rc = 0; @@ -3235,7 +3285,7 @@ int qed_chain_alloc(struct qed_dev *cdev, rc = qed_chain_alloc_single(cdev, p_chain); break; case QED_CHAIN_MODE_PBL: - rc = qed_chain_alloc_pbl(cdev, p_chain); + rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl); break; } if (rc) @@ -4074,10 +4124,21 @@ static int qed_device_num_ports(struct qed_dev *cdev) if (cdev->num_hwfns > 1) return 1; - return cdev->num_ports_in_engines * qed_device_num_engines(cdev); + return cdev->num_ports_in_engine * qed_device_num_engines(cdev); } int qed_device_get_port_id(struct qed_dev *cdev) { return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev); } + +void qed_set_fw_mac_addr(__le16 *fw_msb, + __le16 *fw_mid, __le16 *fw_lsb, u8 *mac) +{ + ((u8 *)fw_msb)[0] = mac[1]; + ((u8 *)fw_msb)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lsb)[0] = mac[5]; + ((u8 *)fw_lsb)[1] = mac[4]; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 12d16c096e36..1f1df1bf127c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -307,6 +307,7 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, * @param num_elems * @param elem_size * @param p_chain + * @param ext_pbl - a possible external PBL * * @return int */ @@ -315,7 +316,9 @@ qed_chain_alloc(struct qed_dev *cdev, enum qed_chain_use_mode intended_use, enum qed_chain_mode mode, enum qed_chain_cnt_type cnt_type, - u32 num_elems, size_t elem_size, struct qed_chain *p_chain); + u32 num_elems, + size_t elem_size, + struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl); /** * @brief qed_chain_free - Free chain DMA memory diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 21a58fffd02b..df195c02b711 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -43,7 +43,6 @@ #include <linux/slab.h> #include <linux/stddef.h> #include <linux/string.h> -#include <linux/version.h> #include <linux/workqueue.h> #include <linux/errno.h> #include <linux/list.h> @@ -142,6 +141,15 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, p_data = &p_ramrod->init_ramrod_data; fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params; + /* Sanity */ + if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) { + DP_ERR(p_hwfn, 
+ "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n", + fcoe_pf_params->num_cqs, + p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); + return -EINVAL; + } + p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages); p_data->sq_num_pages_in_pbl = tmp; @@ -184,7 +192,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, p_data->q_params.queue_relative_offset = (u8)tmp; for (i = 0; i < fcoe_pf_params->num_cqs; i++) { - tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id); + u16 igu_sb_id; + + igu_sb_id = qed_get_igu_sb_id(p_hwfn, i); + tmp = cpu_to_le16(igu_sb_id); p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp; } @@ -539,7 +550,7 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, } } -struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn) +int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) { struct qed_fcoe_info *p_fcoe_info; @@ -547,19 +558,21 @@ struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn) p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL); if (!p_fcoe_info) { DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n"); - return NULL; + return -ENOMEM; } INIT_LIST_HEAD(&p_fcoe_info->free_list); - return p_fcoe_info; + + p_hwfn->p_fcoe_info = p_fcoe_info; + return 0; } -void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info) +void qed_fcoe_setup(struct qed_hwfn *p_hwfn) { struct fcoe_task_context *p_task_ctx = NULL; int rc; u32 i; - spin_lock_init(&p_fcoe_info->lock); + spin_lock_init(&p_hwfn->p_fcoe_info->lock); for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) { rc = qed_cxt_get_task_ctx(p_hwfn, i, QED_CTX_WORKING_MEM, @@ -577,15 +590,15 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info) } } -void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info) +void qed_fcoe_free(struct qed_hwfn *p_hwfn) { struct qed_fcoe_conn *p_conn = NULL; - if (!p_fcoe_info) + if (!p_hwfn->p_fcoe_info) return; - while (!list_empty(&p_fcoe_info->free_list)) { - p_conn = list_first_entry(&p_fcoe_info->free_list, + while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) { + p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list, struct qed_fcoe_conn, list_entry); if (!p_conn) break; @@ -593,7 +606,8 @@ void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info) qed_fcoe_free_connection(p_hwfn, p_conn); } - kfree(p_fcoe_info); + kfree(p_hwfn->p_fcoe_info); + p_hwfn->p_fcoe_info = NULL; } static int @@ -734,6 +748,11 @@ static int qed_fill_fcoe_dev_info(struct qed_dev *cdev, info->secondary_bdq_rq_addr = qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + info->wwpn = hwfn->mcp_info->func_info.wwn_port; + info->wwnn = hwfn->mcp_info->func_info.wwn_node; + + info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ); + return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h index 472af34a171d..027a76ac839a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -49,29 +49,21 @@ struct qed_fcoe_info { }; #if IS_ENABLED(CONFIG_QED_FCOE) -struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn); +int qed_fcoe_alloc(struct qed_hwfn *p_hwfn); -void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info); +void qed_fcoe_setup(struct qed_hwfn *p_hwfn); -void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info); +void qed_fcoe_free(struct qed_hwfn *p_hwfn); void 
qed_get_protocol_stats_fcoe(struct qed_dev *cdev, struct qed_mcp_fcoe_stats *stats); #else /* CONFIG_QED_FCOE */ -static inline struct qed_fcoe_info * -qed_fcoe_alloc(struct qed_hwfn *p_hwfn) +static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) { - return NULL; + return -EINVAL; } -static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn, - struct qed_fcoe_info *p_fcoe_info) -{ -} - -static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn, - struct qed_fcoe_info *p_fcoe_info) -{ -} +static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {} +static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {} static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, struct qed_mcp_fcoe_stats *stats) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 858a57a73589..31fb0bffa098 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -46,6 +46,7 @@ #include <linux/qed/fcoe_common.h> #include <linux/qed/eth_common.h> #include <linux/qed/iscsi_common.h> +#include <linux/qed/iwarp_common.h> #include <linux/qed/rdma_common.h> #include <linux/qed/roce_common.h> #include <linux/qed/qed_fcoe_if.h> @@ -346,7 +347,7 @@ struct xstorm_core_conn_ag_ctx { u8 byte13; u8 byte14; u8 byte15; - u8 byte16; + u8 e5_reserved; __le16 word11; __le32 reg10; __le32 reg11; @@ -368,85 +369,85 @@ struct tstorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define 
TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define 
TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -681,7 +682,9 @@ struct core_rx_fast_path_cqe { __le16 packet_length; __le16 vlan; struct core_rx_cqe_opaque_data opaque_data; - __le32 reserved[4]; + struct parsing_err_flags err_flags; + __le16 reserved0; + __le32 reserved1[3]; }; struct core_rx_gsi_offload_cqe { @@ -692,7 +695,7 @@ struct core_rx_gsi_offload_cqe { __le16 vlan; __le32 src_mac_addrhi; __le16 src_mac_addrlo; - u8 reserved1[2]; + __le16 qp_id; __le32 gid_dst[4]; }; @@ -774,15 +777,15 @@ struct core_tx_bd { __le16 bitfield1; #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 -#define CORE_TX_BD_TX_DST_MASK 0x1 -#define CORE_TX_BD_TX_DST_SHIFT 14 -#define CORE_TX_BD_RESERVED_MASK 0x1 -#define CORE_TX_BD_RESERVED_SHIFT 15 +#define CORE_TX_BD_TX_DST_MASK 0x3 +#define CORE_TX_BD_TX_DST_SHIFT 14 }; enum core_tx_dest { CORE_TX_DEST_NW, CORE_TX_DEST_LB, + CORE_TX_DEST_RESERVED, + CORE_TX_DEST_DROP, MAX_CORE_TX_DEST }; @@ -804,12 +807,12 @@ struct core_tx_stop_ramrod_data { __le32 reserved0[2]; }; -enum dcb_dhcp_update_flag { - DONT_UPDATE_DCB_DHCP, +enum dcb_dscp_update_mode { + DONT_UPDATE_DCB_DSCP, UPDATE_DCB, UPDATE_DSCP, UPDATE_DCB_DSCP, - MAX_DCB_DHCP_UPDATE_FLAG + MAX_DCB_DSCP_UPDATE_MODE }; struct eth_mstorm_per_pf_stat { @@ -917,6 +920,14 @@ struct hsi_fp_ver_struct { u8 major_ver_arr[2]; }; +enum iwarp_ll2_tx_queues { + IWARP_LL2_IN_ORDER_TX_QUEUE = 1, + IWARP_LL2_ALIGNED_TX_QUEUE, + IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE, + IWARP_LL2_ERROR, + MAX_IWARP_LL2_TX_QUEUES +}; + /* Mstorm non-triggering VF zone */ enum malicious_vf_error_id { 
MALICIOUS_VF_NO_ERROR, @@ -960,7 +971,7 @@ enum personality_type { PERSONALITY_ISCSI, PERSONALITY_FCOE, PERSONALITY_RDMA_AND_ETH, - PERSONALITY_RESERVED3, + PERSONALITY_RDMA, PERSONALITY_CORE, PERSONALITY_ETH, PERSONALITY_RESERVED4, @@ -971,16 +982,12 @@ enum personality_type { struct pf_start_tunnel_config { u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; - u8 tx_enable_vxlan; - u8 tx_enable_l2geneve; - u8 tx_enable_ipgeneve; - u8 tx_enable_l2gre; - u8 tx_enable_ipgre; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; u8 tunnel_clss_l2gre; u8 tunnel_clss_ipgre; + u8 reserved; __le16 vxlan_udp_port; __le16 geneve_udp_port; }; @@ -990,6 +997,7 @@ struct pf_start_ramrod_data { struct regpair event_ring_pbl_addr; struct regpair consolid_q_pbl_addr; struct pf_start_tunnel_config tunnel_config; + __le32 reserved; __le16 event_ring_sb_id; u8 base_vf_id; u8 num_vfs; @@ -1007,7 +1015,6 @@ struct pf_start_ramrod_data { u8 pri_map_valid; __le32 outer_tag; struct hsi_fp_ver_struct hsi_fp_ver; - }; struct protocol_dcb_data { @@ -1023,14 +1030,8 @@ struct pf_update_tunnel_config { u8 update_rx_pf_clss; u8 update_rx_def_ucast_clss; u8 update_rx_def_non_ucast_clss; - u8 update_tx_pf_clss; u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; - u8 tx_enable_vxlan; - u8 tx_enable_l2geneve; - u8 tx_enable_ipgeneve; - u8 tx_enable_l2gre; - u8 tx_enable_ipgre; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; @@ -1038,17 +1039,17 @@ struct pf_update_tunnel_config { u8 tunnel_clss_ipgre; __le16 vxlan_udp_port; __le16 geneve_udp_port; - __le16 reserved[2]; + __le16 reserved; }; struct pf_update_ramrod_data { u8 pf_id; - u8 update_eth_dcb_data_flag; - u8 update_fcoe_dcb_data_flag; - u8 update_iscsi_dcb_data_flag; - u8 update_roce_dcb_data_flag; - u8 update_rroce_dcb_data_flag; - u8 update_iwarp_dcb_data_flag; + u8 update_eth_dcb_data_mode; + u8 update_fcoe_dcb_data_mode; + u8 update_iscsi_dcb_data_mode; + u8 update_roce_dcb_data_mode; + u8 update_rroce_dcb_data_mode; + u8 update_iwarp_dcb_data_mode; u8 update_mf_vlan_flag; struct protocol_dcb_data eth_dcb_data; struct protocol_dcb_data fcoe_dcb_data; @@ -1127,7 +1128,7 @@ struct tstorm_per_port_stat { struct regpair iscsi_irregular_pkt; struct regpair fcoe_irregular_pkt; struct regpair roce_irregular_pkt; - struct regpair reserved; + struct regpair iwarp_irregular_pkt; struct regpair eth_irregular_pkt; struct regpair reserved1; struct regpair preroce_irregular_pkt; @@ -1326,6 +1327,87 @@ enum dmae_cmd_src_enum { MAX_DMAE_CMD_SRC_ENUM }; +struct mstorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define 
MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct ystorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + /* IGU cleanup command */ struct igu_cleanup { __le32 sb_id_and_flags; @@ -1389,44 +1471,6 @@ struct igu_msix_vector { #define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF #define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 }; - -struct mstorm_core_conn_ag_ctx { - u8 byte0; - u8 byte1; - u8 flags0; -#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0; - __le16 word1; - __le32 reg0; - __le32 reg1; -}; - /* per encapsulation type enabling flags */ struct 
prs_reg_encapsulation_type_en { u8 flags; @@ -1541,50 +1585,6 @@ struct sdm_op_gen { #define SDM_OP_GEN_RESERVED_SHIFT 20 }; -struct ystorm_core_conn_ag_ctx { - u8 byte0; - u8 byte1; - u8 flags0; -#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 - u8 byte2; - u8 byte3; - __le16 word0; - __le32 reg0; - __le32 reg1; - __le16 word1; - __le16 word2; - __le16 word3; - __le16 word4; - __le32 reg2; - __le32 reg3; -}; - /****************************************/ /* Debug Tools HSI constants and macros */ /****************************************/ @@ -1643,6 +1643,8 @@ enum block_addr { GRCBASE_MULD = 0x4e0000, GRCBASE_YULD = 0x4c8000, GRCBASE_XYLD = 0x4c0000, + GRCBASE_PTLD = 0x590000, + GRCBASE_YPLD = 0x5b0000, GRCBASE_PRM = 0x230000, GRCBASE_PBF_PB1 = 0xda0000, GRCBASE_PBF_PB2 = 0xda4000, @@ -1656,6 +1658,10 @@ enum block_addr { GRCBASE_TCFC = 0x2d0000, GRCBASE_IGU = 0x180000, GRCBASE_CAU = 0x1c0000, + GRCBASE_RGFS = 0xf00000, + GRCBASE_RGSRC = 0x320000, + GRCBASE_TGFS = 0xd00000, + GRCBASE_TGSRC = 0x322000, GRCBASE_UMAC = 0x51000, GRCBASE_XMAC = 0x210000, GRCBASE_DBG = 0x10000, @@ -1669,10 +1675,6 @@ enum block_addr { GRCBASE_PHY_PCIE = 0x620000, GRCBASE_LED = 0x6b8000, GRCBASE_AVS_WRAP = 0x6b0000, - GRCBASE_RGFS = 0x19d0000, - GRCBASE_TGFS = 0x19e0000, - GRCBASE_PTLD = 0x19f0000, - GRCBASE_YPLD = 0x1a10000, GRCBASE_MISC_AEU = 0x8000, GRCBASE_BAR0_MAP = 0x1c00000, MAX_BLOCK_ADDR @@ -1732,6 +1734,8 @@ enum block_id { BLOCK_MULD, BLOCK_YULD, BLOCK_XYLD, + BLOCK_PTLD, + BLOCK_YPLD, BLOCK_PRM, BLOCK_PBF_PB1, BLOCK_PBF_PB2, @@ -1745,6 +1749,10 @@ enum block_id { BLOCK_TCFC, BLOCK_IGU, BLOCK_CAU, + BLOCK_RGFS, + BLOCK_RGSRC, + BLOCK_TGFS, + BLOCK_TGSRC, BLOCK_UMAC, BLOCK_XMAC, BLOCK_DBG, @@ -1758,10 +1766,6 @@ enum block_id { BLOCK_PHY_PCIE, BLOCK_LED, BLOCK_AVS_WRAP, - BLOCK_RGFS, - BLOCK_TGFS, - BLOCK_PTLD, - BLOCK_YPLD, BLOCK_MISC_AEU, BLOCK_BAR0_MAP, MAX_BLOCK_ID @@ -1780,6 +1784,10 @@ enum bin_dbg_buffer_type { BIN_BUF_DBG_ATTN_REGS, BIN_BUF_DBG_ATTN_INDEXES, BIN_BUF_DBG_ATTN_NAME_OFFSETS, + BIN_BUF_DBG_BUS_BLOCKS, + BIN_BUF_DBG_BUS_LINES, + BIN_BUF_DBG_BUS_BLOCKS_USER_DATA, + BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS, BIN_BUF_DBG_PARSING_STRINGS, MAX_BIN_DBG_BUFFER_TYPE }; @@ -1862,6 +1870,29 @@ enum dbg_attn_type { MAX_DBG_ATTN_TYPE }; +struct dbg_bus_block { + u8 num_of_lines; + u8 has_latency_events; + __le16 
lines_offset; +}; + +struct dbg_bus_block_user_data { + u8 num_of_lines; + u8 has_latency_events; + __le16 names_offset; +}; + +struct dbg_bus_line { + u8 data; +#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF +#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 +#define DBG_BUS_LINE_IS_256B_MASK 0x1 +#define DBG_BUS_LINE_IS_256B_SHIFT 4 +#define DBG_BUS_LINE_RESERVED_MASK 0x7 +#define DBG_BUS_LINE_RESERVED_SHIFT 5 + u8 group_sizes; +}; + /* condition header for registers dump */ struct dbg_dump_cond_hdr { struct dbg_mode_hdr mode; /* Mode header */ @@ -1879,17 +1910,21 @@ struct dbg_dump_mem { __le32 dword1; #define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF #define DBG_DUMP_MEM_LENGTH_SHIFT 0 -#define DBG_DUMP_MEM_RESERVED_MASK 0xFF -#define DBG_DUMP_MEM_RESERVED_SHIFT 24 +#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 +#define DBG_DUMP_MEM_RESERVED_MASK 0x7F +#define DBG_DUMP_MEM_RESERVED_SHIFT 25 }; /* register data for registers dump */ struct dbg_dump_reg { __le32 data; -#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF /* register address (in dwords) */ +#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */ #define DBG_DUMP_REG_ADDRESS_SHIFT 0 -#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */ -#define DBG_DUMP_REG_LENGTH_SHIFT 24 +#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */ +#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 +#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */ +#define DBG_DUMP_REG_LENGTH_SHIFT 24 }; /* split header for registers dump */ @@ -1910,20 +1945,24 @@ struct dbg_idle_chk_cond_hdr { /* Idle Check condition register */ struct dbg_idle_chk_cond_reg { __le32 data; -#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF +#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 - __le16 num_entries; /* number of registers entries to check */ - u8 entry_size; /* size of registers entry (in dwords) */ - u8 start_entry; /* index of the first entry to check */ + __le16 num_entries; + u8 entry_size; + u8 start_entry; }; /* Idle Check info register */ struct dbg_idle_chk_info_reg { __le32 data; -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 __le16 size; /* register size in dwords */ @@ -1996,15 +2035,17 @@ enum dbg_idle_chk_severity_types { /* Debug Bus block data */ struct dbg_bus_block_data { - u8 enabled; /* Indicates if the block is enabled for recording (0/1) */ - u8 hw_id; /* HW ID associated with the block */ - u8 line_num; /* Debug line number to select */ - u8 right_shift; /* Number of units to right the debug data (0-3) */ - u8 cycle_en; /* 4-bit value: bit i set -> unit i is enabled. */ - u8 force_valid; /* 4-bit value: bit i set -> unit i is forced valid. */ - u8 force_frame; /* 4-bit value: bit i set -> unit i frame bit is forced. 
- */ - u8 reserved; + __le16 data; +#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0 +#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF +#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4 +#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8 +#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12 + u8 line_num; + u8 hw_id; }; /* Debug Bus Clients */ @@ -2045,6 +2086,14 @@ enum dbg_bus_constraint_ops { MAX_DBG_BUS_CONSTRAINT_OPS }; +struct dbg_bus_trigger_state_data { + u8 data; +#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF +#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0 +#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF +#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4 +}; + /* Debug Bus memory address */ struct dbg_bus_mem_addr { __le32 lo; @@ -2078,66 +2127,42 @@ union dbg_bus_storm_eid_params { /* Debug Bus Storm data */ struct dbg_bus_storm_data { - u8 fast_enabled; - u8 fast_mode; - u8 slow_enabled; - u8 slow_mode; + u8 enabled; + u8 mode; u8 hw_id; u8 eid_filter_en; u8 eid_range_not_mask; u8 cid_filter_en; union dbg_bus_storm_eid_params eid_filter_params; - __le16 reserved; __le32 cid; }; /* Debug Bus data */ struct dbg_bus_data { - __le32 app_version; /* The tools version number of the application */ - u8 state; /* The current debug bus state */ - u8 hw_dwords; /* HW dwords per cycle */ - u8 next_hw_id; /* Next HW ID to be associated with an input */ - u8 num_enabled_blocks; /* Number of blocks enabled for recording */ - u8 num_enabled_storms; /* Number of Storms enabled for recording */ - u8 target; /* Output target */ - u8 next_trigger_state; /* ID of next trigger state to be added */ - u8 next_constraint_id; /* ID of next filter/trigger constraint to be - * added. - */ - u8 one_shot_en; /* Indicates if one-shot mode is enabled (0/1) */ - u8 grc_input_en; /* Indicates if GRC recording is enabled (0/1) */ - u8 timestamp_input_en; /* Indicates if timestamp recording is enabled - * (0/1). - */ - u8 filter_en; /* Indicates if the recording filter is enabled (0/1) */ - u8 trigger_en; /* Indicates if the recording trigger is enabled (0/1) */ - u8 adding_filter; /* If true, the next added constraint belong to the - * filter. Otherwise, it belongs to the last added - * trigger state. Valid only if either filter or - * triggers are enabled. - */ - u8 filter_pre_trigger; /* Indicates if the recording filter should be - * applied before the trigger. Valid only if both - * filter and trigger are enabled (0/1). - */ - u8 filter_post_trigger; /* Indicates if the recording filter should be - * applied after the trigger. Valid only if both - * filter and trigger are enabled (0/1). - */ - u8 unify_inputs; /* If true, all inputs are associated with HW ID 0. - * Otherwise, each input is assigned a different HW ID - * (0/1). - */ - u8 rcv_from_other_engine; /* Indicates if the other engine sends it NW - * recording to this engine (0/1). - */ - struct dbg_bus_pci_buf_data pci_buf; /* Debug Bus PCI buffer data. Valid - * only when the target is - * DBG_BUS_TARGET_ID_PCI. 
- */ + __le32 app_version; + u8 state; + u8 hw_dwords; + __le16 hw_id_mask; + u8 num_enabled_blocks; + u8 num_enabled_storms; + u8 target; + u8 one_shot_en; + u8 grc_input_en; + u8 timestamp_input_en; + u8 filter_en; + u8 adding_filter; + u8 filter_pre_trigger; + u8 filter_post_trigger; __le16 reserved; - struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */ - struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */ + u8 trigger_en; + struct dbg_bus_trigger_state_data trigger_states[3]; + u8 next_trigger_state; + u8 next_constraint_id; + u8 unify_inputs; + u8 rcv_from_other_engine; + struct dbg_bus_pci_buf_data pci_buf; + struct dbg_bus_block_data blocks[88]; + struct dbg_bus_storm_data storms[6]; }; enum dbg_bus_filter_types { @@ -2156,12 +2181,6 @@ enum dbg_bus_frame_modes { MAX_DBG_BUS_FRAME_MODES }; -enum dbg_bus_input_types { - DBG_BUS_INPUT_TYPE_STORM, - DBG_BUS_INPUT_TYPE_BLOCK, - MAX_DBG_BUS_INPUT_TYPES -}; - enum dbg_bus_other_engine_modes { DBG_BUS_OTHER_ENGINE_MODE_NONE, DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, @@ -2185,19 +2204,19 @@ enum dbg_bus_pre_trigger_types { }; enum dbg_bus_semi_frame_modes { - DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0, - DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3, + DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = + 0, + DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = + 3, MAX_DBG_BUS_SEMI_FRAME_MODES }; /* Debug bus states */ enum dbg_bus_states { - DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */ - DBG_BUS_STATE_READY, /* debug bus is ready for configuration and - * recording. - */ - DBG_BUS_STATE_RECORDING, /* debug bus is currently recording */ - DBG_BUS_STATE_STOPPED, /* debug bus recording has stopped */ + DBG_BUS_STATE_IDLE, + DBG_BUS_STATE_READY, + DBG_BUS_STATE_RECORDING, + DBG_BUS_STATE_STOPPED, MAX_DBG_BUS_STATES }; @@ -2216,11 +2235,8 @@ enum dbg_bus_storm_modes { /* Debug bus target IDs */ enum dbg_bus_targets { - /* records debug bus to DBG block internal buffer */ DBG_BUS_TARGET_ID_INT_BUF, - /* records debug bus to the NW */ DBG_BUS_TARGET_ID_NIG, - /* records debug bus to a PCI buffer */ DBG_BUS_TARGET_ID_PCI, MAX_DBG_BUS_TARGETS }; @@ -2235,48 +2251,45 @@ struct dbg_grc_data { /* Debug GRC params */ enum dbg_grc_params { - DBG_GRC_PARAM_DUMP_TSTORM, /* dump Tstorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_MSTORM, /* dump Mstorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_USTORM, /* dump Ustorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_XSTORM, /* dump Xstorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_YSTORM, /* dump Ystorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_PSTORM, /* dump Pstorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_REGS, /* dump non-memory registers (0/1) */ - DBG_GRC_PARAM_DUMP_RAM, /* dump Storm internal RAMs (0/1) */ - DBG_GRC_PARAM_DUMP_PBUF, /* dump Storm passive buffer (0/1) */ - DBG_GRC_PARAM_DUMP_IOR, /* dump Storm IORs (0/1) */ - DBG_GRC_PARAM_DUMP_VFC, /* dump VFC memories (0/1) */ - DBG_GRC_PARAM_DUMP_CM_CTX, /* dump CM contexts (0/1) */ - DBG_GRC_PARAM_DUMP_PXP, /* dump PXP memories (0/1) */ - DBG_GRC_PARAM_DUMP_RSS, /* dump RSS memories (0/1) */ - DBG_GRC_PARAM_DUMP_CAU, /* dump CAU memories (0/1) */ - DBG_GRC_PARAM_DUMP_QM, /* dump QM memories (0/1) */ - DBG_GRC_PARAM_DUMP_MCP, /* dump MCP memories (0/1) */ - DBG_GRC_PARAM_RESERVED, /* reserved */ - DBG_GRC_PARAM_DUMP_CFC, /* dump CFC memories (0/1) */ - DBG_GRC_PARAM_DUMP_IGU, /* dump IGU memories (0/1) */ - DBG_GRC_PARAM_DUMP_BRB, /* dump BRB memories (0/1) */ - DBG_GRC_PARAM_DUMP_BTB, /* dump BTB memories (0/1) */ - DBG_GRC_PARAM_DUMP_BMB, /* dump BMB 
memories (0/1) */ - DBG_GRC_PARAM_DUMP_NIG, /* dump NIG memories (0/1) */ - DBG_GRC_PARAM_DUMP_MULD, /* dump MULD memories (0/1) */ - DBG_GRC_PARAM_DUMP_PRS, /* dump PRS memories (0/1) */ - DBG_GRC_PARAM_DUMP_DMAE, /* dump PRS memories (0/1) */ - DBG_GRC_PARAM_DUMP_TM, /* dump TM (timers) memories (0/1) */ - DBG_GRC_PARAM_DUMP_SDM, /* dump SDM memories (0/1) */ - DBG_GRC_PARAM_DUMP_DIF, /* dump DIF memories (0/1) */ - DBG_GRC_PARAM_DUMP_STATIC, /* dump static debug data (0/1) */ - DBG_GRC_PARAM_UNSTALL, /* un-stall Storms after dump (0/1) */ - DBG_GRC_PARAM_NUM_LCIDS, /* number of LCIDs (0..320) */ - DBG_GRC_PARAM_NUM_LTIDS, /* number of LTIDs (0..320) */ - /* preset: exclude all memories from dump (1 only) */ + DBG_GRC_PARAM_DUMP_TSTORM, + DBG_GRC_PARAM_DUMP_MSTORM, + DBG_GRC_PARAM_DUMP_USTORM, + DBG_GRC_PARAM_DUMP_XSTORM, + DBG_GRC_PARAM_DUMP_YSTORM, + DBG_GRC_PARAM_DUMP_PSTORM, + DBG_GRC_PARAM_DUMP_REGS, + DBG_GRC_PARAM_DUMP_RAM, + DBG_GRC_PARAM_DUMP_PBUF, + DBG_GRC_PARAM_DUMP_IOR, + DBG_GRC_PARAM_DUMP_VFC, + DBG_GRC_PARAM_DUMP_CM_CTX, + DBG_GRC_PARAM_DUMP_PXP, + DBG_GRC_PARAM_DUMP_RSS, + DBG_GRC_PARAM_DUMP_CAU, + DBG_GRC_PARAM_DUMP_QM, + DBG_GRC_PARAM_DUMP_MCP, + DBG_GRC_PARAM_RESERVED, + DBG_GRC_PARAM_DUMP_CFC, + DBG_GRC_PARAM_DUMP_IGU, + DBG_GRC_PARAM_DUMP_BRB, + DBG_GRC_PARAM_DUMP_BTB, + DBG_GRC_PARAM_DUMP_BMB, + DBG_GRC_PARAM_DUMP_NIG, + DBG_GRC_PARAM_DUMP_MULD, + DBG_GRC_PARAM_DUMP_PRS, + DBG_GRC_PARAM_DUMP_DMAE, + DBG_GRC_PARAM_DUMP_TM, + DBG_GRC_PARAM_DUMP_SDM, + DBG_GRC_PARAM_DUMP_DIF, + DBG_GRC_PARAM_DUMP_STATIC, + DBG_GRC_PARAM_UNSTALL, + DBG_GRC_PARAM_NUM_LCIDS, + DBG_GRC_PARAM_NUM_LTIDS, DBG_GRC_PARAM_EXCLUDE_ALL, - /* preset: include memories for crash dump (1 only) */ DBG_GRC_PARAM_CRASH, - /* perform dump only if MFW is responding (0/1) */ DBG_GRC_PARAM_PARITY_SAFE, - DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */ - DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */ + DBG_GRC_PARAM_DUMP_CM, + DBG_GRC_PARAM_DUMP_PHY, DBG_GRC_PARAM_NO_MCP, DBG_GRC_PARAM_NO_FW_VER, MAX_DBG_GRC_PARAMS @@ -2347,7 +2360,10 @@ enum dbg_status { DBG_STATUS_REG_FIFO_BAD_DATA, DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, DBG_STATUS_DBG_ARRAY_NOT_SET, - DBG_STATUS_MULTI_BLOCKS_WITH_FILTER, + DBG_STATUS_FILTER_BUG, + DBG_STATUS_NON_MATCHING_LINES, + DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET, + DBG_STATUS_DBG_BUS_IN_USE, MAX_DBG_STATUS }; @@ -2364,25 +2380,22 @@ enum dbg_storms { /* Idle Check data */ struct idle_chk_data { - __le32 buf_size; /* Idle check buffer size in dwords */ - u8 buf_size_set; /* Indicates if the idle check buffer size was set - * (0/1). - */ + __le32 buf_size; + u8 buf_size_set; u8 reserved1; __le16 reserved2; }; /* Debug Tools data (per HW function) */ struct dbg_tools_data { - struct dbg_grc_data grc; /* GRC Dump data */ - struct dbg_bus_data bus; /* Debug Bus data */ - struct idle_chk_data idle_chk; /* Idle Check data */ - u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */ - u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1). 
- */ - u8 chip_id; /* Chip ID (from enum chip_ids) */ - u8 platform_id; /* Platform ID (from enum platform_ids) */ - u8 initialized; /* Indicates if the data was initialized */ + struct dbg_grc_data grc; + struct dbg_bus_data bus; + struct idle_chk_data idle_chk; + u8 mode_enable[40]; + u8 block_in_reset[88]; + u8 chip_id; + u8 platform_id; + u8 initialized; u8 reserved; }; @@ -2464,6 +2477,12 @@ struct init_qm_vport_params { /* Max size in dwords of a zipped array */ #define MAX_ZIPPED_SIZE 8192 +enum chip_ids { + CHIP_BB, + CHIP_K2, + CHIP_RESERVED, + MAX_CHIP_IDS +}; struct fw_asserts_ram_section { __le16 section_ram_line_offset; @@ -2475,18 +2494,18 @@ struct fw_asserts_ram_section { }; struct fw_ver_num { - u8 major; /* Firmware major version number */ - u8 minor; /* Firmware minor version number */ - u8 rev; /* Firmware revision version number */ - u8 eng; /* Firmware engineering version number (for bootleg versions) */ + u8 major; + u8 minor; + u8 rev; + u8 eng; }; struct fw_ver_info { - __le16 tools_ver; /* Tools version number */ - u8 image_id; /* FW image ID (e.g. main) */ + __le16 tools_ver; + u8 image_id; u8 reserved1; - struct fw_ver_num num; /* FW version number */ - __le32 timestamp; /* FW Timestamp in unix time (sec. since 1970) */ + struct fw_ver_num num; + __le32 timestamp; __le32 reserved2; }; @@ -2722,7 +2741,6 @@ struct init_read_op { #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 __le32 expected_val; - }; /* Init operations union */ @@ -2782,6 +2800,7 @@ struct iro { * @param bin_ptr - a pointer to the binary data with debug arrays. */ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr); + /** * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their * default value. @@ -2805,6 +2824,7 @@ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); + /** * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer. * @@ -2824,6 +2844,7 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + /** * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size * for idle check results. @@ -2840,6 +2861,7 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); + /** * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results * into the specified buffer. @@ -2860,6 +2882,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + /** * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size * for mcp trace results. @@ -2878,6 +2901,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); + /** * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results * into the specified buffer. @@ -2902,6 +2926,7 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + /** * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size * for grc trace fifo results. 
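Every dump feature declared in this header follows the same two-phase contract the kernel-doc above describes: query the required size with *_get_dump_buf_size(), allocate, then call the matching *_dump(). A minimal usage sketch for the MCP-trace pair, assuming the caller already holds a PTT window, that sizes are in dwords (as the buf_size_in_dwords parameters suggest), and that vzalloc/vfree are acceptable for the buffer; the wrapper name and errno mapping are illustrative, only the qed_dbg_* calls come from this header.

static int qed_example_mcp_trace(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	u32 size_dwords = 0, dumped_dwords = 0;
	enum dbg_status rc;
	u32 *dump_buf;

	/* Phase 1: ask the debug core how large the dump will be (dwords) */
	rc = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &size_dwords);
	if (rc != DBG_STATUS_OK)
		return -EINVAL;

	dump_buf = vzalloc(size_dwords * sizeof(u32));
	if (!dump_buf)
		return -ENOMEM;

	/* Phase 2: perform the dump into the caller-provided buffer */
	rc = qed_dbg_mcp_trace_dump(p_hwfn, p_ptt, dump_buf,
				    size_dwords, &dumped_dwords);
	if (rc != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn, "mcp trace dump failed: %s\n",
			  qed_dbg_get_status_str(rc));

	/* dump_buf/dumped_dwords would now feed qed_print_mcp_trace_results() */
	vfree(dump_buf);
	return rc == DBG_STATUS_OK ? 0 : -EINVAL;
}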
@@ -2917,6 +2942,7 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); + /** * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into * the specified buffer. @@ -2938,6 +2964,7 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + /** * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size * for the IGU fifo results. @@ -2954,6 +2981,7 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); + /** * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into * the specified buffer. @@ -2975,6 +3003,7 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + /** * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required * buffer size for protection override window results. @@ -3048,6 +3077,29 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + +/** + * @brief qed_dbg_read_attn - Reads the attention registers of the specified + * block and type, and writes the results into the specified buffer. + * + * @param p_hwfn - HW device data + * @param p_ptt - Ptt window used for writing the registers. + * @param block - Block ID. + * @param attn_type - Attention type. + * @param clear_status - Indicates if the attention status should be cleared. + * @param results - OUT: Pointer to write the read results into + * + * @return error if one of the following holds: + * - the version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum block_id block, + enum dbg_attn_type attn_type, + bool clear_status, + struct dbg_attn_block_result *results); + /** * @brief qed_dbg_print_attn - Prints attention registers values in the * specified results struct. @@ -3074,6 +3126,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, * @param bin_ptr - a pointer to the binary data with debug arrays. */ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr); + /** * @brief qed_dbg_get_status_str - Returns a string for the specified status. * @@ -3082,6 +3135,7 @@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr); * @return a string for the specified status */ const char *qed_dbg_get_status_str(enum dbg_status status); + /** * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size * for idle check results (in bytes). @@ -3116,6 +3170,7 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, char *results_buf, u32 *num_errors, u32 *num_warnings); + /** * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size * for MCP Trace results (in bytes). 
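The newly added qed_dbg_read_attn() pairs naturally with the qed_dbg_print_attn() declaration shown above: one reads (and optionally clears) a block's latched attention bits into a dbg_attn_block_result, the other renders that result. A minimal sketch, assuming BLOCK_IGU and ATTN_TYPE_INTERRUPT are representative values from the enums earlier in this header and that a zeroed result struct is an acceptable output buffer; the wrapper itself is illustrative only.

static void qed_example_block_attn(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_attn_block_result results;

	memset(&results, 0, sizeof(results));

	/* Read and clear the interrupt-type attentions of the IGU block */
	if (qed_dbg_read_attn(p_hwfn, p_ptt, BLOCK_IGU, ATTN_TYPE_INTERRUPT,
			      true, &results) != DBG_STATUS_OK)
		return;

	qed_dbg_print_attn(p_hwfn, &results);
}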
@@ -3132,6 +3187,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + /** * @brief qed_print_mcp_trace_results - Prints MCP Trace results * @@ -3146,6 +3202,7 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); + /** * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size * for reg_fifo results (in bytes). @@ -3162,6 +3219,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + /** * @brief qed_print_reg_fifo_results - Prints reg fifo results * @@ -3176,6 +3234,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); + /** * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size * for igu_fifo results (in bytes). @@ -3192,6 +3251,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + /** * @brief qed_print_igu_fifo_results - Prints IGU fifo results * @@ -3206,6 +3266,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); + /** * @brief qed_get_protection_override_results_buf_size - Returns the required * buffer size for protection override results (in bytes). @@ -3223,6 +3284,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + /** * @brief qed_print_protection_override_results - Prints protection override * results. @@ -3238,6 +3300,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); + /** * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size * for FW Asserts results (in bytes). @@ -3254,6 +3317,7 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + /** * @brief qed_print_fw_asserts_results - Prints FW Asserts results * @@ -3268,6 +3332,283 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); + +/** + * @brief qed_dbg_parse_attn - Parses and prints attention registers values in + * the specified results struct. + * + * @param p_hwfn - HW device data + * @param results - Pointer to the attention read results + * + * @return error if one of the following holds: + * - the version wasn't set + * Otherwise, returns ok. 
+ */ +enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results); + +/* Debug Bus blocks */ +static const u32 dbg_bus_blocks[] = { + 0x0000000f, /* grc, bb, 15 lines */ + 0x0000000f, /* grc, k2, 15 lines */ + 0x00000000, + 0x00000000, /* miscs, bb, 0 lines */ + 0x00000000, /* miscs, k2, 0 lines */ + 0x00000000, + 0x00000000, /* misc, bb, 0 lines */ + 0x00000000, /* misc, k2, 0 lines */ + 0x00000000, + 0x00000000, /* dbu, bb, 0 lines */ + 0x00000000, /* dbu, k2, 0 lines */ + 0x00000000, + 0x000f0127, /* pglue_b, bb, 39 lines */ + 0x0036012a, /* pglue_b, k2, 42 lines */ + 0x00000000, + 0x00000000, /* cnig, bb, 0 lines */ + 0x00120102, /* cnig, k2, 2 lines */ + 0x00000000, + 0x00000000, /* cpmu, bb, 0 lines */ + 0x00000000, /* cpmu, k2, 0 lines */ + 0x00000000, + 0x00000001, /* ncsi, bb, 1 lines */ + 0x00000001, /* ncsi, k2, 1 lines */ + 0x00000000, + 0x00000000, /* opte, bb, 0 lines */ + 0x00000000, /* opte, k2, 0 lines */ + 0x00000000, + 0x00600085, /* bmb, bb, 133 lines */ + 0x00600085, /* bmb, k2, 133 lines */ + 0x00000000, + 0x00000000, /* pcie, bb, 0 lines */ + 0x00e50033, /* pcie, k2, 51 lines */ + 0x00000000, + 0x00000000, /* mcp, bb, 0 lines */ + 0x00000000, /* mcp, k2, 0 lines */ + 0x00000000, + 0x01180009, /* mcp2, bb, 9 lines */ + 0x01180009, /* mcp2, k2, 9 lines */ + 0x00000000, + 0x01210104, /* pswhst, bb, 4 lines */ + 0x01210104, /* pswhst, k2, 4 lines */ + 0x00000000, + 0x01250103, /* pswhst2, bb, 3 lines */ + 0x01250103, /* pswhst2, k2, 3 lines */ + 0x00000000, + 0x00340101, /* pswrd, bb, 1 lines */ + 0x00340101, /* pswrd, k2, 1 lines */ + 0x00000000, + 0x01280119, /* pswrd2, bb, 25 lines */ + 0x01280119, /* pswrd2, k2, 25 lines */ + 0x00000000, + 0x01410109, /* pswwr, bb, 9 lines */ + 0x01410109, /* pswwr, k2, 9 lines */ + 0x00000000, + 0x00000000, /* pswwr2, bb, 0 lines */ + 0x00000000, /* pswwr2, k2, 0 lines */ + 0x00000000, + 0x001c0001, /* pswrq, bb, 1 lines */ + 0x001c0001, /* pswrq, k2, 1 lines */ + 0x00000000, + 0x014a0015, /* pswrq2, bb, 21 lines */ + 0x014a0015, /* pswrq2, k2, 21 lines */ + 0x00000000, + 0x00000000, /* pglcs, bb, 0 lines */ + 0x00120006, /* pglcs, k2, 6 lines */ + 0x00000000, + 0x00100001, /* dmae, bb, 1 lines */ + 0x00100001, /* dmae, k2, 1 lines */ + 0x00000000, + 0x015f0105, /* ptu, bb, 5 lines */ + 0x015f0105, /* ptu, k2, 5 lines */ + 0x00000000, + 0x01640120, /* tcm, bb, 32 lines */ + 0x01640120, /* tcm, k2, 32 lines */ + 0x00000000, + 0x01640120, /* mcm, bb, 32 lines */ + 0x01640120, /* mcm, k2, 32 lines */ + 0x00000000, + 0x01640120, /* ucm, bb, 32 lines */ + 0x01640120, /* ucm, k2, 32 lines */ + 0x00000000, + 0x01640120, /* xcm, bb, 32 lines */ + 0x01640120, /* xcm, k2, 32 lines */ + 0x00000000, + 0x01640120, /* ycm, bb, 32 lines */ + 0x01640120, /* ycm, k2, 32 lines */ + 0x00000000, + 0x01640120, /* pcm, bb, 32 lines */ + 0x01640120, /* pcm, k2, 32 lines */ + 0x00000000, + 0x01840062, /* qm, bb, 98 lines */ + 0x01840062, /* qm, k2, 98 lines */ + 0x00000000, + 0x01e60021, /* tm, bb, 33 lines */ + 0x01e60021, /* tm, k2, 33 lines */ + 0x00000000, + 0x02070107, /* dorq, bb, 7 lines */ + 0x02070107, /* dorq, k2, 7 lines */ + 0x00000000, + 0x00600185, /* brb, bb, 133 lines */ + 0x00600185, /* brb, k2, 133 lines */ + 0x00000000, + 0x020e0019, /* src, bb, 25 lines */ + 0x020c001a, /* src, k2, 26 lines */ + 0x00000000, + 0x02270104, /* prs, bb, 4 lines */ + 0x02270104, /* prs, k2, 4 lines */ + 0x00000000, + 0x022b0133, /* tsdm, bb, 51 lines */ + 0x022b0133, /* tsdm, k2, 51 lines */ + 0x00000000, + 
0x022b0133, /* msdm, bb, 51 lines */ + 0x022b0133, /* msdm, k2, 51 lines */ + 0x00000000, + 0x022b0133, /* usdm, bb, 51 lines */ + 0x022b0133, /* usdm, k2, 51 lines */ + 0x00000000, + 0x022b0133, /* xsdm, bb, 51 lines */ + 0x022b0133, /* xsdm, k2, 51 lines */ + 0x00000000, + 0x022b0133, /* ysdm, bb, 51 lines */ + 0x022b0133, /* ysdm, k2, 51 lines */ + 0x00000000, + 0x022b0133, /* psdm, bb, 51 lines */ + 0x022b0133, /* psdm, k2, 51 lines */ + 0x00000000, + 0x025e010c, /* tsem, bb, 12 lines */ + 0x025e010c, /* tsem, k2, 12 lines */ + 0x00000000, + 0x025e010c, /* msem, bb, 12 lines */ + 0x025e010c, /* msem, k2, 12 lines */ + 0x00000000, + 0x025e010c, /* usem, bb, 12 lines */ + 0x025e010c, /* usem, k2, 12 lines */ + 0x00000000, + 0x025e010c, /* xsem, bb, 12 lines */ + 0x025e010c, /* xsem, k2, 12 lines */ + 0x00000000, + 0x025e010c, /* ysem, bb, 12 lines */ + 0x025e010c, /* ysem, k2, 12 lines */ + 0x00000000, + 0x025e010c, /* psem, bb, 12 lines */ + 0x025e010c, /* psem, k2, 12 lines */ + 0x00000000, + 0x026a000d, /* rss, bb, 13 lines */ + 0x026a000d, /* rss, k2, 13 lines */ + 0x00000000, + 0x02770106, /* tmld, bb, 6 lines */ + 0x02770106, /* tmld, k2, 6 lines */ + 0x00000000, + 0x027d0106, /* muld, bb, 6 lines */ + 0x027d0106, /* muld, k2, 6 lines */ + 0x00000000, + 0x02770005, /* yuld, bb, 5 lines */ + 0x02770005, /* yuld, k2, 5 lines */ + 0x00000000, + 0x02830107, /* xyld, bb, 7 lines */ + 0x027d0107, /* xyld, k2, 7 lines */ + 0x00000000, + 0x00000000, /* ptld, bb, 0 lines */ + 0x00000000, /* ptld, k2, 0 lines */ + 0x00000000, + 0x00000000, /* ypld, bb, 0 lines */ + 0x00000000, /* ypld, k2, 0 lines */ + 0x00000000, + 0x028a010e, /* prm, bb, 14 lines */ + 0x02980110, /* prm, k2, 16 lines */ + 0x00000000, + 0x02a8000d, /* pbf_pb1, bb, 13 lines */ + 0x02a8000d, /* pbf_pb1, k2, 13 lines */ + 0x00000000, + 0x02a8000d, /* pbf_pb2, bb, 13 lines */ + 0x02a8000d, /* pbf_pb2, k2, 13 lines */ + 0x00000000, + 0x02a8000d, /* rpb, bb, 13 lines */ + 0x02a8000d, /* rpb, k2, 13 lines */ + 0x00000000, + 0x00600185, /* btb, bb, 133 lines */ + 0x00600185, /* btb, k2, 133 lines */ + 0x00000000, + 0x02b50117, /* pbf, bb, 23 lines */ + 0x02b50117, /* pbf, k2, 23 lines */ + 0x00000000, + 0x02cc0006, /* rdif, bb, 6 lines */ + 0x02cc0006, /* rdif, k2, 6 lines */ + 0x00000000, + 0x02d20006, /* tdif, bb, 6 lines */ + 0x02d20006, /* tdif, k2, 6 lines */ + 0x00000000, + 0x02d80003, /* cdu, bb, 3 lines */ + 0x02db000e, /* cdu, k2, 14 lines */ + 0x00000000, + 0x02e9010d, /* ccfc, bb, 13 lines */ + 0x02f60117, /* ccfc, k2, 23 lines */ + 0x00000000, + 0x02e9010d, /* tcfc, bb, 13 lines */ + 0x02f60117, /* tcfc, k2, 23 lines */ + 0x00000000, + 0x030d0133, /* igu, bb, 51 lines */ + 0x030d0133, /* igu, k2, 51 lines */ + 0x00000000, + 0x03400106, /* cau, bb, 6 lines */ + 0x03400106, /* cau, k2, 6 lines */ + 0x00000000, + 0x00000000, /* rgfs, bb, 0 lines */ + 0x00000000, /* rgfs, k2, 0 lines */ + 0x00000000, + 0x00000000, /* rgsrc, bb, 0 lines */ + 0x00000000, /* rgsrc, k2, 0 lines */ + 0x00000000, + 0x00000000, /* tgfs, bb, 0 lines */ + 0x00000000, /* tgfs, k2, 0 lines */ + 0x00000000, + 0x00000000, /* tgsrc, bb, 0 lines */ + 0x00000000, /* tgsrc, k2, 0 lines */ + 0x00000000, + 0x00000000, /* umac, bb, 0 lines */ + 0x00120006, /* umac, k2, 6 lines */ + 0x00000000, + 0x00000000, /* xmac, bb, 0 lines */ + 0x00000000, /* xmac, k2, 0 lines */ + 0x00000000, + 0x00000000, /* dbg, bb, 0 lines */ + 0x00000000, /* dbg, k2, 0 lines */ + 0x00000000, + 0x0346012b, /* nig, bb, 43 lines */ + 0x0346011d, /* nig, k2, 29 lines */ + 0x00000000, + 
0x00000000, /* wol, bb, 0 lines */ + 0x001c0002, /* wol, k2, 2 lines */ + 0x00000000, + 0x00000000, /* bmbn, bb, 0 lines */ + 0x00210008, /* bmbn, k2, 8 lines */ + 0x00000000, + 0x00000000, /* ipc, bb, 0 lines */ + 0x00000000, /* ipc, k2, 0 lines */ + 0x00000000, + 0x00000000, /* nwm, bb, 0 lines */ + 0x0371000b, /* nwm, k2, 11 lines */ + 0x00000000, + 0x00000000, /* nws, bb, 0 lines */ + 0x037c0009, /* nws, k2, 9 lines */ + 0x00000000, + 0x00000000, /* ms, bb, 0 lines */ + 0x00120004, /* ms, k2, 4 lines */ + 0x00000000, + 0x00000000, /* phy_pcie, bb, 0 lines */ + 0x00e5001a, /* phy_pcie, k2, 26 lines */ + 0x00000000, + 0x00000000, /* led, bb, 0 lines */ + 0x00000000, /* led, k2, 0 lines */ + 0x00000000, + 0x00000000, /* avs_wrap, bb, 0 lines */ + 0x00000000, /* avs_wrap, k2, 0 lines */ + 0x00000000, + 0x00000000, /* bar0_map, bb, 0 lines */ + 0x00000000, /* bar0_map, k2, 0 lines */ + 0x00000000, +}; + /* Win 2 */ #define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL @@ -3589,37 +3930,37 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ (IRO[44].base + ((pf_id) * IRO[44].m1)) -static const struct iro iro_arr[47] = { +static const struct iro iro_arr[49] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb0, 0x80, 0x0, 0x0, 0x80}, - {0x6318, 0x20, 0x0, 0x0, 0x20}, + {0x6518, 0x20, 0x0, 0x0, 0x20}, {0xb00, 0x8, 0x0, 0x0, 0x4}, {0xa80, 0x8, 0x0, 0x0, 0x4}, {0x0, 0x8, 0x0, 0x0, 0x2}, {0x80, 0x8, 0x0, 0x0, 0x4}, {0x84, 0x8, 0x0, 0x0, 0x2}, - {0x4bc0, 0x0, 0x0, 0x0, 0x78}, + {0x4c40, 0x0, 0x0, 0x0, 0x78}, {0x3df0, 0x0, 0x0, 0x0, 0x78}, {0x29b0, 0x0, 0x0, 0x0, 0x78}, {0x4c38, 0x0, 0x0, 0x0, 0x78}, {0x4990, 0x0, 0x0, 0x0, 0x78}, - {0x7e48, 0x0, 0x0, 0x0, 0x78}, + {0x7f48, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, - {0x60f8, 0x10, 0x0, 0x0, 0x10}, - {0xb820, 0x30, 0x0, 0x0, 0x30}, + {0x61f8, 0x10, 0x0, 0x0, 0x10}, + {0xbd20, 0x30, 0x0, 0x0, 0x30}, {0x95b8, 0x30, 0x0, 0x0, 0x30}, {0x4b60, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0x53a0, 0x80, 0x4, 0x0, 0x4}, - {0xc8f0, 0x0, 0x0, 0x0, 0x4}, + {0xc7c8, 0x0, 0x0, 0x0, 0x4}, {0x4ba0, 0x80, 0x0, 0x0, 0x20}, - {0x8050, 0x40, 0x0, 0x0, 0x30}, - {0xe770, 0x60, 0x0, 0x0, 0x60}, + {0x8150, 0x40, 0x0, 0x0, 0x30}, + {0xec70, 0x60, 0x0, 0x0, 0x60}, {0x2b48, 0x80, 0x0, 0x0, 0x38}, - {0xf188, 0x78, 0x0, 0x0, 0x78}, + {0xf1b0, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, - {0xacf0, 0x0, 0x0, 0x0, 0xf0}, - {0xade0, 0x8, 0x0, 0x0, 0x8}, + {0xaef8, 0x0, 0x0, 0x0, 0xf0}, + {0xafe8, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8}, @@ -3627,16 +3968,18 @@ static const struct iro iro_arr[47] = { {0x0, 0x8, 0x0, 0x0, 0x8}, {0x200, 0x10, 0x8, 0x0, 0x8}, {0xb78, 0x10, 0x8, 0x0, 0x2}, - {0xd888, 0x38, 0x0, 0x0, 0x24}, - {0x12c38, 0x10, 0x0, 0x0, 0x8}, - {0x11aa0, 0x38, 0x0, 0x0, 0x18}, - {0xa8c0, 0x38, 0x0, 0x0, 0x10}, + {0xd9a8, 0x38, 0x0, 0x0, 0x24}, + {0x12988, 0x10, 0x0, 0x0, 0x8}, + {0x11fa0, 0x38, 0x0, 0x0, 0x18}, + {0xa580, 0x38, 0x0, 0x0, 0x10}, {0x86f8, 0x30, 0x0, 0x0, 0x18}, {0x101f8, 0x10, 0x0, 0x0, 0x10}, - {0xdd08, 0x48, 0x0, 0x0, 0x38}, + {0xde28, 0x48, 0x0, 0x0, 0x38}, {0x10660, 0x20, 0x0, 0x0, 0x20}, {0x2b80, 0x80, 0x0, 0x0, 0x10}, {0x5020, 0x10, 0x0, 0x0, 0x10}, + {0xc9b0, 0x30, 0x0, 0x0, 0x10}, + {0xeec0, 0x10, 0x0, 0x0, 0x10}, }; /* Runtime array offsets */ @@ -3724,361 +4067,359 @@ static const struct iro iro_arr[47] = { #define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697 #define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698 #define 
PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699 -#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700 -#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702 #define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704 -#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705 -#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702 +#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703 +#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714 #define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132 -#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649 -#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658 -#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659 -#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676 -#define 
QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682 -#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683 -#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684 -#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685 -#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686 -#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687 -#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688 -#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689 -#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690 -#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691 -#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692 -#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693 -#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694 -#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695 -#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696 -#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697 -#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698 -#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699 -#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700 -#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701 -#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702 -#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703 -#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704 -#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705 -#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706 -#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707 -#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708 -#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709 -#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710 -#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772 
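/* This renumbering is mechanical: dropping the two PSWRQ2 WR_MBS0/RD_MBS0
 * runtime entries shifts later offsets by -2, and growing
 * TM_REG_CONFIG_TASK_MEM_RT_SIZE from 512 to 608 adds +96, so every
 * QM_REG_* offset in this region moves by a net +94 (e.g. 29644 -> 29738).
 */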
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805 #define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_VOQCRDLINE_RT_OFFSET 29839 -#define QM_REG_VOQCRDLINE_RT_SIZE 20 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859 -#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895 -#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905 -#define QM_REG_PQTX2PF_0_RT_OFFSET 29906 -#define QM_REG_PQTX2PF_1_RT_OFFSET 29907 -#define QM_REG_PQTX2PF_2_RT_OFFSET 29908 -#define QM_REG_PQTX2PF_3_RT_OFFSET 29909 -#define QM_REG_PQTX2PF_4_RT_OFFSET 29910 -#define QM_REG_PQTX2PF_5_RT_OFFSET 29911 -#define QM_REG_PQTX2PF_6_RT_OFFSET 29912 -#define QM_REG_PQTX2PF_7_RT_OFFSET 29913 -#define 
QM_REG_PQTX2PF_8_RT_OFFSET 29914 -#define QM_REG_PQTX2PF_9_RT_OFFSET 29915 -#define QM_REG_PQTX2PF_10_RT_OFFSET 29916 -#define QM_REG_PQTX2PF_11_RT_OFFSET 29917 -#define QM_REG_PQTX2PF_12_RT_OFFSET 29918 -#define QM_REG_PQTX2PF_13_RT_OFFSET 29919 -#define QM_REG_PQTX2PF_14_RT_OFFSET 29920 -#define QM_REG_PQTX2PF_15_RT_OFFSET 29921 -#define QM_REG_PQTX2PF_16_RT_OFFSET 29922 -#define QM_REG_PQTX2PF_17_RT_OFFSET 29923 -#define QM_REG_PQTX2PF_18_RT_OFFSET 29924 -#define QM_REG_PQTX2PF_19_RT_OFFSET 29925 -#define QM_REG_PQTX2PF_20_RT_OFFSET 29926 -#define QM_REG_PQTX2PF_21_RT_OFFSET 29927 -#define QM_REG_PQTX2PF_22_RT_OFFSET 29928 -#define QM_REG_PQTX2PF_23_RT_OFFSET 29929 -#define QM_REG_PQTX2PF_24_RT_OFFSET 29930 -#define QM_REG_PQTX2PF_25_RT_OFFSET 29931 -#define QM_REG_PQTX2PF_26_RT_OFFSET 29932 -#define QM_REG_PQTX2PF_27_RT_OFFSET 29933 -#define QM_REG_PQTX2PF_28_RT_OFFSET 29934 -#define QM_REG_PQTX2PF_29_RT_OFFSET 29935 -#define QM_REG_PQTX2PF_30_RT_OFFSET 29936 -#define QM_REG_PQTX2PF_31_RT_OFFSET 29937 -#define QM_REG_PQTX2PF_32_RT_OFFSET 29938 -#define QM_REG_PQTX2PF_33_RT_OFFSET 29939 -#define QM_REG_PQTX2PF_34_RT_OFFSET 29940 -#define QM_REG_PQTX2PF_35_RT_OFFSET 29941 -#define QM_REG_PQTX2PF_36_RT_OFFSET 29942 -#define QM_REG_PQTX2PF_37_RT_OFFSET 29943 -#define QM_REG_PQTX2PF_38_RT_OFFSET 29944 -#define QM_REG_PQTX2PF_39_RT_OFFSET 29945 -#define QM_REG_PQTX2PF_40_RT_OFFSET 29946 -#define QM_REG_PQTX2PF_41_RT_OFFSET 29947 -#define QM_REG_PQTX2PF_42_RT_OFFSET 29948 -#define QM_REG_PQTX2PF_43_RT_OFFSET 29949 -#define QM_REG_PQTX2PF_44_RT_OFFSET 29950 -#define QM_REG_PQTX2PF_45_RT_OFFSET 29951 -#define QM_REG_PQTX2PF_46_RT_OFFSET 29952 -#define QM_REG_PQTX2PF_47_RT_OFFSET 29953 -#define QM_REG_PQTX2PF_48_RT_OFFSET 29954 -#define QM_REG_PQTX2PF_49_RT_OFFSET 29955 -#define QM_REG_PQTX2PF_50_RT_OFFSET 29956 -#define QM_REG_PQTX2PF_51_RT_OFFSET 29957 -#define QM_REG_PQTX2PF_52_RT_OFFSET 29958 -#define QM_REG_PQTX2PF_53_RT_OFFSET 29959 -#define QM_REG_PQTX2PF_54_RT_OFFSET 29960 -#define QM_REG_PQTX2PF_55_RT_OFFSET 29961 -#define QM_REG_PQTX2PF_56_RT_OFFSET 29962 -#define QM_REG_PQTX2PF_57_RT_OFFSET 29963 -#define QM_REG_PQTX2PF_58_RT_OFFSET 29964 -#define QM_REG_PQTX2PF_59_RT_OFFSET 29965 -#define QM_REG_PQTX2PF_60_RT_OFFSET 29966 -#define QM_REG_PQTX2PF_61_RT_OFFSET 29967 -#define QM_REG_PQTX2PF_62_RT_OFFSET 29968 -#define QM_REG_PQTX2PF_63_RT_OFFSET 29969 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976 -#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993 -#define 
QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29960 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29961 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29962 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29963 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29964 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29965 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29966 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29967 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29968 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29969 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29970 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29971 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29972 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29973 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29974 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29975 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29976 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29977 +#define QM_REG_PQTX2PF_18_RT_OFFSET 29978 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29979 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29980 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29981 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29982 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29983 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29984 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29985 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29986 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29987 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29988 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29989 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29990 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29991 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29992 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29993 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29994 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29995 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29996 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29997 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29998 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29999 +#define QM_REG_PQTX2PF_40_RT_OFFSET 30000 +#define QM_REG_PQTX2PF_41_RT_OFFSET 30001 +#define QM_REG_PQTX2PF_42_RT_OFFSET 30002 +#define QM_REG_PQTX2PF_43_RT_OFFSET 30003 +#define QM_REG_PQTX2PF_44_RT_OFFSET 30004 +#define QM_REG_PQTX2PF_45_RT_OFFSET 30005 +#define 
QM_REG_PQTX2PF_46_RT_OFFSET 30006 +#define QM_REG_PQTX2PF_47_RT_OFFSET 30007 +#define QM_REG_PQTX2PF_48_RT_OFFSET 30008 +#define QM_REG_PQTX2PF_49_RT_OFFSET 30009 +#define QM_REG_PQTX2PF_50_RT_OFFSET 30010 +#define QM_REG_PQTX2PF_51_RT_OFFSET 30011 +#define QM_REG_PQTX2PF_52_RT_OFFSET 30012 +#define QM_REG_PQTX2PF_53_RT_OFFSET 30013 +#define QM_REG_PQTX2PF_54_RT_OFFSET 30014 +#define QM_REG_PQTX2PF_55_RT_OFFSET 30015 +#define QM_REG_PQTX2PF_56_RT_OFFSET 30016 +#define QM_REG_PQTX2PF_57_RT_OFFSET 30017 +#define QM_REG_PQTX2PF_58_RT_OFFSET 30018 +#define QM_REG_PQTX2PF_59_RT_OFFSET 30019 +#define QM_REG_PQTX2PF_60_RT_OFFSET 30020 +#define QM_REG_PQTX2PF_61_RT_OFFSET 30021 +#define QM_REG_PQTX2PF_62_RT_OFFSET 30022 +#define QM_REG_PQTX2PF_63_RT_OFFSET 30023 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052 #define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308 #define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 30510 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30564 #define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766 -#define QM_REG_RLPFPERIOD_RT_OFFSET 30767 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768 -#define QM_REG_RLPFINCVAL_RT_OFFSET 30769 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30821 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30823 #define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839 #define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 30801 +#define QM_REG_RLPFCRD_RT_OFFSET 30855 #define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 30817 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819 +#define QM_REG_RLPFENABLE_RT_OFFSET 30871 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873 #define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889 #define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define 
QM_REG_WFQPFCRD_RT_OFFSET 30851 -#define QM_REG_WFQPFCRD_RT_SIZE 160 -#define QM_REG_WFQPFENABLE_RT_OFFSET 31011 -#define QM_REG_WFQVPENABLE_RT_OFFSET 31012 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013 +#define QM_REG_WFQPFCRD_RT_OFFSET 30905 +#define QM_REG_WFQPFCRD_RT_SIZE 256 +#define QM_REG_WFQPFENABLE_RT_OFFSET 31161 +#define QM_REG_WFQVPENABLE_RT_OFFSET 31162 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163 #define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 31525 +#define QM_REG_TXPQMAP_RT_OFFSET 31675 #define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32549 +#define QM_REG_WFQVPCRD_RT_OFFSET 32699 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 33061 +#define QM_REG_WFQVPMAP_RT_OFFSET 33211 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573 -#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737 -#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320 +#define QM_REG_VOQCRDLINE_RT_OFFSET 34043 +#define QM_REG_VOQCRDLINE_RT_SIZE 36 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 36 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119 +#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126 #define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753 +#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851 -#define 
CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914 -#define 
PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926 - -#define RUNTIME_ARRAY_SIZE 33927 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231 +#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281 +#define 
PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308 + +#define RUNTIME_ARRAY_SIZE 34309 /* The eth storm context for the Tstorm */ struct tstorm_eth_conn_st_ctx { @@ -4307,7 +4648,7 @@ struct xstorm_eth_conn_ag_ctx { #define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 quota; + __le16 ereserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -4340,7 +4681,7 @@ struct xstorm_eth_conn_ag_ctx { u8 byte13; u8 byte14; u8 byte15; - u8 byte16; + u8 ereserved; __le16 word11; __le32 reg10; __le32 reg11; @@ -4627,6 +4968,7 @@ enum eth_error_code { ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC, ETH_FILTERS_VNI_ADD_FAIL_FULL, ETH_FILTERS_VNI_ADD_FAIL_DUP, + ETH_FILTERS_GFT_UPDATE_FAIL, MAX_ETH_ERROR_CODE }; @@ -4879,6 +5221,39 @@ enum gft_logic_filter_type { MAX_GFT_LOGIC_FILTER_TYPE }; +struct rx_add_openflow_filter_data { + __le16 action_icid; + u8 priority; + u8 reserved0; + __le32 tenant_id; + __le16 dst_mac_hi; + __le16 dst_mac_mid; + __le16 dst_mac_lo; + __le16 src_mac_hi; + __le16 src_mac_mid; + __le16 src_mac_lo; + __le16 vlan_id; + __le16 l2_eth_type; + u8 ipv4_dscp; + u8 ipv4_frag_type; + u8 ipv4_over_ip; + u8 tenant_id_exists; + __le32 ipv4_dst_addr; + __le32 ipv4_src_addr; + __le16 l4_dst_port; + __le16 l4_src_port; +}; + +struct rx_create_gft_action_data { + u8 vport_id; + u8 reserved[7]; +}; + +struct rx_create_openflow_action_data { + u8 vport_id; + u8 reserved[7]; +}; + /* Ramrod data for rx queue start ramrod */ struct rx_queue_start_ramrod_data { __le16 rx_queue_id; @@ -4956,7 +5331,7 @@ struct rx_update_gft_filter_data { u8 vport_id; u8 filter_type; u8 filter_action; - u8 reserved; + u8 assert_on_error; }; /* Ramrod data for rx queue start ramrod */ @@ -5102,203 +5477,6 @@ struct vport_update_ramrod_data { struct eth_vport_rss_config rss_config; }; -struct gft_cam_line { - __le32 camline; -#define GFT_CAM_LINE_VALID_MASK 0x1 -#define GFT_CAM_LINE_VALID_SHIFT 0 -#define GFT_CAM_LINE_DATA_MASK 0x3FFF -#define GFT_CAM_LINE_DATA_SHIFT 1 -#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF -#define 
GFT_CAM_LINE_MASK_BITS_SHIFT 15 -#define GFT_CAM_LINE_RESERVED1_MASK 0x7 -#define GFT_CAM_LINE_RESERVED1_SHIFT 29 -}; - -struct gft_cam_line_mapped { - __le32 camline; -#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 -#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0 -#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1 -#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1 -#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1 -#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2 -#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF -#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3 -#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF -#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7 -#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF -#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11 -#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1 -#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15 -#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1 -#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16 -#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF -#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17 -#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF -#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21 -#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF -#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25 -#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7 -#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 -}; - -union gft_cam_line_union { - struct gft_cam_line cam_line; - struct gft_cam_line_mapped cam_line_mapped; -}; - -enum gft_profile_ip_version { - GFT_PROFILE_IPV4 = 0, - GFT_PROFILE_IPV6 = 1, - MAX_GFT_PROFILE_IP_VERSION -}; - -enum gft_profile_upper_protocol_type { - GFT_PROFILE_ROCE_PROTOCOL = 0, - GFT_PROFILE_RROCE_PROTOCOL = 1, - GFT_PROFILE_FCOE_PROTOCOL = 2, - GFT_PROFILE_ICMP_PROTOCOL = 3, - GFT_PROFILE_ARP_PROTOCOL = 4, - GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5, - GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6, - GFT_PROFILE_TCP_PROTOCOL = 7, - GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8, - GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9, - GFT_PROFILE_UDP_PROTOCOL = 10, - GFT_PROFILE_USER_IP_1_INNER = 11, - GFT_PROFILE_USER_IP_2_OUTER = 12, - GFT_PROFILE_USER_ETH_1_INNER = 13, - GFT_PROFILE_USER_ETH_2_OUTER = 14, - GFT_PROFILE_RAW = 15, - MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE -}; - -struct gft_ram_line { - __le32 low32bits; -#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3 -#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0 -#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2 -#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3 -#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4 -#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5 -#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6 -#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7 -#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8 -#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9 -#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10 -#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11 -#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12 -#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1 -#define 
GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13 -#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14 -#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15 -#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16 -#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1 -#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17 -#define GFT_RAM_LINE_TTL_MASK 0x1 -#define GFT_RAM_LINE_TTL_SHIFT 18 -#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1 -#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19 -#define GFT_RAM_LINE_RESERVED0_MASK 0x1 -#define GFT_RAM_LINE_RESERVED0_SHIFT 20 -#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21 -#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22 -#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23 -#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24 -#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25 -#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26 -#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27 -#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28 -#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29 -#define GFT_RAM_LINE_DST_PORT_MASK 0x1 -#define GFT_RAM_LINE_DST_PORT_SHIFT 30 -#define GFT_RAM_LINE_SRC_PORT_MASK 0x1 -#define GFT_RAM_LINE_SRC_PORT_SHIFT 31 - __le32 high32bits; -#define GFT_RAM_LINE_DSCP_MASK 0x1 -#define GFT_RAM_LINE_DSCP_SHIFT 0 -#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1 -#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1 -#define GFT_RAM_LINE_DST_IP_MASK 0x1 -#define GFT_RAM_LINE_DST_IP_SHIFT 2 -#define GFT_RAM_LINE_SRC_IP_MASK 0x1 -#define GFT_RAM_LINE_SRC_IP_SHIFT 3 -#define GFT_RAM_LINE_PRIORITY_MASK 0x1 -#define GFT_RAM_LINE_PRIORITY_SHIFT 4 -#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1 -#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5 -#define GFT_RAM_LINE_VLAN_MASK 0x1 -#define GFT_RAM_LINE_VLAN_SHIFT 6 -#define GFT_RAM_LINE_DST_MAC_MASK 0x1 -#define GFT_RAM_LINE_DST_MAC_SHIFT 7 -#define GFT_RAM_LINE_SRC_MAC_MASK 0x1 -#define GFT_RAM_LINE_SRC_MAC_SHIFT 8 -#define GFT_RAM_LINE_TENANT_ID_MASK 0x1 -#define GFT_RAM_LINE_TENANT_ID_SHIFT 9 -#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF -#define GFT_RAM_LINE_RESERVED1_SHIFT 10 -}; - -struct mstorm_eth_conn_ag_ctx { - u8 byte0; - u8 byte1; - u8 flags0; -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define 
MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0; - __le16 word1; - __le32 reg0; - __le32 reg1; -}; - struct xstorm_eth_conn_agctxdq_ext_ldpart { u8 reserved0; u8 eth_state; @@ -5511,7 +5689,7 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart { #define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 quota; + __le16 ereserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -5528,6 +5706,43 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart { __le32 reg4; }; +struct mstorm_eth_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + struct xstorm_eth_hw_conn_ag_ctx { u8 reserved0; u8 eth_state; @@ -5740,7 +5955,7 @@ struct xstorm_eth_hw_conn_ag_ctx { #define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 quota; + __le16 ereserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -5748,6 +5963,200 @@ struct xstorm_eth_hw_conn_ag_ctx { __le16 conn_dpi; }; +struct gft_cam_line { + __le32 camline; +#define GFT_CAM_LINE_VALID_MASK 0x1 +#define GFT_CAM_LINE_VALID_SHIFT 0 +#define GFT_CAM_LINE_DATA_MASK 0x3FFF +#define GFT_CAM_LINE_DATA_SHIFT 1 +#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF +#define GFT_CAM_LINE_MASK_BITS_SHIFT 15 +#define GFT_CAM_LINE_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_RESERVED1_SHIFT 29 +}; + +struct gft_cam_line_mapped { + __le32 camline; +#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2 +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3 +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11 +#define 
GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16 +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17 +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25 +#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 +}; + +union gft_cam_line_union { + struct gft_cam_line cam_line; + struct gft_cam_line_mapped cam_line_mapped; +}; + +enum gft_profile_ip_version { + GFT_PROFILE_IPV4 = 0, + GFT_PROFILE_IPV6 = 1, + MAX_GFT_PROFILE_IP_VERSION +}; + +struct gft_profile_key { + __le16 profile_key; +#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1 +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2 +#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6 +#define GFT_PROFILE_KEY_PF_ID_MASK 0xF +#define GFT_PROFILE_KEY_PF_ID_SHIFT 10 +#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3 +#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14 +}; + +enum gft_profile_tunnel_type { + GFT_PROFILE_NO_TUNNEL = 0, + GFT_PROFILE_VXLAN_TUNNEL = 1, + GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2, + GFT_PROFILE_GRE_IP_TUNNEL = 3, + GFT_PROFILE_GENEVE_MAC_TUNNEL = 4, + GFT_PROFILE_GENEVE_IP_TUNNEL = 5, + MAX_GFT_PROFILE_TUNNEL_TYPE +}; + +enum gft_profile_upper_protocol_type { + GFT_PROFILE_ROCE_PROTOCOL = 0, + GFT_PROFILE_RROCE_PROTOCOL = 1, + GFT_PROFILE_FCOE_PROTOCOL = 2, + GFT_PROFILE_ICMP_PROTOCOL = 3, + GFT_PROFILE_ARP_PROTOCOL = 4, + GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5, + GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6, + GFT_PROFILE_TCP_PROTOCOL = 7, + GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8, + GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9, + GFT_PROFILE_UDP_PROTOCOL = 10, + GFT_PROFILE_USER_IP_1_INNER = 11, + GFT_PROFILE_USER_IP_2_OUTER = 12, + GFT_PROFILE_USER_ETH_1_INNER = 13, + GFT_PROFILE_USER_ETH_2_OUTER = 14, + GFT_PROFILE_RAW = 15, + MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE +}; + +struct gft_ram_line { + __le32 lo; +#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3 +#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3 +#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7 +#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9 +#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1 +#define 
GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13 +#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17 +#define GFT_RAM_LINE_TTL_MASK 0x1 +#define GFT_RAM_LINE_TTL_SHIFT 18 +#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19 +#define GFT_RAM_LINE_RESERVED0_MASK 0x1 +#define GFT_RAM_LINE_RESERVED0_SHIFT 20 +#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21 +#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22 +#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23 +#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24 +#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25 +#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26 +#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27 +#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28 +#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29 +#define GFT_RAM_LINE_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_DST_PORT_SHIFT 30 +#define GFT_RAM_LINE_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_SRC_PORT_SHIFT 31 + __le32 hi; +#define GFT_RAM_LINE_DSCP_MASK 0x1 +#define GFT_RAM_LINE_DSCP_SHIFT 0 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1 +#define GFT_RAM_LINE_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_DST_IP_SHIFT 2 +#define GFT_RAM_LINE_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_SRC_IP_SHIFT 3 +#define GFT_RAM_LINE_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_PRIORITY_SHIFT 4 +#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5 +#define GFT_RAM_LINE_VLAN_MASK 0x1 +#define GFT_RAM_LINE_VLAN_SHIFT 6 +#define GFT_RAM_LINE_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_DST_MAC_SHIFT 7 +#define GFT_RAM_LINE_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_SRC_MAC_SHIFT 8 +#define GFT_RAM_LINE_TENANT_ID_MASK 0x1 +#define GFT_RAM_LINE_TENANT_ID_SHIFT 9 +#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF +#define GFT_RAM_LINE_RESERVED1_SHIFT 10 +}; + +enum gft_vlan_select { + INNER_PROVIDER_VLAN = 0, + INNER_VLAN = 1, + OUTER_PROVIDER_VLAN = 2, + OUTER_VLAN = 3, + MAX_GFT_VLAN_SELECT +}; + struct mstorm_rdma_task_st_ctx { struct regpair temp[4]; }; @@ -5827,12 +6236,9 @@ struct rdma_init_func_hdr { u8 cnq_start_offset; u8 num_cnqs; u8 cq_ring_mode; - u8 cnp_vlan_priority; - __le32 cnp_send_timeout; - u8 cnp_dscp; u8 vf_id; u8 vf_valid; - u8 reserved[5]; + u8 reserved[3]; }; struct rdma_init_func_ramrod_data { @@ -5856,54 +6262,55 @@ enum rdma_ramrod_cmd_id { }; struct rdma_register_tid_ramrod_data { - __le32 flags; -#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF -#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0 -#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F -#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18 -#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23 -#define 
RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24 -#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30 -#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31 + __le16 flags; +#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12 +#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14 u8 flags1; -#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F #define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0 -#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7 -#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5 u8 flags2; -#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0 -#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1 -#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F -#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2 +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2 u8 key; u8 length_hi; u8 vf_id; u8 vf_valid; __le16 pd; + __le16 reserved2; __le32 length_lo; __le32 itid; - __le32 
reserved2; + __le32 reserved3; struct regpair va; struct regpair pbl_base; struct regpair dif_error_addr; struct regpair dif_runt_addr; - __le32 reserved3[2]; + __le32 reserved4[2]; }; struct rdma_resize_cq_output_params { @@ -6149,6 +6556,233 @@ enum rdma_tid_type { MAX_RDMA_TID_TYPE }; +struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7 + u8 flags1; +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6 + u8 flags3; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6 + u8 flags5; +#define 
XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6 + u8 flags6; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6 + u8 flags7; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1 +#define 
XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7 + u8 flags11; +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1 
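The _MASK/_SHIFT pairs in these context structures (and in the GFT and ramrod structures above) are intended to be driven through the generic GET_FIELD()/SET_FIELD() helpers rather than open-coded shifts. A short sketch, assuming the helpers from common_hsi.h; the accessor functions themselves are hypothetical:

/* Read and update the MIGRATION bit of flags14 via the generic
 * bit-field helpers; the field names come from the struct above.
 */
static u8 qed_roce_ld_part_migration(u8 flags14)
{
	return GET_FIELD(flags14, XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION);
}

static void qed_roce_ld_part_set_migration(u8 *flags14, u8 val)
{
	SET_FIELD(*flags14, XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION, val);
}
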
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 snd_nxt_psn; + __le32 reg4; +}; + struct mstorm_rdma_conn_ag_ctx { u8 byte0; u8 byte1; @@ -6438,233 +7072,6 @@ struct ustorm_rdma_conn_ag_ctx { __le16 word3; }; -struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { - u8 reserved0; - u8 state; - u8 flags0; -#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7 - u8 flags1; -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7 - u8 flags2; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6 - u8 flags3; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3 -#define 
XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6 - u8 flags4; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6 - u8 flags5; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6 - u8 flags6; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6 - u8 flags7; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 - u8 flags8; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 - u8 flags9; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 -#define 
XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7 - u8 flags10; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7 - u8 flags11; -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 - u8 flags12; -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 - u8 flags13; -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 -#define 
XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 - u8 flags14; -#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6 - u8 byte2; - __le16 physical_q0; - __le16 word1; - __le16 word2; - __le16 word3; - __le16 word4; - __le16 word5; - __le16 conn_dpi; - u8 byte3; - u8 byte4; - u8 byte5; - u8 byte6; - __le32 reg0; - __le32 reg1; - __le32 reg2; - __le32 snd_nxt_psn; - __le32 reg4; -}; - struct xstorm_rdma_conn_ag_ctx { u8 reserved0; u8 state; @@ -6696,8 +7103,8 @@ struct xstorm_rdma_conn_ag_ctx { #define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3 #define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1 #define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5 #define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1 #define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6 #define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 @@ -7093,16 +7500,35 @@ struct roce_destroy_qp_resp_ramrod_data { struct regpair output_params_addr; }; +struct roce_events_stats { + __le16 silent_drops; + __le16 rnr_naks_sent; + __le32 retransmit_count; + __le32 icrc_error_count; + __le32 reserved; +}; + enum roce_event_opcode { ROCE_EVENT_CREATE_QP = 11, ROCE_EVENT_MODIFY_QP, ROCE_EVENT_QUERY_QP, ROCE_EVENT_DESTROY_QP, + ROCE_EVENT_CREATE_UD_QP, + ROCE_EVENT_DESTROY_UD_QP, MAX_ROCE_EVENT_OPCODE }; +struct roce_init_func_params { + u8 ll2_queue_id; + u8 cnp_vlan_priority; + u8 cnp_dscp; + u8 reserved; + __le32 cnp_send_timeout; +}; + struct roce_init_func_ramrod_data { struct rdma_init_func_ramrod_data rdma; + struct roce_init_func_params roce; }; struct roce_modify_qp_req_ramrod_data { @@ -7222,6 +7648,8 @@ enum roce_ramrod_cmd_id { ROCE_RAMROD_MODIFY_QP, ROCE_RAMROD_QUERY_QP, ROCE_RAMROD_DESTROY_QP, + ROCE_RAMROD_CREATE_UD_QP, + ROCE_RAMROD_DESTROY_UD_QP, MAX_ROCE_RAMROD_CMD_ID }; @@ -7299,13 +7727,6 @@ struct mstorm_roce_resp_conn_ag_ctx { __le32 reg1; }; -enum roce_flavor { - PLAIN_ROCE /* RoCE v1 */ , - RROCE_IPV4 /* RoCE v2 (Routable RoCE) over ipv4 */ , - RROCE_IPV6 /* RoCE v2 (Routable RoCE) over ipv6 */ , - MAX_ROCE_FLAVOR -}; - struct tstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; @@ -7416,8 +7837,8 @@ struct 
tstorm_roce_resp_conn_ag_ctx { u8 flags0; #define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 #define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 #define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 #define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 @@ -8097,7 +8518,7 @@ struct xstorm_roce_resp_conn_ag_ctx { __le16 irq_prod; __le16 word3; __le16 word4; - __le16 word5; + __le16 ereserved1; __le16 irq_cons; u8 rxmit_opcode; u8 byte4; @@ -8200,6 +8621,812 @@ struct ystorm_roce_resp_conn_ag_ctx { __le32 reg3; }; +enum roce_flavor { + PLAIN_ROCE, + RROCE_IPV4, + RROCE_IPV6, + MAX_ROCE_FLAVOR +}; + +struct ystorm_iwarp_conn_st_ctx { + __le32 reserved[4]; +}; + +struct pstorm_iwarp_conn_st_ctx { + __le32 reserved[36]; +}; + +struct xstorm_iwarp_conn_st_ctx { + __le32 reserved[44]; +}; + +struct xstorm_iwarp_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 + u8 flags2; +#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 + u8 flags3; +#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 +#define 
XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 + u8 flags7; +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; 
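(Aside: the aggregative "AG" context flags being added here follow a single convention throughout these firmware HSI definitions: every multi-bit completion-flag (CF) field carries an unshifted _MASK plus a _SHIFT bit position, and each CF has a matching one-bit *_EN enable in a later flags byte — SQ_FLUSH_CF in flags5 pairs with SQ_FLUSH_CF_EN in flags9, for instance. A minimal sketch of how such fields are packed follows; the AG_CTX_* helpers and the example function are hypothetical stand-ins for the driver's SET_FIELD()/GET_FIELD() macros — SET_FIELD itself appears in a hunk further down this patch — and the CF value written is purely illustrative.)

/* Hypothetical helpers mirroring the _MASK/_SHIFT convention used by
 * the definitions above; the qed sources provide equivalent
 * SET_FIELD()/GET_FIELD() macros.
 */
#define AG_CTX_SET_FIELD(flags, name, val)				\
	((flags) = ((flags) & ~((name ## _MASK) << (name ## _SHIFT))) |	\
		   (((val) & (name ## _MASK)) << (name ## _SHIFT)))
#define AG_CTX_GET_FIELD(flags, name)					\
	(((flags) >> (name ## _SHIFT)) & (name ## _MASK))

/* Illustrative only: raise the 2-bit SQ-flush CF and its enable bit. */
static void example_arm_sq_flush(struct xstorm_iwarp_conn_ag_ctx *ctx)
{
	AG_CTX_SET_FIELD(ctx->flags5, XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF, 2);
	AG_CTX_SET_FIELD(ctx->flags9,
			 XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN, 1);
}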
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 + u8 flags11; +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define 
XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 physical_q1; + __le16 sq_comp_cons; + __le16 sq_tx_cons; + __le16 sq_prod; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 more_to_send_seq; + __le32 reg4; + __le32 rewinded_snd_max; + __le32 rd_msn; + __le16 irq_prod_via_msdm; + __le16 irq_cons; + __le16 hq_cons_th_or_mpa_data; + __le16 hq_cons; + __le32 atom_msn; + __le32 orq_cons; + __le32 orq_cons_th; + u8 byte7; + u8 max_ord; + u8 wqe_data_pad_bytes; + u8 former_hq_prod; + u8 irq_prod_via_msem; + u8 byte12; + u8 max_pkt_pdu_size_lo; + u8 max_pkt_pdu_size_hi; + u8 byte15; + u8 e5_reserved; + __le16 e5_reserved4; + __le32 reg10; + __le32 reg11; + __le32 shared_queue_page_addr_lo; + __le32 shared_queue_page_addr_hi; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; +}; + +struct tstorm_iwarp_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 +#define 
TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 + u8 flags4; +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 unaligned_nxt_seq; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 orq_cache_idx; + u8 hq_prod; + __le16 sq_tx_cons_th; + u8 orq_prod; + u8 irq_cons; + __le16 sq_tx_cons; + __le16 conn_dpi; + __le16 rq_prod; + __le32 snd_seq; + __le32 last_hq_sequence; +}; + +struct tstorm_iwarp_conn_st_ctx { + __le32 reserved[60]; +}; + +struct mstorm_iwarp_conn_st_ctx { + __le32 reserved[32]; +}; + +struct ustorm_iwarp_conn_st_ctx { + __le32 reserved[24]; +}; + +struct iwarp_conn_context { + struct ystorm_iwarp_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_iwarp_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_iwarp_conn_st_ctx xstorm_st_context; + struct regpair xstorm_st_padding[2]; + struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context; + struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context; + struct timers_context timer_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct tstorm_iwarp_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct mstorm_iwarp_conn_st_ctx mstorm_st_context; + struct ustorm_iwarp_conn_st_ctx ustorm_st_context; +}; + +struct iwarp_create_qp_ramrod_data { + u8 flags; +#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0 
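(Aside: each storm's state-context placeholder above is declared as an array of 32-bit words, so the byte sizes entering the iwarp_conn_context image follow directly from the reserved[] lengths — 4, 36, 44, 60, 32 and 24 words, i.e. 16, 144, 176, 240, 128 and 96 bytes. A sketch of a build-time sanity check in that spirit; the helper name is hypothetical and not part of the patch.)

#include <linux/bug.h>

/* Hypothetical compile-time check: the word counts come straight from
 * the reserved[] array lengths declared above.
 */
static inline void iwarp_conn_ctx_size_sanity(void)
{
	BUILD_BUG_ON(sizeof(struct ystorm_iwarp_conn_st_ctx) != 4 * 4);
	BUILD_BUG_ON(sizeof(struct pstorm_iwarp_conn_st_ctx) != 36 * 4);
	BUILD_BUG_ON(sizeof(struct xstorm_iwarp_conn_st_ctx) != 44 * 4);
	BUILD_BUG_ON(sizeof(struct tstorm_iwarp_conn_st_ctx) != 60 * 4);
	BUILD_BUG_ON(sizeof(struct mstorm_iwarp_conn_st_ctx) != 32 * 4);
	BUILD_BUG_ON(sizeof(struct ustorm_iwarp_conn_st_ctx) != 24 * 4);
}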
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 +#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 +#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6 + u8 reserved1; + __le16 pd; + __le16 sq_num_pages; + __le16 rq_num_pages; + __le32 reserved3[2]; + struct regpair qp_handle_for_cqe; + struct rdma_srq_id srq_id; + __le32 cq_cid_for_sq; + __le32 cq_cid_for_rq; + __le16 dpi; + __le16 physical_q0; + __le16 physical_q1; + u8 reserved2[6]; +}; + +enum iwarp_eqe_async_opcode { + IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE, + IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED, + IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE, + IWARP_EVENT_TYPE_ASYNC_CID_CLEANED, + IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, + IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, + IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, + MAX_IWARP_EQE_ASYNC_OPCODE +}; + +struct iwarp_eqe_data_mpa_async_completion { + __le16 ulp_data_len; + u8 reserved[6]; +}; + +struct iwarp_eqe_data_tcp_async_completion { + __le16 ulp_data_len; + u8 mpa_handshake_mode; + u8 reserved[5]; +}; + +enum iwarp_eqe_sync_opcode { + IWARP_EVENT_TYPE_TCP_OFFLOAD = 11, + IWARP_EVENT_TYPE_MPA_OFFLOAD, + IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR, + IWARP_EVENT_TYPE_CREATE_QP, + IWARP_EVENT_TYPE_QUERY_QP, + IWARP_EVENT_TYPE_MODIFY_QP, + IWARP_EVENT_TYPE_DESTROY_QP, + MAX_IWARP_EQE_SYNC_OPCODE +}; + +enum iwarp_fw_return_code { + IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5, + IWARP_CONN_ERROR_TCP_CONNECTION_RST, + IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT, + IWARP_CONN_ERROR_MPA_ERROR_REJECT, + IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER, + IWARP_CONN_ERROR_MPA_RST, + IWARP_CONN_ERROR_MPA_FIN, + IWARP_CONN_ERROR_MPA_RTR_MISMATCH, + IWARP_CONN_ERROR_MPA_INSUF_IRD, + IWARP_CONN_ERROR_MPA_INVALID_PACKET, + IWARP_CONN_ERROR_MPA_LOCAL_ERROR, + IWARP_CONN_ERROR_MPA_TIMEOUT, + IWARP_CONN_ERROR_MPA_TERMINATE, + IWARP_QP_IN_ERROR_GOOD_CLOSE, + IWARP_QP_IN_ERROR_BAD_CLOSE, + IWARP_EXCEPTION_DETECTED_LLP_CLOSED, + IWARP_EXCEPTION_DETECTED_LLP_RESET, + IWARP_EXCEPTION_DETECTED_IRQ_FULL, + IWARP_EXCEPTION_DETECTED_RQ_EMPTY, + IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT, + IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR, + IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW, + IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC, + IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR, + IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR, + IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED, + MAX_IWARP_FW_RETURN_CODE +}; + +struct iwarp_init_func_params { + u8 ll2_ooo_q_index; + u8 reserved1[7]; +}; + +struct iwarp_init_func_ramrod_data { + struct rdma_init_func_ramrod_data rdma; + struct tcp_init_params tcp; + struct iwarp_init_func_params iwarp; +}; + +enum iwarp_modify_qp_new_state_type { + IWARP_MODIFY_QP_STATE_CLOSING = 1, + IWARP_MODIFY_QP_STATE_ERROR = 2, + MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE +}; + +struct iwarp_modify_qp_ramrod_data { + __le16 transition_to_state; + __le16 flags; +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0 +#define
IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1 +#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2 +#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4 +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5 + __le32 reserved3[3]; + __le32 reserved4[8]; +}; + +struct mpa_rq_params { + __le32 ird; + __le32 ord; +}; + +struct mpa_ulp_buffer { + struct regpair addr; + __le16 len; + __le16 reserved[3]; +}; + +struct mpa_outgoing_params { + u8 crc_needed; + u8 reject; + u8 reserved[6]; + struct mpa_rq_params out_rq; + struct mpa_ulp_buffer outgoing_ulp_buffer; +}; + +struct iwarp_mpa_offload_ramrod_data { + struct mpa_outgoing_params common; + __le32 tcp_cid; + u8 mode; + u8 tcp_connect_side; + u8 rtr_pref; +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7 +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0 +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3 + u8 reserved2; + struct mpa_ulp_buffer incoming_ulp_buffer; + struct regpair async_eqe_output_buf; + struct regpair handle_for_async; + struct regpair shared_queue_addr; + u8 stats_counter_id; + u8 reserved3[15]; +}; + +struct iwarp_offload_params { + struct mpa_ulp_buffer incoming_ulp_buffer; + struct regpair async_eqe_output_buf; + struct regpair handle_for_async; + __le16 physical_q0; + __le16 physical_q1; + u8 stats_counter_id; + u8 mpa_mode; + u8 reserved[10]; +}; + +struct iwarp_query_qp_output_params { + __le32 flags; +#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1 +#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0 +#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF +#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 + u8 reserved1[4]; +}; + +struct iwarp_query_qp_ramrod_data { + struct regpair output_params_addr; +}; + +enum iwarp_ramrod_cmd_id { + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, + IWARP_RAMROD_CMD_ID_CREATE_QP, + IWARP_RAMROD_CMD_ID_QUERY_QP, + IWARP_RAMROD_CMD_ID_MODIFY_QP, + IWARP_RAMROD_CMD_ID_DESTROY_QP, + MAX_IWARP_RAMROD_CMD_ID +}; + +struct iwarp_rxmit_stats_drv { + struct regpair tx_go_to_slow_start_event_cnt; + struct regpair tx_fast_retransmit_event_cnt; +}; + +struct iwarp_tcp_offload_ramrod_data { + struct iwarp_offload_params iwarp; + struct tcp_offload_params_opt2 tcp; +}; + +enum mpa_negotiation_mode { + MPA_NEGOTIATION_TYPE_BASIC = 1, + MPA_NEGOTIATION_TYPE_ENHANCED = 2, + MAX_MPA_NEGOTIATION_MODE +}; + +enum mpa_rtr_type { + MPA_RTR_TYPE_NONE = 0, + MPA_RTR_TYPE_ZERO_SEND = 1, + MPA_RTR_TYPE_ZERO_WRITE = 2, + MPA_RTR_TYPE_ZERO_SEND_AND_WRITE = 3, + MPA_RTR_TYPE_ZERO_READ = 4, + MPA_RTR_TYPE_ZERO_SEND_AND_READ = 5, + MPA_RTR_TYPE_ZERO_WRITE_AND_READ = 6, + MPA_RTR_TYPE_ZERO_SEND_AND_WRITE_AND_READ = 7, + MAX_MPA_RTR_TYPE +}; + +struct unaligned_opaque_data { + __le16 first_mpa_offset; + u8 tcp_payload_offset; + u8 flags; +#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1 +#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0 +#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1 +#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT
1 +#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F +#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2 + __le32 cid; +}; + +struct mstorm_iwarp_conn_ag_ctx { + u8 reserved; + u8 state; + u8 flags0; +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 rcq_cons; + __le16 rcq_cons_th; + __le32 reg0; + __le32 reg1; +}; + +struct ustorm_iwarp_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 + u8 flags3; +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define 
USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 cq_cons; + __le32 cq_se_prod; + __le32 cq_prod; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct ystorm_iwarp_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + struct ystorm_fcoe_conn_st_ctx { u8 func_mode; u8 cos; @@ -9222,7 +10449,7 @@ struct xstorm_iscsi_conn_ag_ctx { u8 byte13; u8 byte14; u8 byte15; - u8 byte16; + u8 ereserved; __le16 word11; __le32 reg10; __le32 reg11; @@ -10285,9 +11512,11 @@ struct public_drv_mb { #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 +#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000 #define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 #define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 +#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000 #define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 #define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000 #define DRV_MSG_CODE_MCP_RESET 0x00090000 @@ -10444,6 +11673,7 @@ struct public_drv_mb { #define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000 #define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000 #define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000 +#define FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE 0x3b000000 #define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 #define FW_MSG_CODE_NVM_OK 0x00010000 @@ -10451,7 +11681,7 @@ struct public_drv_mb { #define FW_MSG_CODE_OS_WOL_SUPPORTED 
0x00800000 #define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000 - +#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000 #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 fw_mb_param; @@ -10466,6 +11696,8 @@ struct public_drv_mb { #define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 #define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 +#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) + u32 drv_pulse_mb; #define DRV_PULSE_SEQ_MASK 0x00007fff #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 @@ -10489,7 +11721,7 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, MFW_DRV_MSG_RESERVED4, MFW_DRV_MSG_BW_UPDATE, - MFW_DRV_MSG_BW_UPDATE5, + MFW_DRV_MSG_S_TAG_UPDATE, MFW_DRV_MSG_GET_LAN_STATS, MFW_DRV_MSG_GET_FCOE_STATS, MFW_DRV_MSG_GET_ISCSI_STATS, @@ -10591,6 +11823,12 @@ struct nvm_cfg1_glob { u32 led_global_settings; u32 generic_cont1; u32 mbi_version; +#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF +#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 +#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 +#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 u32 mbi_date; u32 misc_sig; u32 device_capabilities; @@ -10758,6 +11996,8 @@ struct static_init { u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */ }; +#define NVM_MAGIC_VALUE 0x669955aa + enum nvm_image_type { NVM_TYPE_TIM1 = 0x01, NVM_TYPE_TIM2 = 0x02, diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 0a8fde629991..b069ad088269 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -40,31 +40,17 @@ #include "qed_init_ops.h" #include "qed_reg_addr.h" -enum cminterface { - MCM_SEC, - MCM_PRI, - UCM_SEC, - UCM_PRI, - TCM_SEC, - TCM_PRI, - YCM_SEC, - YCM_PRI, - XCM_SEC, - XCM_PRI, - NUM_OF_CM_INTERFACES -}; - -/* general constants */ +/* General constants */ #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ QM_PQ_ELEMENT_SIZE, \ 0x1000) : 0) #define QM_PQ_SIZE_256B(pq_size) (pq_size ? 
DIV_ROUND_UP(pq_size, \ 0x100) - 1 : 0) #define QM_INVALID_PQ_ID 0xffff -/* feature enable */ +/* Feature enable */ #define QM_BYPASS_EN 1 #define QM_BYTE_CRD_EN 1 -/* other PQ constants */ +/* Other PQ constants */ #define QM_OTHER_PQS_PER_PF 4 /* WFQ constants */ #define QM_WFQ_UPPER_BOUND 62500000 @@ -106,20 +92,21 @@ enum cminterface { #define BTB_PURE_LB_FACTOR 10 #define BTB_PURE_LB_RATIO 7 /* QM stop command constants */ -#define QM_STOP_PQ_MASK_WIDTH 32 -#define QM_STOP_CMD_ADDR 0x2 -#define QM_STOP_CMD_STRUCT_SIZE 2 +#define QM_STOP_PQ_MASK_WIDTH 32 +#define QM_STOP_CMD_ADDR 2 +#define QM_STOP_CMD_STRUCT_SIZE 2 #define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 #define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 -#define QM_STOP_CMD_PAUSE_MASK_MASK -1 -#define QM_STOP_CMD_GROUP_ID_OFFSET 1 -#define QM_STOP_CMD_GROUP_ID_SHIFT 16 -#define QM_STOP_CMD_GROUP_ID_MASK 15 -#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 -#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 -#define QM_STOP_CMD_PQ_TYPE_MASK 1 -#define QM_STOP_CMD_MAX_POLL_COUNT 100 -#define QM_STOP_CMD_POLL_PERIOD_US 500 +#define QM_STOP_CMD_PAUSE_MASK_MASK -1 +#define QM_STOP_CMD_GROUP_ID_OFFSET 1 +#define QM_STOP_CMD_GROUP_ID_SHIFT 16 +#define QM_STOP_CMD_GROUP_ID_MASK 15 +#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 +#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 +#define QM_STOP_CMD_PQ_TYPE_MASK 1 +#define QM_STOP_CMD_MAX_POLL_COUNT 100 +#define QM_STOP_CMD_POLL_PERIOD_US 500 + /* QM command macros */ #define QM_CMD_STRUCT_SIZE(cmd) cmd ## \ _STRUCT_SIZE @@ -146,16 +133,17 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); if (pf_rl_en) { - /* enable RLs for all VOQs */ + /* Enable RLs for all VOQs */ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (1 << MAX_NUM_VOQS) - 1); - /* write RL period */ + /* Write RL period */ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M); STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M); - /* set credit threshold for QM bypass flow */ + + /* Set credit threshold for QM bypass flow */ if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, @@ -167,7 +155,8 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en) { STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); - /* set credit threshold for QM bypass flow */ + + /* Set credit threshold for QM bypass flow */ if (pf_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, @@ -180,14 +169,15 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en) STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 1 : 0); if (vport_rl_en) { - /* write RL period (use timer 0 only) */ + /* Write RL period (use timer 0 only) */ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M); STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M); - /* set credit threshold for QM bypass flow */ + + /* Set credit threshold for QM bypass flow */ if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, @@ -200,7 +190,8 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) { STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 
1 : 0); - /* set credit threshold for QM bypass flow */ + + /* Set credit threshold for QM bypass flow */ if (vport_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, @@ -208,7 +199,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) } /* Prepare runtime init values to allocate PBF command queue lines for - * the specified VOQ + * the specified VOQ. */ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, u8 voq, u16 cmdq_lines) @@ -232,7 +223,7 @@ static void qed_cmdq_lines_rt_init( { u8 tc, voq, port_id, num_tcs_in_port; - /* clear PBF lines for all VOQs */ + /* Clear PBF lines for all VOQs */ for (voq = 0; voq < MAX_NUM_VOQS; voq++) STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0); for (port_id = 0; port_id < max_ports_per_engine; port_id++) { @@ -285,7 +276,7 @@ static void qed_btb_blocks_rt_init( if (!port_params[port_id].active) continue; - /* subtract headroom blocks */ + /* Subtract headroom blocks */ usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS; @@ -305,7 +296,7 @@ static void qed_btb_blocks_rt_init( phys_blocks = (usable_blocks - pure_lb_blocks) / num_tcs_in_port; - /* init physical TCs */ + /* Init physical TCs */ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) != 1) @@ -317,7 +308,7 @@ static void qed_btb_blocks_rt_init( phys_blocks); } - /* init pure LB TC */ + /* Init pure LB TC */ temp = LB_VOQ(port_id); STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp), pure_lb_blocks); @@ -338,24 +329,24 @@ static void qed_tx_pq_map_rt_init( QM_PF_QUEUE_GROUP_SIZE; u16 i, pq_id, pq_group; - /* a bit per Tx PQ indicating if the PQ is associated with a VF */ + /* A bit per Tx PQ indicating if the PQ is associated with a VF */ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); u32 mem_addr_4kb = base_mem_addr_4kb; - /* set mapping from PQ group to PF */ + /* Set mapping from PQ group to PF */ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++) STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(p_params->pf_id)); - /* set PQ sizes */ + /* Set PQ sizes */ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(p_params->num_pf_cids)); STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, QM_PQ_SIZE_256B(p_params->num_vf_cids)); - /* go over all Tx PQs */ + /* Go over all Tx PQs */ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, p_params->max_phys_tcs_per_port); @@ -366,17 +357,18 @@ static void qed_tx_pq_map_rt_init( (p_params->pq_params[i].vport_id < MAX_QM_GLOBAL_RLS); - /* update first Tx PQ of VPORT/TC */ + /* Update first Tx PQ of VPORT/TC */ u8 vport_id_in_pf = p_params->pq_params[i].vport_id - p_params->start_vport; u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0]; u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id]; if (first_tx_pq_id == QM_INVALID_PQ_ID) { - /* create new VP PQ */ + /* Create new VP PQ */ pq_ids[p_params->pq_params[i].tc_id] = pq_id; first_tx_pq_id = pq_id; - /* map VP PQ to VOQ and PF */ + + /* Map VP PQ to VOQ and PF */ STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, @@ -388,7 +380,7 @@ static void qed_tx_pq_map_rt_init( if 
(p_params->pq_params[i].rl_valid && !rl_valid) DP_NOTICE(p_hwfn, "Invalid VPORT ID for rate limiter configuration"); - /* fill PQ map entry */ + /* Fill PQ map entry */ memset(&tx_pq_map, 0, sizeof(tx_pq_map)); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); SET_FIELD(tx_pq_map.reg, @@ -400,18 +392,16 @@ static void qed_tx_pq_map_rt_init( SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, p_params->pq_params[i].wrr_group); - /* write PQ map entry to CAM */ + /* Write PQ map entry to CAM */ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32 *)&tx_pq_map)); - /* set base address */ + /* Set base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb); - /* check if VF PQ */ + + /* If VF PQ, add indication to PQ VF mask */ if (is_vf_pq) { - /* if PQ is associated with a VF, add indication - * to PQ VF mask - */ tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE)); @@ -421,16 +411,12 @@ static void qed_tx_pq_map_rt_init( } } - /* store Tx PQ VF mask to size select register */ - for (i = 0; i < num_tx_pq_vf_masks; i++) { - if (tx_pq_vf_mask[i]) { - u32 addr; - - addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; - STORE_RT_REG(p_hwfn, addr, + /* Store Tx PQ VF mask to size select register */ + for (i = 0; i < num_tx_pq_vf_masks; i++) + if (tx_pq_vf_mask[i]) + STORE_RT_REG(p_hwfn, + QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]); - } - } } /* Prepare Other PQ mapping runtime init values for the specified PF */ @@ -440,23 +426,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, u32 num_pf_cids, u32 num_tids, u32 base_mem_addr_4kb) { - u16 i, pq_id; + u32 pq_size, pq_mem_4kb, mem_addr_4kb; + u16 i, pq_id, pq_group; /* a single other PQ group is used in each PF, * where PQ group i is used in PF i. 
*/ - u16 pq_group = pf_id; - u32 pq_size = num_pf_cids + num_tids; - u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size); - u32 mem_addr_4kb = base_mem_addr_4kb; + pq_group = pf_id; + pq_size = num_pf_cids + num_tids; + pq_mem_4kb = QM_PQ_MEM_4KB(pq_size); + mem_addr_4kb = base_mem_addr_4kb; - /* map PQ group to PF */ + /* Map PQ group to PF */ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id)); - /* set PQ sizes */ + /* Set PQ sizes */ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size)); - /* set base address */ + + /* Set base address */ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { STORE_RT_REG(p_hwfn, @@ -485,7 +473,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); + DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } @@ -514,7 +502,7 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) u32 inc_val = QM_RL_INC_VAL(pf_rl); if (inc_val > QM_RL_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration"); + DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); return -1; } STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, @@ -535,7 +523,7 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, u32 inc_val; u8 tc, i; - /* go over all PF VPORTs */ + /* Go over all PF VPORTs */ for (i = 0; i < num_vports; i++) { if (!vport_params[i].vport_wfq) @@ -544,7 +532,7 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq); if (inc_val > QM_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, - "Invalid VPORT WFQ weight configuration"); + "Invalid VPORT WFQ weight configuration\n"); return -1; } @@ -578,17 +566,17 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) { DP_NOTICE(p_hwfn, - "Invalid VPORT ID for rate limiter configuration"); + "Invalid VPORT ID for rate limiter configuration\n"); return -1; } - /* go over all PF VPORTs */ + /* Go over all PF VPORTs */ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl); if (inc_val > QM_RL_MAX_INC_VAL) { DP_NOTICE(p_hwfn, - "Invalid VPORT rate-limit configuration"); + "Invalid VPORT rate-limit configuration\n"); return -1; } @@ -617,7 +605,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); } - /* check if timeout while waiting for SDM command ready */ + /* Check if timeout while waiting for SDM command ready */ if (i == QM_STOP_CMD_MAX_POLL_COUNT) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Timeout when waiting for QM SDM command ready signal\n"); @@ -701,16 +689,16 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, QM_OTHER_PQS_PER_PF; u8 tc, i; - /* clear first Tx PQ ID array for each VPORT */ + /* Clear first Tx PQ ID array for each VPORT */ for (i = 0; i < p_params->num_vports; i++) for (tc = 0; tc < NUM_OF_TCS; tc++) vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; - /* map Other PQs (if any) */ + /* Map Other PQs (if any) */ qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id, p_params->num_pf_cids, p_params->num_tids, 0); - /* map Tx PQs */ + /* Map Tx PQs */ qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb); if (p_params->pf_wfq) @@ -736,7 +724,7 @@ int 
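/*
 * Editor's note: per the rewritten qed_other_pq_map_rt_init() above,
 * each PF owns exactly one "other" PQ group (group index equals pf_id)
 * and its QM_OTHER_PQS_PER_PF queue ids are contiguous within it. A
 * sketch of the id computation; first_other_pq_id() is a hypothetical
 * helper named here for illustration only.
 */
static u16 first_other_pq_id(u8 pf_id)
{
	/* PQ ids for PF i start at i * QM_PF_QUEUE_GROUP_SIZE */
	return pf_id * QM_PF_QUEUE_GROUP_SIZE;
}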
qed_init_pf_wfq(struct qed_hwfn *p_hwfn, u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); + DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } @@ -750,7 +738,7 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, u32 inc_val = QM_RL_INC_VAL(pf_rl); if (inc_val > QM_RL_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration"); + DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); return -1; } @@ -766,17 +754,18 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq) { - u32 inc_val = QM_WFQ_INC_VAL(vport_wfq); + u16 vport_pq_id; + u32 inc_val; u8 tc; + inc_val = QM_WFQ_INC_VAL(vport_wfq); if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration"); + DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n"); return -1; } for (tc = 0; tc < NUM_OF_TCS; tc++) { - u16 vport_pq_id = first_tx_pq_id[tc]; - + vport_pq_id = first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + vport_pq_id * 4, @@ -793,12 +782,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn, if (vport_id >= MAX_QM_GLOBAL_RLS) { DP_NOTICE(p_hwfn, - "Invalid VPORT ID for rate limiter configuration"); + "Invalid VPORT ID for rate limiter configuration\n"); return -1; } if (inc_val > QM_RL_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration"); + DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n"); return -1; } @@ -818,15 +807,15 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id; - /* set command's PQ type */ + /* Set command's PQ type */ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1); for (pq_id = start_pq; pq_id <= last_pq; pq_id++) { - /* set PQ bit in mask (stop command only) */ + /* Set PQ bit in mask (stop command only) */ if (!is_release_cmd) pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH)); - /* if last PQ or end of PQ mask, write command */ + /* If last PQ or end of PQ mask, write command */ if ((pq_id == last_pq) || (pq_id % QM_STOP_PQ_MASK_WIDTH == (QM_STOP_PQ_MASK_WIDTH - 1))) { @@ -962,8 +951,10 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, ip_geneve_enable ? 
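/*
 * Editor's note: qed_send_qm_stop_cmd() above batches PQ ids into a
 * QM_STOP_PQ_MASK_WIDTH-bit mask and only issues the SDM command once
 * the mask fills up or the last PQ has been reached. A sketch of the
 * flush condition; qm_stop_mask_full() is a hypothetical helper.
 */
static bool qm_stop_mask_full(u32 pq_id, u32 last_pq)
{
	return pq_id == last_pq ||
	       pq_id % QM_STOP_PQ_MASK_WIDTH == QM_STOP_PQ_MASK_WIDTH - 1;
}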
1 : 0); } +#define T_ETH_PACKET_ACTION_GFT_EVENTID 23 +#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25 -#define PARSER_ETH_CONN_CM_HDR (0x0) +#define PARSER_ETH_CONN_CM_HDR 0 #define CAM_LINE_SIZE sizeof(u32) #define RAM_LINE_SIZE sizeof(u64) #define REG_SIZE sizeof(u32) @@ -971,40 +962,26 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) { - union gft_cam_line_union camline; - struct gft_ram_line ramline; - u32 *p_ramline, i; - - p_ramline = (u32 *)&ramline; + u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM + + pf_id * RAM_LINE_SIZE; /*stop using gft logic */ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0); qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0); - memset(&camline, 0, sizeof(union gft_cam_line_union)); - qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, - camline.cam_line_mapped.camline); - memset(&ramline, 0, sizeof(ramline)); - - for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) { - u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM; - - hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE); - - qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i)); - } + qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0); + qed_wr(p_hwfn, p_ptt, hw_addr, 0); + qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0); } void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id, bool tcp, bool udp, bool ipv4, bool ipv6) { - u32 rfs_cm_hdr_event_id, *p_ramline; union gft_cam_line_union camline; struct gft_ram_line ramline; - int i; + u32 rfs_cm_hdr_event_id; rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT); - p_ramline = (u32 *)&ramline; if (!ipv6 && !ipv4) DP_NOTICE(p_hwfn, @@ -1024,18 +1001,20 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); camline.cam_line_mapped.camline = 0; - /* cam line is now valid!! */ + /* Cam line is now valid!! */ SET_FIELD(camline.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_VALID, 1); /* filters are per PF!! 
*/ SET_FIELD(camline.cam_line_mapped.camline, - GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1); + GFT_CAM_LINE_MAPPED_PF_ID_MASK, + GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK); SET_FIELD(camline.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_PF_ID, pf_id); if (!(tcp && udp)) { SET_FIELD(camline.cam_line_mapped.camline, - GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1); + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK); if (tcp) SET_FIELD(camline.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, @@ -1059,34 +1038,38 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, GFT_PROFILE_IPV6); } - /* write characteristics to cam */ + /* Write characteristics to cam */ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, camline.cam_line_mapped.camline); camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id); - /* write line to RAM - compare to filter 4 tuple */ - ramline.low32bits = 0; - ramline.high32bits = 0; - SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1); - SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1); - SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1); - SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1); - - /* each iteration write to reg */ - for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) - qed_wr(p_hwfn, p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + - i * REG_SIZE, *(p_ramline + i)); - - /* set default profile so that no filter match will happen */ - ramline.low32bits = 0xffff; - ramline.high32bits = 0xffff; - - for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) - qed_wr(p_hwfn, p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * - PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE, - *(p_ramline + i)); + /* Write line to RAM - compare to filter 4 tuple */ + ramline.lo = 0; + ramline.hi = 0; + SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1); + SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1); + + /* Each iteration write to reg */ + qed_wr(p_hwfn, p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, + ramline.lo); + qed_wr(p_hwfn, p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4, + ramline.hi); + + /* Set default profile so that no filter match will happen */ + qed_wr(p_hwfn, p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH, + ramline.lo); + qed_wr(p_hwfn, p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4, + ramline.hi); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 4a2e7be5bf72..e3f368882f46 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -158,6 +158,7 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn) GFP_KERNEL); if (!rt_data->init_val) { kfree(rt_data->b_valid); + rt_data->b_valid = NULL; return -ENOMEM; } @@ -167,7 +168,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn) void qed_init_free(struct qed_hwfn *p_hwfn) { kfree(p_hwfn->rt_data.init_val); + p_hwfn->rt_data.init_val = NULL; kfree(p_hwfn->rt_data.b_valid); + p_hwfn->rt_data.b_valid = NULL; } static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, @@ -525,6 +528,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn, } kfree(p_hwfn->unzip_buf); 
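/*
 * Editor's note: the qed_init_ops.c hunks in this patch consistently
 * reset pointers to NULL right after kfree(). kfree(NULL) is already a
 * no-op, so the NULL stores are what make a repeated qed_init_free()
 * (or an allocation-failure unwind followed by a retry) safe. A minimal
 * sketch of the pattern; free_and_clear() is a hypothetical helper.
 */
static void free_and_clear(void **p)
{
	kfree(*p);	/* safe even when *p is already NULL */
	*p = NULL;	/* prevent a stale pointer on later teardown */
}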
+ p_hwfn->unzip_buf = NULL; return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 40f057edeafc..719cdbfe1695 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -90,6 +90,12 @@ struct aeu_invert_reg_bit { /* Multiple bits start with this offset */ #define ATTENTION_OFFSET_MASK (0x000ff000) #define ATTENTION_OFFSET_SHIFT (12) + +#define ATTENTION_BB_MASK (0x00700000) +#define ATTENTION_BB_SHIFT (20) +#define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT) +#define ATTENTION_BB_DIFFERENT BIT(23) + unsigned int flags; /* Callback to call if attention will be triggered */ @@ -105,1215 +111,6 @@ struct aeu_invert_reg { #define MAX_ATTN_GRPS (8) #define NUM_ATTN_REGS (9) -/* HW Attention register */ -struct attn_hw_reg { - u16 reg_idx; /* Index of this register in its block */ - u16 num_of_bits; /* number of valid attention bits */ - u32 sts_addr; /* Address of the STS register */ - u32 sts_clr_addr; /* Address of the STS_CLR register */ - u32 sts_wr_addr; /* Address of the STS_WR register */ - u32 mask_addr; /* Address of the MASK register */ -}; - -/* HW block attention registers */ -struct attn_hw_regs { - u16 num_of_int_regs; /* Number of interrupt regs */ - u16 num_of_prty_regs; /* Number of parity regs */ - struct attn_hw_reg **int_regs; /* interrupt regs */ - struct attn_hw_reg **prty_regs; /* parity regs */ -}; - -/* HW block attention registers */ -struct attn_hw_block { - const char *name; /* Block name */ - struct attn_hw_regs chip_regs[1]; -}; - -static struct attn_hw_reg grc_int0_bb_b0 = { - 0, 4, 0x50180, 0x5018c, 0x50188, 0x50184}; - -static struct attn_hw_reg *grc_int_bb_b0_regs[1] = { - &grc_int0_bb_b0}; - -static struct attn_hw_reg grc_prty1_bb_b0 = { - 0, 2, 0x50200, 0x5020c, 0x50208, 0x50204}; - -static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = { - &grc_prty1_bb_b0}; - -static struct attn_hw_reg miscs_int0_bb_b0 = { - 0, 3, 0x9180, 0x918c, 0x9188, 0x9184}; - -static struct attn_hw_reg miscs_int1_bb_b0 = { - 1, 11, 0x9190, 0x919c, 0x9198, 0x9194}; - -static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = { - &miscs_int0_bb_b0, &miscs_int1_bb_b0}; - -static struct attn_hw_reg miscs_prty0_bb_b0 = { - 0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4}; - -static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = { - &miscs_prty0_bb_b0}; - -static struct attn_hw_reg misc_int0_bb_b0 = { - 0, 1, 0x8180, 0x818c, 0x8188, 0x8184}; - -static struct attn_hw_reg *misc_int_bb_b0_regs[1] = { - &misc_int0_bb_b0}; - -static struct attn_hw_reg pglue_b_int0_bb_b0 = { - 0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184}; - -static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = { - &pglue_b_int0_bb_b0}; - -static struct attn_hw_reg pglue_b_prty0_bb_b0 = { - 0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194}; - -static struct attn_hw_reg pglue_b_prty1_bb_b0 = { - 1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204}; - -static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = { - &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0}; - -static struct attn_hw_reg cnig_int0_bb_b0 = { - 0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec}; - -static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = { - &cnig_int0_bb_b0}; - -static struct attn_hw_reg cnig_prty0_bb_b0 = { - 0, 2, 0x218348, 0x218354, 0x218350, 0x21834c}; - -static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = { - &cnig_prty0_bb_b0}; - -static struct attn_hw_reg cpmu_int0_bb_b0 = { - 0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4}; - -static struct attn_hw_reg 
*cpmu_int_bb_b0_regs[1] = { - &cpmu_int0_bb_b0}; - -static struct attn_hw_reg ncsi_int0_bb_b0 = { - 0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0}; - -static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = { - &ncsi_int0_bb_b0}; - -static struct attn_hw_reg ncsi_prty1_bb_b0 = { - 0, 1, 0x40000, 0x4000c, 0x40008, 0x40004}; - -static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = { - &ncsi_prty1_bb_b0}; - -static struct attn_hw_reg opte_prty1_bb_b0 = { - 0, 11, 0x53000, 0x5300c, 0x53008, 0x53004}; - -static struct attn_hw_reg opte_prty0_bb_b0 = { - 1, 1, 0x53208, 0x53214, 0x53210, 0x5320c}; - -static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = { - &opte_prty1_bb_b0, &opte_prty0_bb_b0}; - -static struct attn_hw_reg bmb_int0_bb_b0 = { - 0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4}; - -static struct attn_hw_reg bmb_int1_bb_b0 = { - 1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc}; - -static struct attn_hw_reg bmb_int2_bb_b0 = { - 2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4}; - -static struct attn_hw_reg bmb_int3_bb_b0 = { - 3, 31, 0x540108, 0x540114, 0x540110, 0x54010c}; - -static struct attn_hw_reg bmb_int4_bb_b0 = { - 4, 27, 0x540120, 0x54012c, 0x540128, 0x540124}; - -static struct attn_hw_reg bmb_int5_bb_b0 = { - 5, 29, 0x540138, 0x540144, 0x540140, 0x54013c}; - -static struct attn_hw_reg bmb_int6_bb_b0 = { - 6, 30, 0x540150, 0x54015c, 0x540158, 0x540154}; - -static struct attn_hw_reg bmb_int7_bb_b0 = { - 7, 32, 0x540168, 0x540174, 0x540170, 0x54016c}; - -static struct attn_hw_reg bmb_int8_bb_b0 = { - 8, 32, 0x540184, 0x540190, 0x54018c, 0x540188}; - -static struct attn_hw_reg bmb_int9_bb_b0 = { - 9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0}; - -static struct attn_hw_reg bmb_int10_bb_b0 = { - 10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8}; - -static struct attn_hw_reg bmb_int11_bb_b0 = { - 11, 4, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0}; - -static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = { - &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0, - &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0, - &bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0}; - -static struct attn_hw_reg bmb_prty0_bb_b0 = { - 0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0}; - -static struct attn_hw_reg bmb_prty1_bb_b0 = { - 1, 31, 0x540400, 0x54040c, 0x540408, 0x540404}; - -static struct attn_hw_reg bmb_prty2_bb_b0 = { - 2, 15, 0x540410, 0x54041c, 0x540418, 0x540414}; - -static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = { - &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0}; - -static struct attn_hw_reg pcie_prty1_bb_b0 = { - 0, 17, 0x54000, 0x5400c, 0x54008, 0x54004}; - -static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = { - &pcie_prty1_bb_b0}; - -static struct attn_hw_reg mcp2_prty0_bb_b0 = { - 0, 1, 0x52040, 0x5204c, 0x52048, 0x52044}; - -static struct attn_hw_reg mcp2_prty1_bb_b0 = { - 1, 12, 0x52204, 0x52210, 0x5220c, 0x52208}; - -static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = { - &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0}; - -static struct attn_hw_reg pswhst_int0_bb_b0 = { - 0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184}; - -static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = { - &pswhst_int0_bb_b0}; - -static struct attn_hw_reg pswhst_prty0_bb_b0 = { - 0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194}; - -static struct attn_hw_reg pswhst_prty1_bb_b0 = { - 1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204}; - -static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = { - &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0}; - -static struct attn_hw_reg pswhst2_int0_bb_b0 = { - 
0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184}; - -static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = { - &pswhst2_int0_bb_b0}; - -static struct attn_hw_reg pswhst2_prty0_bb_b0 = { - 0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194}; - -static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = { - &pswhst2_prty0_bb_b0}; - -static struct attn_hw_reg pswrd_int0_bb_b0 = { - 0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184}; - -static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = { - &pswrd_int0_bb_b0}; - -static struct attn_hw_reg pswrd_prty0_bb_b0 = { - 0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194}; - -static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = { - &pswrd_prty0_bb_b0}; - -static struct attn_hw_reg pswrd2_int0_bb_b0 = { - 0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184}; - -static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = { - &pswrd2_int0_bb_b0}; - -static struct attn_hw_reg pswrd2_prty0_bb_b0 = { - 0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194}; - -static struct attn_hw_reg pswrd2_prty1_bb_b0 = { - 1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204}; - -static struct attn_hw_reg pswrd2_prty2_bb_b0 = { - 2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214}; - -static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = { - &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0}; - -static struct attn_hw_reg pswwr_int0_bb_b0 = { - 0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184}; - -static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = { - &pswwr_int0_bb_b0}; - -static struct attn_hw_reg pswwr_prty0_bb_b0 = { - 0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194}; - -static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = { - &pswwr_prty0_bb_b0}; - -static struct attn_hw_reg pswwr2_int0_bb_b0 = { - 0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184}; - -static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = { - &pswwr2_int0_bb_b0}; - -static struct attn_hw_reg pswwr2_prty0_bb_b0 = { - 0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194}; - -static struct attn_hw_reg pswwr2_prty1_bb_b0 = { - 1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204}; - -static struct attn_hw_reg pswwr2_prty2_bb_b0 = { - 2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214}; - -static struct attn_hw_reg pswwr2_prty3_bb_b0 = { - 3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224}; - -static struct attn_hw_reg pswwr2_prty4_bb_b0 = { - 4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234}; - -static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = { - &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0, - &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0}; - -static struct attn_hw_reg pswrq_int0_bb_b0 = { - 0, 21, 0x280180, 0x28018c, 0x280188, 0x280184}; - -static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = { - &pswrq_int0_bb_b0}; - -static struct attn_hw_reg pswrq_prty0_bb_b0 = { - 0, 1, 0x280190, 0x28019c, 0x280198, 0x280194}; - -static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = { - &pswrq_prty0_bb_b0}; - -static struct attn_hw_reg pswrq2_int0_bb_b0 = { - 0, 15, 0x240180, 0x24018c, 0x240188, 0x240184}; - -static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = { - &pswrq2_int0_bb_b0}; - -static struct attn_hw_reg pswrq2_prty1_bb_b0 = { - 0, 9, 0x240200, 0x24020c, 0x240208, 0x240204}; - -static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = { - &pswrq2_prty1_bb_b0}; - -static struct attn_hw_reg pglcs_int0_bb_b0 = { - 0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04}; - -static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = { - &pglcs_int0_bb_b0}; - -static struct attn_hw_reg dmae_int0_bb_b0 = { - 0, 2, 0xc180, 0xc18c, 0xc188, 0xc184}; - -static struct attn_hw_reg 
*dmae_int_bb_b0_regs[1] = { - &dmae_int0_bb_b0}; - -static struct attn_hw_reg dmae_prty1_bb_b0 = { - 0, 3, 0xc200, 0xc20c, 0xc208, 0xc204}; - -static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = { - &dmae_prty1_bb_b0}; - -static struct attn_hw_reg ptu_int0_bb_b0 = { - 0, 8, 0x560180, 0x56018c, 0x560188, 0x560184}; - -static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = { - &ptu_int0_bb_b0}; - -static struct attn_hw_reg ptu_prty1_bb_b0 = { - 0, 18, 0x560200, 0x56020c, 0x560208, 0x560204}; - -static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = { - &ptu_prty1_bb_b0}; - -static struct attn_hw_reg tcm_int0_bb_b0 = { - 0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184}; - -static struct attn_hw_reg tcm_int1_bb_b0 = { - 1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194}; - -static struct attn_hw_reg tcm_int2_bb_b0 = { - 2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4}; - -static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = { - &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0}; - -static struct attn_hw_reg tcm_prty1_bb_b0 = { - 0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204}; - -static struct attn_hw_reg tcm_prty2_bb_b0 = { - 1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214}; - -static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = { - &tcm_prty1_bb_b0, &tcm_prty2_bb_b0}; - -static struct attn_hw_reg mcm_int0_bb_b0 = { - 0, 14, 0x1200180, 0x120018c, 0x1200188, 0x1200184}; - -static struct attn_hw_reg mcm_int1_bb_b0 = { - 1, 26, 0x1200190, 0x120019c, 0x1200198, 0x1200194}; - -static struct attn_hw_reg mcm_int2_bb_b0 = { - 2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4}; - -static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = { - &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0}; - -static struct attn_hw_reg mcm_prty1_bb_b0 = { - 0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204}; - -static struct attn_hw_reg mcm_prty2_bb_b0 = { - 1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214}; - -static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = { - &mcm_prty1_bb_b0, &mcm_prty2_bb_b0}; - -static struct attn_hw_reg ucm_int0_bb_b0 = { - 0, 17, 0x1280180, 0x128018c, 0x1280188, 0x1280184}; - -static struct attn_hw_reg ucm_int1_bb_b0 = { - 1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194}; - -static struct attn_hw_reg ucm_int2_bb_b0 = { - 2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4}; - -static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = { - &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0}; - -static struct attn_hw_reg ucm_prty1_bb_b0 = { - 0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204}; - -static struct attn_hw_reg ucm_prty2_bb_b0 = { - 1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214}; - -static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = { - &ucm_prty1_bb_b0, &ucm_prty2_bb_b0}; - -static struct attn_hw_reg xcm_int0_bb_b0 = { - 0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184}; - -static struct attn_hw_reg xcm_int1_bb_b0 = { - 1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194}; - -static struct attn_hw_reg xcm_int2_bb_b0 = { - 2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4}; - -static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = { - &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0}; - -static struct attn_hw_reg xcm_prty1_bb_b0 = { - 0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204}; - -static struct attn_hw_reg xcm_prty2_bb_b0 = { - 1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214}; - -static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = { - &xcm_prty1_bb_b0, &xcm_prty2_bb_b0}; - -static struct attn_hw_reg ycm_int0_bb_b0 = { - 0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184}; - -static struct 
attn_hw_reg ycm_int1_bb_b0 = { - 1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194}; - -static struct attn_hw_reg ycm_int2_bb_b0 = { - 2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4}; - -static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = { - &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0}; - -static struct attn_hw_reg ycm_prty1_bb_b0 = { - 0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204}; - -static struct attn_hw_reg ycm_prty2_bb_b0 = { - 1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214}; - -static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = { - &ycm_prty1_bb_b0, &ycm_prty2_bb_b0}; - -static struct attn_hw_reg pcm_int0_bb_b0 = { - 0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184}; - -static struct attn_hw_reg pcm_int1_bb_b0 = { - 1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194}; - -static struct attn_hw_reg pcm_int2_bb_b0 = { - 2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4}; - -static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = { - &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0}; - -static struct attn_hw_reg pcm_prty1_bb_b0 = { - 0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204}; - -static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = { - &pcm_prty1_bb_b0}; - -static struct attn_hw_reg qm_int0_bb_b0 = { - 0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184}; - -static struct attn_hw_reg *qm_int_bb_b0_regs[1] = { - &qm_int0_bb_b0}; - -static struct attn_hw_reg qm_prty0_bb_b0 = { - 0, 11, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194}; - -static struct attn_hw_reg qm_prty1_bb_b0 = { - 1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204}; - -static struct attn_hw_reg qm_prty2_bb_b0 = { - 2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214}; - -static struct attn_hw_reg qm_prty3_bb_b0 = { - 3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224}; - -static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = { - &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0}; - -static struct attn_hw_reg tm_int0_bb_b0 = { - 0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184}; - -static struct attn_hw_reg tm_int1_bb_b0 = { - 1, 11, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194}; - -static struct attn_hw_reg *tm_int_bb_b0_regs[2] = { - &tm_int0_bb_b0, &tm_int1_bb_b0}; - -static struct attn_hw_reg tm_prty1_bb_b0 = { - 0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204}; - -static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = { - &tm_prty1_bb_b0}; - -static struct attn_hw_reg dorq_int0_bb_b0 = { - 0, 9, 0x100180, 0x10018c, 0x100188, 0x100184}; - -static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = { - &dorq_int0_bb_b0}; - -static struct attn_hw_reg dorq_prty0_bb_b0 = { - 0, 1, 0x100190, 0x10019c, 0x100198, 0x100194}; - -static struct attn_hw_reg dorq_prty1_bb_b0 = { - 1, 6, 0x100200, 0x10020c, 0x100208, 0x100204}; - -static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = { - &dorq_prty0_bb_b0, &dorq_prty1_bb_b0}; - -static struct attn_hw_reg brb_int0_bb_b0 = { - 0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4}; - -static struct attn_hw_reg brb_int1_bb_b0 = { - 1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc}; - -static struct attn_hw_reg brb_int2_bb_b0 = { - 2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4}; - -static struct attn_hw_reg brb_int3_bb_b0 = { - 3, 31, 0x340108, 0x340114, 0x340110, 0x34010c}; - -static struct attn_hw_reg brb_int4_bb_b0 = { - 4, 27, 0x340120, 0x34012c, 0x340128, 0x340124}; - -static struct attn_hw_reg brb_int5_bb_b0 = { - 5, 1, 0x340138, 0x340144, 0x340140, 0x34013c}; - -static struct attn_hw_reg brb_int6_bb_b0 = { - 6, 8, 0x340150, 0x34015c, 0x340158, 0x340154}; - -static struct attn_hw_reg brb_int7_bb_b0 = { - 7, 32, 
0x340168, 0x340174, 0x340170, 0x34016c}; - -static struct attn_hw_reg brb_int8_bb_b0 = { - 8, 17, 0x340184, 0x340190, 0x34018c, 0x340188}; - -static struct attn_hw_reg brb_int9_bb_b0 = { - 9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0}; - -static struct attn_hw_reg brb_int10_bb_b0 = { - 10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8}; - -static struct attn_hw_reg brb_int11_bb_b0 = { - 11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0}; - -static struct attn_hw_reg *brb_int_bb_b0_regs[12] = { - &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0, - &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0, - &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0}; - -static struct attn_hw_reg brb_prty0_bb_b0 = { - 0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0}; - -static struct attn_hw_reg brb_prty1_bb_b0 = { - 1, 31, 0x340400, 0x34040c, 0x340408, 0x340404}; - -static struct attn_hw_reg brb_prty2_bb_b0 = { - 2, 14, 0x340410, 0x34041c, 0x340418, 0x340414}; - -static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = { - &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0}; - -static struct attn_hw_reg src_int0_bb_b0 = { - 0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4}; - -static struct attn_hw_reg *src_int_bb_b0_regs[1] = { - &src_int0_bb_b0}; - -static struct attn_hw_reg prs_int0_bb_b0 = { - 0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044}; - -static struct attn_hw_reg *prs_int_bb_b0_regs[1] = { - &prs_int0_bb_b0}; - -static struct attn_hw_reg prs_prty0_bb_b0 = { - 0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054}; - -static struct attn_hw_reg prs_prty1_bb_b0 = { - 1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208}; - -static struct attn_hw_reg prs_prty2_bb_b0 = { - 2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218}; - -static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = { - &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0}; - -static struct attn_hw_reg tsdm_int0_bb_b0 = { - 0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044}; - -static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = { - &tsdm_int0_bb_b0}; - -static struct attn_hw_reg tsdm_prty1_bb_b0 = { - 0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204}; - -static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = { - &tsdm_prty1_bb_b0}; - -static struct attn_hw_reg msdm_int0_bb_b0 = { - 0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044}; - -static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = { - &msdm_int0_bb_b0}; - -static struct attn_hw_reg msdm_prty1_bb_b0 = { - 0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204}; - -static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = { - &msdm_prty1_bb_b0}; - -static struct attn_hw_reg usdm_int0_bb_b0 = { - 0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044}; - -static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = { - &usdm_int0_bb_b0}; - -static struct attn_hw_reg usdm_prty1_bb_b0 = { - 0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204}; - -static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = { - &usdm_prty1_bb_b0}; - -static struct attn_hw_reg xsdm_int0_bb_b0 = { - 0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044}; - -static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = { - &xsdm_int0_bb_b0}; - -static struct attn_hw_reg xsdm_prty1_bb_b0 = { - 0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204}; - -static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = { - &xsdm_prty1_bb_b0}; - -static struct attn_hw_reg ysdm_int0_bb_b0 = { - 0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044}; - -static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = { - &ysdm_int0_bb_b0}; - -static struct attn_hw_reg ysdm_prty1_bb_b0 = { - 0, 9, 0xf90200, 0xf9020c, 
0xf90208, 0xf90204}; - -static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = { - &ysdm_prty1_bb_b0}; - -static struct attn_hw_reg psdm_int0_bb_b0 = { - 0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044}; - -static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = { - &psdm_int0_bb_b0}; - -static struct attn_hw_reg psdm_prty1_bb_b0 = { - 0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204}; - -static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = { - &psdm_prty1_bb_b0}; - -static struct attn_hw_reg tsem_int0_bb_b0 = { - 0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044}; - -static struct attn_hw_reg tsem_int1_bb_b0 = { - 1, 13, 0x1700050, 0x170005c, 0x1700058, 0x1700054}; - -static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = { - 2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044}; - -static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = { - &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0}; - -static struct attn_hw_reg tsem_prty0_bb_b0 = { - 0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc}; - -static struct attn_hw_reg tsem_prty1_bb_b0 = { - 1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204}; - -static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = { - 2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204}; - -static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = { - &tsem_prty0_bb_b0, &tsem_prty1_bb_b0, - &tsem_fast_memory_vfc_config_prty1_bb_b0}; - -static struct attn_hw_reg msem_int0_bb_b0 = { - 0, 32, 0x1800040, 0x180004c, 0x1800048, 0x1800044}; - -static struct attn_hw_reg msem_int1_bb_b0 = { - 1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054}; - -static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = { - 2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044}; - -static struct attn_hw_reg *msem_int_bb_b0_regs[3] = { - &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0}; - -static struct attn_hw_reg msem_prty0_bb_b0 = { - 0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc}; - -static struct attn_hw_reg msem_prty1_bb_b0 = { - 1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204}; - -static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = { - &msem_prty0_bb_b0, &msem_prty1_bb_b0}; - -static struct attn_hw_reg usem_int0_bb_b0 = { - 0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044}; - -static struct attn_hw_reg usem_int1_bb_b0 = { - 1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054}; - -static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = { - 2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044}; - -static struct attn_hw_reg *usem_int_bb_b0_regs[3] = { - &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0}; - -static struct attn_hw_reg usem_prty0_bb_b0 = { - 0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc}; - -static struct attn_hw_reg usem_prty1_bb_b0 = { - 1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204}; - -static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = { - &usem_prty0_bb_b0, &usem_prty1_bb_b0}; - -static struct attn_hw_reg xsem_int0_bb_b0 = { - 0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044}; - -static struct attn_hw_reg xsem_int1_bb_b0 = { - 1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054}; - -static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = { - 2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044}; - -static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = { - &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0}; - -static struct attn_hw_reg xsem_prty0_bb_b0 = { - 0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc}; - -static struct attn_hw_reg xsem_prty1_bb_b0 = { - 1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204}; - -static struct 
attn_hw_reg *xsem_prty_bb_b0_regs[2] = { - &xsem_prty0_bb_b0, &xsem_prty1_bb_b0}; - -static struct attn_hw_reg ysem_int0_bb_b0 = { - 0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044}; - -static struct attn_hw_reg ysem_int1_bb_b0 = { - 1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054}; - -static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = { - 2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044}; - -static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = { - &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0}; - -static struct attn_hw_reg ysem_prty0_bb_b0 = { - 0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc}; - -static struct attn_hw_reg ysem_prty1_bb_b0 = { - 1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204}; - -static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = { - &ysem_prty0_bb_b0, &ysem_prty1_bb_b0}; - -static struct attn_hw_reg psem_int0_bb_b0 = { - 0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044}; - -static struct attn_hw_reg psem_int1_bb_b0 = { - 1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054}; - -static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = { - 2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044}; - -static struct attn_hw_reg *psem_int_bb_b0_regs[3] = { - &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0}; - -static struct attn_hw_reg psem_prty0_bb_b0 = { - 0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc}; - -static struct attn_hw_reg psem_prty1_bb_b0 = { - 1, 6, 0x1600200, 0x160020c, 0x1600208, 0x1600204}; - -static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = { - 2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204}; - -static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = { - &psem_prty0_bb_b0, &psem_prty1_bb_b0, - &psem_fast_memory_vfc_config_prty1_bb_b0}; - -static struct attn_hw_reg rss_int0_bb_b0 = { - 0, 12, 0x238980, 0x23898c, 0x238988, 0x238984}; - -static struct attn_hw_reg *rss_int_bb_b0_regs[1] = { - &rss_int0_bb_b0}; - -static struct attn_hw_reg rss_prty1_bb_b0 = { - 0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04}; - -static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = { - &rss_prty1_bb_b0}; - -static struct attn_hw_reg tmld_int0_bb_b0 = { - 0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184}; - -static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = { - &tmld_int0_bb_b0}; - -static struct attn_hw_reg tmld_prty1_bb_b0 = { - 0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204}; - -static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = { - &tmld_prty1_bb_b0}; - -static struct attn_hw_reg muld_int0_bb_b0 = { - 0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184}; - -static struct attn_hw_reg *muld_int_bb_b0_regs[1] = { - &muld_int0_bb_b0}; - -static struct attn_hw_reg muld_prty1_bb_b0 = { - 0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204}; - -static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = { - &muld_prty1_bb_b0}; - -static struct attn_hw_reg yuld_int0_bb_b0 = { - 0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184}; - -static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = { - &yuld_int0_bb_b0}; - -static struct attn_hw_reg yuld_prty1_bb_b0 = { - 0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204}; - -static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = { - &yuld_prty1_bb_b0}; - -static struct attn_hw_reg xyld_int0_bb_b0 = { - 0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184}; - -static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = { - &xyld_int0_bb_b0}; - -static struct attn_hw_reg xyld_prty1_bb_b0 = { - 0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204}; - -static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = { - &xyld_prty1_bb_b0}; - -static struct attn_hw_reg 
prm_int0_bb_b0 = { - 0, 11, 0x230040, 0x23004c, 0x230048, 0x230044}; - -static struct attn_hw_reg *prm_int_bb_b0_regs[1] = { - &prm_int0_bb_b0}; - -static struct attn_hw_reg prm_prty0_bb_b0 = { - 0, 1, 0x230050, 0x23005c, 0x230058, 0x230054}; - -static struct attn_hw_reg prm_prty1_bb_b0 = { - 1, 24, 0x230200, 0x23020c, 0x230208, 0x230204}; - -static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = { - &prm_prty0_bb_b0, &prm_prty1_bb_b0}; - -static struct attn_hw_reg pbf_pb1_int0_bb_b0 = { - 0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044}; - -static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = { - &pbf_pb1_int0_bb_b0}; - -static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = { - 0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054}; - -static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = { - &pbf_pb1_prty0_bb_b0}; - -static struct attn_hw_reg pbf_pb2_int0_bb_b0 = { - 0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044}; - -static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = { - &pbf_pb2_int0_bb_b0}; - -static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = { - 0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054}; - -static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = { - &pbf_pb2_prty0_bb_b0}; - -static struct attn_hw_reg rpb_int0_bb_b0 = { - 0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044}; - -static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = { - &rpb_int0_bb_b0}; - -static struct attn_hw_reg rpb_prty0_bb_b0 = { - 0, 1, 0x23c050, 0x23c05c, 0x23c058, 0x23c054}; - -static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = { - &rpb_prty0_bb_b0}; - -static struct attn_hw_reg btb_int0_bb_b0 = { - 0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4}; - -static struct attn_hw_reg btb_int1_bb_b0 = { - 1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc}; - -static struct attn_hw_reg btb_int2_bb_b0 = { - 2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4}; - -static struct attn_hw_reg btb_int3_bb_b0 = { - 3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c}; - -static struct attn_hw_reg btb_int4_bb_b0 = { - 4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124}; - -static struct attn_hw_reg btb_int5_bb_b0 = { - 5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c}; - -static struct attn_hw_reg btb_int6_bb_b0 = { - 6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154}; - -static struct attn_hw_reg btb_int8_bb_b0 = { - 7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188}; - -static struct attn_hw_reg btb_int9_bb_b0 = { - 8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0}; - -static struct attn_hw_reg btb_int10_bb_b0 = { - 9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8}; - -static struct attn_hw_reg btb_int11_bb_b0 = { - 10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0}; - -static struct attn_hw_reg *btb_int_bb_b0_regs[11] = { - &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0, - &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0, - &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0}; - -static struct attn_hw_reg btb_prty0_bb_b0 = { - 0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0}; - -static struct attn_hw_reg btb_prty1_bb_b0 = { - 1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404}; - -static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = { - &btb_prty0_bb_b0, &btb_prty1_bb_b0}; - -static struct attn_hw_reg pbf_int0_bb_b0 = { - 0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184}; - -static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = { - &pbf_int0_bb_b0}; - -static struct attn_hw_reg pbf_prty0_bb_b0 = { - 0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194}; - -static struct attn_hw_reg pbf_prty1_bb_b0 = { - 1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204}; - -static struct attn_hw_reg 
pbf_prty2_bb_b0 = { - 2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214}; - -static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = { - &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0}; - -static struct attn_hw_reg rdif_int0_bb_b0 = { - 0, 8, 0x300180, 0x30018c, 0x300188, 0x300184}; - -static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = { - &rdif_int0_bb_b0}; - -static struct attn_hw_reg rdif_prty0_bb_b0 = { - 0, 1, 0x300190, 0x30019c, 0x300198, 0x300194}; - -static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = { - &rdif_prty0_bb_b0}; - -static struct attn_hw_reg tdif_int0_bb_b0 = { - 0, 8, 0x310180, 0x31018c, 0x310188, 0x310184}; - -static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = { - &tdif_int0_bb_b0}; - -static struct attn_hw_reg tdif_prty0_bb_b0 = { - 0, 1, 0x310190, 0x31019c, 0x310198, 0x310194}; - -static struct attn_hw_reg tdif_prty1_bb_b0 = { - 1, 11, 0x310200, 0x31020c, 0x310208, 0x310204}; - -static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = { - &tdif_prty0_bb_b0, &tdif_prty1_bb_b0}; - -static struct attn_hw_reg cdu_int0_bb_b0 = { - 0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc}; - -static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = { - &cdu_int0_bb_b0}; - -static struct attn_hw_reg cdu_prty1_bb_b0 = { - 0, 5, 0x580200, 0x58020c, 0x580208, 0x580204}; - -static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = { - &cdu_prty1_bb_b0}; - -static struct attn_hw_reg ccfc_int0_bb_b0 = { - 0, 2, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184}; - -static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = { - &ccfc_int0_bb_b0}; - -static struct attn_hw_reg ccfc_prty1_bb_b0 = { - 0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204}; - -static struct attn_hw_reg ccfc_prty0_bb_b0 = { - 1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8}; - -static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = { - &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0}; - -static struct attn_hw_reg tcfc_int0_bb_b0 = { - 0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184}; - -static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = { - &tcfc_int0_bb_b0}; - -static struct attn_hw_reg tcfc_prty1_bb_b0 = { - 0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204}; - -static struct attn_hw_reg tcfc_prty0_bb_b0 = { - 1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8}; - -static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = { - &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0}; - -static struct attn_hw_reg igu_int0_bb_b0 = { - 0, 11, 0x180180, 0x18018c, 0x180188, 0x180184}; - -static struct attn_hw_reg *igu_int_bb_b0_regs[1] = { - &igu_int0_bb_b0}; - -static struct attn_hw_reg igu_prty0_bb_b0 = { - 0, 1, 0x180190, 0x18019c, 0x180198, 0x180194}; - -static struct attn_hw_reg igu_prty1_bb_b0 = { - 1, 31, 0x180200, 0x18020c, 0x180208, 0x180204}; - -static struct attn_hw_reg igu_prty2_bb_b0 = { - 2, 1, 0x180210, 0x18021c, 0x180218, 0x180214}; - -static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = { - &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0}; - -static struct attn_hw_reg cau_int0_bb_b0 = { - 0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0}; - -static struct attn_hw_reg *cau_int_bb_b0_regs[1] = { - &cau_int0_bb_b0}; - -static struct attn_hw_reg cau_prty1_bb_b0 = { - 0, 13, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204}; - -static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = { - &cau_prty1_bb_b0}; - -static struct attn_hw_reg dbg_int0_bb_b0 = { - 0, 1, 0x10180, 0x1018c, 0x10188, 0x10184}; - -static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = { - &dbg_int0_bb_b0}; - -static struct attn_hw_reg dbg_prty1_bb_b0 = { - 0, 1, 0x10200, 0x1020c, 0x10208, 0x10204}; - -static struct attn_hw_reg 
*dbg_prty_bb_b0_regs[1] = { - &dbg_prty1_bb_b0}; - -static struct attn_hw_reg nig_int0_bb_b0 = { - 0, 12, 0x500040, 0x50004c, 0x500048, 0x500044}; - -static struct attn_hw_reg nig_int1_bb_b0 = { - 1, 32, 0x500050, 0x50005c, 0x500058, 0x500054}; - -static struct attn_hw_reg nig_int2_bb_b0 = { - 2, 20, 0x500060, 0x50006c, 0x500068, 0x500064}; - -static struct attn_hw_reg nig_int3_bb_b0 = { - 3, 18, 0x500070, 0x50007c, 0x500078, 0x500074}; - -static struct attn_hw_reg nig_int4_bb_b0 = { - 4, 20, 0x500080, 0x50008c, 0x500088, 0x500084}; - -static struct attn_hw_reg nig_int5_bb_b0 = { - 5, 18, 0x500090, 0x50009c, 0x500098, 0x500094}; - -static struct attn_hw_reg *nig_int_bb_b0_regs[6] = { - &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0, - &nig_int4_bb_b0, &nig_int5_bb_b0}; - -static struct attn_hw_reg nig_prty0_bb_b0 = { - 0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4}; - -static struct attn_hw_reg nig_prty1_bb_b0 = { - 1, 31, 0x500200, 0x50020c, 0x500208, 0x500204}; - -static struct attn_hw_reg nig_prty2_bb_b0 = { - 2, 31, 0x500210, 0x50021c, 0x500218, 0x500214}; - -static struct attn_hw_reg nig_prty3_bb_b0 = { - 3, 31, 0x500220, 0x50022c, 0x500228, 0x500224}; - -static struct attn_hw_reg nig_prty4_bb_b0 = { - 4, 17, 0x500230, 0x50023c, 0x500238, 0x500234}; - -static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = { - &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0, - &nig_prty3_bb_b0, &nig_prty4_bb_b0}; - -static struct attn_hw_reg ipc_int0_bb_b0 = { - 0, 13, 0x2050c, 0x20518, 0x20514, 0x20510}; - -static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = { - &ipc_int0_bb_b0}; - -static struct attn_hw_reg ipc_prty0_bb_b0 = { - 0, 1, 0x2051c, 0x20528, 0x20524, 0x20520}; - -static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = { - &ipc_prty0_bb_b0}; - -static struct attn_hw_block attn_blocks[] = { - {"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } }, - {"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } }, - {"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } }, - {"dbu", {{0, 0, NULL, NULL} } }, - {"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs, - pglue_b_prty_bb_b0_regs} } }, - {"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } }, - {"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } }, - {"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } }, - {"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } }, - {"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } }, - {"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } }, - {"mcp", {{0, 0, NULL, NULL} } }, - {"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } }, - {"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } }, - {"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs, - pswhst2_prty_bb_b0_regs} } }, - {"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } }, - {"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, pswrd2_prty_bb_b0_regs} } }, - {"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } }, - {"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } }, - {"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } }, - {"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } }, - {"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } }, - {"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } }, - {"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } }, - {"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } }, - {"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } }, - {"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } }, - {"xcm", {{3, 2, 
xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } }, - {"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } }, - {"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } }, - {"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } }, - {"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } }, - {"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } }, - {"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } }, - {"src", {{1, 0, src_int_bb_b0_regs, NULL} } }, - {"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } }, - {"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } }, - {"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } }, - {"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } }, - {"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } }, - {"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } }, - {"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } }, - {"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } }, - {"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } }, - {"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } }, - {"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } }, - {"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } }, - {"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } }, - {"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } }, - {"tmld", {{1, 1, tmld_int_bb_b0_regs, tmld_prty_bb_b0_regs} } }, - {"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } }, - {"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } }, - {"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } }, - {"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } }, - {"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs, - pbf_pb1_prty_bb_b0_regs} } }, - {"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs, - pbf_pb2_prty_bb_b0_regs} } }, - {"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } }, - {"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } }, - {"pbf", { {1, 3, pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } }, - {"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } }, - {"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } }, - {"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } }, - {"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } }, - {"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } }, - {"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } }, - {"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } }, - {"umac", { {0, 0, NULL, NULL} } }, - {"xmac", { {0, 0, NULL, NULL} } }, - {"dbg", { {1, 1, dbg_int_bb_b0_regs, dbg_prty_bb_b0_regs} } }, - {"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } }, - {"wol", { {0, 0, NULL, NULL} } }, - {"bmbn", { {0, 0, NULL, NULL} } }, - {"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } }, - {"nwm", { {0, 0, NULL, NULL} } }, - {"nws", { {0, 0, NULL, NULL} } }, - {"ms", { {0, 0, NULL, NULL} } }, - {"phy_pcie", { {0, 0, NULL, NULL} } }, - {"misc_aeu", { {0, 0, NULL, NULL} } }, - {"bar0_map", { {0, 0, NULL, NULL} } },}; - /* Specific HW attention callbacks */ static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) { @@ -1590,6 +387,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) return -EINVAL; } +/* Instead of major changes to the data-structure, we have some 'special' + * identifiers for sources that changed meaning between adapters.
+ */ +enum aeu_invert_reg_special_type { + AEU_INVERT_REG_SPECIAL_CNIG_0, + AEU_INVERT_REG_SPECIAL_CNIG_1, + AEU_INVERT_REG_SPECIAL_CNIG_2, + AEU_INVERT_REG_SPECIAL_CNIG_3, + AEU_INVERT_REG_SPECIAL_MAX, +}; + +static struct aeu_invert_reg_bit +aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { + {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, + {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, + {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, + {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, +}; + /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { { @@ -1636,8 +452,22 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, {"General Attention 35", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, - {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), - NULL, BLOCK_CNIG}, + {"NWS Parity", + ATTENTION_PAR | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), + NULL, BLOCK_NWS}, + {"NWS Interrupt", + ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), + NULL, BLOCK_NWS}, + {"NWM Parity", + ATTENTION_PAR | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), + NULL, BLOCK_NWM}, + {"NWM Interrupt", + ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), + NULL, BLOCK_NWM}, {"MCP CPU", ATTENTION_SINGLE, qed_mcp_attn_cb, MAX_BLOCK_ID}, {"MCP Watchdog timer", ATTENTION_SINGLE, @@ -1775,6 +605,27 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { }, }; +static struct aeu_invert_reg_bit * +qed_int_aeu_translate(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_bit) +{ + if (!QED_IS_BB(p_hwfn->cdev)) + return p_bit; + + if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) + return p_bit; + + return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> + ATTENTION_BB_SHIFT]; +} + +static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_bit) +{ + return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags & + ATTENTION_PARITY); +} + #define ATTN_STATE_BITS (0xfff) #define ATTN_BITS_MASKABLE (0x3ff) struct qed_sb_attn_info { @@ -1863,26 +714,23 @@ static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) return 0; } -static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn, - struct attn_hw_reg *p_reg_desc, - struct attn_hw_block *p_block, - enum qed_attention_type type, - u32 val, u32 mask) +static void qed_int_attn_print(struct qed_hwfn *p_hwfn, + enum block_id id, + enum dbg_attn_type type, bool b_clear) { - int j; + struct dbg_attn_block_result attn_results; + enum dbg_status status; - for (j = 0; j < p_reg_desc->num_of_bits; j++) { - if (!(val & (1 << j))) - continue; + memset(&attn_results, 0, sizeof(attn_results)); + status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, + b_clear, &attn_results); + if (status != DBG_STATUS_OK) DP_NOTICE(p_hwfn, - "%s (%s): reg %d [0x%08x], bit %d [%s]\n", - p_block->name, - type == QED_ATTN_TYPE_ATTN ? "Interrupt" : - "Parity", - p_reg_desc->reg_idx, p_reg_desc->sts_addr, - j, (mask & (1 << j)) ? 
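/*
 * Editor's note: the new ATTENTION_BB* flags pack an index into
 * aeu_descs_special[] inside each AEU bit's flags word (bits 20-22,
 * with BIT(23) marking that the BB chip differs). A sketch of the
 * decode done by qed_int_aeu_translate(), using the masks defined
 * earlier in this file; aeu_special_index() is a hypothetical helper.
 */
static u32 aeu_special_index(unsigned int flags)
{
	return (flags & ATTENTION_BB_MASK) >> ATTENTION_BB_SHIFT;
}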
" [MASKED]" : ""); - } + "Failed to parse attention information [status: %s]\n", + qed_dbg_get_status_str(status)); + else + qed_dbg_parse_attn(p_hwfn, &attn_results); } /** @@ -1901,53 +749,30 @@ static int qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_aeu, u32 aeu_en_reg, - u32 bitmask) + const char *p_bit_name, u32 bitmask) { + bool b_fatal = false; int rc = -EINVAL; u32 val; DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", - p_aeu->bit_name, bitmask); + p_bit_name, bitmask); /* Call callback before clearing the interrupt status */ if (p_aeu->cb) { DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", - p_aeu->bit_name); + p_bit_name); rc = p_aeu->cb(p_hwfn); } - /* Handle HW block interrupt registers */ - if (p_aeu->block_index != MAX_BLOCK_ID) { - struct attn_hw_block *p_block; - u32 mask; - int i; - - p_block = &attn_blocks[p_aeu->block_index]; - - /* Handle each interrupt register */ - for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) { - struct attn_hw_reg *p_reg_desc; - u32 sts_addr; + if (rc) + b_fatal = true; - p_reg_desc = p_block->chip_regs[0].int_regs[i]; + /* Print HW block interrupt registers */ + if (p_aeu->block_index != MAX_BLOCK_ID) + qed_int_attn_print(p_hwfn, p_aeu->block_index, + ATTN_TYPE_INTERRUPT, !b_fatal); - /* In case of fatal attention, don't clear the status - * so it would appear in following idle check. - */ - if (rc == 0) - sts_addr = p_reg_desc->sts_clr_addr; - else - sts_addr = p_reg_desc->sts_addr; - - val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr); - mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - p_reg_desc->mask_addr); - qed_int_deassertion_print_bit(p_hwfn, p_reg_desc, - p_block, - QED_ATTN_TYPE_ATTN, - val, mask); - } - } /* If the attention is benign, no need to prevent it */ if (!rc) @@ -1957,66 +782,48 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", - p_aeu->bit_name); + p_bit_name); out: return rc; } -static void qed_int_parity_print(struct qed_hwfn *p_hwfn, - struct aeu_invert_reg_bit *p_aeu, - struct attn_hw_block *p_block, - u8 bit_index) -{ - int i; - - for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) { - struct attn_hw_reg *p_reg_desc; - u32 val, mask; - - p_reg_desc = p_block->chip_regs[0].prty_regs[i]; - - val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - p_reg_desc->sts_clr_addr); - mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - p_reg_desc->mask_addr); - qed_int_deassertion_print_bit(p_hwfn, p_reg_desc, - p_block, - QED_ATTN_TYPE_PARITY, - val, mask); - } -} - /** * @brief qed_int_deassertion_parity - handle a single parity AEU source * * @param p_hwfn * @param p_aeu - descriptor of an AEU bit which caused the parity + * @param aeu_en_reg - address of the AEU enable register * @param bit_index */ static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_aeu, - u8 bit_index) + u32 aeu_en_reg, u8 bit_index) { - u32 block_id = p_aeu->block_index; + u32 block_id = p_aeu->block_index, mask, val; - DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n", - p_aeu->bit_name, bit_index); + DP_NOTICE(p_hwfn->cdev, + "%s parity attention is set [address 0x%08x, bit %d]\n", + p_aeu->bit_name, aeu_en_reg, bit_index); if (block_id != MAX_BLOCK_ID) { - qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id], - bit_index); + qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, 
false); /* In BB, there's a single parity bit for several blocks */ if (block_id == BLOCK_BTB) { - qed_int_parity_print(p_hwfn, p_aeu, - &attn_blocks[BLOCK_OPTE], - bit_index); - qed_int_parity_print(p_hwfn, p_aeu, - &attn_blocks[BLOCK_MCP], - bit_index); + qed_int_attn_print(p_hwfn, BLOCK_OPTE, + ATTN_TYPE_PARITY, false); + qed_int_attn_print(p_hwfn, BLOCK_MCP, + ATTN_TYPE_PARITY, false); } } + + /* Prevent this parity error from being re-asserted */ + mask = ~BIT(bit_index); + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); + DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", + p_aeu->bit_name); } /** @@ -2031,7 +838,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, u16 deasserted_bits) { struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; - u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask; + u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; u8 i, j, k, bit_idx; int rc = 0; @@ -2048,11 +855,11 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, /* Find parity attentions first */ for (i = 0; i < NUM_ATTN_REGS; i++) { struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; - u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - MISC_REG_AEU_ENABLE1_IGU_OUT_0 + - i * sizeof(u32)); u32 parities; + aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); + en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); + /* Skip register in which no parity bit is currently set */ parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; if (!parities) @@ -2061,10 +868,10 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, for (j = 0, bit_idx = 0; bit_idx < 32; j++) { struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; - if ((p_bit->flags & ATTENTION_PARITY) && + if (qed_int_is_parity_flag(p_hwfn, p_bit) && !!(parities & BIT(bit_idx))) qed_int_deassertion_parity(p_hwfn, p_bit, - bit_idx); + aeu_en, bit_idx); bit_idx += ATTENTION_LENGTH(p_bit->flags); } @@ -2079,10 +886,11 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, continue; for (i = 0; i < NUM_ATTN_REGS; i++) { - u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + - i * sizeof(u32) + - k * sizeof(u32) * NUM_ATTN_REGS; - u32 en, bits; + u32 bits; + + aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + + i * sizeof(u32) + + k * sizeof(u32) * NUM_ATTN_REGS; en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); bits = aeu_inv_arr[i] & en; @@ -2096,29 +904,54 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, * previous assertion. */ for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + unsigned long bitmask; u8 bit, bit_len; - u32 bitmask; p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; - - /* No need to handle parity-only bits */ - if (p_aeu->flags == ATTENTION_PAR) - continue; + p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu); bit = bit_idx; bit_len = ATTENTION_LENGTH(p_aeu->flags); - if (p_aeu->flags & ATTENTION_PAR_INT) { + if (qed_int_is_parity_flag(p_hwfn, p_aeu)) { /* Skip Parity */ bit++; bit_len--; } bitmask = bits & (((1 << bit_len) - 1) << bit); + bitmask >>= bit; + if (bitmask) { + u32 flags = p_aeu->flags; + char bit_name[30]; + u8 num; + + num = (u8)find_first_bit(&bitmask, + bit_len); + + /* Some bits represent more than + * a single interrupt. Correctly print + * their name. + */ + if (ATTENTION_LENGTH(flags) > 2 || + ((flags & ATTENTION_PAR_INT) && + ATTENTION_LENGTH(flags) > 1)) + snprintf(bit_name, 30, + p_aeu->bit_name, num); + else + strncpy(bit_name, + p_aeu->bit_name, 30); + + /* We now need to pass bitmask in its + * correct position.
+ */ + bitmask <<= bit; + /* Handle source of the attention */ qed_int_deassertion_aeu_bit(p_hwfn, p_aeu, aeu_en, + bit_name, bitmask); } @@ -2328,6 +1161,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) SB_ATTN_ALIGNED_SIZE(p_hwfn), p_sb->sb_attn, p_sb->sb_phys); kfree(p_sb); + p_hwfn->p_sb_attn = NULL; } static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, @@ -2365,12 +1199,13 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, for (i = 0; i < NUM_ATTN_REGS; i++) { /* j is array index, k is bit index */ for (j = 0, k = 0; k < 32; j++) { - unsigned int flags = aeu_descs[i].bits[j].flags; + struct aeu_invert_reg_bit *p_aeu; - if (flags & ATTENTION_PARITY) + p_aeu = &aeu_descs[i].bits[j]; + if (qed_int_is_parity_flag(p_hwfn, p_aeu)) sb_info->parity_mask[i] |= 1 << k; - k += ATTENTION_LENGTH(flags); + k += ATTENTION_LENGTH(p_aeu->flags); } DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "Attn Mask [Reg %d]: 0x%08x\n", @@ -2465,6 +1300,40 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); } +static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 igu_sb_id, + u32 pi_index, + enum qed_coalescing_fsm coalescing_fsm, + u8 timeset) +{ + struct cau_pi_entry pi_entry; + u32 sb_offset, pi_offset; + + if (IS_VF(p_hwfn->cdev)) + return; + + sb_offset = igu_sb_id * PIS_PER_SB; + memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); + + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); + if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); + else + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); + + pi_offset = sb_offset + pi_index; + if (p_hwfn->hw_init_done) { + qed_wr(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), + *((u32 *)&(pi_entry))); + } else { + STORE_RT_REG(p_hwfn, + CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, + *((u32 *)&(pi_entry))); + } +} + void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, dma_addr_t sb_phys, @@ -2531,40 +1400,6 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, } } -void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 igu_sb_id, - u32 pi_index, - enum qed_coalescing_fsm coalescing_fsm, - u8 timeset) -{ - struct cau_pi_entry pi_entry; - u32 sb_offset, pi_offset; - - if (IS_VF(p_hwfn->cdev)) - return; - - sb_offset = igu_sb_id * PIS_PER_SB; - memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); - - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); - if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); - else - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); - - pi_offset = sb_offset + pi_index; - if (p_hwfn->hw_init_done) { - qed_wr(p_hwfn, p_ptt, - CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), - *((u32 *)&(pi_entry))); - } else { - STORE_RT_REG(p_hwfn, - CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, - *((u32 *)&(pi_entry))); - } -} - void qed_int_sb_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) { @@ -2577,16 +1412,47 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, sb_info->igu_sb_id, 0, 0); } -/** - * @brief qed_get_igu_sb_id - given a sw sb_id return the - * igu_sb_id - * - * @param p_hwfn - * @param sb_id - * - * @return u16 - */ -static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf) +{ + struct qed_igu_block *p_block; + u16 igu_id; + + for (igu_id = 0; 
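qed_int_cau_conf_pi(), now local to this file, programs a single producer-index entry: a timeset plus a selector between the Rx and Tx coalescing state machines, written straight to CAU_REG_PI_MEMORY once hw_init_done is set, or staged in the runtime array during init. A hypothetical call, with an assumed timeset encoding:

    /* Hypothetical usage: program PI 0 of a status block for the Rx
     * coalescing FSM; 0x10 is an assumed timeset, not a value taken
     * from this patch.
     */
    qed_int_cau_conf_pi(p_hwfn, p_ptt, sb_info->igu_sb_id,
    		    0 /* pi_index */, QED_COAL_RX_STATE_MACHINE, 0x10);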
igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); + igu_id++) { + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; + + if (!(p_block->status & QED_IGU_STATUS_VALID) || + !(p_block->status & QED_IGU_STATUS_FREE)) + continue; + + if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf) + return p_block; + } + + return NULL; +} + +static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id) +{ + struct qed_igu_block *p_block; + u16 igu_id; + + for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); + igu_id++) { + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; + + if (!(p_block->status & QED_IGU_STATUS_VALID) || + !p_block->is_pf || + p_block->vector_number != vector_id) + continue; + + return igu_id; + } + + return QED_SB_INVALID_IDX; +} + +u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { u16 igu_sb_id; @@ -2594,7 +1460,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) if (sb_id == QED_SP_SB_ID) igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; else if (IS_PF(p_hwfn->cdev)) - igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; + igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1); else igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); @@ -2619,8 +1485,19 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); if (sb_id != QED_SP_SB_ID) { - p_hwfn->sbs_info[sb_id] = sb_info; - p_hwfn->num_sbs++; + if (IS_PF(p_hwfn->cdev)) { + struct qed_igu_info *p_info; + struct qed_igu_block *p_block; + + p_info = p_hwfn->hw_info.p_igu_info; + p_block = &p_info->entry[sb_info->igu_sb_id]; + + p_block->sb_info = sb_info; + p_block->status &= ~QED_IGU_STATUS_FREE; + p_info->usage.free_cnt--; + } else { + qed_vf_set_sb_info(p_hwfn, sb_id, sb_info); + } } sb_info->cdev = p_hwfn->cdev; @@ -2649,20 +1526,35 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, int qed_int_sb_release(struct qed_hwfn *p_hwfn, struct qed_sb_info *sb_info, u16 sb_id) { - if (sb_id == QED_SP_SB_ID) { - DP_ERR(p_hwfn, "Do Not free sp sb using this function"); - return -EINVAL; - } + struct qed_igu_block *p_block; + struct qed_igu_info *p_info; + + if (!sb_info) + return 0; /* zero status block and ack counter */ sb_info->sb_ack = 0; memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); - if (p_hwfn->sbs_info[sb_id] != NULL) { - p_hwfn->sbs_info[sb_id] = NULL; - p_hwfn->num_sbs--; + if (IS_VF(p_hwfn->cdev)) { + qed_vf_set_sb_info(p_hwfn, sb_id, NULL); + return 0; } + p_info = p_hwfn->hw_info.p_igu_info; + p_block = &p_info->entry[sb_info->igu_sb_id]; + + /* Vector 0 is reserved to Default SB */ + if (!p_block->vector_number) { + DP_ERR(p_hwfn, "Do Not free sp sb using this function"); + return -EINVAL; + } + + /* Lose reference to client's SB info, and fix counters */ + p_block->sb_info = NULL; + p_block->status |= QED_IGU_STATUS_FREE; + p_info->usage.free_cnt++; + return 0; } @@ -2679,6 +1571,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) p_sb->sb_info.sb_virt, p_sb->sb_info.sb_phys); kfree(p_sb); + p_hwfn->p_sp_sb = NULL; } static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -2780,10 +1673,9 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); } -int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - enum qed_int_mode int_mode) +static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) { - int rc = 0; /* Configure AEU signal change to produce attentions */ qed_wr(p_hwfn, 
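These helpers replace the old assumption of consecutive SB ranges with explicit CAM searches; note that a client's sb_id N resolves to IGU vector N + 1, since vector 0 is reserved for the default SB. A hypothetical acquisition sequence, mirroring the bookkeeping qed_int_sb_init() performs:

    /* Hypothetical caller: claim an unused PF status block. The flag
     * and counter updates shown mirror qed_int_sb_init() and only
     * illustrate the QED_IGU_STATUS_FREE lifecycle.
     */
    struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
    struct qed_igu_block *p_block = qed_get_igu_free_sb(p_hwfn, true);

    if (!p_block)
    	return -ENOMEM;	/* no free PF SB left in the IGU CAM */

    p_block->status &= ~QED_IGU_STATUS_FREE;
    p_info->usage.free_cnt--;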
p_ptt, IGU_REG_ATTENTION_ENABLE, 0); @@ -2796,6 +1688,16 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, /* Unmask AEU signals toward IGU */ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); +} + +int +qed_int_igu_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_int_mode int_mode) +{ + int rc = 0; + + qed_int_igu_enable_attn(p_hwfn, p_ptt); + if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { rc = qed_slowpath_irq_req(p_hwfn); if (rc) { @@ -2824,10 +1726,11 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) #define IGU_CLEANUP_SLEEP_LENGTH (1000) static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 sb_id, bool cleanup_set, u16 opaque_fid) + u16 igu_sb_id, + bool cleanup_set, u16 opaque_fid) { u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; - u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; + u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; /* Set the data field */ @@ -2850,8 +1753,8 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, mmiowb(); /* calculate where to read the status bit from */ - sb_bit = 1 << (sb_id % 32); - sb_bit_addr = sb_id / 32 * sizeof(u32); + sb_bit = 1 << (igu_sb_id % 32); + sb_bit_addr = igu_sb_id / 32 * sizeof(u32); sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; @@ -2868,29 +1771,38 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, if (!sleep_cnt) DP_NOTICE(p_hwfn, "Timeout waiting for clear status 0x%08x [for sb %d]\n", - val, sb_id); + val, igu_sb_id); } void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 sb_id, u16 opaque, bool b_set) + u16 igu_sb_id, u16 opaque, bool b_set) { + struct qed_igu_block *p_block; int pi, i; + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", + igu_sb_id, + p_block->function_id, + p_block->is_pf, p_block->vector_number); + /* Set */ if (b_set) - qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque); + qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); /* Clear */ - qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); + qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); /* Wait for the IGU SB to cleanup */ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { u32 val; val = qed_rd(p_hwfn, p_ptt, - IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4)); - if (val & (1 << (sb_id % 32))) + IGU_REG_WRITE_DONE_PENDING + + ((igu_sb_id / 32) * 4)); + if (val & BIT((igu_sb_id % 32))) usleep_range(10, 20); else break; @@ -2898,84 +1810,205 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, if (i == IGU_CLEANUP_SLEEP_LENGTH) DP_NOTICE(p_hwfn, "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", - sb_id); + igu_sb_id); /* Clear the CAU for the SB */ for (pi = 0; pi < 12; pi++) qed_wr(p_hwfn, p_ptt, - CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0); + CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); } void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_set, bool b_slowpath) { - u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; - u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; - u32 sb_id = 0, val = 0; + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + struct qed_igu_block *p_block; + u16 igu_sb_id = 0; + u32 val = 0; val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); val |= 
IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU cleaning SBs [%d,...,%d]\n", - igu_base_sb, igu_base_sb + igu_sb_cnt - 1); + for (igu_sb_id = 0; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { + p_block = &p_info->entry[igu_sb_id]; - for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++) - qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, + if (!(p_block->status & QED_IGU_STATUS_VALID) || + !p_block->is_pf || + (p_block->status & QED_IGU_STATUS_DSB)) + continue; + + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, p_hwfn->hw_info.opaque_fid, b_set); + } - if (!b_slowpath) - return; - - sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU cleaning slowpath SB [%d]\n", sb_id); - qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, - p_hwfn->hw_info.opaque_fid, b_set); + if (b_slowpath) + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + p_info->igu_dsb_id, + p_hwfn->hw_info.opaque_fid, + b_set); } -static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u16 sb_id) +int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 val = qed_rd(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; struct qed_igu_block *p_block; + int pf_sbs, vf_sbs; + u16 igu_sb_id; + u32 val, rval; - p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; + if (!RESC_NUM(p_hwfn, QED_SB)) { + p_info->b_allow_pf_vf_change = false; + } else { + /* Use the numbers the MFW has provided - + * don't forget MFW accounts for the default SB as well. + */ + p_info->b_allow_pf_vf_change = true; + + if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) { + DP_INFO(p_hwfn, + "MFW reports 0x%04x PF SBs; IGU indicates only 0x%04x\n", + RESC_NUM(p_hwfn, QED_SB) - 1, + p_info->usage.cnt); + p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1; + } - /* stop scanning when hit first invalid PF entry */ - if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && - GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) - goto out; + if (IS_PF_SRIOV(p_hwfn)) { + u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs; - /* Fill the block information */ - p_block->status = QED_IGU_STATUS_VALID; - p_block->function_id = GET_FIELD(val, - IGU_MAPPING_LINE_FUNCTION_NUMBER); - p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); - p_block->vector_number = GET_FIELD(val, - IGU_MAPPING_LINE_VECTOR_NUMBER); + if (vfs != p_info->usage.iov_cnt) + DP_VERBOSE(p_hwfn, + NETIF_MSG_INTR, + "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", + p_info->usage.iov_cnt, vfs); - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n", - sb_id, val, p_block->function_id, - p_block->is_pf, p_block->vector_number); + /* At this point we know the total number of SBs in + * the IGU and the number of PF SBs, so we can validate + * that enough are left for the VFs. + */ + if (vfs > p_info->usage.free_cnt + + p_info->usage.free_cnt_iov - p_info->usage.cnt) { + DP_NOTICE(p_hwfn, + "Not enough SBs for VFs - 0x%04x SBs total, of which the PF requires %04x and %04x are required for the VFs\n", + p_info->usage.free_cnt + + p_info->usage.free_cnt_iov, + p_info->usage.cnt, vfs); + return -EINVAL; + } -out: - return val; + /* For now, cap the number of VF SBs at the + * number of VFs. + */ + p_info->usage.iov_cnt = vfs; + } + } + + /* Mark all SBs as free, now in the right PF/VF division */ + p_info->usage.free_cnt = p_info->usage.cnt; + p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; + p_info->usage.orig = p_info->usage.cnt; + p_info->usage.iov_orig = p_info->usage.iov_cnt; + + /* We now proceed to re-configure the IGU CAM to reflect the initial + * configuration. We can start with the Default SB. + */ + pf_sbs = p_info->usage.cnt; + vf_sbs = p_info->usage.iov_cnt; + + for (igu_sb_id = p_info->igu_dsb_id; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { + p_block = &p_info->entry[igu_sb_id]; + val = 0; + + if (!(p_block->status & QED_IGU_STATUS_VALID)) + continue; + + if (p_block->status & QED_IGU_STATUS_DSB) { + p_block->function_id = p_hwfn->rel_pf_id; + p_block->is_pf = 1; + p_block->vector_number = 0; + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_PF | + QED_IGU_STATUS_DSB; + } else if (pf_sbs) { + pf_sbs--; + p_block->function_id = p_hwfn->rel_pf_id; + p_block->is_pf = 1; + p_block->vector_number = p_info->usage.cnt - pf_sbs; + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_PF | + QED_IGU_STATUS_FREE; + } else if (vf_sbs) { + p_block->function_id = + p_hwfn->cdev->p_iov_info->first_vf_in_pf + + p_info->usage.iov_cnt - vf_sbs; + p_block->is_pf = 0; + p_block->vector_number = 0; + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_FREE; + vf_sbs--; + } else { + p_block->function_id = 0; + p_block->is_pf = 0; + p_block->vector_number = 0; + } + + SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, + p_block->function_id); + SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, + p_block->vector_number); + + /* VF entries will be enabled when the VF is initialized */ + SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); + + rval = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); + + if (rval != val) { + qed_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * igu_sb_id, val); + + DP_VERBOSE(p_hwfn, + NETIF_MSG_INTR, + "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", + igu_sb_id, + p_block->function_id, + p_block->is_pf, + p_block->vector_number, rval, val); + } + } + + return 0; +} + +static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 igu_sb_id) +{ + u32 val = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); + struct qed_igu_block *p_block; + + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; + + /* Fill the block information */ + p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); + p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); + p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); + p_block->igu_sb_id = igu_sb_id; } int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_igu_info *p_igu_info; - u32 val, min_vf = 0, max_vf = 0; - u16 sb_id, last_iov_sb_id = 0; - struct qed_igu_block *blk; - u16 prev_sb_id = 0xFF; + struct qed_igu_block *p_block; +
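As a worked example of the reset loop above, assume (for illustration) usage.cnt = 4 PF SBs, usage.iov_cnt = 2 VF SBs, first_vf_in_pf = 8 and contiguous valid CAM entries starting at the default SB:

    /* CAM index :  dsb  dsb+1  dsb+2  dsb+3  dsb+4  dsb+5  dsb+6
     * function  :  PF   PF     PF     PF     PF     VF 8   VF 9
     * vector    :  0    1      2      3      4      0      0
     * valid     :  1    1      1      1      1      0      0
     *
     * PF vectors count up from 1 because vector 0 is the default SB;
     * VF lines keep vector 0 and only become valid once the VF is
     * initialized.
     */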
u32 min_vf = 0, max_vf = 0; + u16 igu_sb_id; p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); if (!p_hwfn->hw_info.p_igu_info) @@ -2983,12 +2016,10 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_igu_info = p_hwfn->hw_info.p_igu_info; - /* Initialize base sb / sb cnt for PFs and VFs */ - p_igu_info->igu_base_sb = 0xffff; - p_igu_info->igu_sb_cnt = 0; - p_igu_info->igu_dsb_id = 0xffff; - p_igu_info->igu_base_sb_iov = 0xffff; + /* Distinguish between existent and non-existent default SB */ + p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX; + /* Find the range of VF ids whose SB belong to this PF */ if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; @@ -2996,113 +2027,69 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; } - for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); - sb_id++) { - blk = &p_igu_info->igu_map.igu_blocks[sb_id]; - - val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id); - - /* stop scanning when hit first invalid PF entry */ - if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && - GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) - break; - - if (blk->is_pf) { - if (blk->function_id == p_hwfn->rel_pf_id) { - blk->status |= QED_IGU_STATUS_PF; - - if (blk->vector_number == 0) { - if (p_igu_info->igu_dsb_id == 0xffff) - p_igu_info->igu_dsb_id = sb_id; - } else { - if (p_igu_info->igu_base_sb == - 0xffff) { - p_igu_info->igu_base_sb = sb_id; - } else if (prev_sb_id != sb_id - 1) { - DP_NOTICE(p_hwfn->cdev, - "consecutive igu vectors for HWFN %x broken", - p_hwfn->rel_pf_id); - break; - } - prev_sb_id = sb_id; - /* we don't count the default */ - (p_igu_info->igu_sb_cnt)++; - } - } - } else { - if ((blk->function_id >= min_vf) && - (blk->function_id < max_vf)) { - /* Available for VFs of this PF */ - if (p_igu_info->igu_base_sb_iov == 0xffff) { - p_igu_info->igu_base_sb_iov = sb_id; - } else if (last_iov_sb_id != sb_id - 1) { - if (!val) { - DP_VERBOSE(p_hwfn->cdev, - NETIF_MSG_INTR, - "First uninitialized IGU CAM entry at index 0x%04x\n", - sb_id); - } else { - DP_NOTICE(p_hwfn->cdev, - "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n", - p_hwfn->rel_pf_id, - last_iov_sb_id, - sb_id); } - break; - } - blk->status |= QED_IGU_STATUS_FREE; - p_hwfn->hw_info.p_igu_info->free_blks++; - last_iov_sb_id = sb_id; - } + for (igu_sb_id = 0; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { + /* Read current entry; Notice it might not belong to this PF */ + qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); + p_block = &p_igu_info->entry[igu_sb_id]; + + if ((p_block->is_pf) && + (p_block->function_id == p_hwfn->rel_pf_id)) { + p_block->status = QED_IGU_STATUS_PF | + QED_IGU_STATUS_VALID | + QED_IGU_STATUS_FREE; + + if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) + p_igu_info->usage.cnt++; + } else if (!(p_block->is_pf) && + (p_block->function_id >= min_vf) && + (p_block->function_id < max_vf)) { + /* Available for VFs of this PF */ + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_FREE; + + if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) + p_igu_info->usage.iov_cnt++; } - } - /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect - * the number of VF SBs [especially for first VF on engine, as we can't - * differentiate between empty entries and its entries]. 
- * Since we don't really support more SBs than VFs today, prevent any - * such configuration by sanitizing the number of SBs to equal the - * number of VFs. - */ - if (IS_PF_SRIOV(p_hwfn)) { - u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs; + /* Mark the First entry belonging to the PF or its VFs + * as the default SB [we'll reset IGU prior to first usage]. + */ + if ((p_block->status & QED_IGU_STATUS_VALID) && + (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) { + p_igu_info->igu_dsb_id = igu_sb_id; + p_block->status |= QED_IGU_STATUS_DSB; + } - if (total_vfs < p_igu_info->free_blks) { - DP_VERBOSE(p_hwfn, - (NETIF_MSG_INTR | QED_MSG_IOV), - "Limiting number of SBs for IOV - %04x --> %04x\n", - p_igu_info->free_blks, - p_hwfn->cdev->p_iov_info->total_vfs); - p_igu_info->free_blks = total_vfs; - } else if (total_vfs > p_igu_info->free_blks) { - DP_NOTICE(p_hwfn, - "IGU has only %04x SBs for VFs while the device has %04x VFs\n", - p_igu_info->free_blks, total_vfs); - return -EINVAL; + /* limit number of prints by having each PF print only its + * entries with the exception of PF0 which would print + * everything. + */ + if ((p_block->status & QED_IGU_STATUS_VALID) || + (p_hwfn->abs_pf_id == 0)) { + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", + igu_sb_id, p_block->function_id, + p_block->is_pf, p_block->vector_number); } } - p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; - - DP_VERBOSE( - p_hwfn, - NETIF_MSG_INTR, - "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n", - p_igu_info->igu_base_sb, - p_igu_info->igu_base_sb_iov, - p_igu_info->igu_sb_cnt, - p_igu_info->igu_sb_cnt_iov, - p_igu_info->igu_dsb_id); - - if (p_igu_info->igu_base_sb == 0xffff || - p_igu_info->igu_dsb_id == 0xffff || - p_igu_info->igu_sb_cnt == 0) { + + if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) { DP_NOTICE(p_hwfn, - "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", - p_igu_info->igu_base_sb, - p_igu_info->igu_sb_cnt, - p_igu_info->igu_dsb_id); + "IGU CAM returned invalid values igu_dsb_id=0x%x\n", + p_igu_info->igu_dsb_id); return -EINVAL; } + /* All non default SB are considered free at this point */ + p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; + p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", + p_igu_info->igu_dsb_id, + p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt); + return 0; } @@ -3157,6 +2144,7 @@ static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) { kfree(p_hwfn->sp_dpc); + p_hwfn->sp_dpc = NULL; } int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -3198,31 +2186,7 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, if (!info || !p_sb_cnt_info) return; - p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; - p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov; - p_sb_cnt_info->sb_free_blk = info->free_blks; -} - -u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) -{ - struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; - - /* Determine origin of SB id */ - if ((sb_id >= p_info->igu_base_sb) && - (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { - return sb_id - p_info->igu_base_sb; - } else if ((sb_id >= p_info->igu_base_sb_iov) && - (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { - /* We want the first VF queue 
to be adjacent to the - * last PF queue. Since L2 queues can be partial to - * SBs, we'll use the feature instead. - */ - return sb_id - p_info->igu_base_sb_iov + - FEAT_NUM(p_hwfn, QED_PF_L2_QUE); - } else { - DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id); - return 0; - } + memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info)); } void qed_int_disable_post_isr_release(struct qed_dev *cdev) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 0ae0bb4593ef..5199634ed630 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -79,24 +79,6 @@ enum qed_coalescing_fsm { }; /** - * @brief qed_int_cau_conf_pi - configure cau for a given - * status block - * - * @param p_hwfn - * @param p_ptt - * @param igu_sb_id - * @param pi_index - * @param state - * @param timeset - */ -void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 igu_sb_id, - u32 pi_index, - enum qed_coalescing_fsm coalescing_fsm, - u8 timeset); - -/** * @brief qed_int_igu_enable_int - enable device interrupts * * @param p_hwfn @@ -217,32 +199,63 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); #define SB_ALIGNED_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) +#define QED_SB_INVALID_IDX 0xffff + struct qed_igu_block { - u8 status; + u8 status; #define QED_IGU_STATUS_FREE 0x01 #define QED_IGU_STATUS_VALID 0x02 #define QED_IGU_STATUS_PF 0x04 +#define QED_IGU_STATUS_DSB 0x08 - u8 vector_number; - u8 function_id; - u8 is_pf; -}; + u8 vector_number; + u8 function_id; + u8 is_pf; + + /* Index inside IGU [meant for back reference] */ + u16 igu_sb_id; -struct qed_igu_map { - struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH]; + struct qed_sb_info *sb_info; }; struct qed_igu_info { - struct qed_igu_map igu_map; - u16 igu_dsb_id; - u16 igu_base_sb; - u16 igu_base_sb_iov; - u16 igu_sb_cnt; - u16 igu_sb_cnt_iov; - u16 free_blks; + struct qed_igu_block entry[MAX_TOT_SB_PER_PATH]; + u16 igu_dsb_id; + + struct qed_sb_cnt_info usage; + + bool b_allow_pf_vf_change; }; -/* TODO Names of function may change... */ +/** + * @brief - Make sure the IGU CAM reflects the resources provided by MFW + * + * @param p_hwfn + * @param p_ptt + */ +int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * @brief Translate the weakly-defined client sb-id into an IGU sb-id + * + * @param p_hwfn + * @param sb_id - user provided sb_id + * + * @return an index inside IGU CAM where the SB resides + */ +u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); + +/** + * @brief return a pointer to an unused valid SB + * + * @param p_hwfn + * @param b_is_pf - true iff we want a SB belonging to a PF + * + * @return point to an igu_block, NULL if none is available + */ +struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, + bool b_is_pf); + void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_set, @@ -321,13 +334,13 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); * * @param p_hwfn * @param p_ptt - * @param sb_id - igu status block id + * @param igu_sb_id - igu status block id * @param opaque - opaque fid of the sb owner. 
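With the counters consolidated into igu_info->usage, qed_int_get_num_sbs() now simply copies that structure out. A hypothetical query; the field names follow the usage structure as used throughout this patch:

    struct qed_sb_cnt_info sb_cnt_info;

    qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
    DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
    	   "PF SBs %04x [free %04x], VF SBs %04x [free %04x]\n",
    	   sb_cnt_info.cnt, sb_cnt_info.free_cnt,
    	   sb_cnt_info.iov_cnt, sb_cnt_info.free_cnt_iov);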
* @param b_set - set(1) / clear(0) */ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 sb_id, + u16 igu_sb_id, u16 opaque, bool b_set); @@ -377,16 +390,6 @@ void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Returns an Rx queue index appropriate for usage with given SB. - * - * @param p_hwfn - * @param sb_id - absolute index of SB - * - * @return index of Rx queue - */ -u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); - -/** * @brief - Enable Interrupt & Attention for hw function * * @param p_hwfn diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 339c91dfa658..813c77cc857f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -44,7 +44,6 @@ #include <linux/slab.h> #include <linux/stddef.h> #include <linux/string.h> -#include <linux/version.h> #include <linux/workqueue.h> #include <linux/errno.h> #include <linux/list.h> @@ -63,6 +62,22 @@ #include "qed_sriov.h" #include "qed_reg_addr.h" +static int +qed_iscsi_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, + u16 echo, union event_ring_data *data, u8 fw_return_code) +{ + if (p_hwfn->p_iscsi_info->event_cb) { + struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; + + return p_iscsi->event_cb(p_iscsi->event_context, + fw_event_code, data); + } else { + DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n"); + return -EINVAL; + } +} + struct qed_iscsi_conn { struct list_head list_entry; bool free_on_delete; @@ -186,7 +201,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, DP_ERR(p_hwfn, "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n", p_params->num_queues, - p_hwfn->hw_info.resc_num[QED_ISCSI_CQ]); + p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]); return -EINVAL; } @@ -221,7 +236,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, p_queue->cmdq_sb_pi = p_params->gl_cmd_pi; for (i = 0; i < p_params->num_queues; i++) { - val = p_hwfn->sbs_info[i]->igu_sb_id; + val = qed_get_igu_sb_id(p_hwfn, i); p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val); } @@ -266,6 +281,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, p_hwfn->p_iscsi_info->event_context = event_context; p_hwfn->p_iscsi_info->event_cb = async_event_cb; + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI, + qed_iscsi_async_event); + return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -375,7 +393,6 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh); p_tcp->srtt = cpu_to_le16(p_conn->srtt); p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var); - p_tcp->ts_time = cpu_to_le32(p_conn->ts_time); p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent); p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age); p_tcp->total_rt = cpu_to_le32(p_conn->total_rt); @@ -400,8 +417,6 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp->mss = cpu_to_le16(p_conn->mss); p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale; p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale; - dval = p_conn->ts_ticks_per_second; - p_tcp->ts_ticks_per_second = cpu_to_le32(dval); wval = p_conn->da_timeout_value; p_tcp->da_timeout_value = cpu_to_le16(wval); p_tcp->ack_frequency = p_conn->ack_frequency; @@ -492,6 +507,54 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } +static int +qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn 
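The async-event plumbing introduced here routes EQ completions for PROTOCOLID_ISCSI through a single registered callback; qed_iscsi_async_event() then forwards them to the upper layer's event_cb. A sketch of the contract, with a hypothetical handler name:

    /* The handler must match the signature qed_spq_register_async_cb()
     * expects - the same one qed_iscsi_async_event() has above.
     */
    static int demo_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
    			    u16 echo, union event_ring_data *data,
    			    u8 fw_return_code)
    {
    	return 0;	/* a real handler forwards to the upper layer */
    }

    /* paired across function start/stop, as done in this file: */
    qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI, demo_async_event);
    qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);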
*p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_conn_mac_update *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + u8 ucval; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_MAC_UPDATE, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update; + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE; + SET_FIELD(p_ramrod->hdr.flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); + ucval = p_conn->remote_mac[1]; + ((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval; + ucval = p_conn->remote_mac[0]; + ((u8 *)(&p_ramrod->remote_mac_addr_hi))[1] = ucval; + ucval = p_conn->remote_mac[3]; + ((u8 *)(&p_ramrod->remote_mac_addr_mid))[0] = ucval; + ucval = p_conn->remote_mac[2]; + ((u8 *)(&p_ramrod->remote_mac_addr_mid))[1] = ucval; + ucval = p_conn->remote_mac[5]; + ((u8 *)(&p_ramrod->remote_mac_addr_lo))[0] = ucval; + ucval = p_conn->remote_mac[4]; + ((u8 *)(&p_ramrod->remote_mac_addr_lo))[1] = ucval; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_conn, enum spq_mode comp_mode, @@ -587,7 +650,10 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.iscsi_destroy; p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC; - return qed_spq_post(p_hwfn, p_ent, NULL); + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI); + return rc; } static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) @@ -708,7 +774,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, - r2tq_num_elements, 0x80, &p_conn->r2tq); + r2tq_num_elements, 0x80, &p_conn->r2tq, NULL); if (rc) goto nomem_r2tq; @@ -719,7 +785,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, uhq_num_elements, - sizeof(struct iscsi_uhqe), &p_conn->uhq); + sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL); if (rc) goto nomem_uhq; @@ -729,7 +795,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, xhq_num_elements, - sizeof(struct iscsi_xhqe), &p_conn->xhq); + sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL); if (rc) goto nomem; @@ -822,29 +888,32 @@ void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn, kfree(p_conn); } -struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn) +int qed_iscsi_alloc(struct qed_hwfn *p_hwfn) { struct qed_iscsi_info *p_iscsi_info; p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL); if (!p_iscsi_info) - return NULL; + return -ENOMEM; INIT_LIST_HEAD(&p_iscsi_info->free_list); - return p_iscsi_info; + + p_hwfn->p_iscsi_info = p_iscsi_info; + return 0; } -void qed_iscsi_setup(struct qed_hwfn *p_hwfn, - struct qed_iscsi_info *p_iscsi_info) +void qed_iscsi_setup(struct qed_hwfn *p_hwfn) { - spin_lock_init(&p_iscsi_info->lock); + spin_lock_init(&p_hwfn->p_iscsi_info->lock); } -void qed_iscsi_free(struct 
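The open-coded shuffling above packs a 6-byte MAC into three 16-bit firmware words with the bytes of each pair swapped. This series also adds a qed_set_fw_mac_addr() helper (declared in qed.h) that performs the same packing; a sketch of the equivalent call, assuming the ramrod's MAC fields are __le16:

    qed_set_fw_mac_addr(&p_ramrod->remote_mac_addr_hi,
    		    &p_ramrod->remote_mac_addr_mid,
    		    &p_ramrod->remote_mac_addr_lo, p_conn->remote_mac);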
qed_hwfn *p_hwfn, - struct qed_iscsi_info *p_iscsi_info) +void qed_iscsi_free(struct qed_hwfn *p_hwfn) { struct qed_iscsi_conn *p_conn = NULL; + if (!p_hwfn->p_iscsi_info) + return; + while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) { p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list, struct qed_iscsi_conn, list_entry); @@ -854,7 +923,8 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn, } } - kfree(p_iscsi_info); + kfree(p_hwfn->p_iscsi_info); + p_hwfn->p_iscsi_info = NULL; } static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn, @@ -1324,6 +1394,23 @@ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats); } +static int qed_iscsi_change_mac(struct qed_dev *cdev, + u32 handle, const u8 *mac) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + return qed_sp_iscsi_mac_update(QED_LEADING_HWFN(cdev), + hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats) { @@ -1358,6 +1445,7 @@ static const struct qed_iscsi_ops qed_iscsi_ops_pass = { .destroy_conn = &qed_iscsi_destroy_conn, .clear_sq = &qed_iscsi_clear_conn_sq, .get_stats = &qed_iscsi_stats, + .change_mac = &qed_iscsi_change_mac, }; const struct qed_iscsi_ops *qed_get_iscsi_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index ae98f772cbc0..225c75b02a06 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -57,13 +57,11 @@ extern const struct qed_ll2_ops qed_ll2_ops_pass; #endif #if IS_ENABLED(CONFIG_QED_ISCSI) -struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn); +int qed_iscsi_alloc(struct qed_hwfn *p_hwfn); -void qed_iscsi_setup(struct qed_hwfn *p_hwfn, - struct qed_iscsi_info *p_iscsi_info); +void qed_iscsi_setup(struct qed_hwfn *p_hwfn); -void qed_iscsi_free(struct qed_hwfn *p_hwfn, - struct qed_iscsi_info *p_iscsi_info); +void qed_iscsi_free(struct qed_hwfn *p_hwfn); /** * @brief - Fills provided statistics struct with statistics. @@ -74,12 +72,15 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn, void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats); #else /* IS_ENABLED(CONFIG_QED_ISCSI) */ -static inline struct qed_iscsi_info *qed_iscsi_alloc( - struct qed_hwfn *p_hwfn) { return NULL; } -static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn, - struct qed_iscsi_info *p_iscsi_info) {} -static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn, - struct qed_iscsi_info *p_iscsi_info) {} +static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn) {} + +static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {} + static inline void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats) {} diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c new file mode 100644 index 000000000000..b251ebaec4db --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -0,0 +1,2408 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/spinlock.h> +#include <linux/tcp.h> +#include "qed_cxt.h" +#include "qed_hw.h" +#include "qed_ll2.h" +#include "qed_rdma.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" + +#define QED_IWARP_ORD_DEFAULT 32 +#define QED_IWARP_IRD_DEFAULT 32 +#define QED_IWARP_MAX_FW_MSS 4120 + +#define QED_EP_SIG 0xecabcdef + +struct mpa_v2_hdr { + __be16 ird; + __be16 ord; +}; + +#define MPA_V2_PEER2PEER_MODEL 0x8000 +#define MPA_V2_SEND_RTR 0x4000 /* on ird */ +#define MPA_V2_READ_RTR 0x4000 /* on ord */ +#define MPA_V2_WRITE_RTR 0x8000 +#define MPA_V2_IRD_ORD_MASK 0x3FFF + +#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED) + +#define QED_IWARP_INVALID_TCP_CID 0xffffffff +#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024) +#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024) +#define TIMESTAMP_HEADER_SIZE (12) + +#define QED_IWARP_TS_EN BIT(0) +#define QED_IWARP_DA_EN BIT(1) +#define QED_IWARP_PARAM_CRC_NEEDED (1) +#define QED_IWARP_PARAM_P2P (1) + +static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, u16 echo, + union event_ring_data *data, + u8 fw_return_code); + +/* Override devinfo with iWARP specific values */ +void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + + dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE; + dev->max_qp = min_t(u32, + IWARP_MAX_QPS, + p_hwfn->p_rdma_info->num_qps) - + QED_IWARP_PREALLOC_CNT; + + dev->max_cq = dev->max_qp; + + dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT; + dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT; +} + +void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP; + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); + p_hwfn->b_rdma_enabled_in_prs = true; +} + +/* We have two cid maps, one for tcp which should be used only from passive + * syn processing and replacing a pre-allocated ep in the list. The second + * for active tcp and for QPs. 
+ */ +static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) +{ + cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + + if (cid < QED_IWARP_PREALLOC_CNT) + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, + cid); + else + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); + + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) +{ + int rc; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + if (rc) { + DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n"); + return rc; + } + *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); + + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid); + if (rc) + qed_iwarp_cid_cleaned(p_hwfn, *cid); + + return rc; +} + +static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid) +{ + cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +/* This function allocates a cid for passive tcp (called from syn receive) + * the reason it's separate from the regular cid allocation is because it + * is assured that these cids already have ilt allocated. They are preallocated + * to ensure that we won't need to allocate memory during syn processing + */ +static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid) +{ + int rc; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->tcp_cid_map, cid); + + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "can't allocate iwarp tcp cid max-count=%d\n", + p_hwfn->p_rdma_info->tcp_cid_map.max_count); + + *cid = QED_IWARP_INVALID_TCP_CID; + return rc; + } + + *cid += qed_cxt_get_proto_cid_start(p_hwfn, + p_hwfn->p_rdma_info->proto); + return 0; +} + +int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_create_qp_out_params *out_params) +{ + struct iwarp_create_qp_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u16 physical_queue; + u32 cid; + int rc; + + qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + IWARP_SHARED_QUEUE_PAGE_SIZE, + &qp->shared_queue_phys_addr, + GFP_KERNEL); + if (!qp->shared_queue) + return -ENOMEM; + + out_params->sq_pbl_virt = (u8 *)qp->shared_queue + + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; + out_params->sq_pbl_phys = qp->shared_queue_phys_addr + + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; + out_params->rq_pbl_virt = (u8 *)qp->shared_queue + + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; + out_params->rq_pbl_phys = qp->shared_queue_phys_addr + + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; + + rc = qed_iwarp_alloc_cid(p_hwfn, &cid); + if (rc) + goto err1; + + qp->icid = (u16)cid; + + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.cid = qp->icid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_CREATE_QP, + PROTOCOLID_IWARP, &init_data); + if (rc) + goto err2; + + p_ramrod = &p_ent->ramrod.iwarp_create_qp; + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN, + 
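A sketch of the two-pool policy the comment above describes, with an assumed is_passive_syn flag standing in for the calling context:

    u32 cid;
    int rc;

    if (is_passive_syn)
    	/* SYN processing must not allocate memory: draw from the
    	 * preallocated pool, whose ILT pages already exist.
    	 */
    	rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
    else
    	/* active connects and QPs may trigger a dynamic ILT
    	 * allocation via qed_cxt_dynamic_ilt_alloc().
    	 */
    	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);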
qp->fmr_and_reserved_lkey); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, + qp->incoming_rdma_read_en); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, + qp->incoming_rdma_write_en); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN, + qp->incoming_atomic_en); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq); + + p_ramrod->pd = qp->pd; + p_ramrod->sq_num_pages = qp->sq_num_pages; + p_ramrod->rq_num_pages = qp->rq_num_pages; + + p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); + p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); + + p_ramrod->cq_cid_for_sq = + cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); + p_ramrod->cq_cid_for_rq = + cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); + + p_ramrod->dpi = cpu_to_le16(qp->dpi); + + physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + p_ramrod->physical_q0 = cpu_to_le16(physical_queue); + physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); + p_ramrod->physical_q1 = cpu_to_le16(physical_queue); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err2; + + return rc; + +err2: + qed_iwarp_cid_cleaned(p_hwfn, cid); +err1: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + IWARP_SHARED_QUEUE_PAGE_SIZE, + qp->shared_queue, qp->shared_queue_phys_addr); + + return rc; +} + +static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + struct iwarp_modify_qp_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_MODIFY_QP, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iwarp_modify_qp; + SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, + 0x1); + if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING) + p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING; + else + p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc); + + return rc; +} + +enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state) +{ + switch (state) { + case QED_ROCE_QP_STATE_RESET: + case QED_ROCE_QP_STATE_INIT: + case QED_ROCE_QP_STATE_RTR: + return QED_IWARP_QP_STATE_IDLE; + case QED_ROCE_QP_STATE_RTS: + return QED_IWARP_QP_STATE_RTS; + case QED_ROCE_QP_STATE_SQD: + return QED_IWARP_QP_STATE_CLOSING; + case QED_ROCE_QP_STATE_ERR: + return QED_IWARP_QP_STATE_ERROR; + case QED_ROCE_QP_STATE_SQE: + return QED_IWARP_QP_STATE_TERMINATE; + default: + return QED_IWARP_QP_STATE_ERROR; + } +} + +static enum qed_roce_qp_state +qed_iwarp2roce_state(enum qed_iwarp_qp_state state) +{ + switch (state) { + case QED_IWARP_QP_STATE_IDLE: + return QED_ROCE_QP_STATE_INIT; + case QED_IWARP_QP_STATE_RTS: + return QED_ROCE_QP_STATE_RTS; + case QED_IWARP_QP_STATE_TERMINATE: + return QED_ROCE_QP_STATE_SQE; + case QED_IWARP_QP_STATE_CLOSING: + return QED_ROCE_QP_STATE_SQD; + case QED_IWARP_QP_STATE_ERROR: + return QED_ROCE_QP_STATE_ERR; + default: + return QED_ROCE_QP_STATE_ERR; + } +} + +const char 
*iwarp_state_names[] = { + "IDLE", + "RTS", + "TERMINATE", + "CLOSING", + "ERROR", +}; + +int +qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + enum qed_iwarp_qp_state new_state, bool internal) +{ + enum qed_iwarp_qp_state prev_iw_state; + bool modify_fw = false; + int rc = 0; + + /* modify QP can be called from upper-layer or as a result of async + * RST/FIN... therefore need to protect + */ + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); + prev_iw_state = qp->iwarp_state; + + if (prev_iw_state == new_state) { + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); + return 0; + } + + switch (prev_iw_state) { + case QED_IWARP_QP_STATE_IDLE: + switch (new_state) { + case QED_IWARP_QP_STATE_RTS: + qp->iwarp_state = QED_IWARP_QP_STATE_RTS; + break; + case QED_IWARP_QP_STATE_ERROR: + qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; + if (!internal) + modify_fw = true; + break; + default: + break; + } + break; + case QED_IWARP_QP_STATE_RTS: + switch (new_state) { + case QED_IWARP_QP_STATE_CLOSING: + if (!internal) + modify_fw = true; + + qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING; + break; + case QED_IWARP_QP_STATE_ERROR: + if (!internal) + modify_fw = true; + qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; + break; + default: + break; + } + break; + case QED_IWARP_QP_STATE_ERROR: + switch (new_state) { + case QED_IWARP_QP_STATE_IDLE: + + qp->iwarp_state = new_state; + break; + case QED_IWARP_QP_STATE_CLOSING: + /* could happen due to race... do nothing.... */ + break; + default: + rc = -EINVAL; + } + break; + case QED_IWARP_QP_STATE_TERMINATE: + case QED_IWARP_QP_STATE_CLOSING: + qp->iwarp_state = new_state; + break; + default: + break; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n", + qp->icid, + iwarp_state_names[prev_iw_state], + iwarp_state_names[qp->iwarp_state], + internal ? "internal" : ""); + + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); + + if (modify_fw) + rc = qed_iwarp_modify_fw(p_hwfn, qp); + + return rc; +} + +int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_DESTROY_QP, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc); + + return rc; +} + +static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, + bool remove_from_active_list) +{ + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*ep->ep_buffer_virt), + ep->ep_buffer_virt, ep->ep_buffer_phys); + + if (remove_from_active_list) { + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } + + if (ep->qp) + ep->qp->ep = NULL; + + kfree(ep); +} + +int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + struct qed_iwarp_ep *ep = qp->ep; + int wait_count = 0; + int rc = 0; + + if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) { + rc = qed_iwarp_modify_qp(p_hwfn, qp, + QED_IWARP_QP_STATE_ERROR, false); + if (rc) + return rc; + } + + /* Make sure ep is closed before returning and freeing memory. 
*/ + if (ep) { + while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200) + msleep(100); + + if (ep->state != QED_IWARP_EP_CLOSED) + DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n", + ep->state); + + qed_iwarp_destroy_ep(p_hwfn, ep, false); + } + + rc = qed_iwarp_fw_destroy(p_hwfn, qp); + + if (qp->shared_queue) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + IWARP_SHARED_QUEUE_PAGE_SIZE, + qp->shared_queue, qp->shared_queue_phys_addr); + + return rc; +} + +static int +qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out) +{ + struct qed_iwarp_ep *ep; + int rc; + + ep = kzalloc(sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + ep->state = QED_IWARP_EP_INIT; + + ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*ep->ep_buffer_virt), + &ep->ep_buffer_phys, + GFP_KERNEL); + if (!ep->ep_buffer_virt) { + rc = -ENOMEM; + goto err; + } + + ep->sig = QED_EP_SIG; + + *ep_out = ep; + + return 0; + +err: + kfree(ep); + return rc; +} + +static void +qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn, + struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n", + p_tcp_ramrod->tcp.local_mac_addr_lo, + p_tcp_ramrod->tcp.local_mac_addr_mid, + p_tcp_ramrod->tcp.local_mac_addr_hi, + p_tcp_ramrod->tcp.remote_mac_addr_lo, + p_tcp_ramrod->tcp.remote_mac_addr_mid, + p_tcp_ramrod->tcp.remote_mac_addr_hi); + + if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "local_ip=%pI4h:%x, remote_ip=%pI4h%x, vlan=%x\n", + p_tcp_ramrod->tcp.local_ip, + p_tcp_ramrod->tcp.local_port, + p_tcp_ramrod->tcp.remote_ip, + p_tcp_ramrod->tcp.remote_port, + p_tcp_ramrod->tcp.vlan_id); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "local_ip=%pI6h:%x, remote_ip=%pI6h:%x, vlan=%x\n", + p_tcp_ramrod->tcp.local_ip, + p_tcp_ramrod->tcp.local_port, + p_tcp_ramrod->tcp.remote_ip, + p_tcp_ramrod->tcp.remote_port, + p_tcp_ramrod->tcp.vlan_id); + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n", + p_tcp_ramrod->tcp.flow_label, + p_tcp_ramrod->tcp.ttl, + p_tcp_ramrod->tcp.tos_or_tc, + p_tcp_ramrod->tcp.mss, + p_tcp_ramrod->tcp.rcv_wnd_scale, + p_tcp_ramrod->tcp.connect_mode, + p_tcp_ramrod->tcp.flags); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n", + p_tcp_ramrod->tcp.syn_ip_payload_length, + p_tcp_ramrod->tcp.syn_phy_addr_lo, + p_tcp_ramrod->tcp.syn_phy_addr_hi); +} + +static int +qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod; + struct tcp_offload_params_opt2 *tcp; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + dma_addr_t async_output_phys; + dma_addr_t in_pdata_phys; + u16 physical_q; + u8 tcp_flags; + int rc; + int i; + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = ep->tcp_cid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + if (ep->connect_mode == TCP_CONNECT_PASSIVE) + init_data.comp_mode = QED_SPQ_MODE_CB; + else + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD, + PROTOCOLID_IWARP, &init_data); + if (rc) + return rc; + + p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload; + + in_pdata_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, in_pdata); + 
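/* All of the buffers handed to firmware here live inside the ep's + * single qed_iwarp_ep_memory allocation; each address given to + * firmware is simply ep_buffer_phys plus an offsetof() into it. + */ +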
DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr, + in_pdata_phys); + + p_tcp_ramrod->iwarp.incoming_ulp_buffer.len = + cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); + + async_output_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, async_output); + DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf, + async_output_phys); + + p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); + p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); + + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q); + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); + p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q); + p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev; + + tcp = &p_tcp_ramrod->tcp; + qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi, + &tcp->remote_mac_addr_mid, + &tcp->remote_mac_addr_lo, ep->remote_mac_addr); + qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid, + &tcp->local_mac_addr_lo, ep->local_mac_addr); + + tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan); + + tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags; + tcp->flags = 0; + SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, + !!(tcp_flags & QED_IWARP_TS_EN)); + + SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, + !!(tcp_flags & QED_IWARP_DA_EN)); + + tcp->ip_version = ep->cm_info.ip_version; + + for (i = 0; i < 4; i++) { + tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]); + tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]); + } + + tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port); + tcp->local_port = cpu_to_le16(ep->cm_info.local_port); + tcp->mss = cpu_to_le16(ep->mss); + tcp->flow_label = 0; + tcp->ttl = 0x40; + tcp->tos_or_tc = 0; + + tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; + tcp->connect_mode = ep->connect_mode; + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + tcp->syn_ip_payload_length = + cpu_to_le16(ep->syn_ip_payload_length); + tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr); + tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr); + } + + qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc); + + return rc; +} + +static void +qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + struct qed_iwarp_cm_event_params params; + struct mpa_v2_hdr *mpa_v2; + union async_output *async_data; + u16 mpa_ord, mpa_ird; + u8 mpa_hdr_size = 0; + u8 mpa_rev; + + async_data = &ep->ep_buffer_virt->async_output; + + mpa_rev = async_data->mpa_request.mpa_handshake_mode; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "private_data_len=%x handshake_mode=%x private_data=(%x)\n", + async_data->mpa_request.ulp_data_len, + mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata))); + + if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { + /* Read ord/ird values from private data buffer */ + mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata; + mpa_hdr_size = sizeof(*mpa_v2); + + mpa_ord = ntohs(mpa_v2->ord); + mpa_ird = ntohs(mpa_v2->ird); + + /* Temporarily store the requested incoming ord/ird in cm_info; + * they are replaced with the negotiated values during accept + */ + ep->cm_info.ord = (u8)min_t(u16, + (mpa_ord & MPA_V2_IRD_ORD_MASK), + QED_IWARP_ORD_DEFAULT); + + ep->cm_info.ird = (u8)min_t(u16, + (mpa_ird & MPA_V2_IRD_ORD_MASK), + QED_IWARP_IRD_DEFAULT); + + /* Peer2Peer negotiation */ + ep->rtr_type = MPA_RTR_TYPE_NONE; + if (mpa_ird & MPA_V2_PEER2PEER_MODEL) { + if (mpa_ord & MPA_V2_WRITE_RTR) + ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE; + + if (mpa_ord & MPA_V2_READ_RTR) + ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ; + + if (mpa_ird & MPA_V2_SEND_RTR) + ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND; + + ep->rtr_type &= iwarp_info->rtr_type; + + /* if we're left with no match send our capabilities */ + if (ep->rtr_type == MPA_RTR_TYPE_NONE) + ep->rtr_type = iwarp_info->rtr_type; + } + + ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; + } else { + ep->cm_info.ord = QED_IWARP_ORD_DEFAULT; + ep->cm_info.ird = QED_IWARP_IRD_DEFAULT; + ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n", + mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type, + async_data->mpa_request.ulp_data_len, mpa_hdr_size); + + /* Strip mpa v2 hdr from private data before sending to upper layer */ + ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size; + + ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len - + mpa_hdr_size; + + params.event = QED_IWARP_EVENT_MPA_REQUEST; + params.cm_info = &ep->cm_info; + params.ep_context = ep; + params.status = 0; + + ep->state = QED_IWARP_EP_MPA_REQ_RCVD; + ep->event_cb(ep->cb_context, &params); +} + +static int +qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; + struct qed_sp_init_data init_data; + dma_addr_t async_output_phys; + struct qed_spq_entry *p_ent; + dma_addr_t out_pdata_phys; + dma_addr_t in_pdata_phys; + struct qed_rdma_qp *qp; + bool reject; + int rc; + + if (!ep) + return -EINVAL; + + qp = ep->qp; + reject = !qp; + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = reject ?
ep->tcp_cid : qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + + if (ep->connect_mode == TCP_CONNECT_ACTIVE) + init_data.comp_mode = QED_SPQ_MODE_CB; + else + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, + PROTOCOLID_IWARP, &init_data); + if (rc) + return rc; + + p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload; + out_pdata_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, out_pdata); + DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr, + out_pdata_phys); + p_mpa_ramrod->common.outgoing_ulp_buffer.len = + ep->cm_info.private_data_len; + p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; + + p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord; + p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird; + + p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; + + in_pdata_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, in_pdata); + p_mpa_ramrod->tcp_connect_side = ep->connect_mode; + DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr, + in_pdata_phys); + p_mpa_ramrod->incoming_ulp_buffer.len = + cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); + async_output_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, async_output); + DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf, + async_output_phys); + p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); + p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); + + if (!reject) { + DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr, + qp->shared_queue_phys_addr); + p_mpa_ramrod->stats_counter_id = + RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; + } else { + p_mpa_ramrod->common.reject = 1; + } + + p_mpa_ramrod->mode = ep->mpa_rev; + SET_FIELD(p_mpa_ramrod->rtr_pref, + IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); + + ep->state = QED_IWARP_EP_MPA_OFFLOADED; + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (!reject) + ep->cid = qp->icid; /* Now they're migrated. */ + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n", + reject ? 
0xffff : qp->icid, + ep->tcp_cid, + rc, + ep->cm_info.ird, + ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject); + return rc; +} + +static void +qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + ep->state = QED_IWARP_EP_INIT; + if (ep->qp) + ep->qp->ep = NULL; + ep->qp = NULL; + memset(&ep->cm_info, 0, sizeof(ep->cm_info)); + + if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { + /* We don't care about the return code, it's ok if tcp_cid + * remains invalid...in this case we'll defer allocation + */ + qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); + } + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + list_del(&ep->list_entry); + list_add_tail(&ep->list_entry, + &p_hwfn->p_rdma_info->iwarp.ep_free_list); + + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); +} + +void +qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct mpa_v2_hdr *mpa_v2_params; + union async_output *async_data; + u16 mpa_ird, mpa_ord; + u8 mpa_data_size = 0; + + if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) { + mpa_v2_params = + (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata); + mpa_data_size = sizeof(*mpa_v2_params); + mpa_ird = ntohs(mpa_v2_params->ird); + mpa_ord = ntohs(mpa_v2_params->ord); + + ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK); + ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK); + } + async_data = &ep->ep_buffer_virt->async_output; + + ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size; + ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len - + mpa_data_size; +} + +void +qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct qed_iwarp_cm_event_params params; + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + DP_NOTICE(p_hwfn, + "MPA reply event not expected on passive side!\n"); + return; + } + + params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY; + + qed_iwarp_parse_private_data(p_hwfn, ep); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", + ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); + + params.cm_info = &ep->cm_info; + params.ep_context = ep; + params.status = 0; + + ep->mpa_reply_processed = true; + + ep->event_cb(ep->cb_context, &params); +} + +#define QED_IWARP_CONNECT_MODE_STRING(ep) \ + ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active" + +/* Called as a result of the event: + * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE + */ +static void +qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + struct qed_iwarp_cm_event_params params; + + if (ep->connect_mode == TCP_CONNECT_ACTIVE) + params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; + else + params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE; + + if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed) + qed_iwarp_parse_private_data(p_hwfn, ep); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", + ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); + + params.cm_info = &ep->cm_info; + + params.ep_context = ep; + + ep->state = QED_IWARP_EP_CLOSED; + + switch (fw_return_code) { + case RDMA_RETURN_OK: + ep->qp->max_rd_atomic_req = ep->cm_info.ord; + ep->qp->max_rd_atomic_resp = ep->cm_info.ird; + qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1); + ep->state = QED_IWARP_EP_ESTABLISHED; + params.status = 0; + break; + case IWARP_CONN_ERROR_MPA_TIMEOUT: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -EBUSY; + break; + case IWARP_CONN_ERROR_MPA_ERROR_REJECT: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_RST: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid, + ep->tcp_cid); + params.status = -ECONNRESET; + break; + case IWARP_CONN_ERROR_MPA_FIN: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_INSUF_IRD: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_RTR_MISMATCH: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_INVALID_PACKET: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_LOCAL_ERROR: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_TERMINATE: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + default: + params.status = -ECONNRESET; + break; + } + + ep->event_cb(ep->cb_context, &params); + + /* on passive side, if there is no associated QP (REJECT) we need to + * return the ep to the pool (in the regular case we add an element + * in accept instead of this one). + * In both cases we need to remove it from the ep_list.
+ */ + if (fw_return_code != RDMA_RETURN_OK) { + ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; + if ((ep->connect_mode == TCP_CONNECT_PASSIVE) && + (!ep->qp)) { /* Rejected */ + qed_iwarp_return_ep(p_hwfn, ep); + } else { + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } + } +} + +static void +qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 *mpa_data_size) +{ + struct mpa_v2_hdr *mpa_v2_params; + u16 mpa_ird, mpa_ord; + + *mpa_data_size = 0; + if (MPA_REV2(ep->mpa_rev)) { + mpa_v2_params = + (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata; + *mpa_data_size = sizeof(*mpa_v2_params); + + mpa_ird = (u16)ep->cm_info.ird; + mpa_ord = (u16)ep->cm_info.ord; + + if (ep->rtr_type != MPA_RTR_TYPE_NONE) { + mpa_ird |= MPA_V2_PEER2PEER_MODEL; + + if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND) + mpa_ird |= MPA_V2_SEND_RTR; + + if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE) + mpa_ord |= MPA_V2_WRITE_RTR; + + if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) + mpa_ord |= MPA_V2_READ_RTR; + } + + mpa_v2_params->ird = htons(mpa_ird); + mpa_v2_params->ord = htons(mpa_ord); + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n", + mpa_v2_params->ird, + mpa_v2_params->ord, + *((u32 *)mpa_v2_params), + mpa_ord & MPA_V2_IRD_ORD_MASK, + mpa_ird & MPA_V2_IRD_ORD_MASK, + !!(mpa_ird & MPA_V2_PEER2PEER_MODEL), + !!(mpa_ird & MPA_V2_SEND_RTR), + !!(mpa_ord & MPA_V2_WRITE_RTR), + !!(mpa_ord & MPA_V2_READ_RTR)); + } +} + +int qed_iwarp_connect(void *rdma_cxt, + struct qed_iwarp_connect_in *iparams, + struct qed_iwarp_connect_out *oparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_info *iwarp_info; + struct qed_iwarp_ep *ep; + u8 mpa_data_size = 0; + u8 ts_hdr_size = 0; + u32 cid; + int rc; + + if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) || + (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) { + DP_NOTICE(p_hwfn, + "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", + iparams->qp->icid, iparams->cm_info.ord, + iparams->cm_info.ird); + + return -EINVAL; + } + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + + /* Allocate ep object */ + rc = qed_iwarp_alloc_cid(p_hwfn, &cid); + if (rc) + return rc; + + rc = qed_iwarp_create_ep(p_hwfn, &ep); + if (rc) + goto err; + + ep->tcp_cid = cid; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ep->qp = iparams->qp; + ep->qp->ep = ep; + ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr); + ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr); + memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info)); + + ep->cm_info.ord = iparams->cm_info.ord; + ep->cm_info.ird = iparams->cm_info.ird; + + ep->rtr_type = iwarp_info->rtr_type; + if (!iwarp_info->peer2peer) + ep->rtr_type = MPA_RTR_TYPE_NONE; + + if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0)) + ep->cm_info.ord = 1; + + ep->mpa_rev = iwarp_info->mpa_rev; + + qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); + + ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; + ep->cm_info.private_data_len = iparams->cm_info.private_data_len + + mpa_data_size; + + memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, + iparams->cm_info.private_data, + iparams->cm_info.private_data_len); + + if (p_hwfn->p_rdma_info->iwarp.tcp_flags 
& QED_IWARP_TS_EN) + ts_hdr_size = TIMESTAMP_HEADER_SIZE; + + ep->mss = iparams->mss - ts_hdr_size; + ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); + + ep->event_cb = iparams->event_cb; + ep->cb_context = iparams->cb_context; + ep->connect_mode = TCP_CONNECT_ACTIVE; + + oparams->ep_context = ep; + + rc = qed_iwarp_tcp_offload(p_hwfn, ep); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n", + iparams->qp->icid, ep->tcp_cid, rc); + + if (rc) { + qed_iwarp_destroy_ep(p_hwfn, ep, true); + goto err; + } + + return rc; +err: + qed_iwarp_cid_cleaned(p_hwfn, cid); + + return rc; +} + +static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn) +{ + struct qed_iwarp_ep *ep = NULL; + int rc; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { + DP_ERR(p_hwfn, "Ep list is empty\n"); + goto out; + } + + ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, + struct qed_iwarp_ep, list_entry); + + /* in some cases we could have failed allocating a tcp cid when added + * from accept / failure... retry now..this is not the common case. + */ + if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { + rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); + + /* if we fail we could look for another entry with a valid + * tcp_cid, but since we don't expect to reach this anyway + * it's not worth the handling + */ + if (rc) { + ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; + ep = NULL; + goto out; + } + } + + list_del(&ep->list_entry); + +out: + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + return ep; +} + +#define QED_IWARP_MAX_CID_CLEAN_TIME 100 +#define QED_IWARP_MAX_NO_PROGRESS_CNT 5 + +/* This function waits for all the bits of a bmap to be cleared, as long as + * there is progress ( i.e. the number of bits left to be cleared decreases ) + * the function continues. 
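+ * Each iteration sleeps QED_IWARP_MAX_CID_CLEAN_TIME (100ms); only after + * QED_IWARP_MAX_NO_PROGRESS_CNT iterations without a single cid being + * released does it give up and return -EBUSY.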
+ */ +static int +qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap) +{ + int prev_weight = 0; + int wait_count = 0; + int weight = 0; + + weight = bitmap_weight(bmap->bitmap, bmap->max_count); + prev_weight = weight; + + while (weight) { + msleep(QED_IWARP_MAX_CID_CLEAN_TIME); + + weight = bitmap_weight(bmap->bitmap, bmap->max_count); + + if (prev_weight == weight) { + wait_count++; + } else { + prev_weight = weight; + wait_count = 0; + } + + if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) { + DP_NOTICE(p_hwfn, + "%s bitmap wait timed out (%d cids pending)\n", + bmap->name, weight); + return -EBUSY; + } + } + return 0; +} + +static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn) +{ + int rc; + int i; + + rc = qed_iwarp_wait_cid_map_cleared(p_hwfn, + &p_hwfn->p_rdma_info->tcp_cid_map); + if (rc) + return rc; + + /* Now free the tcp cids from the main cid map */ + for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++) + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i); + + /* Now wait for all cids to be completed */ + return qed_iwarp_wait_cid_map_cleared(p_hwfn, + &p_hwfn->p_rdma_info->cid_map); +} + +static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn) +{ + struct qed_iwarp_ep *ep; + + while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, + struct qed_iwarp_ep, list_entry); + + if (!ep) { + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + break; + } + list_del(&ep->list_entry); + + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID) + qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid); + + qed_iwarp_destroy_ep(p_hwfn, ep, false); + } +} + +static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init) +{ + struct qed_iwarp_ep *ep; + int rc = 0; + int count; + u32 cid; + int i; + + count = init ? QED_IWARP_PREALLOC_CNT : 1; + for (i = 0; i < count; i++) { + rc = qed_iwarp_create_ep(p_hwfn, &ep); + if (rc) + return rc; + + /* During initialization we allocate from the main pool, + * afterwards we allocate only from the tcp_cid. + */ + if (init) { + rc = qed_iwarp_alloc_cid(p_hwfn, &cid); + if (rc) + goto err; + qed_iwarp_set_tcp_cid(p_hwfn, cid); + } else { + /* We don't care about the return code, it's ok if + * tcp_cid remains invalid...in this case we'll + * defer allocation + */ + qed_iwarp_alloc_tcp_cid(p_hwfn, &cid); + } + + ep->tcp_cid = cid; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&ep->list_entry, + &p_hwfn->p_rdma_info->iwarp.ep_free_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } + + return rc; + +err: + qed_iwarp_destroy_ep(p_hwfn, ep, false); + + return rc; +} + +int qed_iwarp_alloc(struct qed_hwfn *p_hwfn) +{ + int rc; + + /* Allocate bitmap for tcp cid. 
These are used by passive side + * to ensure it can allocate a tcp cid during dpc that was + * pre-acquired and doesn't require dynamic allocation of ilt + */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, + QED_IWARP_PREALLOC_CNT, "TCP_CID"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate tcp cid, rc = %d\n", rc); + return rc; + } + + INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list); + spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + return qed_iwarp_prealloc_ep(p_hwfn, true); +} + +void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) +{ + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); +} + +int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_ep *ep; + u8 mpa_data_size = 0; + int rc; + + ep = iparams->ep_context; + if (!ep) { + DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n"); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", + iparams->qp->icid, ep->tcp_cid); + + if ((iparams->ord > QED_IWARP_ORD_DEFAULT) || + (iparams->ird > QED_IWARP_IRD_DEFAULT)) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", + iparams->qp->icid, + ep->tcp_cid, iparams->ord, iparams->ird); + return -EINVAL; + } + + qed_iwarp_prealloc_ep(p_hwfn, false); + + ep->cb_context = iparams->cb_context; + ep->qp = iparams->qp; + ep->qp->ep = ep; + + if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { + /* Negotiate ord/ird: if the upper layer requested an ord larger + * than the ird advertised by the remote, we need to decrease + * our ord + */ + if (iparams->ord > ep->cm_info.ird) + iparams->ord = ep->cm_info.ird; + + if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && + (iparams->ird == 0)) + iparams->ird = 1; + } + + /* Update cm_info ord/ird to be negotiated values */ + ep->cm_info.ord = iparams->ord; + ep->cm_info.ird = iparams->ird; + + qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); + + ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; + ep->cm_info.private_data_len = iparams->private_data_len + + mpa_data_size; + + memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, + iparams->private_data, iparams->private_data_len); + + rc = qed_iwarp_mpa_offload(p_hwfn, ep); + if (rc) + qed_iwarp_modify_qp(p_hwfn, + iparams->qp, QED_IWARP_QP_STATE_ERROR, 1); + + return rc; +} + +int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_ep *ep; + u8 mpa_data_size = 0; + + ep = iparams->ep_context; + if (!ep) { + DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n"); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid); + + ep->cb_context = iparams->cb_context; + ep->qp = NULL; + + qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); + + ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; + ep->cm_info.private_data_len = iparams->private_data_len + + mpa_data_size; + + memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, + iparams->private_data, iparams->private_data_len); + + return qed_iwarp_mpa_offload(p_hwfn, ep); +} + +static void +qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn, + struct qed_iwarp_cm_info *cm_info) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n", + cm_info->ip_version); + + if (cm_info->ip_version == QED_TCP_IPV4) + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n", + cm_info->remote_ip,
cm_info->remote_port, + cm_info->local_ip, cm_info->local_port, + cm_info->vlan); + else + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "remote_ip %pI6h:%x, local_ip %pI6h:%x vlan=%x\n", + cm_info->remote_ip, cm_info->remote_port, + cm_info->local_ip, cm_info->local_port, + cm_info->vlan); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "private_data_len = %x ord = %d, ird = %d\n", + cm_info->private_data_len, cm_info->ord, cm_info->ird); +} + +static int +qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ll2_buff *buf, u8 handle) +{ + int rc; + + rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr, + (u16)buf->buff_size, buf, 1); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n", + rc, handle); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size, + buf->data, buf->data_phys_addr); + kfree(buf); + } + + return rc; +} + +static bool +qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info) +{ + struct qed_iwarp_ep *ep = NULL; + bool found = false; + + list_for_each_entry(ep, + &p_hwfn->p_rdma_info->iwarp.ep_list, + list_entry) { + if ((ep->cm_info.local_port == cm_info->local_port) && + (ep->cm_info.remote_port == cm_info->remote_port) && + (ep->cm_info.vlan == cm_info->vlan) && + !memcmp(&ep->cm_info.local_ip, cm_info->local_ip, + sizeof(cm_info->local_ip)) && + !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip, + sizeof(cm_info->remote_ip))) { + found = true; + break; + } + } + + if (found) { + DP_NOTICE(p_hwfn, + "SYN received on active connection - dropping\n"); + qed_iwarp_print_cm_info(p_hwfn, cm_info); + + return true; + } + + return false; +} + +static struct qed_iwarp_listener * +qed_iwarp_get_listener(struct qed_hwfn *p_hwfn, + struct qed_iwarp_cm_info *cm_info) +{ + struct qed_iwarp_listener *listener = NULL; + static const u32 ip_zero[4] = { 0, 0, 0, 0 }; + bool found = false; + + qed_iwarp_print_cm_info(p_hwfn, cm_info); + + list_for_each_entry(listener, + &p_hwfn->p_rdma_info->iwarp.listen_list, + list_entry) { + if (listener->port == cm_info->local_port) { + if (!memcmp(listener->ip_addr, + ip_zero, sizeof(ip_zero))) { + found = true; + break; + } + + if (!memcmp(listener->ip_addr, + cm_info->local_ip, + sizeof(cm_info->local_ip)) && + (listener->vlan == cm_info->vlan)) { + found = true; + break; + } + } + } + + if (found) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n", + listener); + return listener; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n"); + return NULL; +} + +static int +qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_cm_info *cm_info, + void *buf, + u8 *remote_mac_addr, + u8 *local_mac_addr, + int *payload_len, int *tcp_start_offset) +{ + struct vlan_ethhdr *vethh; + bool vlan_valid = false; + struct ipv6hdr *ip6h; + struct ethhdr *ethh; + struct tcphdr *tcph; + struct iphdr *iph; + int eth_hlen; + int ip_hlen; + int eth_type; + int i; + + ethh = buf; + eth_type = ntohs(ethh->h_proto); + if (eth_type == ETH_P_8021Q) { + vlan_valid = true; + vethh = (struct vlan_ethhdr *)ethh; + cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK; + eth_type = ntohs(vethh->h_vlan_encapsulated_proto); + } + + eth_hlen = ETH_HLEN + (vlan_valid ? 
sizeof(u32) : 0); + + ether_addr_copy(remote_mac_addr, ethh->h_source); + ether_addr_copy(local_mac_addr, ethh->h_dest); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n", + eth_type, ethh->h_source); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n", + eth_hlen, ethh->h_dest); + + iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); + + if (eth_type == ETH_P_IP) { + cm_info->local_ip[0] = ntohl(iph->daddr); + cm_info->remote_ip[0] = ntohl(iph->saddr); + cm_info->ip_version = TCP_IPV4; + + ip_hlen = (iph->ihl) * sizeof(u32); + *payload_len = ntohs(iph->tot_len) - ip_hlen; + } else if (eth_type == ETH_P_IPV6) { + ip6h = (struct ipv6hdr *)iph; + for (i = 0; i < 4; i++) { + cm_info->local_ip[i] = + ntohl(ip6h->daddr.in6_u.u6_addr32[i]); + cm_info->remote_ip[i] = + ntohl(ip6h->saddr.in6_u.u6_addr32[i]); + } + cm_info->ip_version = TCP_IPV6; + + ip_hlen = sizeof(*ip6h); + *payload_len = ntohs(ip6h->payload_len); + } else { + DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type); + return -EINVAL; + } + + tcph = (struct tcphdr *)((u8 *)iph + ip_hlen); + + if (!tcph->syn) { + DP_NOTICE(p_hwfn, + "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n", + iph->ihl, tcph->source, tcph->dest); + return -EINVAL; + } + + cm_info->local_port = ntohs(tcph->dest); + cm_info->remote_port = ntohs(tcph->source); + + qed_iwarp_print_cm_info(p_hwfn, cm_info); + + *tcp_start_offset = eth_hlen + ip_hlen; + + return 0; +} + +static void +qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) +{ + struct qed_iwarp_ll2_buff *buf = data->cookie; + struct qed_iwarp_listener *listener; + struct qed_ll2_tx_pkt_info tx_pkt; + struct qed_iwarp_cm_info cm_info; + struct qed_hwfn *p_hwfn = cxt; + u8 remote_mac_addr[ETH_ALEN]; + u8 local_mac_addr[ETH_ALEN]; + struct qed_iwarp_ep *ep; + int tcp_start_offset; + u8 ts_hdr_size = 0; + u8 ll2_syn_handle; + int payload_len; + u32 hdr_size; + int rc; + + memset(&cm_info, 0, sizeof(cm_info)); + ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; + if (GET_FIELD(data->parse_flags, + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) && + GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) { + DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n"); + goto err; + } + + rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) + + data->u.placement_offset, remote_mac_addr, + local_mac_addr, &payload_len, + &tcp_start_offset); + if (rc) + goto err; + + /* Check if there is a listener for this 4-tuple+vlan */ + listener = qed_iwarp_get_listener(p_hwfn, &cm_info); + if (!listener) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "SYN received on tuple not listened on parse_flags=%d packet len=%d\n", + data->parse_flags, data->length.packet_length); + + memset(&tx_pkt, 0, sizeof(tx_pkt)); + tx_pkt.num_of_bds = 1; + tx_pkt.vlan = data->vlan; + + if (GET_FIELD(data->parse_flags, + PARSING_AND_ERR_FLAGS_TAG8021QEXIST)) + SET_FIELD(tx_pkt.bd_flags, + CORE_TX_BD_DATA_VLAN_INSERTION, 1); + + tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2; + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + tx_pkt.first_frag = buf->data_phys_addr + + data->u.placement_offset; + tx_pkt.first_frag_len = data->length.packet_length; + tx_pkt.cookie = buf; + + rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle, + &tx_pkt, true); + + if (rc) { + DP_NOTICE(p_hwfn, + "Can't post SYN back to chip rc=%d\n", rc); + goto err; + } + return; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening 
port\n"); + /* There may be an open ep on this connection if this is a syn + * retransmit... need to make sure there isn't... + */ + if (qed_iwarp_ep_exists(p_hwfn, &cm_info)) + goto err; + + ep = qed_iwarp_get_free_ep(p_hwfn); + if (!ep) + goto err; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ether_addr_copy(ep->remote_mac_addr, remote_mac_addr); + ether_addr_copy(ep->local_mac_addr, local_mac_addr); + + memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); + + if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) + ts_hdr_size = TIMESTAMP_HEADER_SIZE; + + hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) + + ts_hdr_size; + ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; + ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); + + ep->event_cb = listener->event_cb; + ep->cb_context = listener->cb_context; + ep->connect_mode = TCP_CONNECT_PASSIVE; + + ep->syn = buf; + ep->syn_ip_payload_length = (u16)payload_len; + ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset + + tcp_start_offset; + + rc = qed_iwarp_tcp_offload(p_hwfn, ep); + if (rc) { + qed_iwarp_return_ep(p_hwfn, ep); + goto err; + } + + return; +err: + qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle); +} + +static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle, + void *cookie, dma_addr_t rx_buf_addr, + bool b_last_packet) +{ + struct qed_iwarp_ll2_buff *buffer = cookie; + struct qed_hwfn *p_hwfn = cxt; + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size, + buffer->data, buffer->data_phys_addr); + kfree(buffer); +} + +static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle, + void *cookie, dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet) +{ + struct qed_iwarp_ll2_buff *buffer = cookie; + struct qed_hwfn *p_hwfn = cxt; + + /* this was originally an rx packet, post it back */ + qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle); +} + +static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, + void *cookie, dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet) +{ + struct qed_iwarp_ll2_buff *buffer = cookie; + struct qed_hwfn *p_hwfn = cxt; + + if (!buffer) + return; + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size, + buffer->data, buffer->data_phys_addr); + + kfree(buffer); +} + +static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + int rc = 0; + + if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) { + rc = qed_ll2_terminate_connection(p_hwfn, + iwarp_info->ll2_syn_handle); + if (rc) + DP_INFO(p_hwfn, "Failed to terminate syn connection\n"); + + qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle); + iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; + } + + qed_llh_remove_mac_filter(p_hwfn, + p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr); + return rc; +} + +static int +qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn, + int num_rx_bufs, int buff_size, u8 ll2_handle) +{ + struct qed_iwarp_ll2_buff *buffer; + int rc = 0; + int i; + + for (i = 0; i < num_rx_bufs; i++) { + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) { + rc = -ENOMEM; + break; + } + + buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + buff_size, + &buffer->data_phys_addr, + GFP_KERNEL); + if (!buffer->data) { + kfree(buffer); + rc =
-ENOMEM; + break; + } + + buffer->buff_size = buff_size; + rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle); + if (rc) + /* buffers will be deallocated by qed_ll2 */ + break; + } + return rc; +} + +#define QED_IWARP_MAX_BUF_SIZE(mtu) \ + ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \ + ETH_CACHE_LINE_SIZE) + +static int +qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params, + struct qed_ptt *p_ptt) +{ + struct qed_iwarp_info *iwarp_info; + struct qed_ll2_acquire_data data; + struct qed_ll2_cbs cbs; + int rc = 0; + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; + + iwarp_info->max_mtu = params->max_mtu; + + ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr); + + rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr); + if (rc) + return rc; + + /* Start SYN connection */ + cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt; + cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt; + cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt; + cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt; + cbs.cookie = p_hwfn; + + memset(&data, 0, sizeof(data)); + data.input.conn_type = QED_LL2_TYPE_IWARP; + data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; + data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; + data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; + data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ + data.input.tx_tc = PKT_LB_TC; + data.input.tx_dest = QED_LL2_TX_DEST_LB; + data.p_connection_handle = &iwarp_info->ll2_syn_handle; + data.cbs = &cbs; + + rc = qed_ll2_acquire_connection(p_hwfn, &data); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n"); + qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr); + return rc; + } + + rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n"); + goto err; + } + + rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, + QED_IWARP_LL2_SYN_RX_SIZE, + QED_IWARP_MAX_SYN_PKT_SIZE, + iwarp_info->ll2_syn_handle); + if (rc) + goto err; + + return rc; +err: + qed_iwarp_ll2_stop(p_hwfn, p_ptt); + + return rc; +} + +int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_rdma_start_in_params *params) +{ + struct qed_iwarp_info *iwarp_info; + u32 rcv_wnd_size; + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + + iwarp_info->tcp_flags = QED_IWARP_TS_EN; + rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF; + + /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */ + iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) - + ilog2(QED_IWARP_RCV_WND_SIZE_MIN); + iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED; + iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; + + iwarp_info->peer2peer = QED_IWARP_PARAM_P2P; + + iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND | + MPA_RTR_TYPE_ZERO_WRITE | + MPA_RTR_TYPE_ZERO_READ; + + spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock); + INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list); + INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list); + + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP, + qed_iwarp_async_event); + + return qed_iwarp_ll2_start(p_hwfn, params, p_ptt); +} + +int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + int rc; + + qed_iwarp_free_prealloc_ep(p_hwfn); + rc = qed_iwarp_wait_for_all_cids(p_hwfn); + if (rc) + return rc; + + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP); + + return qed_iwarp_ll2_stop(p_hwfn, p_ptt); +} + +void 
qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + struct qed_iwarp_cm_event_params params; + + qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true); + + params.event = QED_IWARP_EVENT_CLOSE; + params.ep_context = ep; + params.cm_info = &ep->cm_info; + params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ? + 0 : -ECONNRESET; + + ep->state = QED_IWARP_EP_CLOSED; + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ep->event_cb(ep->cb_context, &params); +} + +void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, int fw_ret_code) +{ + struct qed_iwarp_cm_event_params params; + bool event_cb = false; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n", + ep->cid, fw_ret_code); + + switch (fw_ret_code) { + case IWARP_EXCEPTION_DETECTED_LLP_CLOSED: + params.status = 0; + params.event = QED_IWARP_EVENT_DISCONNECT; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LLP_RESET: + params.status = -ECONNRESET; + params.event = QED_IWARP_EVENT_DISCONNECT; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_RQ_EMPTY: + params.event = QED_IWARP_EVENT_RQ_EMPTY; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_IRQ_FULL: + params.event = QED_IWARP_EVENT_IRQ_FULL; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT: + params.event = QED_IWARP_EVENT_LLP_TIMEOUT; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR: + params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW: + params.event = QED_IWARP_EVENT_CQ_OVERFLOW; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC: + params.event = QED_IWARP_EVENT_QP_CATASTROPHIC; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR: + params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR: + params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED: + params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED; + event_cb = true; + break; + default: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Unhandled exception received...fw_ret_code=%d\n", + fw_ret_code); + break; + } + + if (event_cb) { + params.ep_context = ep; + params.cm_info = &ep->cm_info; + ep->event_cb(ep->cb_context, &params); + } +} + +static void +qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + struct qed_iwarp_cm_event_params params; + + memset(&params, 0, sizeof(params)); + params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; + params.ep_context = ep; + params.cm_info = &ep->cm_info; + ep->state = QED_IWARP_EP_CLOSED; + + switch (fw_return_code) { + case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "%s(0x%x) TCP connect got invalid packet\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNRESET; + break; + case IWARP_CONN_ERROR_TCP_CONNECTION_RST: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "%s(0x%x) TCP Connection Reset\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNRESET; + break; + case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT: + DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); +
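/* No TCP connection was established before the firmware's connect + * timer expired; -EBUSY marks this as transient rather than a refusal. + */ +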
params.status = -EBUSY; + break; + case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_INVALID_PACKET: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNRESET; + break; + default: + DP_ERR(p_hwfn, + "%s(0x%x) Unexpected return code tcp connect: %d\n", + QED_IWARP_CONNECT_MODE_STRING(ep), + ep->tcp_cid, fw_return_code); + params.status = -ECONNRESET; + break; + } + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; + qed_iwarp_return_ep(p_hwfn, ep); + } else { + ep->event_cb(ep->cb_context, &params); + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } +} + +void +qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + /* Done with the SYN packet, post back to ll2 rx */ + qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle); + + ep->syn = NULL; + + /* If connect failed - upper layer doesn't know about it */ + if (fw_return_code == RDMA_RETURN_OK) + qed_iwarp_mpa_received(p_hwfn, ep); + else + qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, + fw_return_code); + } else { + if (fw_return_code == RDMA_RETURN_OK) + qed_iwarp_mpa_offload(p_hwfn, ep); + else + qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, + fw_return_code); + } +} + +static inline bool +qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + if (!ep || (ep->sig != QED_EP_SIG)) { + DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep); + return false; + } + + return true; +} + +static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, u16 echo, + union event_ring_data *data, + u8 fw_return_code) +{ + struct regpair *fw_handle = &data->rdma_data.async_handle; + struct qed_iwarp_ep *ep = NULL; + u16 cid; + + ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, + fw_handle->lo); + + switch (fw_event_code) { + case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE: + /* Async completion after TCP 3-way handshake */ + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n", + ep->tcp_cid, fw_return_code); + qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED: + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_exception_received(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE: + /* Async completion for Close Connection ramrod */ + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED: + /* Async event for active side only */ + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) 
IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_mpa_reply_arrived(p_hwfn, ep); + break; + case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE: + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED: + cid = (u16)le32_to_cpu(fw_handle->lo); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid); + qed_iwarp_cid_cleaned(p_hwfn, cid); + + break; + case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); + + p_hwfn->p_rdma_info->events.affiliated_event( + p_hwfn->p_rdma_info->events.context, + QED_IWARP_EVENT_CQ_OVERFLOW, + (void *)fw_handle); + break; + default: + DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n", + fw_event_code); + return -EINVAL; + } + return 0; +} + +int +qed_iwarp_create_listen(void *rdma_cxt, + struct qed_iwarp_listen_in *iparams, + struct qed_iwarp_listen_out *oparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_listener *listener; + + listener = kzalloc(sizeof(*listener), GFP_KERNEL); + if (!listener) + return -ENOMEM; + + listener->ip_version = iparams->ip_version; + memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr)); + listener->port = iparams->port; + listener->vlan = iparams->vlan; + + listener->event_cb = iparams->event_cb; + listener->cb_context = iparams->cb_context; + listener->max_backlog = iparams->max_backlog; + oparams->handle = listener; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&listener->list_entry, + &p_hwfn->p_rdma_info->iwarp.listen_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n", + listener->event_cb, + listener, + listener->ip_addr[0], + listener->ip_addr[1], + listener->ip_addr[2], + listener->ip_addr[3], listener->port, listener->vlan); + + return 0; +} + +int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle) +{ + struct qed_iwarp_listener *listener = handle; + struct qed_hwfn *p_hwfn = rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle); + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&listener->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + kfree(listener); + + return 0; +} + +int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + struct qed_iwarp_ep *ep; + struct qed_rdma_qp *qp; + int rc; + + ep = iparams->ep_context; + if (!ep) { + DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n"); + return -EINVAL; + } + + qp = ep->qp; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", + qp->icid, ep->tcp_cid); + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, + PROTOCOLID_IWARP, &init_data); + + if (rc) + return rc; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc); + + return rc; +} + +void 
+qed_iwarp_query_qp(struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) +{ + out_params->state = qed_iwarp2roce_state(qp->iwarp_state); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h new file mode 100644 index 000000000000..148ef3c33a5d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -0,0 +1,189 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _QED_IWARP_H +#define _QED_IWARP_H + +enum qed_iwarp_qp_state { + QED_IWARP_QP_STATE_IDLE, + QED_IWARP_QP_STATE_RTS, + QED_IWARP_QP_STATE_TERMINATE, + QED_IWARP_QP_STATE_CLOSING, + QED_IWARP_QP_STATE_ERROR, +}; + +enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); + +#define QED_IWARP_PREALLOC_CNT (256) + +#define QED_IWARP_LL2_SYN_TX_SIZE (128) +#define QED_IWARP_LL2_SYN_RX_SIZE (256) +#define QED_IWARP_MAX_SYN_PKT_SIZE (128) +#define QED_IWARP_HANDLE_INVAL (0xff) + +struct qed_iwarp_ll2_buff { + void *data; + dma_addr_t data_phys_addr; + u32 buff_size; +}; + +struct qed_iwarp_info { + struct list_head listen_list; /* qed_iwarp_listener */ + struct list_head ep_list; /* qed_iwarp_ep */ + struct list_head ep_free_list; /* pre-allocated ep's */ + spinlock_t iw_lock; /* for iwarp resources */ + spinlock_t qp_lock; /* for teardown races */ + u32 rcv_wnd_scale; + u16 max_mtu; + u8 mac_addr[ETH_ALEN]; + u8 crc_needed; + u8 tcp_flags; + u8 ll2_syn_handle; + u8 peer2peer; + enum mpa_negotiation_mode mpa_rev; + enum mpa_rtr_type rtr_type; +}; + +enum qed_iwarp_ep_state { + QED_IWARP_EP_INIT, + QED_IWARP_EP_MPA_REQ_RCVD, + QED_IWARP_EP_MPA_OFFLOADED, + QED_IWARP_EP_ESTABLISHED, + QED_IWARP_EP_CLOSED +}; + +union async_output { + struct iwarp_eqe_data_mpa_async_completion mpa_response; + struct iwarp_eqe_data_tcp_async_completion mpa_request; +}; + +#define QED_MAX_PRIV_DATA_LEN (512) +struct qed_iwarp_ep_memory { + u8 in_pdata[QED_MAX_PRIV_DATA_LEN]; + u8 out_pdata[QED_MAX_PRIV_DATA_LEN]; + union async_output async_output; +}; + +/* Endpoint structure represents a TCP connection. 
This connection can be + * associated with a QP or not (in which case QP==NULL) + */ +struct qed_iwarp_ep { + struct list_head list_entry; + struct qed_rdma_qp *qp; + struct qed_iwarp_ep_memory *ep_buffer_virt; + dma_addr_t ep_buffer_phys; + enum qed_iwarp_ep_state state; + int sig; + struct qed_iwarp_cm_info cm_info; + enum tcp_connect_mode connect_mode; + enum mpa_rtr_type rtr_type; + enum mpa_negotiation_mode mpa_rev; + u32 tcp_cid; + u32 cid; + u16 mss; + u8 remote_mac_addr[6]; + u8 local_mac_addr[6]; + bool mpa_reply_processed; + + /* For Passive side - syn packet related data */ + u16 syn_ip_payload_length; + struct qed_iwarp_ll2_buff *syn; + dma_addr_t syn_phy_addr; + + /* The event_cb function is called for asynchronous events associated + * with the ep. It is initialized at different entry points depending + * on whether the ep is the tcp connection active side or passive side. + * The cb_context is passed to the event_cb function. + */ + iwarp_event_handler event_cb; + void *cb_context; +}; + +struct qed_iwarp_listener { + struct list_head list_entry; + + /* The event_cb function is called for connection requests. + * The cb_context is passed to the event_cb function. + */ + iwarp_event_handler event_cb; + void *cb_context; + u32 max_backlog; + u32 ip_addr[4]; + u16 port; + u16 vlan; + u8 ip_version; +}; + +int qed_iwarp_alloc(struct qed_hwfn *p_hwfn); + +int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_rdma_start_in_params *params); + +int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn); + +void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn); + +void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_create_qp_out_params *out_params); + +int qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, + enum qed_iwarp_qp_state new_state, bool internal); + +int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp); + +int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp); + +void qed_iwarp_query_qp(struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params); + +int +qed_iwarp_connect(void *rdma_cxt, + struct qed_iwarp_connect_in *iparams, + struct qed_iwarp_connect_out *oparams); + +int +qed_iwarp_create_listen(void *rdma_cxt, + struct qed_iwarp_listen_in *iparams, + struct qed_iwarp_listen_out *oparams); + +int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams); + +int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams); +int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle); + +int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 746fed4099c8..0ba5ec8a9814 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -43,7 +43,6 @@ #include <linux/slab.h> #include <linux/stddef.h> #include <linux/string.h> -#include <linux/version.h> #include <linux/workqueue.h> #include <linux/bitops.h> #include <linux/bug.h> @@ -66,26 +65,161 @@ #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 +struct qed_l2_info { + u32 queues; + unsigned long **pp_qid_usage; + + /* The lock is meant to synchronize access to the qid usage */ + struct mutex lock; +}; + +int qed_l2_alloc(struct qed_hwfn *p_hwfn) +{
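+ /* One usage bitmap is kept per queue-zone; each bitmap tracks which of + * the MAX_QUEUES_PER_QZONE qids inside that zone are currently taken. + */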
+ struct qed_l2_info *p_l2_info; + unsigned long **pp_qids; + u32 i; + + if (!QED_IS_L2_PERSONALITY(p_hwfn)) + return 0; + + p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL); + if (!p_l2_info) + return -ENOMEM; + p_hwfn->p_l2_info = p_l2_info; + + if (IS_PF(p_hwfn->cdev)) { + p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE); + } else { + u8 rx = 0, tx = 0; + + qed_vf_get_num_rxqs(p_hwfn, &rx); + qed_vf_get_num_txqs(p_hwfn, &tx); + + p_l2_info->queues = max_t(u8, rx, tx); + } + + pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues, + GFP_KERNEL); + if (!pp_qids) + return -ENOMEM; + p_l2_info->pp_qid_usage = pp_qids; + + for (i = 0; i < p_l2_info->queues; i++) { + pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL); + if (!pp_qids[i]) + return -ENOMEM; + } + + return 0; +} + +void qed_l2_setup(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->hw_info.personality != QED_PCI_ETH && + p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + return; + + mutex_init(&p_hwfn->p_l2_info->lock); +} + +void qed_l2_free(struct qed_hwfn *p_hwfn) +{ + u32 i; + + if (p_hwfn->hw_info.personality != QED_PCI_ETH && + p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + return; + + if (!p_hwfn->p_l2_info) + return; + + if (!p_hwfn->p_l2_info->pp_qid_usage) + goto out_l2_info; + + /* Free until hit first uninitialized entry */ + for (i = 0; i < p_hwfn->p_l2_info->queues; i++) { + if (!p_hwfn->p_l2_info->pp_qid_usage[i]) + break; + kfree(p_hwfn->p_l2_info->pp_qid_usage[i]); + } + + kfree(p_hwfn->p_l2_info->pp_qid_usage); + +out_l2_info: + kfree(p_hwfn->p_l2_info); + p_hwfn->p_l2_info = NULL; +} + +static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info; + u16 queue_id = p_cid->rel.queue_id; + bool b_rc = true; + u8 first; + + mutex_lock(&p_l2_info->lock); + + if (queue_id >= p_l2_info->queues) { + DP_NOTICE(p_hwfn, + "Requested to increase usage for qzone %04x out of %08x\n", + queue_id, p_l2_info->queues); + b_rc = false; + goto out; + } + + first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id], + MAX_QUEUES_PER_QZONE); + if (first >= MAX_QUEUES_PER_QZONE) { + b_rc = false; + goto out; + } + + __set_bit(first, p_l2_info->pp_qid_usage[queue_id]); + p_cid->qid_usage_idx = first; + +out: + mutex_unlock(&p_l2_info->lock); + return b_rc; +} + +static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + mutex_lock(&p_hwfn->p_l2_info->lock); + + clear_bit(p_cid->qid_usage_idx, + p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]); + + mutex_unlock(&p_hwfn->p_l2_info->lock); +} + void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { - /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */ - if (!p_cid->is_vf && IS_PF(p_hwfn->cdev)) - qed_cxt_release_cid(p_hwfn, p_cid->cid); + bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID); + + if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) + _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid); + + /* For PF's VFs we maintain the index inside queue-zone in IOV */ + if (p_cid->vfid == QED_QUEUE_CID_SELF) + qed_eth_queue_qid_usage_del(p_hwfn, p_cid); + vfree(p_cid); } /* The internal is only meant to be directly called by PFs initializeing CIDs * for their VFs. 
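Worth noting about the allocation above: MAX_QUEUES_PER_QZONE is sizeof(unsigned long) * 8 (defined in qed_l2.h further down), so each per-zone map is exactly one unsigned long and kzalloc(MAX_QUEUES_PER_QZONE / 8, ...) requests sizeof(unsigned long) bytes. A standalone sketch of the acquire/release pattern the qid_usage helpers above implement (locking omitted):

        unsigned long qid_map = 0;      /* one queue-zone's usage map */
        u8 first;

        /* acquire: claim the first free index inside the zone */
        first = (u8)find_first_zero_bit(&qid_map, MAX_QUEUES_PER_QZONE);
        if (first < MAX_QUEUES_PER_QZONE)
                __set_bit(first, &qid_map);     /* becomes p_cid->qid_usage_idx */

        /* release: hand the index back to the zone */
        clear_bit(first, &qid_map);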
*/ -struct qed_queue_cid * +static struct qed_queue_cid * _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, u16 opaque_fid, u32 cid, - u8 vf_qid, - struct qed_queue_start_common_params *p_params) + struct qed_queue_start_common_params *p_params, + bool b_is_rx, + struct qed_queue_cid_vf_params *p_vf_params) { - bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid); struct qed_queue_cid *p_cid; int rc; @@ -96,10 +230,25 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, p_cid->opaque_fid = opaque_fid; p_cid->cid = cid; - p_cid->vf_qid = vf_qid; - p_cid->rel = *p_params; p_cid->p_owner = p_hwfn; + /* Fill in parameters */ + p_cid->rel.vport_id = p_params->vport_id; + p_cid->rel.queue_id = p_params->queue_id; + p_cid->rel.stats_id = p_params->stats_id; + p_cid->sb_igu_id = p_params->p_sb->igu_sb_id; + p_cid->b_is_rx = b_is_rx; + p_cid->sb_idx = p_params->sb_idx; + + /* Fill-in bits related to VFs' queues if information was provided */ + if (p_vf_params) { + p_cid->vfid = p_vf_params->vfid; + p_cid->vf_qid = p_vf_params->vf_qid; + p_cid->vf_legacy = p_vf_params->vf_legacy; + } else { + p_cid->vfid = QED_QUEUE_CID_SELF; + } + /* Don't try calculating the absolute indices for VFs */ if (IS_VF(p_hwfn->cdev)) { p_cid->abs = p_cid->rel; @@ -121,7 +270,7 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, /* In case of a PF configuring its VF's queues, the stats-id is already * absolute [since there's a single index that's suitable per-VF]. */ - if (b_is_same) { + if (p_cid->vfid == QED_QUEUE_CID_SELF) { rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id, &p_cid->abs.stats_id); if (rc) @@ -130,27 +279,29 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, p_cid->abs.stats_id = p_cid->rel.stats_id; } - /* SBs relevant information was already provided as absolute */ - p_cid->abs.sb = p_cid->rel.sb; - p_cid->abs.sb_idx = p_cid->rel.sb_idx; - - /* This is tricky - we're actually interested in whehter this is a PF - * entry meant for the VF. - */ - if (!b_is_same) - p_cid->is_vf = true; out: + /* VF-images have provided the qid_usage_idx on their own. + * Otherwise, we need to allocate a unique one. + */ + if (!p_vf_params) { + if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid)) + goto fail; + } else { + p_cid->qid_usage_idx = p_vf_params->qid_usage_idx; + } + DP_VERBOSE(p_hwfn, QED_MSG_SP, - "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n", + "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n", p_cid->opaque_fid, p_cid->cid, p_cid->rel.vport_id, p_cid->abs.vport_id, p_cid->rel.queue_id, + p_cid->qid_usage_idx, p_cid->abs.queue_id, p_cid->rel.stats_id, - p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx); + p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx); return p_cid; @@ -159,32 +310,61 @@ fail: return NULL; } -static struct qed_queue_cid *qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, - u16 opaque_fid, struct - qed_queue_start_common_params - *p_params) +struct qed_queue_cid * +qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + bool b_is_rx, + struct qed_queue_cid_vf_params *p_vf_params) { struct qed_queue_cid *p_cid; + u8 vfid = QED_CXT_PF_CID; + bool b_legacy_vf = false; u32 cid = 0; + /* In case of legacy VFs, The CID can be derived from the additional + * VF parameters - the VF assumes queue X uses CID X, so we can simply + * use the vf_qid for this purpose as well. 
+ */ + if (p_vf_params) { + vfid = p_vf_params->vfid; + + if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) { + b_legacy_vf = true; + cid = p_vf_params->vf_qid; + } + } + /* Get a unique firmware CID for this queue, in case it's a PF. * VF's don't need a CID as the queue configuration will be done * by PF. */ - if (IS_PF(p_hwfn->cdev)) { - if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) { + if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) { + if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, + &cid, vfid)) { DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); return NULL; } } - p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params); - if (!p_cid && IS_PF(p_hwfn->cdev)) - qed_cxt_release_cid(p_hwfn, cid); + p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, + p_params, b_is_rx, p_vf_params); + if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf) + _qed_cxt_release_cid(p_hwfn, cid, vfid); return p_cid; } +static struct qed_queue_cid * +qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + bool b_is_rx, + struct qed_queue_start_common_params *p_params) +{ + return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx, + NULL); +} + int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params) { @@ -682,7 +862,7 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", p_cid->opaque_fid, p_cid->cid, - p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb); + p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -698,8 +878,8 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.rx_queue_start; - p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb); - p_ramrod->sb_index = p_cid->abs.sb_idx; + p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); + p_ramrod->sb_index = p_cid->sb_idx; p_ramrod->vport_id = p_cid->abs.vport_id; p_ramrod->stats_counter_id = p_cid->abs.stats_id; p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); @@ -712,13 +892,15 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); - if (p_cid->is_vf) { + if (p_cid->vfid != QED_QUEUE_CID_SELF) { + bool b_legacy_vf = !!(p_cid->vf_legacy & + QED_QCID_LEGACY_VF_RX_PROD); + p_ramrod->vf_rx_prod_index = p_cid->vf_qid; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Queue%s is meant for VF rxq[%02x]\n", - !!p_cid->b_legacy_vf ? " [legacy]" : "", - p_cid->vf_qid); - p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf; + b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid); + p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf; } return qed_spq_post(p_hwfn, p_ent, NULL); @@ -762,7 +944,7 @@ qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn, int rc; /* Allocate a CID for the queue */ - p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params); + p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params); if (!p_cid) return -ENOMEM; @@ -864,10 +1046,11 @@ qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn, /* Cleaning the queue requires the completion to arrive there. * In addition, VFs require the answer to come as eqe to PF. 
 */
- p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
+ p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
     !b_eq_completion_only) || b_cqe_completion;
- p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
+ p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
+     b_eq_completion_only;

 return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -916,8 +1099,8 @@ qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
 p_ramrod = &p_ent->ramrod.tx_queue_start;
 p_ramrod->vport_id = p_cid->abs.vport_id;
- p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
- p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
 p_ramrod->stats_counter_id = p_cid->abs.stats_id;
 p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
@@ -966,7 +1149,7 @@ qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
 struct qed_queue_cid *p_cid;
 int rc;

- p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+ p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
 if (!p_cid)
  return -EINVAL;
@@ -1044,19 +1227,6 @@ static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
 return action;
}

-static void qed_set_fw_mac_addr(__le16 *fw_msb,
-    __le16 *fw_mid,
-    __le16 *fw_lsb,
-    u8 *mac)
-{
- ((u8 *)fw_msb)[0] = mac[1];
- ((u8 *)fw_msb)[1] = mac[0];
- ((u8 *)fw_mid)[0] = mac[3];
- ((u8 *)fw_mid)[1] = mac[2];
- ((u8 *)fw_lsb)[0] = mac[5];
- ((u8 *)fw_lsb)[1] = mac[4];
-}
-
 static int
 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
    u16 opaque_fid,
@@ -1935,15 +2105,26 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
  ether_addr_copy(info->port_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+
+  info->xdp_supported = true;
 } else {
-  qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
-  if (cdev->num_hwfns > 1) {
-   u8 queues = 0;
+  u16 total_cids = 0;

-   qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
+  /* Determine queues & XDP support */
+  for_each_hwfn(cdev, i) {
+   struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+   u8 queues, cids;
+
+   qed_vf_get_num_cids(p_hwfn, &cids);
+   qed_vf_get_num_rxqs(p_hwfn, &queues);
   info->num_queues += queues;
+   total_cids += cids;
  }

+  /* Enable VF XDP in case PF guarantees sufficient connections */
+  if (total_cids >= info->num_queues * 3)
+   info->xdp_supported = true;
+
  qed_vf_get_num_vlan_filters(&cdev->hwfns[0], (u8 *)&info->num_vlan_filters);
  qed_vf_get_num_mac_filters(&cdev->hwfns[0],
@@ -2194,9 +2375,9 @@ static int qed_start_rxq(struct qed_dev *cdev,
 }

 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-    "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+    "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
     p_params->queue_id, rss_num, p_params->vport_id,
-    p_params->sb);
+    p_params->p_sb->igu_sb_id);

 return 0;
}
@@ -2244,9 +2425,9 @@ static int qed_start_txq(struct qed_dev *cdev,
 }

 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-    "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+    "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
     p_params->queue_id, rss_num, p_params->vport_id,
-    p_params->sb);
+    p_params->p_sb->igu_sb_id);

 return 0;
}
@@ -2301,14 +2482,25 @@ static int qed_tunn_configure(struct qed_dev *cdev,

 for_each_hwfn(cdev, i) {
  struct qed_hwfn *hwfn = &cdev->hwfns[i];
+  struct qed_ptt *p_ptt;
  struct qed_tunnel_info *tun;

  tun = &hwfn->cdev->tunnel;
+  if (IS_PF(cdev)) {
+   p_ptt = qed_ptt_acquire(hwfn);
+   if (!p_ptt)
+    return -EAGAIN;
+  } else {
+   p_ptt = NULL;
+  }

- rc = qed_sp_pf_update_tunn_cfg(hwfn, 
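A quick sanity check of the VF XDP condition above (total_cids >= info->num_queues * 3): the factor of three plausibly budgets one Rx, one Tx and one XDP-Tx connection per queue, though the patch itself does not spell the breakdown out.

        /* 8 Rx queues, 16 CIDs granted by the PF: 16 < 8 * 3 -> no VF XDP
         * 8 Rx queues, 24 CIDs granted by the PF: 24 >= 8 * 3 -> xdp_supported
         */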
+ rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info, QED_SPQ_MODE_EBLOCK, NULL); - if (rc) + if (rc) { + if (IS_PF(cdev)) + qed_ptt_release(hwfn, p_ptt); return rc; + } if (IS_PF_SRIOV(hwfn)) { u16 vxlan_port, geneve_port; @@ -2325,6 +2517,8 @@ static int qed_tunn_configure(struct qed_dev *cdev, qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); } + if (IS_PF(cdev)) + qed_ptt_release(hwfn, p_ptt); } return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 6f44229899eb..f8f09aadced7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -277,40 +277,87 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); void qed_reset_vport_stats(struct qed_dev *cdev); -struct qed_queue_cid { - /* 'Relative' is a relative term ;-). Usually the indices [not counting - * SBs] would be PF-relative, but there are some cases where that isn't - * the case - specifically for a PF configuring its VF indices it's - * possible some fields [E.g., stats-id] in 'rel' would already be abs. +#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8) +#define QED_QUEUE_CID_SELF (0xff) + +/* Almost identical to the qed_queue_start_common_params, + * but here we maintain the SB index in IGU CAM. + */ +struct qed_queue_cid_params { + u8 vport_id; + u16 queue_id; + u8 stats_id; +}; + +/* Additional parameters required for initialization of the queue_cid + * and are relevant only for a PF initializing one for its VFs. + */ +struct qed_queue_cid_vf_params { + /* Should match the VF's relative index */ + u8 vfid; + + /* 0-based queue index. Should reflect the relative qzone the + * VF thinks is associated with it [in its range]. + */ + u8 vf_qid; + + /* Indicates a VF is legacy, making it differ in several things: + * - Producers would be placed in a different place. + * - Makes assumptions regarding the CIDs. */ - struct qed_queue_start_common_params rel; - struct qed_queue_start_common_params abs; + u8 vf_legacy; + + u8 qid_usage_idx; +}; + +struct qed_queue_cid { + /* For stats-id, the `rel' is actually absolute as well */ + struct qed_queue_cid_params rel; + struct qed_queue_cid_params abs; + + /* These have no 'relative' meaning */ + u16 sb_igu_id; + u8 sb_idx; + u32 cid; u16 opaque_fid; + bool b_is_rx; + /* VFs queues are mapped differently, so we need to know the * relative queue associated with them [0-based]. * Notice this is relevant on the *PF* queue-cid of its VF's queues, * and not on the VF itself. */ - bool is_vf; + u8 vfid; u8 vf_qid; - /* Legacy VFs might have Rx producer located elsewhere */ - bool b_legacy_vf; + /* We need an additional index to differentiate between queues opened + * for same queue-zone, as VFs would have to communicate the info + * to the PF [otherwise PF has no way to differentiate]. 
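Tying qed_queue_cid_vf_params to the earlier qed_eth_queue_to_cid() changes: a PF opening a queue on behalf of a legacy VF would pass something like the following (the vfid/vf_qid values are illustrative; the flag macros appear just below):

        struct qed_queue_cid_vf_params vf_params = {
                .vfid = 5,              /* VF's relative index */
                .vf_qid = 0,            /* VF's 0-based queue */
                .vf_legacy = QED_QCID_LEGACY_VF_CID |
                             QED_QCID_LEGACY_VF_RX_PROD,
                .qid_usage_idx = 0,     /* communicated by the VF */
        };
        struct qed_queue_cid *p_cid;

        p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params,
                                     true /* Rx */, &vf_params);
        /* With QED_QCID_LEGACY_VF_CID set, no new CID is acquired; the
         * vf_qid doubles as the CID, as qed_eth_queue_to_cid() shows.
         */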
+ */ + u8 qid_usage_idx; + + u8 vf_legacy; +#define QED_QCID_LEGACY_VF_RX_PROD (BIT(0)) +#define QED_QCID_LEGACY_VF_CID (BIT(1)) struct qed_hwfn *p_owner; }; +int qed_l2_alloc(struct qed_hwfn *p_hwfn); +void qed_l2_setup(struct qed_hwfn *p_hwfn); +void qed_l2_free(struct qed_hwfn *p_hwfn); + void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid); -struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - u8 vf_qid, - struct qed_queue_start_common_params - *p_params); +struct qed_queue_cid * +qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + bool b_is_rx, + struct qed_queue_cid_vf_params *p_vf_params); int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 09c86411918c..c06ad4f0758e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -38,7 +38,6 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/stddef.h> -#include <linux/version.h> #include <linux/workqueue.h> #include <net/ipv6.h> #include <linux/bitops.h> @@ -62,7 +61,7 @@ #include "qed_ooo.h" #include "qed_reg_addr.h" #include "qed_sp.h" -#include "qed_roce.h" +#include "qed_rdma.h" #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) @@ -74,7 +73,6 @@ struct qed_cb_ll2_info { int rx_cnt; u32 rx_size; u8 handle; - bool frags_mapped; /* Lock protecting LL2 buffer lists in sleepless context */ spinlock_t lock; @@ -90,13 +88,14 @@ struct qed_ll2_buffer { dma_addr_t phys_addr; }; -static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn, +static void qed_ll2b_complete_tx_packet(void *cxt, u8 connection_handle, void *cookie, dma_addr_t first_frag_addr, bool b_last_fragment, bool b_last_packet) { + struct qed_hwfn *p_hwfn = cxt; struct qed_dev *cdev = p_hwfn->cdev; struct sk_buff *skb = cookie; @@ -108,12 +107,6 @@ static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn, cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb, b_last_fragment); - if (cdev->ll2->frags_mapped) - /* Case where mapped frags were received, need to - * free skb with nr_frags marked as 0 - */ - skb_shinfo(skb)->nr_frags = 0; - dev_kfree_skb_any(skb); } @@ -165,42 +158,34 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev) qed_ll2_dealloc_buffer(cdev, buffer); } -static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - struct qed_ll2_rx_packet *p_pkt, - struct core_rx_fast_path_cqe *p_cqe, - bool b_last_packet) +void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data) { - u16 packet_length = le16_to_cpu(p_cqe->packet_length); - struct qed_ll2_buffer *buffer = p_pkt->cookie; + struct qed_hwfn *p_hwfn = cxt; + struct qed_ll2_buffer *buffer = data->cookie; struct qed_dev *cdev = p_hwfn->cdev; - u16 vlan = le16_to_cpu(p_cqe->vlan); - u32 opaque_data_0, opaque_data_1; - u8 pad = p_cqe->placement_offset; dma_addr_t new_phys_addr; struct sk_buff *skb; bool reuse = false; int rc = -EINVAL; u8 *new_data; - opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]); - opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]); - DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA), "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data 
[0x%08x:0x%08x]\n", - (u64)p_pkt->rx_buf_addr, pad, packet_length, - le16_to_cpu(p_cqe->parse_flags.flags), vlan, - opaque_data_0, opaque_data_1); + (u64)data->rx_buf_addr, + data->u.placement_offset, + data->length.packet_length, + data->parse_flags, + data->vlan, data->opaque_data_0, data->opaque_data_1); if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, - buffer->data, packet_length, false); + buffer->data, data->length.packet_length, false); } /* Determine if data is valid */ - if (packet_length < ETH_HLEN) + if (data->length.packet_length < ETH_HLEN) reuse = true; /* Allocate a replacement for buffer; Reuse upon failure */ @@ -220,9 +205,9 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, goto out_post; } - pad += NET_SKB_PAD; - skb_reserve(skb, pad); - skb_put(skb, packet_length); + data->u.placement_offset += NET_SKB_PAD; + skb_reserve(skb, data->u.placement_offset); + skb_put(skb, data->length.packet_length); skb_checksum_none_assert(skb); /* Get parital ethernet information instead of eth_type_trans(), @@ -233,10 +218,12 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, /* Pass SKB onward */ if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) { - if (vlan) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); + if (data->vlan) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + data->vlan); cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, - opaque_data_0, opaque_data_1); + data->opaque_data_0, + data->opaque_data_1); } /* Update Buffer information and update FW producer */ @@ -322,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_del(&p_pkt->list_entry); b_last_packet = list_empty(&p_tx->active_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) { + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { struct qed_ooo_buffer *p_buffer; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; @@ -334,21 +321,12 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->conn.gsi_enable) - qed_ll2b_release_tx_gsi_packet(p_hwfn, - p_ll2_conn-> - my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, - b_last_packet); - else - qed_ll2b_complete_tx_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, - b_last_packet); + p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie, + p_ll2_conn->my_id, + p_pkt->cookie, + tx_frag, + b_last_frag, + b_last_packet); } } } @@ -361,7 +339,6 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) struct qed_ll2_tx_packet *p_pkt; bool b_last_frag = false; unsigned long flags; - dma_addr_t tx_frag; int rc = -EINVAL; spin_lock_irqsave(&p_tx->lock, flags); @@ -402,19 +379,13 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); spin_unlock_irqrestore(&p_tx->lock, flags); - tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->conn.gsi_enable) - qed_ll2b_complete_tx_gsi_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, !num_bds); - else - qed_ll2b_complete_tx_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, !num_bds); + + p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie, + p_ll2_conn->my_id, + p_pkt->cookie, + p_pkt->bds_set[0].tx_frag, + 
b_last_frag, !num_bds); + spin_lock_irqsave(&p_tx->lock, flags); } @@ -425,81 +396,71 @@ out: return rc; } -static int -qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_info, - union core_rx_cqe_union *p_cqe, - unsigned long lock_flags, bool b_last_cqe) +static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn, + union core_rx_cqe_union *p_cqe, + struct qed_ll2_comp_rx_data *data) { - struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue; - struct qed_ll2_rx_packet *p_pkt = NULL; - u16 packet_length, parse_flags, vlan; - u32 src_mac_addrhi; - u16 src_mac_addrlo; - - if (!list_empty(&p_rx->active_descq)) - p_pkt = list_first_entry(&p_rx->active_descq, - struct qed_ll2_rx_packet, list_entry); - if (!p_pkt) { - DP_NOTICE(p_hwfn, - "GSI Rx completion but active_descq is empty\n"); - return -EIO; - } - - list_del(&p_pkt->list_entry); - parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags); - packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length); - vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan); - src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); - src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); - if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) - DP_NOTICE(p_hwfn, - "Mismatch between active_descq and the LL2 Rx chain\n"); - list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); - - spin_unlock_irqrestore(&p_rx->lock, lock_flags); - qed_ll2b_complete_rx_gsi_packet(p_hwfn, - p_ll2_info->my_id, - p_pkt->cookie, - p_pkt->rx_buf_addr, - packet_length, - p_cqe->rx_cqe_gsi.data_length_error, - parse_flags, - vlan, - src_mac_addrhi, - src_mac_addrlo, b_last_cqe); - spin_lock_irqsave(&p_rx->lock, lock_flags); + data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags); + data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length); + data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan); + data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); + data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); + data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error; +} - return 0; +static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, + union core_rx_cqe_union *p_cqe, + struct qed_ll2_comp_rx_data *data) +{ + data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags); + data->length.packet_length = + le16_to_cpu(p_cqe->rx_cqe_fp.packet_length); + data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan); + data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]); + data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]); + data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset; } -static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_conn, - union core_rx_cqe_union *p_cqe, - unsigned long *p_lock_flags, - bool b_last_cqe) +static int +qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn, + union core_rx_cqe_union *p_cqe, + unsigned long *p_lock_flags, bool b_last_cqe) { struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct qed_ll2_rx_packet *p_pkt = NULL; + struct qed_ll2_comp_rx_data data; if (!list_empty(&p_rx->active_descq)) p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); if (!p_pkt) { DP_NOTICE(p_hwfn, - "LL2 Rx completion but active_descq is empty\n"); + "[%d] LL2 Rx completion but active_descq is empty\n", + p_ll2_conn->input.conn_type); + return -EIO; } list_del(&p_pkt->list_entry); + if (p_cqe->rx_cqe_sp.type == 
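After this refactor a single rx_comp_cb sees both regular and GSI completions through struct qed_ll2_comp_rx_data. A sketch of a minimal consumer callback with the same shape as qed_ll2b_complete_rx_packet():

        static void my_rx_comp(void *cxt, struct qed_ll2_comp_rx_data *data)
        {
                struct qed_hwfn *p_hwfn = cxt;

                /* Regular CQEs fill data->length.packet_length and
                 * data->u.placement_offset; GSI CQEs fill
                 * data->length.data_length and data->u.data_length_error
                 * (see the two parse helpers above).
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                           "rx on handle %d, buf %pad, last %d\n",
                           data->connection_handle,
                           &data->rx_buf_addr, data->b_last_packet);
        }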
CORE_RX_CQE_TYPE_REGULAR) + qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data); + else + qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data); if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) DP_NOTICE(p_hwfn, "Mismatch between active_descq and the LL2 Rx chain\n"); + list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); + data.connection_handle = p_ll2_conn->my_id; + data.cookie = p_pkt->cookie; + data.rx_buf_addr = p_pkt->rx_buf_addr; + data.b_last_packet = b_last_cqe; + spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); - qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id, - p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe); + p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data); + spin_lock_irqsave(&p_rx->lock, *p_lock_flags); return 0; @@ -507,7 +468,7 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn, static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) { - struct qed_ll2_info *p_ll2_conn = cookie; + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie; struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; union core_rx_cqe_union *cqe = NULL; u16 cq_new_idx = 0, cq_old_idx = 0; @@ -521,7 +482,9 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) while (cq_new_idx != cq_old_idx) { bool b_last_cqe = (cq_new_idx == cq_old_idx); - cqe = qed_chain_consume(&p_rx->rcq_chain); + cqe = + (union core_rx_cqe_union *) + qed_chain_consume(&p_rx->rcq_chain); cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); DP_VERBOSE(p_hwfn, @@ -535,13 +498,10 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) rc = -EINVAL; break; case CORE_RX_CQE_TYPE_GSI_OFFLOAD: - rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn, - cqe, flags, b_last_cqe); - break; case CORE_RX_CQE_TYPE_REGULAR: - rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn, - cqe, &flags, - b_last_cqe); + rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn, + cqe, &flags, + b_last_cqe); break; default: rc = -EIO; @@ -565,10 +525,6 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) p_rx = &p_ll2_conn->rx_queue; while (!list_empty(&p_rx->active_descq)) { - dma_addr_t rx_buf_addr; - void *cookie; - bool b_last; - p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); if (!p_pkt) @@ -576,22 +532,26 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) { + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { struct qed_ooo_buffer *p_buffer; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buffer); } else { - rx_buf_addr = p_pkt->rx_buf_addr; - cookie = p_pkt->cookie; + dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr; + void *cookie = p_pkt->cookie; + bool b_last; b_last = list_empty(&p_rx->active_descq); + p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie, + p_ll2_conn->my_id, + cookie, + rx_buf_addr, b_last); } } } -#if IS_ENABLED(CONFIG_QED_ISCSI) static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags) { u8 bd_flags = 0; @@ -741,12 +701,13 @@ static void qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { + struct qed_ll2_tx_pkt_info tx_pkt; struct qed_ooo_buffer *p_buffer; - int rc; u16 l4_hdr_offset_w; dma_addr_t first_frag; u16 parse_flags; u8 bd_flags; + int rc; /* Submit Tx buffers here */ while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn, @@ -761,13 +722,18 @@ 
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1); SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1); - rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, - p_buffer->vlan, bd_flags, - l4_hdr_offset_w, - p_ll2_conn->conn.tx_dest, 0, - first_frag, - p_buffer->packet_length, - p_buffer, true); + memset(&tx_pkt, 0, sizeof(tx_pkt)); + tx_pkt.num_of_bds = 1; + tx_pkt.vlan = p_buffer->vlan; + tx_pkt.bd_flags = bd_flags; + tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; + tx_pkt.tx_dest = p_ll2_conn->tx_dest; + tx_pkt.first_frag = first_frag; + tx_pkt.first_frag_len = p_buffer->packet_length; + tx_pkt.cookie = p_buffer; + + rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, + &tx_pkt, true); if (rc) { qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buffer, false); @@ -874,85 +840,6 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) return 0; } -static int -qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_info, - u16 rx_num_ooo_buffers, u16 mtu) -{ - struct qed_ooo_buffer *p_buf = NULL; - void *p_virt; - u16 buf_idx; - int rc = 0; - - if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) - return rc; - - if (!rx_num_ooo_buffers) - return -EINVAL; - - for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) { - p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL); - if (!p_buf) { - rc = -ENOMEM; - goto out; - } - - p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; - p_buf->rx_buffer_size = (p_buf->rx_buffer_size + - ETH_CACHE_LINE_SIZE - 1) & - ~(ETH_CACHE_LINE_SIZE - 1); - p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - p_buf->rx_buffer_size, - &p_buf->rx_buffer_phys_addr, - GFP_KERNEL); - if (!p_virt) { - kfree(p_buf); - rc = -ENOMEM; - goto out; - } - - p_buf->rx_buffer_virt_addr = p_virt; - qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf); - } - - DP_VERBOSE(p_hwfn, QED_MSG_LL2, - "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", - rx_num_ooo_buffers, p_buf->rx_buffer_size); - -out: - return rc; -} - -static void -qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_conn) -{ - if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) - return; - - qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); - qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); -} - -static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_conn) -{ - struct qed_ooo_buffer *p_buffer; - - if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) - return; - - qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); - while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, - p_hwfn->p_ooo_info))) { - dma_free_coherent(&p_hwfn->cdev->pdev->dev, - p_buffer->rx_buffer_size, - p_buffer->rx_buffer_virt_addr, - p_buffer->rx_buffer_phys_addr); - kfree(p_buffer); - } -} - static void qed_ll2_stop_ooo(struct qed_dev *cdev) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); @@ -966,69 +853,11 @@ static void qed_ll2_stop_ooo(struct qed_dev *cdev) *handle = QED_LL2_UNUSED_HANDLE; } -static int qed_ll2_start_ooo(struct qed_dev *cdev, - struct qed_ll2_params *params) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; - struct qed_ll2_conn ll2_info = { 0 }; - int rc; - - ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO; - ll2_info.mtu = params->mtu; - ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; - ll2_info.rx_vlan_removal_en = 
params->rx_vlan_stripping; - ll2_info.tx_tc = OOO_LB_TC; - ll2_info.tx_dest = CORE_TX_DEST_LB; - - rc = qed_ll2_acquire_connection(hwfn, &ll2_info, - QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, - handle); - if (rc) { - DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); - goto out; - } - - rc = qed_ll2_establish_connection(hwfn, *handle); - if (rc) { - DP_INFO(cdev, "Failed to establist LL2 OOO connection\n"); - goto fail; - } - - return 0; - -fail: - qed_ll2_release_connection(hwfn, *handle); -out: - *handle = QED_LL2_UNUSED_HANDLE; - return rc; -} -#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ -static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, - void *p_cookie) { return -EINVAL; } -static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, - void *p_cookie) { return -EINVAL; } -static inline int -qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_info, - u16 rx_num_ooo_buffers, u16 mtu) { return 0; } -static inline void -qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_conn) { return; } -static inline void -qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_conn) { return; } -static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; } -static inline int qed_ll2_start_ooo(struct qed_dev *cdev, - struct qed_ll2_params *params) - { return -EINVAL; } -#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ - static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, u8 action_on_error) { - enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type; + enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct core_rx_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -1054,22 +883,21 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->sb_index = p_rx->rx_sb_index; p_ramrod->complete_event_flg = 1; - p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu); - DMA_REGPAIR_LE(p_ramrod->bd_base, - p_rx->rxq_chain.p_phys_addr); + p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); + DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr); cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, qed_chain_get_pbl_phys(&p_rx->rcq_chain)); - p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg; - p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en; + p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; + p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en; p_ramrod->queue_id = p_ll2_conn->queue_id; - p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 - : 1; + p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 
0 : 1; if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && - p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) { + p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) && + (conn_type != QED_LL2_TYPE_IWARP)) { p_ramrod->mf_si_bcast_accept_all = 1; p_ramrod->mf_si_mcast_accept_all = 1; } else { @@ -1078,14 +906,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, } p_ramrod->action_on_error.error_type = action_on_error; - p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable; + p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { - enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type; + enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct core_tx_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -1096,7 +924,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) return 0; - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) p_ll2_conn->tx_stats_en = 0; else p_ll2_conn->tx_stats_en = 1; @@ -1117,7 +945,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); p_ramrod->sb_index = p_tx->tx_sb_index; - p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu); + p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); p_ramrod->stats_en = p_ll2_conn->tx_stats_en; p_ramrod->stats_id = p_ll2_conn->tx_stats_id; @@ -1126,11 +954,11 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain); p_ramrod->pbl_size = cpu_to_le16(pbl_size); - switch (p_ll2_conn->conn.tx_tc) { - case LB_TC: + switch (p_ll2_conn->input.tx_tc) { + case PURE_LB_TC: pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); break; - case OOO_LB_TC: + case PKT_LB_TC: pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO); break; default: @@ -1145,18 +973,27 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->conn_type = PROTOCOLID_FCOE; break; case QED_LL2_TYPE_ISCSI: - case QED_LL2_TYPE_ISCSI_OOO: p_ramrod->conn_type = PROTOCOLID_ISCSI; break; case QED_LL2_TYPE_ROCE: p_ramrod->conn_type = PROTOCOLID_ROCE; break; + case QED_LL2_TYPE_IWARP: + p_ramrod->conn_type = PROTOCOLID_IWARP; + break; + case QED_LL2_TYPE_OOO: + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) + p_ramrod->conn_type = PROTOCOLID_ISCSI; + else + p_ramrod->conn_type = PROTOCOLID_IWARP; + break; default: p_ramrod->conn_type = PROTOCOLID_ETH; DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); } - p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable; + p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; + return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -1212,22 +1049,22 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, static int qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_info, u16 rx_num_desc) + struct qed_ll2_info *p_ll2_info) { struct qed_ll2_rx_packet *p_descq; u32 capacity; int rc = 0; - if (!rx_num_desc) + if (!p_ll2_info->input.rx_num_desc) goto out; rc = qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_NEXT_PTR, QED_CHAIN_CNT_TYPE_U16, - rx_num_desc, + p_ll2_info->input.rx_num_desc, sizeof(struct core_rx_bd), - 
&p_ll2_info->rx_queue.rxq_chain); + &p_ll2_info->rx_queue.rxq_chain, NULL); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n"); goto out; @@ -1247,9 +1084,9 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, - rx_num_desc, + p_ll2_info->input.rx_num_desc, sizeof(struct core_rx_fast_path_cqe), - &p_ll2_info->rx_queue.rcq_chain); + &p_ll2_info->rx_queue.rcq_chain, NULL); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n"); goto out; @@ -1257,30 +1094,29 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", - p_ll2_info->conn.conn_type, rx_num_desc); + p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc); out: return rc; } static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_info, - u16 tx_num_desc) + struct qed_ll2_info *p_ll2_info) { struct qed_ll2_tx_packet *p_descq; u32 capacity; int rc = 0; - if (!tx_num_desc) + if (!p_ll2_info->input.tx_num_desc) goto out; rc = qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, - tx_num_desc, + p_ll2_info->input.tx_num_desc, sizeof(struct core_tx_bd), - &p_ll2_info->tx_queue.txq_chain); + &p_ll2_info->tx_queue.txq_chain, NULL); if (rc) goto out; @@ -1295,28 +1131,112 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", - p_ll2_info->conn.conn_type, tx_num_desc); + p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc); out: if (rc) DP_NOTICE(p_hwfn, "Can't allocate memory for Tx LL2 with 0x%08x buffers\n", - tx_num_desc); + p_ll2_info->input.tx_num_desc); + return rc; +} + +static int +qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info, u16 mtu) +{ + struct qed_ooo_buffer *p_buf = NULL; + void *p_virt; + u16 buf_idx; + int rc = 0; + + if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO) + return rc; + + /* Correct number of requested OOO buffers if needed */ + if (!p_ll2_info->input.rx_num_ooo_buffers) { + u16 num_desc = p_ll2_info->input.rx_num_desc; + + if (!num_desc) + return -EINVAL; + p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2; + } + + for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers; + buf_idx++) { + p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL); + if (!p_buf) { + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; + p_buf->rx_buffer_size = (p_buf->rx_buffer_size + + ETH_CACHE_LINE_SIZE - 1) & + ~(ETH_CACHE_LINE_SIZE - 1); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_buf->rx_buffer_size, + &p_buf->rx_buffer_phys_addr, + GFP_KERNEL); + if (!p_virt) { + kfree(p_buf); + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_virt_addr = p_virt; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf); + } + + DP_VERBOSE(p_hwfn, QED_MSG_LL2, + "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", + p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size); + +out: return rc; } -int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, - struct qed_ll2_conn *p_params, - u16 rx_num_desc, - u16 tx_num_desc, - u8 *p_connection_handle) +static int +qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs) { + if (!cbs || (!cbs->rx_comp_cb || + !cbs->rx_release_cb || + !cbs->tx_comp_cb || 
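Two defaults get applied in the OOO path above: an unset rx_num_ooo_buffers becomes twice rx_num_desc, and each buffer is sized to the MTU plus 26 bytes of slack, rounded up to a cache line. Worked through with an assumed 64-byte ETH_CACHE_LINE_SIZE:

        /* mtu = 1500: 1500 + 26 + 64 = 1590
         * round up:   (1590 + 63) & ~63 = 1600 bytes per OOO buffer
         * rx_num_desc = 64, rx_num_ooo_buffers unset -> 64 * 2 = 128 buffers
         */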
!cbs->tx_release_cb || !cbs->cookie)) + return -EINVAL; + + p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb; + p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb; + p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb; + p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb; + p_ll2_info->cbs.cookie = cbs->cookie; + + return 0; +} + +static enum core_error_handle +qed_ll2_get_error_choice(enum qed_ll2_error_handle err) +{ + switch (err) { + case QED_LL2_DROP_PACKET: + return LL2_DROP_PACKET; + case QED_LL2_DO_NOTHING: + return LL2_DO_NOTHING; + case QED_LL2_ASSERT: + return LL2_ASSERT; + default: + return LL2_DO_NOTHING; + } +} + +int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) +{ + struct qed_hwfn *p_hwfn = cxt; qed_int_comp_cb_t comp_rx_cb, comp_tx_cb; struct qed_ll2_info *p_ll2_info = NULL; + u8 i, *p_tx_max; int rc; - u8 i; - if (!p_connection_handle || !p_hwfn->p_ll2_info) + if (!data->p_connection_handle || !p_hwfn->p_ll2_info) return -EINVAL; /* Find a free connection to be used */ @@ -1335,23 +1255,40 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, if (!p_ll2_info) return -EBUSY; - p_ll2_info->conn = *p_params; + memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input)); + + p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ? + CORE_TX_DEST_NW : CORE_TX_DEST_LB; - rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc); + /* Correct maximum number of Tx BDs */ + p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet; + if (*p_tx_max == 0) + *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET; + else + *p_tx_max = min_t(u8, *p_tx_max, + CORE_LL2_TX_MAX_BDS_PER_PACKET); + + rc = qed_ll2_set_cbs(p_ll2_info, data->cbs); + if (rc) { + DP_NOTICE(p_hwfn, "Invalid callback functions\n"); + goto q_allocate_fail; + } + + rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info); if (rc) goto q_allocate_fail; - rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc); + rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info); if (rc) goto q_allocate_fail; rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, - rx_num_desc * 2, p_params->mtu); + data->input.mtu); if (rc) goto q_allocate_fail; /* Register callbacks for the Rx/Tx queues */ - if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + if (data->input.conn_type == QED_LL2_TYPE_OOO) { comp_rx_cb = qed_ll2_lb_rxq_completion; comp_tx_cb = qed_ll2_lb_txq_completion; } else { @@ -1359,7 +1296,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, comp_tx_cb = qed_ll2_txq_completion; } - if (rx_num_desc) { + if (data->input.rx_num_desc) { qed_int_register_cb(p_hwfn, comp_rx_cb, &p_hwfn->p_ll2_info[i], &p_ll2_info->rx_queue.rx_sb_index, @@ -1367,7 +1304,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, p_ll2_info->rx_queue.b_cb_registred = true; } - if (tx_num_desc) { + if (data->input.tx_num_desc) { qed_int_register_cb(p_hwfn, comp_tx_cb, &p_hwfn->p_ll2_info[i], @@ -1376,7 +1313,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, p_ll2_info->tx_queue.b_cb_registred = true; } - *p_connection_handle = i; + *data->p_connection_handle = i; return rc; q_allocate_fail: @@ -1387,24 +1324,39 @@ q_allocate_fail: static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { + enum qed_ll2_error_handle error_input; + enum core_error_handle error_mode; u8 action_on_error = 0; if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) return 0; DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0); - + error_input = 
p_ll2_conn->input.ai_err_packet_too_big; + error_mode = qed_ll2_get_error_choice(error_input); SET_FIELD(action_on_error, - CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, - p_ll2_conn->conn.ai_err_packet_too_big); - SET_FIELD(action_on_error, - CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf); + CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode); + error_input = p_ll2_conn->input.ai_err_no_buf; + error_mode = qed_ll2_get_error_choice(error_input); + SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode); return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); } -int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) +static void +qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) { + if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) + return; + + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); +} + +int qed_ll2_establish_connection(void *cxt, u8 connection_handle) +{ + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn; struct qed_ll2_rx_queue *p_rx; struct qed_ll2_tx_queue *p_tx; @@ -1477,12 +1429,12 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) if (rc) goto out; - if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + if (!QED_IS_RDMA_PERSONALITY(p_hwfn)) qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1); qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { qed_llh_add_protocol_filter(p_hwfn, p_ptt, 0x8906, 0, QED_LLH_FILTER_ETHERTYPE); @@ -1531,11 +1483,12 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); } -int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, +int qed_ll2_post_rx_buffer(void *cxt, u8 connection_handle, dma_addr_t addr, u16 buf_len, void *cookie, u8 notify_fw) { + struct qed_hwfn *p_hwfn = cxt; struct core_rx_bd_with_buff_len *p_curb = NULL; struct qed_ll2_rx_packet *p_curp = NULL; struct qed_ll2_info *p_ll2_conn; @@ -1594,20 +1547,18 @@ out: static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn, struct qed_ll2_tx_queue *p_tx, struct qed_ll2_tx_packet *p_curp, - u8 num_of_bds, - dma_addr_t first_frag, - u16 first_frag_len, void *p_cookie, + struct qed_ll2_tx_pkt_info *pkt, u8 notify_fw) { list_del(&p_curp->list_entry); - p_curp->cookie = p_cookie; - p_curp->bd_used = num_of_bds; + p_curp->cookie = pkt->cookie; + p_curp->bd_used = pkt->num_of_bds; p_curp->notify_fw = notify_fw; p_tx->cur_send_packet = p_curp; p_tx->cur_send_frag_num = 0; - p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag; - p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len; + p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag; + p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len; p_tx->cur_send_frag_num++; } @@ -1615,51 +1566,52 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2, struct qed_ll2_tx_packet *p_curp, - u8 num_of_bds, - enum core_tx_dest tx_dest, - u16 vlan, - u8 bd_flags, - u16 l4_hdr_offset_w, - enum core_roce_flavor_type roce_flavor, - dma_addr_t first_frag, - u16 first_frag_len) + struct qed_ll2_tx_pkt_info *pkt) { struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain); struct core_tx_bd *start_bd = NULL; + 
enum core_roce_flavor_type roce_flavor; + enum core_tx_dest tx_dest; u16 bd_data = 0, frag_idx; + roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE + : CORE_RROCE; + + tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW + : CORE_TX_DEST_LB; + start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); - start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan); + start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, - cpu_to_le16(l4_hdr_offset_w)); + cpu_to_le16(pkt->l4_hdr_offset_w)); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); - bd_data |= bd_flags; + bd_data |= pkt->bd_flags; SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); - SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds); + SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor); start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); - DMA_REGPAIR_LE(start_bd->addr, first_frag); - start_bd->nbytes = cpu_to_le16(first_frag_len); + DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); + start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); DP_VERBOSE(p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", p_ll2->queue_id, p_ll2->cid, - p_ll2->conn.conn_type, + p_ll2->input.conn_type, prod_idx, - first_frag_len, - num_of_bds, + pkt->first_frag_len, + pkt->num_of_bds, le32_to_cpu(start_bd->addr.hi), le32_to_cpu(start_bd->addr.lo)); - if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds) + if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds) return; /* Need to provide the packet with additional BDs for frags */ for (frag_idx = p_ll2->tx_queue.cur_send_frag_num; - frag_idx < num_of_bds; frag_idx++) { + frag_idx < pkt->num_of_bds; frag_idx++) { struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); @@ -1722,26 +1674,20 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", p_ll2_conn->queue_id, - p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod); + p_ll2_conn->cid, + p_ll2_conn->input.conn_type, db_msg.spq_prod); } -int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, +int qed_ll2_prepare_tx_packet(void *cxt, u8 connection_handle, - u8 num_of_bds, - u16 vlan, - u8 bd_flags, - u16 l4_hdr_offset_w, - enum qed_ll2_tx_dest e_tx_dest, - enum qed_ll2_roce_flavor_type qed_roce_flavor, - dma_addr_t first_frag, - u16 first_frag_len, void *cookie, u8 notify_fw) + struct qed_ll2_tx_pkt_info *pkt, + bool notify_fw) { + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_tx_packet *p_curp = NULL; struct qed_ll2_info *p_ll2_conn = NULL; - enum core_roce_flavor_type roce_flavor; struct qed_ll2_tx_queue *p_tx; struct qed_chain *p_tx_chain; - enum core_tx_dest tx_dest; unsigned long flags; int rc = 0; @@ -1751,7 +1697,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, p_tx = &p_ll2_conn->tx_queue; p_tx_chain = &p_tx->txq_chain; - if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET) + if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET) return -EIO; spin_lock_irqsave(&p_tx->lock, flags); @@ -1764,7 +1710,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, if (!list_empty(&p_tx->free_descq)) p_curp = list_first_entry(&p_tx->free_descq, struct qed_ll2_tx_packet, list_entry); - if (p_curp 
&& qed_chain_get_elem_left(p_tx_chain) < num_of_bds) + if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds) p_curp = NULL; if (!p_curp) { @@ -1772,26 +1718,10 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, goto out; } - tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW : - CORE_TX_DEST_LB; - if (qed_roce_flavor == QED_LL2_ROCE) { - roce_flavor = CORE_ROCE; - } else if (qed_roce_flavor == QED_LL2_RROCE) { - roce_flavor = CORE_RROCE; - } else { - rc = -EINVAL; - goto out; - } - /* Prepare packet and BD, and perhaps send a doorbell to FW */ - qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, - num_of_bds, first_frag, - first_frag_len, cookie, notify_fw); - qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, - num_of_bds, tx_dest, - vlan, bd_flags, l4_hdr_offset_w, - roce_flavor, - first_frag, first_frag_len); + qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw); + + qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt); qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); @@ -1800,11 +1730,12 @@ out: return rc; } -int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, +int qed_ll2_set_fragment_of_tx_packet(void *cxt, u8 connection_handle, dma_addr_t addr, u16 nbytes) { struct qed_ll2_tx_packet *p_cur_send_packet = NULL; + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; u16 cur_send_frag_num = 0; struct core_tx_bd *p_bd; @@ -1839,8 +1770,9 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, return 0; } -int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) +int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) { + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; int rc = -EINVAL; struct qed_ptt *p_ptt; @@ -1870,10 +1802,10 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_ll2_rxq_flush(p_hwfn, connection_handle); } - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { qed_llh_remove_protocol_filter(p_hwfn, p_ptt, 0x8906, 0, QED_LLH_FILTER_ETHERTYPE); @@ -1887,8 +1819,28 @@ out: return rc; } -void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) +static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) { + struct qed_ooo_buffer *p_buffer; + + if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) + return; + + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_buffer->rx_buffer_size, + p_buffer->rx_buffer_virt_addr, + p_buffer->rx_buffer_phys_addr); + kfree(p_buffer); + } +} + +void qed_ll2_release_connection(void *cxt, u8 connection_handle) +{ + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); @@ -1921,7 +1873,7 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) mutex_unlock(&p_ll2_conn->mutex); } -struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn) +int qed_ll2_alloc(struct qed_hwfn *p_hwfn) { struct qed_ll2_info *p_ll2_connections; u8 i; @@ -1931,28 +1883,52 @@ struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn) sizeof(struct 
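The nine scalar arguments the old prepare call took now travel in struct qed_ll2_tx_pkt_info. A sketch of queueing a two-fragment packet through the reworked pair of calls (the frag0_*/frag1_* names and my_cookie are placeholders for caller-mapped DMA fragments and the caller's context):

        struct qed_ll2_tx_pkt_info pkt;
        int rc;

        memset(&pkt, 0, sizeof(pkt));
        pkt.num_of_bds = 2;             /* first frag + one extra BD */
        pkt.tx_dest = QED_LL2_TX_DEST_NW;
        pkt.first_frag = frag0_phys;
        pkt.first_frag_len = frag0_len;
        pkt.cookie = my_cookie;         /* echoed back in tx_comp_cb */

        rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
        if (!rc)
                rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
                                                       frag1_phys, frag1_len);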
qed_ll2_info), GFP_KERNEL); if (!p_ll2_connections) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n"); - return NULL; + return -ENOMEM; } for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) p_ll2_connections[i].my_id = i; - return p_ll2_connections; + p_hwfn->p_ll2_info = p_ll2_connections; + return 0; } -void qed_ll2_setup(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_connections) +void qed_ll2_setup(struct qed_hwfn *p_hwfn) { int i; for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) - mutex_init(&p_ll2_connections[i].mutex); + mutex_init(&p_hwfn->p_ll2_info[i].mutex); } -void qed_ll2_free(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_connections) +void qed_ll2_free(struct qed_hwfn *p_hwfn) { - kfree(p_ll2_connections); + if (!p_hwfn->p_ll2_info) + return; + + kfree(p_hwfn->p_ll2_info); + p_hwfn->p_ll2_info = NULL; +} + +static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_ll2_stats *p_stats) +{ + struct core_ll2_port_stats port_stats; + + memset(&port_stats, 0, sizeof(port_stats)); + qed_memcpy_from(p_hwfn, p_ptt, &port_stats, + BAR0_MAP_REG_TSDM_RAM + + TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)), + sizeof(port_stats)); + + p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr); + p_stats->gsi_invalid_pkt_length = + HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length); + p_stats->gsi_unsupported_pkt_typ = + HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ); + p_stats->gsi_crcchksm_error = + HILO_64_REGPAIR(port_stats.gsi_crcchksm_error); } static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn, @@ -2018,9 +1994,10 @@ static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn, p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts); } -int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, +int qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats) { + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ptt *p_ptt; @@ -2038,6 +2015,8 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, return -EINVAL; } + if (p_ll2_conn->input.gsi_enable) + _qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats); _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats); if (p_ll2_conn->tx_stats_en) @@ -2047,6 +2026,17 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, return 0; } +static void qed_ll2b_release_rx_packet(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, + bool b_last_packet) +{ + struct qed_hwfn *p_hwfn = cxt; + + qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie); +} + static void qed_ll2_register_cb_ops(struct qed_dev *cdev, const struct qed_ll2_cb_ops *ops, void *cookie) @@ -2055,21 +2045,86 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev, cdev->ll2->cb_cookie = cookie; } +struct qed_ll2_cbs ll2_cbs = { + .rx_comp_cb = &qed_ll2b_complete_rx_packet, + .rx_release_cb = &qed_ll2b_release_rx_packet, + .tx_comp_cb = &qed_ll2b_complete_tx_packet, + .tx_release_cb = &qed_ll2b_complete_tx_packet, +}; + +static void qed_ll2_set_conn_data(struct qed_dev *cdev, + struct qed_ll2_acquire_data *data, + struct qed_ll2_params *params, + enum qed_ll2_conn_type conn_type, + u8 *handle, bool lb) +{ + memset(data, 0, sizeof(*data)); + + data->input.conn_type = conn_type; + data->input.mtu = params->mtu; + data->input.rx_num_desc = QED_LL2_RX_SIZE; + data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets; + data->input.rx_vlan_removal_en = params->rx_vlan_stripping; + 
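The new _qed_ll2_get_port_stats() above copies a firmware counter block out of TSDM RAM and folds each 32-bit hi/lo register pair into a 64-bit counter via HILO_64_REGPAIR. A standalone sketch of that combination, assuming the usual regpair layout (the macro is re-derived here, not quoted from qed_hsi.h):

```c
#include <stdint.h>
#include <stdio.h>

/* Firmware exports 64-bit counters as two 32-bit halves. */
struct regpair {
	uint32_t lo;
	uint32_t hi;
};

/* Assumed equivalent of the driver's HILO_64_REGPAIR(). */
#define HILO_64_REGPAIR(rp) (((uint64_t)(rp).hi << 32) | (rp).lo)

int main(void)
{
	struct regpair gsi_invalid_hdr = { .lo = 0xdeadbeef, .hi = 0x1 };

	/* 0x1deadbeef == 8030895855 */
	printf("%llu\n",
	       (unsigned long long)HILO_64_REGPAIR(gsi_invalid_hdr));
	return 0;
}
```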
data->input.tx_num_desc = QED_LL2_TX_SIZE; + data->p_connection_handle = handle; + data->cbs = &ll2_cbs; + ll2_cbs.cookie = QED_LEADING_HWFN(cdev); + + if (lb) { + data->input.tx_tc = PKT_LB_TC; + data->input.tx_dest = QED_LL2_TX_DEST_LB; + } else { + data->input.tx_tc = 0; + data->input.tx_dest = QED_LL2_TX_DEST_NW; + } +} + +static int qed_ll2_start_ooo(struct qed_dev *cdev, + struct qed_ll2_params *params) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; + struct qed_ll2_acquire_data data; + int rc; + + qed_ll2_set_conn_data(cdev, &data, params, + QED_LL2_TYPE_OOO, handle, true); + + rc = qed_ll2_acquire_connection(hwfn, &data); + if (rc) { + DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); + goto out; + } + + rc = qed_ll2_establish_connection(hwfn, *handle); + if (rc) { + DP_INFO(cdev, "Failed to establish LL2 OOO connection\n"); + goto fail; + } + + return 0; + +fail: + qed_ll2_release_connection(hwfn, *handle); +out: + *handle = QED_LL2_UNUSED_HANDLE; + return rc; +} + static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) { - struct qed_ll2_conn ll2_info; struct qed_ll2_buffer *buffer, *tmp_buffer; enum qed_ll2_conn_type conn_type; + struct qed_ll2_acquire_data data; struct qed_ptt *p_ptt; int rc, i; - u8 gsi_enable = 1; + /* Initialize LL2 locks & lists */ INIT_LIST_HEAD(&cdev->ll2->list); spin_lock_init(&cdev->ll2->lock); cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN + L1_CACHE_BYTES + params->mtu; - cdev->ll2->frags_mapped = params->frags_mapped; /*Allocate memory for LL2 */ DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n", @@ -2094,11 +2149,9 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) switch (QED_LEADING_HWFN(cdev)->hw_info.personality) { case QED_PCI_FCOE: conn_type = QED_LL2_TYPE_FCOE; - gsi_enable = 0; break; case QED_PCI_ISCSI: conn_type = QED_LL2_TYPE_ISCSI; - gsi_enable = 0; break; case QED_PCI_ETH_ROCE: conn_type = QED_LL2_TYPE_ROCE; @@ -2107,20 +2160,10 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) conn_type = QED_LL2_TYPE_TEST; } - /* Prepare the temporary ll2 information */ - memset(&ll2_info, 0, sizeof(ll2_info)); + qed_ll2_set_conn_data(cdev, &data, params, conn_type, + &cdev->ll2->handle, false); - ll2_info.conn_type = conn_type; - ll2_info.mtu = params->mtu; - ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; - ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping; - ll2_info.tx_tc = 0; - ll2_info.tx_dest = CORE_TX_DEST_NW; - ll2_info.gsi_enable = gsi_enable; - - rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info, - QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, - &cdev->ll2->handle); + rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data); if (rc) { DP_INFO(cdev, "Failed to acquire LL2 connection\n"); goto fail; } @@ -2243,6 +2286,7 @@ fail: static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) { + struct qed_ll2_tx_pkt_info pkt; const skb_frag_t *frag; int rc = -EINVAL, i; dma_addr_t mapping; @@ -2277,32 +2321,30 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT); } - rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), - cdev->ll2->handle, - 1 + skb_shinfo(skb)->nr_frags, - vlan, flags, 0, QED_LL2_TX_DEST_NW, - 0 /* RoCE FLAVOR */, - mapping, skb->len, skb, 1); + memset(&pkt, 0, sizeof(pkt)); + pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags; + pkt.vlan = vlan; +
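qed_ll2_start_ooo() above is a textbook two-stage bring-up: a failed establish must undo the acquire, and on any failure the caller-visible handle is reset to QED_LL2_UNUSED_HANDLE. A minimal standalone rendering of that goto ladder; all names are illustrative, not the driver's, and establish() is hard-wired to fail so the unwind path is exercised.

```c
#define UNUSED_HANDLE 0xff

static int acquire(unsigned char *h) { *h = 3; return 0; }
static int establish(unsigned char h) { (void)h; return -1; }
static void release(unsigned char h) { (void)h; }

int start_connection(unsigned char *handle)
{
	int rc;

	rc = acquire(handle);
	if (rc)
		goto out;	/* nothing succeeded yet; nothing to undo */

	rc = establish(*handle);
	if (rc)
		goto fail;	/* undo the acquire */

	return 0;		/* success: handle stays valid */

fail:
	release(*handle);
out:
	*handle = UNUSED_HANDLE;	/* never leak a half-built handle */
	return rc;
}
```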
pkt.bd_flags = flags; + pkt.tx_dest = QED_LL2_TX_DEST_NW; + pkt.first_frag = mapping; + pkt.first_frag_len = skb->len; + pkt.cookie = skb; + + rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, + &pkt, 1); if (rc) goto err; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; - if (!cdev->ll2->frags_mapped) { - mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(&cdev->pdev->dev, - mapping))) { - DP_NOTICE(cdev, - "Unable to map frag - dropping packet\n"); - rc = -ENOMEM; - goto err; - } - } else { - mapping = page_to_phys(skb_frag_page(frag)) | - frag->page_offset; + + mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { + DP_NOTICE(cdev, + "Unable to map frag - dropping packet\n"); + goto err; } rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev), diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 31a409033c41..a822528e9c63 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -47,29 +47,6 @@ #define QED_MAX_NUM_OF_LL2_CONNECTIONS (4) -enum qed_ll2_roce_flavor_type { - QED_LL2_ROCE, - QED_LL2_RROCE, - MAX_QED_LL2_ROCE_FLAVOR_TYPE -}; - -enum qed_ll2_conn_type { - QED_LL2_TYPE_FCOE, - QED_LL2_TYPE_ISCSI, - QED_LL2_TYPE_TEST, - QED_LL2_TYPE_ISCSI_OOO, - QED_LL2_TYPE_RESERVED2, - QED_LL2_TYPE_ROCE, - QED_LL2_TYPE_RESERVED3, - MAX_QED_LL2_RX_CONN_TYPE -}; - -enum qed_ll2_tx_dest { - QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ - QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ - QED_LL2_TX_DEST_MAX -}; - struct qed_ll2_rx_packet { struct list_head list_entry; struct core_rx_bd_with_buff_len *rxq_bd; @@ -135,30 +112,21 @@ struct qed_ll2_tx_queue { bool b_completing_packet; }; -struct qed_ll2_conn { - enum qed_ll2_conn_type conn_type; - u16 mtu; - u8 rx_drop_ttl0_flg; - u8 rx_vlan_removal_en; - u8 tx_tc; - enum core_tx_dest tx_dest; - enum core_error_handle ai_err_packet_too_big; - enum core_error_handle ai_err_no_buf; - u8 gsi_enable; -}; - struct qed_ll2_info { /* Lock protecting the state of LL2 */ struct mutex mutex; - struct qed_ll2_conn conn; + + struct qed_ll2_acquire_data_inputs input; u32 cid; u8 my_id; u8 queue_id; u8 tx_stats_id; bool b_active; + enum core_tx_dest tx_dest; u8 tx_stats_en; struct qed_ll2_rx_queue rx_queue; struct qed_ll2_tx_queue tx_queue; + struct qed_ll2_cbs cbs; }; /** @@ -166,38 +134,30 @@ struct qed_ll2_info { * starts rx & tx (if relevant) queues pair. Provides * connecion handler as output parameter. 
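One behavioral detail worth noting in the qed_ll2_start_xmit() hunk above: the removed mapping-failure branch set rc = -ENOMEM before goto err, while the replacement jumps to err with rc still holding the zero returned by the successful qed_ll2_prepare_tx_packet(), so a fragment-mapping failure appears to be reported as success. A kernel-style sketch of the loop with the assignment kept; this is an excerpt that relies on the surrounding function's locals, not a standalone program, and the trailing error check is assumed rather than quoted.

```c
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
	frag = &skb_shinfo(skb)->frags[i];

	mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "Unable to map frag - dropping packet\n");
		rc = -ENOMEM;	/* assignment dropped by the hunk above */
		goto err;
	}

	rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
					       cdev->ll2->handle, mapping,
					       skb_frag_size(frag));
	if (rc)
		goto err;
}
```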
* - * @param p_hwfn - * @param p_params Contain various configuration properties - * @param rx_num_desc - * @param tx_num_desc - * - * @param p_connection_handle Output container for LL2 connection's handle * - * @return 0 on success, failure otherwise + * @param cxt - pointer to the hw-function [opaque to some] + * @param data - describes connection parameters + * @return int */ -int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, - struct qed_ll2_conn *p_params, - u16 rx_num_desc, - u16 tx_num_desc, - u8 *p_connection_handle); +int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data); /** * @brief qed_ll2_establish_connection - start previously * allocated LL2 queues pair * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param p_ptt * @param connection_handle LL2 connection's handle obtained from * qed_ll2_require_connection * * @return 0 on success, failure otherwise */ -int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); +int qed_ll2_establish_connection(void *cxt, u8 connection_handle); /** * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue. * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle LL2 connection's handle obtained from * qed_ll2_require_connection * @param addr rx (physical address) buffers to submit @@ -206,7 +166,7 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); * * @return 0 on success, failure otherwise */ -int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, +int qed_ll2_post_rx_buffer(void *cxt, u8 connection_handle, dma_addr_t addr, u16 buf_len, void *cookie, u8 notify_fw); @@ -215,53 +175,34 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, * @brief qed_ll2_prepare_tx_packet - request for start Tx BD * to prepare Tx packet submission to FW. * - * @param p_hwfn - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param num_of_bds a number of requested BD equals a number of - * fragments in Tx packet - * @param vlan VLAN to insert to packet (if insertion set) - * @param bd_flags - * @param l4_hdr_offset_w L4 Header Offset from start of packet - * (in words). 
This is needed if both l4_csum - * and ipv6_ext are set - * @param e_tx_dest indicates if the packet is to be transmitted via - * loopback or to the network - * @param first_frag - * @param first_frag_len - * @param cookie - * - * @param notify_fw + * @param cxt - pointer to the hw-function [opaque to some] + * @param connection_handle + * @param pkt - info regarding the tx packet + * @param notify_fw - issue doorbell to fw for this packet * * @return 0 on success, failure otherwise */ -int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, +int qed_ll2_prepare_tx_packet(void *cxt, u8 connection_handle, - u8 num_of_bds, - u16 vlan, - u8 bd_flags, - u16 l4_hdr_offset_w, - enum qed_ll2_tx_dest e_tx_dest, - enum qed_ll2_roce_flavor_type qed_roce_flavor, - dma_addr_t first_frag, - u16 first_frag_len, void *cookie, u8 notify_fw); + struct qed_ll2_tx_pkt_info *pkt, + bool notify_fw); /** * @brief qed_ll2_release_connection - releases resources * allocated for LL2 connection * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle LL2 connection's handle obtained from * qed_ll2_require_connection */ -void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); +void qed_ll2_release_connection(void *cxt, u8 connection_handle); /** * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill * Tx BD of BDs requested by * qed_ll2_prepare_tx_packet * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle LL2 connection's handle * obtained from * qed_ll2_require_connection @@ -270,7 +211,7 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); * * @return 0 on success, failure otherwise */ -int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, +int qed_ll2_set_fragment_of_tx_packet(void *cxt, u8 connection_handle, dma_addr_t addr, u16 nbytes); @@ -278,27 +219,27 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, * @brief qed_ll2_terminate_connection - stops Tx/Rx queues * * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle LL2 connection's handle * obtained from * qed_ll2_require_connection * * @return 0 on success, failure otherwise */ -int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); +int qed_ll2_terminate_connection(void *cxt, u8 connection_handle); /** * @brief qed_ll2_get_stats - get LL2 queue's statistics * * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle LL2 connection's handle obtained from * qed_ll2_require_connection * @param p_stats * * @return 0 on success, failure otherwise */ -int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, +int qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats); /** @@ -306,27 +247,24 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, * * @param p_hwfn * - * @return pointer to alocated qed_ll2_info or NULL + * @return int */ -struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn); +int qed_ll2_alloc(struct qed_hwfn *p_hwfn); /** * @brief qed_ll2_setup - Inits LL2 connections set * * @param p_hwfn - * @param p_ll2_connections * */ -void qed_ll2_setup(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_connections); +void qed_ll2_setup(struct qed_hwfn *p_hwfn); /** * @brief qed_ll2_free - Releases LL2 connections set * * @param p_hwfn - * @param p_ll2_connections * */ -void qed_ll2_free(struct qed_hwfn *p_hwfn, 
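The header changes above convert every LL2 entry point from struct qed_hwfn * to an opaque void *cxt, which is what lets the same function table be handed to an external consumer without exposing the hwfn definition. The pattern in miniature, with stand-in names (struct hwfn here is not the driver's type):

```c
#include <stdio.h>

/* Public view: callers hold only an opaque context pointer. */
int ll2_get_stats(void *cxt, unsigned char connection_handle);

/* Private side: recover the concrete type first, exactly like
 * "struct qed_hwfn *p_hwfn = cxt;" in the patch.
 */
struct hwfn {
	int id;
};

int ll2_get_stats(void *cxt, unsigned char connection_handle)
{
	struct hwfn *p_hwfn = cxt;

	printf("stats for hwfn %d, connection %u\n",
	       p_hwfn->id, connection_handle);
	return 0;
}

int main(void)
{
	struct hwfn fn = { .id = 0 };

	/* An external module would receive &fn only as an opaque cookie. */
	return ll2_get_stats(&fn, 1);
}
```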
- struct qed_ll2_info *p_ll2_connections); +void qed_ll2_free(struct qed_hwfn *p_hwfn); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 715b3aaf83ac..b11399606990 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -34,7 +34,6 @@ #include <linux/pci.h> #include <linux/kernel.h> #include <linux/slab.h> -#include <linux/version.h> #include <linux/delay.h> #include <asm/byteorder.h> #include <linux/dma-mapping.h> @@ -123,7 +122,7 @@ static void qed_free_pci(struct qed_dev *cdev) { struct pci_dev *pdev = cdev->pdev; - if (cdev->doorbells) + if (cdev->doorbells && cdev->db_size) iounmap(cdev->doorbells); if (cdev->regview) iounmap(cdev->regview); @@ -207,16 +206,24 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) goto err2; } - if (IS_PF(cdev)) { - cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); - cdev->db_size = pci_resource_len(cdev->pdev, 2); - cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); - if (!cdev->doorbells) { - DP_NOTICE(cdev, "Cannot map doorbell space\n"); - return -ENOMEM; + cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); + cdev->db_size = pci_resource_len(cdev->pdev, 2); + if (!cdev->db_size) { + if (IS_PF(cdev)) { + DP_NOTICE(cdev, "No Doorbell bar available\n"); + return -EINVAL; + } else { + return 0; } } + cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); + + if (!cdev->doorbells) { + DP_NOTICE(cdev, "Cannot map doorbell space\n"); + return -ENOMEM; + } + return 0; err2: @@ -230,6 +237,8 @@ err0: int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info) { + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_hw_info *hw_info = &p_hwfn->hw_info; struct qed_tunnel_info *tun = &cdev->tunnel; struct qed_ptt *ptt; @@ -253,11 +262,10 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; - dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality == - QED_PCI_ETH_ROCE); + dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); dev_info->dev_type = cdev->type; - ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); + ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); if (IS_PF(cdev)) { dev_info->fw_major = FW_MAJOR_VERSION; @@ -267,9 +275,10 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->mf_mode = cdev->mf_mode; dev_info->tx_switching = true; - if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support == - QED_WOL_SUPPORT_PME) + if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) dev_info->wol_support = true; + + dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, &dev_info->fw_minor, &dev_info->fw_rev, @@ -282,6 +291,9 @@ int qed_fill_dev_info(struct qed_dev *cdev, qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, &dev_info->mfw_rev, NULL); + qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, + &dev_info->mbi_version); + qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, &dev_info->flash_size); @@ -292,7 +304,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, &dev_info->mfw_rev, NULL); } - dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu; + dev_info->mtu = hw_info->mtu; return 0; } @@ -336,6 +348,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, if (!cdev) 
goto err0; + cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; cdev->protocol = params->protocol; if (params->is_vf) @@ -607,6 +620,18 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn) return rc; } +static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) +{ + /* Calling the disable function will make sure that any + * currently-running function is completed. The following call to the + * enable function makes this sequence a flush-like operation. + */ + if (p_hwfn->b_sp_dpc_enabled) { + tasklet_disable(p_hwfn->sp_dpc); + tasklet_enable(p_hwfn->sp_dpc); + } +} + void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) { struct qed_dev *cdev = p_hwfn->cdev; @@ -618,6 +643,8 @@ void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) synchronize_irq(cdev->int_params.msix_table[id].vector); else synchronize_irq(cdev->pdev->irq); + + qed_slowpath_tasklet_flush(p_hwfn); } static void qed_slowpath_irq_free(struct qed_dev *cdev) @@ -745,7 +772,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, for_each_hwfn(cdev, i) { memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); - cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt; + cdev->int_params.in.num_vectors += sb_cnt_info.cnt; cdev->int_params.in.num_vectors++; /* slowpath */ } @@ -763,7 +790,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, cdev->num_hwfns; if (!IS_ENABLED(CONFIG_QED_RDMA) || - QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE) + !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) return 0; for_each_hwfn(cdev, i) @@ -904,8 +931,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, /* In case we might support RDMA, don't allow qede to be greedy * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn. */ - if (QED_LEADING_HWFN(cdev)->hw_info.personality == - QED_PCI_ETH_ROCE) { + if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { u16 *num_cons; num_cons = ¶ms->eth_pf_params.num_cons; @@ -1112,17 +1138,13 @@ static int qed_slowpath_stop(struct qed_dev *cdev) return 0; } -static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE], - char ver_str[VER_SIZE]) +static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) { int i; memcpy(cdev->name, name, NAME_SIZE); for_each_hwfn(cdev, i) snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); - - memcpy(cdev->ver_str, ver_str, VER_SIZE); - cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; } static u32 qed_sb_init(struct qed_dev *cdev, @@ -1520,6 +1542,21 @@ static int qed_drain(struct qed_dev *cdev) return 0; } +static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, + u8 *buf, u16 len) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + int rc; + + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len); + qed_ptt_release(hwfn, ptt); + return rc; +} + static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) { *rx_coal = cdev->rx_coalesce_usecs; @@ -1676,7 +1713,7 @@ const struct qed_common_ops qed_common_ops_pass = { .probe = &qed_probe, .remove = &qed_remove, .set_power_state = &qed_set_power_state, - .set_id = &qed_set_id, + .set_name = &qed_set_name, .update_pf_params = &qed_update_pf_params, .slowpath_start = &qed_slowpath_start, .slowpath_stop = &qed_slowpath_stop, @@ -1697,6 +1734,7 @@ const struct qed_common_ops qed_common_ops_pass = { .dbg_all_data_size = &qed_dbg_all_data_size, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .nvm_get_image = 
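The qed_slowpath_tasklet_flush() helper added above relies on a documented tasklet property: tasklet_disable() does not return until any currently-running instance of the tasklet has finished, so a disable/enable pair behaves like a flush, and running it after synchronize_irq() quiesces both the hard IRQ and the bottom half. A kernel-style restatement with the reasoning as comments; field names follow the patch, and this is not a standalone program.

```c
static void slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->b_sp_dpc_enabled)
		return;		/* tasklet never armed; nothing to flush */

	/* tasklet_disable() bumps the disable count and then waits for a
	 * running tasklet to complete, so once it returns no older
	 * invocation can still be in flight.
	 */
	tasklet_disable(p_hwfn->sp_dpc);

	/* Re-enable so future interrupts schedule the DPC normally. */
	tasklet_enable(p_hwfn->sp_dpc);
}
```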
&qed_nvm_get_image, .get_coalesce = &qed_get_coalesce, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 7266b36a2655..9da91045d167 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -177,6 +177,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn) } kfree(p_hwfn->mcp_info); + p_hwfn->mcp_info = NULL; return 0; } @@ -1397,6 +1398,28 @@ static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ¶m); } +static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct public_func shmem_info; + u32 resp = 0, param = 0; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + + p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & + FUNC_MF_CFG_OV_STAG_MASK; + p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; + if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) && + (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) { + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan); + qed_sp_pf_update_stag(p_hwfn); + } + + /* Acknowledge the MFW */ + qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, + &resp, ¶m); +} + int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -1452,6 +1475,10 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_BW_UPDATE: qed_mcp_update_bw(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_S_TAG_UPDATE: + qed_mcp_update_stag(p_hwfn, p_ptt); + break; + break; default: DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); rc = -EINVAL; @@ -1522,6 +1549,36 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_mbi_ver) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + /* Read the address of the nvm_cfg */ + nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + if (!nvm_cfg_addr) { + DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); + return -EINVAL; + } + + /* Read the offset of nvm_cfg1 */ + nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + + mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, mbi_version); + *p_mbi_ver = qed_rd(p_hwfn, p_ptt, + mbi_ver_addr) & + (NVM_CFG1_GLOB_MBI_VERSION_0_MASK | + NVM_CFG1_GLOB_MBI_VERSION_1_MASK | + NVM_CFG1_GLOB_MBI_VERSION_2_MASK); + + return 0; +} + int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) { struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; @@ -1679,10 +1736,10 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n"); } - info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper | - (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32); - info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper | - (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32); + info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower | + (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32); + info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower | + (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32); info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); @@ -1770,8 +1827,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, return 0; } -int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 vf_id, u8 num) +static int 
+qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 vf_id, u8 num) { u32 resp = 0, param = 0, rc_param = 0; int rc; @@ -1801,6 +1859,36 @@ int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, return rc; } +static int +qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 num) +{ + u32 resp = 0, param = num, rc_param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, + param, &resp, &rc_param); + + if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { + DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n"); + rc = -EINVAL; + } else { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Requested 0x%02x MSI-x interrupts for VFs\n", num); + } + + return rc; +} + +int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 vf_id, u8 num) +{ + if (QED_IS_BB(p_hwfn->cdev)) + return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); + else + return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); +} + int qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -2222,6 +2310,95 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, return rc; } +static int +qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_nvm_images image_id, + struct qed_nvm_image_att *p_image_att) +{ + struct bist_nvm_image_att mfw_image_att; + enum nvm_image_type type; + u32 num_images, i; + int rc; + + /* Translate image_id into MFW definitions */ + switch (image_id) { + case QED_NVM_IMAGE_ISCSI_CFG: + type = NVM_TYPE_ISCSI_CFG; + break; + case QED_NVM_IMAGE_FCOE_CFG: + type = NVM_TYPE_FCOE_CFG; + break; + default: + DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", + image_id); + return -EINVAL; + } + + /* Learn number of images, then traverse and see if one fits */ + rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images); + if (rc || !num_images) + return -EINVAL; + + for (i = 0; i < num_images; i++) { + rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt, + &mfw_image_att, i); + if (rc) + return rc; + + if (type == mfw_image_att.image_type) + break; + } + if (i == num_images) { + DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, + "Failed to find nvram image of type %08x\n", + image_id); + return -EINVAL; + } + + p_image_att->start_addr = mfw_image_att.nvm_start_addr; + p_image_att->length = mfw_image_att.len; + + return 0; +} + +int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_nvm_images image_id, + u8 *p_buffer, u32 buffer_len) +{ + struct qed_nvm_image_att image_att; + int rc; + + memset(p_buffer, 0, buffer_len); + + rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att); + if (rc) + return rc; + + /* Validate sizes - both the image's and the supplied buffer's */ + if (image_att.length <= 4) { + DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, + "Image [%d] is too small - only %d bytes\n", + image_id, image_att.length); + return -EINVAL; + } + + /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */ + image_att.length -= 4; + + if (image_att.length > buffer_len) { + DP_VERBOSE(p_hwfn, + QED_MSG_STORAGE, + "Image [%d] is too big - %08x bytes where only %08x are available\n", + image_id, image_att.length, buffer_len); + return -ENOMEM; + } + + return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr, + p_buffer, image_att.length); +} + static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id) { enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; diff --git 
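qed_mcp_get_nvm_image() above trims a 4-byte CRC suffix from every image and checks both bounds before reading flash. The arithmetic is small enough to lift into a standalone check; the function and variable names here are illustrative, and the error values mirror the patch's -EINVAL/-ENOMEM returns.

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Validate an NVM image length against the caller's buffer, stripping
 * the trailing CRC the same way qed_mcp_get_nvm_image() does.
 */
static int check_image_len(uint32_t image_len, uint32_t buffer_len,
			   uint32_t *usable_len)
{
	if (image_len <= 4)		/* nothing left once the CRC goes */
		return -EINVAL;

	image_len -= 4;			/* drop the CRC suffix */

	if (image_len > buffer_len)	/* caller's buffer too small */
		return -ENOMEM;

	*usable_len = image_len;
	return 0;
}

int main(void)
{
	uint32_t n;

	printf("%d\n", check_image_len(4, 128, &n));	/* too small */
	printf("%d\n", check_image_len(100, 64, &n));	/* won't fit */
	printf("%d\n", check_image_len(100, 128, &n));	/* 0, n == 96 */
	return 0;
}
```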
a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 2b09b8545236..af03b3651411 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -256,6 +256,18 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, u32 *p_mfw_ver, u32 *p_running_bundle_id); /** + * @brief Get the MBI version value + * + * @param p_hwfn + * @param p_ptt + * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version. + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_mbi_ver); + +/** * @brief Get media type value of the port. * * @param cdev - qed dev pointer @@ -418,6 +430,27 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, */ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); +struct qed_nvm_image_att { + u32 start_addr; + u32 length; +}; + +/** + * @brief Allows reading a whole nvram image + * + * @param p_hwfn + * @param p_ptt + * @param image_id - image requested for reading + * @param p_buffer - allocated buffer into which to fill data + * @param buffer_len - length of the allocated buffer. + * + * @return 0 iff p_buffer now contains the nvram image. + */ +int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_nvm_images image_id, + u8 *p_buffer, u32 buffer_len); + /** * @brief Bist register test * @@ -482,7 +515,7 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) #define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ - ((_p_hwfn)->cdev->num_ports_in_engines * \ + ((_p_hwfn)->cdev->num_ports_in_engine * \ qed_device_num_engines((_p_hwfn)->cdev))) struct qed_mcp_info { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index db96670192c7..000636530111 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -99,7 +99,7 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, p_history->head_idx++; } -struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn) +int qed_ooo_alloc(struct qed_hwfn *p_hwfn) { u16 max_num_archipelagos = 0, cid_base; struct qed_ooo_info *p_ooo_info; @@ -109,7 +109,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn) if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) { DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info: unknown personality\n"); - return NULL; + return -EINVAL; } max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons; @@ -119,12 +119,12 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn) if (!max_num_archipelagos) { DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info: unknown amount of connections\n"); - return NULL; + return -EINVAL; } p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL); if (!p_ooo_info) - return NULL; + return -ENOMEM; p_ooo_info->cid_base = cid_base; p_ooo_info->max_num_archipelagos = max_num_archipelagos; @@ -164,7 +164,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn) p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES; - return p_ooo_info; + p_hwfn->p_ooo_info = p_ooo_info; + return 0; no_history_mem: kfree(p_ooo_info->p_archipelagos_mem); @@ -172,7 +173,7 @@ no_archipelagos_mem: kfree(p_ooo_info->p_isles_mem); no_isles_mem: kfree(p_ooo_info); - return NULL; + return -ENOMEM; } void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, @@ -249,19 
+250,23 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, &p_ooo_info->free_buffers_list); } -void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) +void qed_ooo_setup(struct qed_hwfn *p_hwfn) { - qed_ooo_release_all_isles(p_hwfn, p_ooo_info); - memset(p_ooo_info->ooo_history.p_cqes, 0, - p_ooo_info->ooo_history.num_of_cqes * + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + memset(p_hwfn->p_ooo_info->ooo_history.p_cqes, 0, + p_hwfn->p_ooo_info->ooo_history.num_of_cqes * sizeof(struct ooo_opaque)); - p_ooo_info->ooo_history.head_idx = 0; + p_hwfn->p_ooo_info->ooo_history.head_idx = 0; } -void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) +void qed_ooo_free(struct qed_hwfn *p_hwfn) { + struct qed_ooo_info *p_ooo_info = p_hwfn->p_ooo_info; struct qed_ooo_buffer *p_buffer; + if (!p_ooo_info) + return; + qed_ooo_release_all_isles(p_hwfn, p_ooo_info); while (!list_empty(&p_ooo_info->free_buffers_list)) { p_buffer = list_first_entry(&p_ooo_info->free_buffers_list, @@ -282,6 +287,7 @@ void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) kfree(p_ooo_info->p_archipelagos_mem); kfree(p_ooo_info->ooo_history.p_cqes); kfree(p_ooo_info); + p_hwfn->p_ooo_info = NULL; } void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h index 791ad0f8b759..e8ed40b848f5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -88,7 +88,11 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, struct ooo_opaque *p_cqe); -struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn); +int qed_ooo_alloc(struct qed_hwfn *p_hwfn); + +void qed_ooo_setup(struct qed_hwfn *p_hwfn); + +void qed_ooo_free(struct qed_hwfn *p_hwfn); void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, @@ -97,10 +101,6 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info); -void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info); - -void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info); - void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, struct qed_ooo_buffer *p_buffer); @@ -140,8 +140,14 @@ static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, struct ooo_opaque *p_cqe) {} -static inline struct qed_ooo_info *qed_ooo_alloc( - struct qed_hwfn *p_hwfn) { return NULL; } +static inline int qed_ooo_alloc(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn) {} + +static inline void qed_ooo_free(struct qed_hwfn *p_hwfn) {} static inline void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, @@ -152,12 +158,6 @@ static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) {} -static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn, - struct qed_ooo_info *p_ooo_info) {} - -static inline void qed_ooo_free(struct qed_hwfn *p_hwfn, - struct qed_ooo_info *p_ooo_info) {} - static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, struct qed_ooo_buffer *p_buffer) {} diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 
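The qed_ooo.h hunk keeps the usual config-gated shape: real prototypes when the OOO code is built, empty static inline stubs otherwise, so callers never need #ifdef, and the updated qed_ooo_alloc() stub returns -EINVAL to match the new int-returning API. The skeleton of that pattern with illustrative names (CONFIG_FOO and foo_* are not the driver's):

```c
#include <errno.h>

struct hwfn;	/* opaque to this header */

#ifdef CONFIG_FOO
int foo_alloc(struct hwfn *p_hwfn);
void foo_setup(struct hwfn *p_hwfn);
void foo_free(struct hwfn *p_hwfn);
#else
/* Feature compiled out: stubs keep call sites #ifdef-free, and the
 * alloc stub fails loudly, like the patch's qed_ooo_alloc() stub.
 */
static inline int foo_alloc(struct hwfn *p_hwfn) { return -EINVAL; }
static inline void foo_setup(struct hwfn *p_hwfn) {}
static inline void foo_free(struct hwfn *p_hwfn) {}
#endif
```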
434a164a76ed..5a90d69dc2f8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -80,7 +80,7 @@ static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* MFW doesn't support resource locking, first PF on the port * has lock ownership. */ - if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) + if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) return 0; DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); @@ -108,7 +108,7 @@ static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, ¶ms); if (rc == -EINVAL) { /* MFW doesn't support locking, first PF has lock ownership */ - if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) { + if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) { rc = 0; } else { DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c new file mode 100644 index 000000000000..6fb99518a61f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -0,0 +1,1787 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_int.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include <linux/qed/qed_rdma_if.h> +#include "qed_rdma.h" +#include "qed_roce.h" +#include "qed_sp.h" + + +int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 max_count, char *name) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count); + + bmap->max_count = max_count; + + bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long), + GFP_KERNEL); + if (!bmap->bitmap) + return -ENOMEM; + + snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); + return 0; +} + +int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 *id_num) +{ + *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count); + if (*id_num >= bmap->max_count) + return -EINVAL; + + __set_bit(*id_num, bmap->bitmap); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n", + bmap->name, *id_num); + + return 0; +} + +void qed_bmap_set_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 id_num) +{ + if (id_num >= bmap->max_count) + return; + + __set_bit(id_num, bmap->bitmap); +} + +void qed_bmap_release_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 id_num) +{ + bool b_acquired; + + if (id_num >= bmap->max_count) + return; + + b_acquired = test_and_clear_bit(id_num, bmap->bitmap); + if (!b_acquired) { + DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n", + bmap->name, id_num); + return; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n", + bmap->name, id_num); +} + +int qed_bmap_test_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 id_num) +{ + if (id_num >= bmap->max_count) + return -1; + + return test_bit(id_num, bmap->bitmap); +} + +static bool qed_bmap_is_empty(struct qed_bmap *bmap) +{ + return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count); +} + +u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) +{ + /* First sb id for RoCE is after all the l2 sb */ + return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; +} + +static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_rdma_start_in_params *params) +{ + struct qed_rdma_info *p_rdma_info; + u32 num_cons, num_tasks; + int rc = -ENOMEM; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); + + /* Allocate a struct with current pf rdma info */ + p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); + if (!p_rdma_info) + return rc; + + p_hwfn->p_rdma_info = p_rdma_info; + p_rdma_info->proto = PROTOCOLID_ROCE; + + num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, + NULL); + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + p_rdma_info->num_qps = num_cons; + else + p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */ + + num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); + + /* Each MR uses a single task */ + p_rdma_info->num_mrs = num_tasks; + + /* Queue zone lines are shared between RoCE and L2 in such a way 
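qed_rdma.c opens with a small bitmap id allocator (qed_rdma_bmap_alloc_id / qed_bmap_release_id) that every RDMA resource pool reuses. A userspace sketch of the same semantics, using one 64-bit word instead of the kernel's bitmap API (find_first_zero_bit and friends); names and the 64-id capacity are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

struct bmap {
	uint64_t bits;
	unsigned int max_count;
};

/* Find the first clear bit, claim it, hand its index out as the id. */
static int bmap_alloc_id(struct bmap *b, unsigned int *id)
{
	unsigned int i;

	for (i = 0; i < b->max_count; i++) {
		if (!(b->bits & (1ULL << i))) {
			b->bits |= 1ULL << i;
			*id = i;
			return 0;
		}
	}
	return -1;			/* bitmap exhausted */
}

/* Double release is detected, like qed_bmap_release_id() does. */
static void bmap_release_id(struct bmap *b, unsigned int id)
{
	if (id >= b->max_count || !(b->bits & (1ULL << id))) {
		printf("id %u already released\n", id);
		return;
	}
	b->bits &= ~(1ULL << id);
}

int main(void)
{
	struct bmap b = { .bits = 0, .max_count = 64 };
	unsigned int id;

	bmap_alloc_id(&b, &id);		/* id == 0 */
	bmap_release_id(&b, id);
	bmap_release_id(&b, id);	/* prints the double-free notice */
	return 0;
}
```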
that + * they can be used by each without obstructing the other. + */ + p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); + p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE); + + /* Allocate a struct with device params and fill it */ + p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); + if (!p_rdma_info->dev) + goto free_rdma_info; + + /* Allocate a struct with port params and fill it */ + p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); + if (!p_rdma_info->port) + goto free_rdma_dev; + + /* Allocate bit map for pd's */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS, + "PD"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate pd_map, rc = %d\n", + rc); + goto free_rdma_port; + } + + /* Allocate DPI bitmap */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, + p_hwfn->dpi_count, "DPI"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate DPI bitmap, rc = %d\n", rc); + goto free_pd_map; + } + + /* Allocate bitmap for cq's. The maximum number of CQs is bounded to + * twice the number of QPs. + */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, + p_rdma_info->num_qps * 2, "CQ"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate cq bitmap, rc = %d\n", rc); + goto free_dpi_map; + } + + /* Allocate bitmap for toggle bit for cq icids + * We toggle the bit every time we create or resize cq for a given icid. + * The maximum number of CQs is bounded to twice the number of QPs. + */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, + p_rdma_info->num_qps * 2, "Toggle"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate toogle bits, rc = %d\n", rc); + goto free_cq_map; + } + + /* Allocate bitmap for itids */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map, + p_rdma_info->num_mrs, "MR"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate itids bitmaps, rc = %d\n", rc); + goto free_toggle_map; + } + + /* Allocate bitmap for cids used for qps. */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons, + "CID"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate cid bitmap, rc = %d\n", rc); + goto free_tid_map; + } + + /* Allocate bitmap for cids used for responders/requesters. 
*/ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons, + "REAL_CID"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate real cid bitmap, rc = %d\n", rc); + goto free_cid_map; + } + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + rc = qed_iwarp_alloc(p_hwfn); + + if (rc) + goto free_cid_map; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); + return 0; + +free_cid_map: + kfree(p_rdma_info->cid_map.bitmap); +free_tid_map: + kfree(p_rdma_info->tid_map.bitmap); +free_toggle_map: + kfree(p_rdma_info->toggle_bits.bitmap); +free_cq_map: + kfree(p_rdma_info->cq_map.bitmap); +free_dpi_map: + kfree(p_rdma_info->dpi_map.bitmap); +free_pd_map: + kfree(p_rdma_info->pd_map.bitmap); +free_rdma_port: + kfree(p_rdma_info->port); +free_rdma_dev: + kfree(p_rdma_info->dev); +free_rdma_info: + kfree(p_rdma_info); + + return rc; +} + +void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, bool check) +{ + int weight = bitmap_weight(bmap->bitmap, bmap->max_count); + int last_line = bmap->max_count / (64 * 8); + int last_item = last_line * 8 + + DIV_ROUND_UP(bmap->max_count % (64 * 8), 64); + u64 *pmap = (u64 *)bmap->bitmap; + int line, item, offset; + u8 str_last_line[200] = { 0 }; + + if (!weight || !check) + goto end; + + DP_NOTICE(p_hwfn, + "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n", + bmap->name, bmap->max_count, weight); + + /* print aligned non-zero lines, if any */ + for (item = 0, line = 0; line < last_line; line++, item += 8) + if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8)) + DP_NOTICE(p_hwfn, + "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + line, + pmap[item], + pmap[item + 1], + pmap[item + 2], + pmap[item + 3], + pmap[item + 4], + pmap[item + 5], + pmap[item + 6], pmap[item + 7]); + + /* print last unaligned non-zero line, if any */ + if ((bmap->max_count % (64 * 8)) && + (bitmap_weight((unsigned long *)&pmap[item], + bmap->max_count - item * 64))) { + offset = sprintf(str_last_line, "line 0x%04x: ", line); + for (; item < last_item; item++) + offset += sprintf(str_last_line + offset, + "0x%016llx ", pmap[item]); + DP_NOTICE(p_hwfn, "%s\n", str_last_line); + } + +end: + kfree(bmap->bitmap); + bmap->bitmap = NULL; +} + +static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_resc_free(p_hwfn); + + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); + + kfree(p_rdma_info->port); + kfree(p_rdma_info->dev); + + kfree(p_rdma_info); +} + +static void qed_rdma_free(struct qed_hwfn *p_hwfn) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); + + qed_rdma_resc_free(p_hwfn); +} + +static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid) +{ + guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2; + guid[1] = p_hwfn->hw_info.hw_mac_addr[1]; + guid[2] = p_hwfn->hw_info.hw_mac_addr[2]; + guid[3] = 0xff; + guid[4] = 0xfe; + guid[5] = p_hwfn->hw_info.hw_mac_addr[3]; + guid[6] = p_hwfn->hw_info.hw_mac_addr[4]; + guid[7] = p_hwfn->hw_info.hw_mac_addr[5]; +} + +static void qed_rdma_init_events(struct qed_hwfn *p_hwfn, + struct 
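qed_rdma_get_guid() just below builds the node GUID from the port MAC by the modified EUI-64 rule: flip the universal/local bit of the first octet and splice 0xff, 0xfe into the middle. A standalone rendering of the same derivation:

```c
#include <stdint.h>
#include <stdio.h>

/* 48-bit MAC to 64-bit GUID, per modified EUI-64. */
static void mac_to_guid(const uint8_t mac[6], uint8_t guid[8])
{
	guid[0] = mac[0] ^ 2;	/* toggle the U/L bit */
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;		/* spliced-in marker bytes */
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x01, 0x02, 0x03 };
	uint8_t guid[8];
	int i;

	mac_to_guid(mac, guid);	/* 02:0e:1e:ff:fe:01:02:03 */
	for (i = 0; i < 8; i++)
		printf("%02x%s", guid[i], i < 7 ? ":" : "\n");
	return 0;
}
```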
qed_rdma_start_in_params *params) +{ + struct qed_rdma_events *events; + + events = &p_hwfn->p_rdma_info->events; + + events->unaffiliated_event = params->events->unaffiliated_event; + events->affiliated_event = params->events->affiliated_event; + events->context = params->events->context; +} + +static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params) +{ + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + struct qed_dev *cdev = p_hwfn->cdev; + u32 pci_status_control; + u32 num_qps; + + /* Vendor specific information */ + dev->vendor_id = cdev->vendor_id; + dev->vendor_part_id = cdev->device_id; + dev->hw_ver = 0; + dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | + (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); + + qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid); + dev->node_guid = dev->sys_image_guid; + + dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE, + RDMA_MAX_SGE_PER_RQ_WQE); + + if (cdev->rdma_max_sge) + dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); + + dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; + + dev->max_inline = (cdev->rdma_max_inline) ? + min_t(u32, cdev->rdma_max_inline, dev->max_inline) : + dev->max_inline; + + dev->max_wqe = QED_RDMA_MAX_WQE; + dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ); + + /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because + * it is up-aligned to 16 and then to ILT page size within qed cxt. + * This is OK in terms of ILT but we don't want to configure the FW + * above its abilities + */ + num_qps = ROCE_MAX_QPS; + num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps); + dev->max_qp = num_qps; + + /* CQs uses the same icids that QPs use hence they are limited by the + * number of icids. There are two icids per QP. + */ + dev->max_cq = num_qps * 2; + + /* The number of mrs is smaller by 1 since the first is reserved */ + dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1; + dev->max_mr_size = QED_RDMA_MAX_MR_SIZE; + + /* The maximum CQE capacity per CQ supported. 
+ * max number of cqes will be in two layer pbl, + * 8 is the pointer size in bytes + * 32 is the size of cq element in bytes + */ + if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS) + dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT; + else + dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT; + + dev->max_mw = 0; + dev->max_fmr = QED_RDMA_MAX_FMR; + dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8); + dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; + dev->max_pkey = QED_RDMA_MAX_P_KEY; + + dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / + (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); + dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / + RDMA_REQ_RD_ATOMIC_ELM_SIZE; + dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc * + p_hwfn->p_rdma_info->num_qps; + dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS; + dev->dev_ack_delay = QED_RDMA_ACK_DELAY; + dev->max_pd = RDMA_MAX_PDS; + dev->max_ah = p_hwfn->p_rdma_info->num_qps; + dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE); + + /* Set capablities */ + dev->dev_caps = 0; + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1); + + /* Check atomic operations support in PCI configuration space. */ + pci_read_config_dword(cdev->pdev, + cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2, + &pci_status_control); + + if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_init_devinfo(p_hwfn); +} + +static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_port *port = p_hwfn->p_rdma_info->port; + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + + port->port_state = p_hwfn->mcp_info->link_output.link_up ? 
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; + + port->max_msg_size = min_t(u64, + (dev->max_mr_mw_fmr_size * + p_hwfn->cdev->rdma_max_sge), + BIT(31)); + + port->pkey_bad_counter = 0; +} + +static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + int rc = 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n"); + p_hwfn->b_rdma_enabled_in_prs = false; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_init_hw(p_hwfn, p_ptt); + else + rc = qed_roce_init_hw(p_hwfn, p_ptt); + + return rc; +} + +static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params, + struct qed_ptt *p_ptt) +{ + struct rdma_init_func_ramrod_data *p_ramrod; + struct qed_rdma_cnq_params *p_cnq_pbl_list; + struct rdma_init_func_hdr *p_params_header; + struct rdma_cnq_params *p_cnq_params; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u32 cnq_id, sb_id; + u16 igu_sb_id; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n"); + + /* Save the number of cnqs for the function close ramrod */ + p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma; + else + p_ramrod = &p_ent->ramrod.roce_init_func.rdma; + + p_params_header = &p_ramrod->params_header; + p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, + QED_RDMA_CNQ_RAM); + p_params_header->num_cnqs = params->desired_cnq; + + if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS) + p_params_header->cq_ring_mode = 1; + else + p_params_header->cq_ring_mode = 0; + + for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) { + sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id); + igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); + p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id); + p_cnq_params = &p_ramrod->cnq_params[cnq_id]; + p_cnq_pbl_list = ¶ms->cnq_pbl_list[cnq_id]; + + p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi; + p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages; + + DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr, + p_cnq_pbl_list->pbl_ptr); + + /* we assume here that cnq_id and qz_offset are the same */ + p_cnq_params->queue_zone_num = + cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base + + cnq_id); + } + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->tid_map, itid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + if (rc) + goto out; + + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); +out: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + + /* The first DPI is reserved for the Kernel */ + __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap); + + /* Tid 0 will be used as the key for "reserved MR". + * The driver should allocate memory for it so it can be loaded but no + * ramrod should be passed on it. 
+ */ + qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey); + if (dev->reserved_lkey != RDMA_RESERVED_LKEY) { + DP_NOTICE(p_hwfn, + "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n"); + return -EINVAL; + } + + return 0; +} + +static int qed_rdma_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_rdma_start_in_params *params) +{ + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); + + spin_lock_init(&p_hwfn->p_rdma_info->lock); + + qed_rdma_init_devinfo(p_hwfn, params); + qed_rdma_init_port(p_hwfn); + qed_rdma_init_events(p_hwfn, params); + + rc = qed_rdma_reserve_lkey(p_hwfn); + if (rc) + return rc; + + rc = qed_rdma_init_hw(p_hwfn, p_ptt); + if (rc) + return rc; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + rc = qed_iwarp_setup(p_hwfn, p_ptt, params); + if (rc) + return rc; + } else { + rc = qed_roce_setup(p_hwfn); + if (rc) + return rc; + } + + return qed_rdma_start_fw(p_hwfn, params, p_ptt); +} + +int qed_rdma_stop(void *rdma_cxt) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct rdma_close_func_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + struct qed_ptt *p_ptt; + u32 ll2_ethertype_en; + int rc = -EBUSY; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n"); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n"); + return rc; + } + + /* Disable RoCE search */ + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); + p_hwfn->b_rdma_enabled_in_prs = false; + + qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); + + ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); + + qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, + (ll2_ethertype_en & 0xFFFE)); + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + rc = qed_iwarp_stop(p_hwfn, p_ptt); + if (rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + } else { + qed_roce_stop(p_hwfn); + } + + qed_ptt_release(p_hwfn, p_ptt); + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + /* Stop RoCE */ + rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + goto out; + + p_ramrod = &p_ent->ramrod.rdma_close_func; + + p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs; + p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + +out: + qed_rdma_free(p_hwfn); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_add_user(void *rdma_cxt, + struct qed_rdma_add_user_out_params *out_params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + u32 dpi_start_offset; + u32 returned_id = 0; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n"); + + /* Allocate DPI */ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, + &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + out_params->dpi = (u16)returned_id; + + /* Calculate the corresponding DPI address */ + dpi_start_offset = p_hwfn->dpi_start_offset; + + out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells + + dpi_start_offset + + ((out_params->dpi) * p_hwfn->dpi_size)); + + out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr + + dpi_start_offset + + ((out_params->dpi) * p_hwfn->dpi_size); + + out_params->dpi_size = p_hwfn->dpi_size; + 
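qed_rdma_add_user() above hands each user a doorbell window whose address is pure arithmetic: the doorbell BAR base, plus the fixed DPI region offset, plus the allocated DPI id scaled by the per-DPI window size. The same sum in isolation, with made-up values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t db_phys_addr = 0xf0000000;	/* doorbell BAR base */
	uint32_t dpi_start_offset = 0x1000;	/* start of the DPI region */
	uint32_t dpi_size = 0x200;		/* bytes per DPI window */
	uint16_t dpi = 3;			/* id from the DPI bitmap */

	uint64_t dpi_phys = db_phys_addr + dpi_start_offset +
			    (uint64_t)dpi * dpi_size;

	/* prints 0xf0001600 */
	printf("DPI %u doorbells at 0x%llx\n", dpi,
	       (unsigned long long)dpi_phys);
	return 0;
}
```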
out_params->wid_count = p_hwfn->wid_count; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc); + return rc; +} + +static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n"); + + /* Link may have changed */ + p_port->port_state = p_hwfn->mcp_info->link_output.link_up ? + QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; + + p_port->link_speed = p_hwfn->mcp_info->link_output.speed; + + p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE; + + return p_port; +} + +static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n"); + + /* Return struct with device parameters */ + return p_hwfn->p_rdma_info->dev; +} + +static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) +{ + struct qed_hwfn *p_hwfn; + u16 qz_num; + u32 addr; + + p_hwfn = (struct qed_hwfn *)rdma_cxt; + + if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) { + DP_NOTICE(p_hwfn, + "queue zone offset %d is too large (max is %d)\n", + qz_offset, p_hwfn->p_rdma_info->max_queue_zones); + return; + } + + qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; + addr = GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num); + + REG_WR16(p_hwfn, addr, prod); + + /* keep prod updates ordered */ + wmb(); +} + +static int qed_fill_rdma_dev_info(struct qed_dev *cdev, + struct qed_dev_rdma_info *info) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + + memset(info, 0, sizeof(*info)); + + info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ? + QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP; + + info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0); + + qed_fill_dev_info(cdev, &info->common); + + return 0; +} + +static int qed_rdma_get_sb_start(struct qed_dev *cdev) +{ + int feat_num; + + if (cdev->num_hwfns > 1) + feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE); + else + feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) * + cdev->num_hwfns; + + return feat_num; +} + +static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev) +{ + int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ); + int n_msix = cdev->int_params.rdma_msix_cnt; + + return min_t(int, n_cnq, n_msix); +} + +static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt) +{ + int limit = 0; + + /* Mark the fastpath as free/used */ + cdev->int_params.fp_initialized = cnt ? 
true : false; + + if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) { + DP_ERR(cdev, + "qed roce supports only MSI-X interrupts (detected %d).\n", + cdev->int_params.out.int_mode); + return -EINVAL; + } else if (cdev->int_params.fp_msix_cnt) { + limit = cdev->int_params.rdma_msix_cnt; + } + + if (!limit) + return -ENOMEM; + + return min_t(int, cnt, limit); +} + +static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info) +{ + memset(info, 0, sizeof(*info)); + + if (!cdev->int_params.fp_initialized) { + DP_INFO(cdev, + "Protocol driver requested interrupt information, but its support is not yet configured\n"); + return -EINVAL; + } + + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + int msix_base = cdev->int_params.rdma_msix_base; + + info->msix_cnt = cdev->int_params.rdma_msix_cnt; + info->msix = &cdev->int_params.msix_table[msix_base]; + + DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n", + info->msix_cnt, msix_base); + } + + return 0; +} + +static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + u32 returned_id; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n"); + + /* Allocates an unused protection domain */ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->pd_map, &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + *pd = (u16)returned_id; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc); + return rc; +} + +static void qed_rdma_free_pd(void *rdma_cxt, u16 pd) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd); + + /* Returns a previously allocated protection domain for reuse */ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static enum qed_rdma_toggle_bit +qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid) +{ + struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; + enum qed_rdma_toggle_bit toggle_bit; + u32 bmap_id; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid); + + /* the function toggle the bit that is related to a given icid + * and returns the new toggle bit's value + */ + bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto); + + spin_lock_bh(&p_info->lock); + toggle_bit = !test_and_change_bit(bmap_id, + p_info->toggle_bits.bitmap); + spin_unlock_bh(&p_info->lock); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n", + toggle_bit); + + return toggle_bit; +} + +static int qed_rdma_create_cq(void *rdma_cxt, + struct qed_rdma_create_cq_in_params *params, + u16 *icid) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; + struct rdma_create_cq_ramrod_data *p_ramrod; + enum qed_rdma_toggle_bit toggle_bit; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u32 returned_id, start_cid; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n", + params->cq_handle_hi, params->cq_handle_lo); + + /* Allocate icid */ + spin_lock_bh(&p_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id); + spin_unlock_bh(&p_info->lock); + + if (rc) { + DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc); + return rc; + } + + start_cid = qed_cxt_get_proto_cid_start(p_hwfn, + p_info->proto); + *icid = returned_id + start_cid; + + /* Check if icid 
requires a page allocation */ + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid); + if (rc) + goto err; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = *icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + /* Send create CQ ramrod */ + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_CREATE_CQ, + p_info->proto, &init_data); + if (rc) + goto err; + + p_ramrod = &p_ent->ramrod.rdma_create_cq; + + p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi); + p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo); + p_ramrod->dpi = cpu_to_le16(params->dpi); + p_ramrod->is_two_level_pbl = params->pbl_two_level; + p_ramrod->max_cqes = cpu_to_le32(params->cq_size); + DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr); + p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages); + p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) + + params->cnq_id; + p_ramrod->int_timeout = params->int_timeout; + + /* toggle the bit for every resize or create cq for a given icid */ + toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); + + p_ramrod->toggle_bit = toggle_bit; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) { + /* restore toggle bit */ + qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); + goto err; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc); + return rc; + +err: + /* release allocated icid */ + spin_lock_bh(&p_info->lock); + qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id); + spin_unlock_bh(&p_info->lock); + DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc); + + return rc; +} + +static int +qed_rdma_destroy_cq(void *rdma_cxt, + struct qed_rdma_destroy_cq_in_params *in_params, + struct qed_rdma_destroy_cq_out_params *out_params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct rdma_destroy_cq_output_params *p_ramrod_res; + struct rdma_destroy_cq_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + dma_addr_t ramrod_res_phys; + enum protocol_type proto; + int rc = -ENOMEM; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid); + + p_ramrod_res = + (struct rdma_destroy_cq_output_params *) + dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct rdma_destroy_cq_output_params), + &ramrod_res_phys, GFP_KERNEL); + if (!p_ramrod_res) { + DP_NOTICE(p_hwfn, + "qed destroy cq failed: cannot allocate memory (ramrod)\n"); + return rc; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = in_params->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + proto = p_hwfn->p_rdma_info->proto; + /* Send destroy CQ ramrod */ + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_DESTROY_CQ, + proto, &init_data); + if (rc) + goto err; + + p_ramrod = &p_ent->ramrod.rdma_destroy_cq; + DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num); + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct rdma_destroy_cq_output_params), + p_ramrod_res, ramrod_res_phys); + + /* Free icid */ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + + qed_bmap_release_id(p_hwfn, + &p_hwfn->p_rdma_info->cq_map, + (in_params->icid - + qed_cxt_get_proto_cid_start(p_hwfn, proto))); + + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + DP_VERBOSE(p_hwfn, 
QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc); + return rc; + +err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct rdma_destroy_cq_output_params), + p_ramrod_res, ramrod_res_phys); + + return rc; +} + +void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac) +{ + p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]); + p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]); + p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]); +} + +static int qed_rdma_query_qp(void *rdma_cxt, + struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + int rc = 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); + + /* The following fields are filled in from qp and not FW as they can't + * be modified by FW + */ + out_params->mtu = qp->mtu; + out_params->dest_qp = qp->dest_qp; + out_params->incoming_atomic_en = qp->incoming_atomic_en; + out_params->e2e_flow_control_en = qp->e2e_flow_control_en; + out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en; + out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en; + out_params->dgid = qp->dgid; + out_params->flow_label = qp->flow_label; + out_params->hop_limit_ttl = qp->hop_limit_ttl; + out_params->traffic_class_tos = qp->traffic_class_tos; + out_params->timeout = qp->ack_timeout; + out_params->rnr_retry = qp->rnr_retry_cnt; + out_params->retry_cnt = qp->retry_cnt; + out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer; + out_params->pkey_index = 0; + out_params->max_rd_atomic = qp->max_rd_atomic_req; + out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp; + out_params->sqd_async = qp->sqd_async; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_query_qp(qp, out_params); + else + rc = qed_roce_query_qp(p_hwfn, qp, out_params); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + int rc = 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + rc = qed_iwarp_destroy_qp(p_hwfn, qp); + else + rc = qed_roce_destroy_qp(p_hwfn, qp); + + /* free qp params struct */ + kfree(qp); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n"); + return rc; +} + +static struct qed_rdma_qp * +qed_rdma_create_qp(void *rdma_cxt, + struct qed_rdma_create_qp_in_params *in_params, + struct qed_rdma_create_qp_out_params *out_params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_rdma_qp *qp; + u8 max_stats_queues; + int rc; + + if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) { + DP_ERR(p_hwfn->cdev, + "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", + rdma_cxt, in_params, out_params); + return NULL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "qed rdma create qp called with qp_handle = %08x%08x\n", + in_params->qp_handle_hi, in_params->qp_handle_lo); + + /* Some sanity checks... */ + max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues; + if (in_params->stats_queue >= max_stats_queues) { + DP_ERR(p_hwfn->cdev, + "qed rdma create qp failed due to invalid statistics queue %d. 
maximum is %d\n", + in_params->stats_queue, max_stats_queues); + return NULL; + } + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + if (in_params->sq_num_pages * sizeof(struct regpair) > + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) { + DP_NOTICE(p_hwfn->cdev, + "Sq num pages: %d exceeds maximum\n", + in_params->sq_num_pages); + return NULL; + } + if (in_params->rq_num_pages * sizeof(struct regpair) > + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) { + DP_NOTICE(p_hwfn->cdev, + "Rq num pages: %d exceeds maximum\n", + in_params->rq_num_pages); + return NULL; + } + } + + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + return NULL; + + qp->cur_state = QED_ROCE_QP_STATE_RESET; + qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi); + qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo); + qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi); + qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo); + qp->use_srq = in_params->use_srq; + qp->signal_all = in_params->signal_all; + qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey; + qp->pd = in_params->pd; + qp->dpi = in_params->dpi; + qp->sq_cq_id = in_params->sq_cq_id; + qp->sq_num_pages = in_params->sq_num_pages; + qp->sq_pbl_ptr = in_params->sq_pbl_ptr; + qp->rq_cq_id = in_params->rq_cq_id; + qp->rq_num_pages = in_params->rq_num_pages; + qp->rq_pbl_ptr = in_params->rq_pbl_ptr; + qp->srq_id = in_params->srq_id; + qp->req_offloaded = false; + qp->resp_offloaded = false; + qp->e2e_flow_control_en = qp->use_srq ? false : true; + qp->stats_queue = in_params->stats_queue; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + rc = qed_iwarp_create_qp(p_hwfn, qp, out_params); + qp->qpid = qp->icid; + } else { + rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); + qp->qpid = ((0xFF << 16) | qp->icid); + } + + if (rc) { + kfree(qp); + return NULL; + } + + out_params->icid = qp->icid; + out_params->qp_id = qp->qpid; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc); + return qp; +} + +static int qed_rdma_modify_qp(void *rdma_cxt, + struct qed_rdma_qp *qp, + struct qed_rdma_modify_qp_in_params *params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + enum qed_roce_qp_state prev_state; + int rc = 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n", + qp->icid, params->new_state); + + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + return rc; + } + + if (GET_FIELD(params->modify_flags, + QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) { + qp->incoming_rdma_read_en = params->incoming_rdma_read_en; + qp->incoming_rdma_write_en = params->incoming_rdma_write_en; + qp->incoming_atomic_en = params->incoming_atomic_en; + } + + /* Update QP structure with the updated values */ + if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE)) + qp->roce_mode = params->roce_mode; + if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)) + qp->pkey = params->pkey; + if (GET_FIELD(params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN)) + qp->e2e_flow_control_en = params->e2e_flow_control_en; + if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP)) + qp->dest_qp = params->dest_qp; + if (GET_FIELD(params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) { + /* Indicates that the following parameters have changed: + * Traffic class, flow label, hop limit, source GID, + * destination GID, loopback indicator + */ + qp->traffic_class_tos = params->traffic_class_tos; + qp->flow_label = params->flow_label; + qp->hop_limit_ttl = 
params->hop_limit_ttl;
+
+		qp->sgid = params->sgid;
+		qp->dgid = params->dgid;
+		qp->udp_src_port = 0;
+		qp->vlan_id = params->vlan_id;
+		qp->mtu = params->mtu;
+		qp->lb_indication = params->lb_indication;
+		memcpy((u8 *)&qp->remote_mac_addr[0],
+		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
+		if (params->use_local_mac) {
+			memcpy((u8 *)&qp->local_mac_addr[0],
+			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
+		} else {
+			memcpy((u8 *)&qp->local_mac_addr[0],
+			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+		}
+	}
+	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
+		qp->rq_psn = params->rq_psn;
+	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
+		qp->sq_psn = params->sq_psn;
+	if (GET_FIELD(params->modify_flags,
+		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
+		qp->max_rd_atomic_req = params->max_rd_atomic_req;
+	if (GET_FIELD(params->modify_flags,
+		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
+		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
+	if (GET_FIELD(params->modify_flags,
+		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
+		qp->ack_timeout = params->ack_timeout;
+	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
+		qp->retry_cnt = params->retry_cnt;
+	if (GET_FIELD(params->modify_flags,
+		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
+		qp->rnr_retry_cnt = params->rnr_retry_cnt;
+	if (GET_FIELD(params->modify_flags,
+		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
+		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
+
+	qp->sqd_async = params->sqd_async;
+
+	prev_state = qp->cur_state;
+	if (GET_FIELD(params->modify_flags,
+		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
+		qp->cur_state = params->new_state;
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
+			   qp->cur_state);
+	}
+
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+		enum qed_iwarp_qp_state new_state =
+			qed_roce2iwarp_state(qp->cur_state);
+
+		rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
+	} else {
+		rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
+	return rc;
+}
+
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+		      struct qed_rdma_register_tid_in_params *params)
+{
+	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+	struct rdma_register_tid_ramrod_data *p_ramrod;
+	struct qed_sp_init_data init_data;
+	struct qed_spq_entry *p_ent;
+	enum rdma_tid_type tid_type;
+	u8 fw_return_code;
+	int rc;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
+
+	/* Get SPQ entry */
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
+				 p_hwfn->p_rdma_info->proto, &init_data);
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+		return rc;
+	}
+
+	if (p_hwfn->p_rdma_info->last_tid < params->itid)
+		p_hwfn->p_rdma_info->last_tid = params->itid;
+
+	p_ramrod = &p_ent->ramrod.rdma_register_tid;
+
+	p_ramrod->flags = 0;
+	SET_FIELD(p_ramrod->flags,
+		  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+		  params->pbl_two_level);
+
+	SET_FIELD(p_ramrod->flags,
+		  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
+
+	SET_FIELD(p_ramrod->flags,
+		  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+
+	/* Don't initialize D/C field, as it may override other bits.
*/ + if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr)) + SET_FIELD(p_ramrod->flags, + RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, + params->page_size_log - 12); + + SET_FIELD(p_ramrod->flags, + RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, + params->remote_read); + + SET_FIELD(p_ramrod->flags, + RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, + params->remote_write); + + SET_FIELD(p_ramrod->flags, + RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, + params->remote_atomic); + + SET_FIELD(p_ramrod->flags, + RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, + params->local_write); + + SET_FIELD(p_ramrod->flags, + RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read); + + SET_FIELD(p_ramrod->flags, + RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, + params->mw_bind); + + SET_FIELD(p_ramrod->flags1, + RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, + params->pbl_page_size_log - 12); + + SET_FIELD(p_ramrod->flags2, + RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr); + + switch (params->tid_type) { + case QED_RDMA_TID_REGISTERED_MR: + tid_type = RDMA_TID_REGISTERED_MR; + break; + case QED_RDMA_TID_FMR: + tid_type = RDMA_TID_FMR; + break; + case QED_RDMA_TID_MW_TYPE1: + tid_type = RDMA_TID_MW_TYPE1; + break; + case QED_RDMA_TID_MW_TYPE2A: + tid_type = RDMA_TID_MW_TYPE2A; + break; + default: + rc = -EINVAL; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + return rc; + } + SET_FIELD(p_ramrod->flags1, + RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type); + + p_ramrod->itid = cpu_to_le32(params->itid); + p_ramrod->key = params->key; + p_ramrod->pd = cpu_to_le16(params->pd); + p_ramrod->length_hi = (u8)(params->length >> 32); + p_ramrod->length_lo = DMA_LO_LE(params->length); + if (params->zbva) { + /* Lower 32 bits of the registered MR address. 
+ * In case of zero based MR, will hold FBO + */ + p_ramrod->va.hi = 0; + p_ramrod->va.lo = cpu_to_le32(params->fbo); + } else { + DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); + } + DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr); + + /* DIF */ + if (params->dif_enabled) { + SET_FIELD(p_ramrod->flags2, + RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); + DMA_REGPAIR_LE(p_ramrod->dif_error_addr, + params->dif_error_addr); + DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr); + } + + rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); + if (rc) + return rc; + + if (fw_return_code != RDMA_RETURN_OK) { + DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct rdma_deregister_tid_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + struct qed_ptt *p_ptt; + u8 fw_return_code; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + return rc; + } + + p_ramrod = &p_ent->ramrod.rdma_deregister_tid; + p_ramrod->itid = cpu_to_le32(itid); + + rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + return rc; + } + + if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) { + DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); + return -EINVAL; + } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) { + /* Bit indicating that the TID is in use and a nig drain is + * required before sending the ramrod again + */ + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + rc = -EBUSY; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to acquire PTT\n"); + return rc; + } + + rc = qed_mcp_drain(p_hwfn, p_ptt); + if (rc) { + qed_ptt_release(p_hwfn, p_ptt); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Drain failed\n"); + return rc; + } + + qed_ptt_release(p_hwfn, p_ptt); + + /* Resend the ramrod */ + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_DEREGISTER_MR, + p_hwfn->p_rdma_info->proto, + &init_data); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to init sp-element\n"); + return rc; + } + + rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Ramrod failed\n"); + return rc; + } + + if (fw_return_code != RDMA_RETURN_OK) { + DP_NOTICE(p_hwfn, "fw_return_code = %d\n", + fw_return_code); + return rc; + } + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc); + return rc; +} + +static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) +{ + return QED_LEADING_HWFN(cdev); +} + +bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) +{ + bool result; + + /* if rdma info has not been allocated, naturally there are no qps */ + if (!p_hwfn->p_rdma_info) + return false; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + if (!p_hwfn->p_rdma_info->cid_map.bitmap) + result = false; + else + result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + return result; +} + +void 
qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 val; + + val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1; + + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); + DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), + "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", + val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); +} + + +void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + p_hwfn->db_bar_no_edpm = true; + + qed_rdma_dpm_conf(p_hwfn, p_ptt); +} + +static int qed_rdma_start(void *rdma_cxt, + struct qed_rdma_start_in_params *params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_ptt *p_ptt; + int rc = -EBUSY; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "desired_cnq = %08x\n", params->desired_cnq); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + goto err; + + rc = qed_rdma_alloc(p_hwfn, p_ptt, params); + if (rc) + goto err1; + + rc = qed_rdma_setup(p_hwfn, p_ptt, params); + if (rc) + goto err2; + + qed_ptt_release(p_hwfn, p_ptt); + + return rc; + +err2: + qed_rdma_free(p_hwfn); +err1: + qed_ptt_release(p_hwfn, p_ptt); +err: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_init(struct qed_dev *cdev, + struct qed_rdma_start_in_params *params) +{ + return qed_rdma_start(QED_LEADING_HWFN(cdev), params); +} + +static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, + u8 *old_mac_address, + u8 *new_mac_address) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + int rc = 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(cdev, + "qed roce ll2 mac filter set: failed to acquire PTT\n"); + return -EINVAL; + } + + if (old_mac_address) + qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address); + if (new_mac_address) + rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address); + + qed_ptt_release(p_hwfn, p_ptt); + + if (rc) + DP_ERR(cdev, + "qed roce ll2 mac filter set: failed to add MAC filter\n"); + + return rc; +} + +static const struct qed_rdma_ops qed_rdma_ops_pass = { + .common = &qed_common_ops_pass, + .fill_dev_info = &qed_fill_rdma_dev_info, + .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, + .rdma_init = &qed_rdma_init, + .rdma_add_user = &qed_rdma_add_user, + .rdma_remove_user = &qed_rdma_remove_user, + .rdma_stop = &qed_rdma_stop, + .rdma_query_port = &qed_rdma_query_port, + .rdma_query_device = &qed_rdma_query_device, + .rdma_get_start_sb = &qed_rdma_get_sb_start, + .rdma_get_rdma_int = &qed_rdma_get_int, + .rdma_set_rdma_int = &qed_rdma_set_int, + .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, + .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, + .rdma_alloc_pd = &qed_rdma_alloc_pd, + .rdma_dealloc_pd = &qed_rdma_free_pd, + .rdma_create_cq = &qed_rdma_create_cq, + .rdma_destroy_cq = &qed_rdma_destroy_cq, + .rdma_create_qp = &qed_rdma_create_qp, + .rdma_modify_qp = &qed_rdma_modify_qp, + .rdma_query_qp = &qed_rdma_query_qp, + .rdma_destroy_qp = &qed_rdma_destroy_qp, + .rdma_alloc_tid = &qed_rdma_alloc_tid, + .rdma_free_tid = &qed_rdma_free_tid, + .rdma_register_tid = &qed_rdma_register_tid, + .rdma_deregister_tid = &qed_rdma_deregister_tid, + 
.ll2_acquire_connection = &qed_ll2_acquire_connection, + .ll2_establish_connection = &qed_ll2_establish_connection, + .ll2_terminate_connection = &qed_ll2_terminate_connection, + .ll2_release_connection = &qed_ll2_release_connection, + .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer, + .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet, + .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, + .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, + .ll2_get_stats = &qed_ll2_get_stats, + .iwarp_connect = &qed_iwarp_connect, + .iwarp_create_listen = &qed_iwarp_create_listen, + .iwarp_destroy_listen = &qed_iwarp_destroy_listen, + .iwarp_accept = &qed_iwarp_accept, + .iwarp_reject = &qed_iwarp_reject, + .iwarp_send_rtr = &qed_iwarp_send_rtr, +}; + +const struct qed_rdma_ops *qed_get_rdma_ops(void) +{ + return &qed_rdma_ops_pass; +} +EXPORT_SYMBOL(qed_get_rdma_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h new file mode 100644 index 000000000000..18ec9cbd84f5 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -0,0 +1,206 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _QED_RDMA_H +#define _QED_RDMA_H +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_if.h> +#include <linux/qed/qed_rdma_if.h> +#include "qed.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_iwarp.h" +#include "qed_roce.h" + +#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS) +#define QED_RDMA_MAX_P_KEY (1) +#define QED_RDMA_MAX_WQE (0x7FFF) +#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF) +#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000) +#define QED_RDMA_ACK_DELAY (15) +#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL) +#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS) +#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS) +/* Add 1 for header element */ +#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1) +#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE) +#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16) +#define QED_RDMA_MAX_SRQS (32 * 1024) + +#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1) +#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1) + +enum qed_rdma_toggle_bit { + QED_RDMA_TOGGLE_BIT_CLEAR = 0, + QED_RDMA_TOGGLE_BIT_SET = 1 +}; + +#define QED_RDMA_MAX_BMAP_NAME (10) +struct qed_bmap { + unsigned long *bitmap; + u32 max_count; + char name[QED_RDMA_MAX_BMAP_NAME]; +}; + +struct qed_rdma_info { + /* spin lock to protect bitmaps */ + spinlock_t lock; + + struct qed_bmap cq_map; + struct qed_bmap pd_map; + struct qed_bmap tid_map; + struct qed_bmap qp_map; + struct qed_bmap srq_map; + struct qed_bmap cid_map; + struct qed_bmap tcp_cid_map; + struct qed_bmap real_cid_map; + struct qed_bmap dpi_map; + struct qed_bmap toggle_bits; + struct qed_rdma_events events; + struct qed_rdma_device *dev; + struct qed_rdma_port *port; + u32 last_tid; + u8 num_cnqs; + u32 num_qps; + u32 num_mrs; + u16 queue_zone_base; + u16 max_queue_zones; + enum protocol_type proto; + struct qed_iwarp_info iwarp; +}; + +struct qed_rdma_qp { + struct regpair qp_handle; + struct regpair qp_handle_async; + u32 qpid; + u16 icid; + enum qed_roce_qp_state cur_state; + enum qed_iwarp_qp_state iwarp_state; + bool use_srq; + bool signal_all; + bool fmr_and_reserved_lkey; + + bool incoming_rdma_read_en; + bool incoming_rdma_write_en; + bool incoming_atomic_en; + bool e2e_flow_control_en; + + u16 pd; + u16 pkey; + u32 dest_qp; + u16 mtu; + u16 srq_id; + u8 traffic_class_tos; + u8 hop_limit_ttl; + u16 dpi; + u32 flow_label; + bool lb_indication; + u16 vlan_id; + u32 ack_timeout; + u8 retry_cnt; + u8 rnr_retry_cnt; + u8 min_rnr_nak_timer; + bool sqd_async; + union qed_gid sgid; + union qed_gid dgid; + enum roce_mode roce_mode; + u16 udp_src_port; + u8 stats_queue; + + /* requeseter */ + u8 max_rd_atomic_req; + u32 sq_psn; + u16 sq_cq_id; + u16 sq_num_pages; + dma_addr_t sq_pbl_ptr; + void *orq; + dma_addr_t orq_phys_addr; + u8 orq_num_pages; + bool req_offloaded; + + /* responder */ + u8 max_rd_atomic_resp; + u32 rq_psn; + u16 rq_cq_id; + u16 rq_num_pages; + dma_addr_t rq_pbl_ptr; + void *irq; + dma_addr_t irq_phys_addr; + u8 irq_num_pages; + bool resp_offloaded; + u32 cq_prod; + + u8 remote_mac_addr[6]; + u8 local_mac_addr[6]; + + void *shared_queue; + dma_addr_t shared_queue_phys_addr; + struct qed_iwarp_ep *ep; +}; + +#if IS_ENABLED(CONFIG_QED_RDMA) +void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +#else +static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} 
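/* Aside: these empty static-inline stubs are the usual kernel pattern for
 * compiling a feature out: callers invoke the eDPM hooks unconditionally,
 * and when CONFIG_QED_RDMA is unset the calls reduce to nothing at compile
 * time. A generic sketch of the pattern, with hypothetical names:
 *
 *	#if IS_ENABLED(CONFIG_FOO)
 *	void foo_setup(struct foo_dev *dev);
 *	#else
 *	static inline void foo_setup(struct foo_dev *dev) {}
 *	#endif
 */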
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) {} +#endif + +int +qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 max_count, char *name); + +void +qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, bool check); + +int +qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 *id_num); + +void +qed_bmap_set_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num); + +void +qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num); + +int +qed_bmap_test_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num); + +void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac); + +bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 1ae73b2d6d1e..0cdb4337b3a0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -242,6 +242,8 @@ 0x50196cUL #define NIG_REG_LLH_CLS_TYPE_DUALMODE \ 0x501964UL +#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL +#define NIG_REG_LLH_FUNC_TAG_VALUE 0x5019d0UL #define NIG_REG_LLH_FUNC_FILTER_VALUE \ 0x501a00UL #define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \ @@ -558,6 +560,7 @@ 0x2aae60UL #define PGLUE_B_REG_PF_BAR1_SIZE \ 0x2aae64UL +#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL #define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL #define PRS_REG_GRE_PROTOCOL 0x1f0734UL #define PRS_REG_VXLAN_PORT 0x1f0738UL @@ -592,15 +595,15 @@ #define QM_REG_WFQPFWEIGHT 0x2f4e80UL #define QM_REG_WFQVPWEIGHT 0x2fa000UL -#define PGLCS_REG_DBG_SELECT \ +#define PGLCS_REG_DBG_SELECT_K2 \ 0x001d14UL -#define PGLCS_REG_DBG_DWORD_ENABLE \ +#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \ 0x001d18UL -#define PGLCS_REG_DBG_SHIFT \ +#define PGLCS_REG_DBG_SHIFT_K2 \ 0x001d1cUL -#define PGLCS_REG_DBG_FORCE_VALID \ +#define PGLCS_REG_DBG_FORCE_VALID_K2 \ 0x001d20UL -#define PGLCS_REG_DBG_FORCE_FRAME \ +#define PGLCS_REG_DBG_FORCE_FRAME_K2 \ 0x001d24UL #define MISC_REG_RESET_PL_PDA_VMAIN_1 \ 0x008070UL @@ -612,7 +615,7 @@ 0x009050UL #define MISCS_REG_RESET_PL_HV \ 0x009060UL -#define MISCS_REG_RESET_PL_HV_2 \ +#define MISCS_REG_RESET_PL_HV_2_K2 \ 0x009150UL #define DMAE_REG_DBG_SELECT \ 0x00c510UL @@ -644,15 +647,15 @@ 0x0500b0UL #define GRC_REG_DBG_FORCE_FRAME \ 0x0500b4UL -#define UMAC_REG_DBG_SELECT \ +#define UMAC_REG_DBG_SELECT_K2 \ 0x051094UL -#define UMAC_REG_DBG_DWORD_ENABLE \ +#define UMAC_REG_DBG_DWORD_ENABLE_K2 \ 0x051098UL -#define UMAC_REG_DBG_SHIFT \ +#define UMAC_REG_DBG_SHIFT_K2 \ 0x05109cUL -#define UMAC_REG_DBG_FORCE_VALID \ +#define UMAC_REG_DBG_FORCE_VALID_K2 \ 0x0510a0UL -#define UMAC_REG_DBG_FORCE_FRAME \ +#define UMAC_REG_DBG_FORCE_FRAME_K2 \ 0x0510a4UL #define MCP2_REG_DBG_SELECT \ 0x052400UL @@ -924,15 +927,15 @@ 0x4c160cUL #define XYLD_REG_DBG_FORCE_FRAME \ 0x4c1610UL -#define YULD_REG_DBG_SELECT \ +#define YULD_REG_DBG_SELECT_BB_K2 \ 0x4c9600UL -#define YULD_REG_DBG_DWORD_ENABLE \ +#define YULD_REG_DBG_DWORD_ENABLE_BB_K2 \ 0x4c9604UL -#define YULD_REG_DBG_SHIFT \ +#define YULD_REG_DBG_SHIFT_BB_K2 \ 0x4c9608UL -#define YULD_REG_DBG_FORCE_VALID \ +#define YULD_REG_DBG_FORCE_VALID_BB_K2 \ 0x4c960cUL -#define YULD_REG_DBG_FORCE_FRAME \ +#define YULD_REG_DBG_FORCE_FRAME_BB_K2 \ 0x4c9610UL #define TMLD_REG_DBG_SELECT \ 0x4d1600UL @@ -994,35 +997,35 @@ 0x580710UL #define CDU_REG_DBG_FORCE_FRAME \ 0x580714UL -#define WOL_REG_DBG_SELECT \ +#define WOL_REG_DBG_SELECT_K2 \ 0x600140UL -#define 
WOL_REG_DBG_DWORD_ENABLE \ +#define WOL_REG_DBG_DWORD_ENABLE_K2 \ 0x600144UL -#define WOL_REG_DBG_SHIFT \ +#define WOL_REG_DBG_SHIFT_K2 \ 0x600148UL -#define WOL_REG_DBG_FORCE_VALID \ +#define WOL_REG_DBG_FORCE_VALID_K2 \ 0x60014cUL -#define WOL_REG_DBG_FORCE_FRAME \ +#define WOL_REG_DBG_FORCE_FRAME_K2 \ 0x600150UL -#define BMBN_REG_DBG_SELECT \ +#define BMBN_REG_DBG_SELECT_K2 \ 0x610140UL -#define BMBN_REG_DBG_DWORD_ENABLE \ +#define BMBN_REG_DBG_DWORD_ENABLE_K2 \ 0x610144UL -#define BMBN_REG_DBG_SHIFT \ +#define BMBN_REG_DBG_SHIFT_K2 \ 0x610148UL -#define BMBN_REG_DBG_FORCE_VALID \ +#define BMBN_REG_DBG_FORCE_VALID_K2 \ 0x61014cUL -#define BMBN_REG_DBG_FORCE_FRAME \ +#define BMBN_REG_DBG_FORCE_FRAME_K2 \ 0x610150UL -#define NWM_REG_DBG_SELECT \ +#define NWM_REG_DBG_SELECT_K2 \ 0x8000ecUL -#define NWM_REG_DBG_DWORD_ENABLE \ +#define NWM_REG_DBG_DWORD_ENABLE_K2 \ 0x8000f0UL -#define NWM_REG_DBG_SHIFT \ +#define NWM_REG_DBG_SHIFT_K2 \ 0x8000f4UL -#define NWM_REG_DBG_FORCE_VALID \ +#define NWM_REG_DBG_FORCE_VALID_K2 \ 0x8000f8UL -#define NWM_REG_DBG_FORCE_FRAME \ +#define NWM_REG_DBG_FORCE_FRAME_K2\ 0x8000fcUL #define PBF_REG_DBG_SELECT \ 0xd80060UL @@ -1244,35 +1247,35 @@ 0x1901534UL #define USEM_REG_DBG_FORCE_FRAME \ 0x1901538UL -#define NWS_REG_DBG_SELECT \ +#define NWS_REG_DBG_SELECT_K2 \ 0x700128UL -#define NWS_REG_DBG_DWORD_ENABLE \ +#define NWS_REG_DBG_DWORD_ENABLE_K2 \ 0x70012cUL -#define NWS_REG_DBG_SHIFT \ +#define NWS_REG_DBG_SHIFT_K2 \ 0x700130UL -#define NWS_REG_DBG_FORCE_VALID \ +#define NWS_REG_DBG_FORCE_VALID_K2 \ 0x700134UL -#define NWS_REG_DBG_FORCE_FRAME \ +#define NWS_REG_DBG_FORCE_FRAME_K2 \ 0x700138UL -#define MS_REG_DBG_SELECT \ +#define MS_REG_DBG_SELECT_K2 \ 0x6a0228UL -#define MS_REG_DBG_DWORD_ENABLE \ +#define MS_REG_DBG_DWORD_ENABLE_K2 \ 0x6a022cUL -#define MS_REG_DBG_SHIFT \ +#define MS_REG_DBG_SHIFT_K2 \ 0x6a0230UL -#define MS_REG_DBG_FORCE_VALID \ +#define MS_REG_DBG_FORCE_VALID_K2 \ 0x6a0234UL -#define MS_REG_DBG_FORCE_FRAME \ +#define MS_REG_DBG_FORCE_FRAME_K2 \ 0x6a0238UL -#define PCIE_REG_DBG_COMMON_SELECT \ +#define PCIE_REG_DBG_COMMON_SELECT_K2 \ 0x054398UL -#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \ +#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \ 0x05439cUL -#define PCIE_REG_DBG_COMMON_SHIFT \ +#define PCIE_REG_DBG_COMMON_SHIFT_K2 \ 0x0543a0UL -#define PCIE_REG_DBG_COMMON_FORCE_VALID \ +#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \ 0x0543a4UL -#define PCIE_REG_DBG_COMMON_FORCE_FRAME \ +#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \ 0x0543a8UL #define MISC_REG_RESET_PL_UA \ 0x008050UL @@ -1328,85 +1331,85 @@ 0x128170cUL #define UCM_REG_SM_TASK_CTX \ 0x1281710UL -#define XSEM_REG_SLOW_DBG_EMPTY \ +#define XSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1401140UL #define XSEM_REG_SYNC_DBG_EMPTY \ 0x1401160UL -#define XSEM_REG_SLOW_DBG_ACTIVE \ +#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1401400UL -#define XSEM_REG_SLOW_DBG_MODE \ +#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1401404UL -#define XSEM_REG_DBG_FRAME_MODE \ +#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1401408UL -#define XSEM_REG_DBG_MODE1_CFG \ +#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1401420UL #define XSEM_REG_FAST_MEMORY \ 0x1440000UL #define YSEM_REG_SYNC_DBG_EMPTY \ 0x1501160UL -#define YSEM_REG_SLOW_DBG_ACTIVE \ +#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1501400UL -#define YSEM_REG_SLOW_DBG_MODE \ +#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1501404UL -#define YSEM_REG_DBG_FRAME_MODE \ +#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1501408UL -#define YSEM_REG_DBG_MODE1_CFG \ +#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \ 
0x1501420UL #define YSEM_REG_FAST_MEMORY \ 0x1540000UL -#define PSEM_REG_SLOW_DBG_EMPTY \ +#define PSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1601140UL #define PSEM_REG_SYNC_DBG_EMPTY \ 0x1601160UL -#define PSEM_REG_SLOW_DBG_ACTIVE \ +#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1601400UL -#define PSEM_REG_SLOW_DBG_MODE \ +#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1601404UL -#define PSEM_REG_DBG_FRAME_MODE \ +#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1601408UL -#define PSEM_REG_DBG_MODE1_CFG \ +#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1601420UL #define PSEM_REG_FAST_MEMORY \ 0x1640000UL -#define TSEM_REG_SLOW_DBG_EMPTY \ +#define TSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1701140UL #define TSEM_REG_SYNC_DBG_EMPTY \ 0x1701160UL -#define TSEM_REG_SLOW_DBG_ACTIVE \ +#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1701400UL -#define TSEM_REG_SLOW_DBG_MODE \ +#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1701404UL -#define TSEM_REG_DBG_FRAME_MODE \ +#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1701408UL -#define TSEM_REG_DBG_MODE1_CFG \ +#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1701420UL #define TSEM_REG_FAST_MEMORY \ 0x1740000UL -#define MSEM_REG_SLOW_DBG_EMPTY \ +#define MSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1801140UL #define MSEM_REG_SYNC_DBG_EMPTY \ 0x1801160UL -#define MSEM_REG_SLOW_DBG_ACTIVE \ +#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1801400UL -#define MSEM_REG_SLOW_DBG_MODE \ +#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1801404UL -#define MSEM_REG_DBG_FRAME_MODE \ +#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1801408UL -#define MSEM_REG_DBG_MODE1_CFG \ +#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1801420UL #define MSEM_REG_FAST_MEMORY \ 0x1840000UL -#define USEM_REG_SLOW_DBG_EMPTY \ +#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1901140UL #define USEM_REG_SYNC_DBG_EMPTY \ 0x1901160UL -#define USEM_REG_SLOW_DBG_ACTIVE \ +#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1901400UL -#define USEM_REG_SLOW_DBG_MODE \ +#define USEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1901404UL -#define USEM_REG_DBG_FRAME_MODE \ +#define USEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1901408UL -#define USEM_REG_DBG_MODE1_CFG \ +#define USEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1901420UL #define USEM_REG_FAST_MEMORY \ 0x1940000UL @@ -1430,7 +1433,7 @@ 0x340800UL #define BRB_REG_BIG_RAM_DATA \ 0x341500UL -#define SEM_FAST_REG_STALL_0 \ +#define SEM_FAST_REG_STALL_0_BB_K2 \ 0x000488UL #define SEM_FAST_REG_STALLED \ 0x000494UL @@ -1480,37 +1483,37 @@ 4 #define MISC_REG_BLOCK_256B_EN \ 0x008c14UL -#define NWS_REG_NWS_CMU \ +#define NWS_REG_NWS_CMU_K2 \ 0x720000UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \ 0x000680UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \ 0x000684UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \ 0x0006c0UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \ 0x0006c4UL -#define MS_REG_MS_CMU \ +#define MS_REG_MS_CMU_K2 \ 0x6a4000UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \ 0x000208UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132 \ - 0x000210UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \ 0x00020cUL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \ + 0x000210UL +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \ 0x000214UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \ 
0x000208UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \ 0x00020cUL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \ 0x000210UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \ 0x000214UL -#define PHY_PCIE_REG_PHY0 \ +#define PHY_PCIE_REG_PHY0_K2 \ 0x620000UL -#define PHY_PCIE_REG_PHY1 \ +#define PHY_PCIE_REG_PHY1_K2 \ 0x624000UL #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL #define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL @@ -1557,9 +1560,16 @@ #define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL #define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL #define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL +#define PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE 0x2aae30UL #define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL #define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL +#define NIG_REG_TX_EDPM_CTRL 0x501f0cUL +#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN (0x1 << 0) +#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN_SHIFT 0 +#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN (0xff << 1) +#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1 + #define PRS_REG_SEARCH_GFT 0x1f11bcUL #define PRS_REG_CM_HDR_GFT 0x1f11c8UL #define PRS_REG_GFT_CAM 0x1f1100UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 56289d7cd306..fb7c2d1562ae 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -35,12 +35,7 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> -#include <linux/etherdevice.h> -#include <linux/if_ether.h> -#include <linux/if_vlan.h> #include <linux/io.h> -#include <linux/ip.h> -#include <linux/ipv6.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> @@ -49,10 +44,6 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> -#include <linux/tcp.h> -#include <linux/bitops.h> -#include <linux/qed/qed_roce_if.h> -#include <linux/qed/qed_roce_if.h> #include "qed.h" #include "qed_cxt.h" #include "qed_hsi.h" @@ -62,18 +53,21 @@ #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" -#include "qed_sp.h" +#include <linux/qed/qed_rdma_if.h> +#include "qed_rdma.h" #include "qed_roce.h" -#include "qed_ll2.h" +#include "qed_sp.h" static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); -void qed_roce_async_event(struct qed_hwfn *p_hwfn, - u8 fw_event_code, union rdma_eqe_data *rdma_data) +static int +qed_roce_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, + u16 echo, union event_ring_data *data, u8 fw_return_code) { if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { u16 icid = - (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid); + (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid); /* icid release in this async event can occur only if the icid * was offloaded to the FW. 
In case it wasn't offloaded this is @@ -85,290 +79,15 @@ void qed_roce_async_event(struct qed_hwfn *p_hwfn, events->affiliated_event(p_hwfn->p_rdma_info->events.context, fw_event_code, - &rdma_data->async_handle); - } -} - -static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, - struct qed_bmap *bmap, u32 max_count, char *name) -{ - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count); - - bmap->max_count = max_count; - - bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long), - GFP_KERNEL); - if (!bmap->bitmap) { - DP_NOTICE(p_hwfn, - "qed bmap alloc failed: cannot allocate memory (bitmap)\n"); - return -ENOMEM; + (void *)&data->rdma_data.async_handle); } - snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); - return 0; -} - -static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, - struct qed_bmap *bmap, u32 *id_num) -{ - *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count); - if (*id_num >= bmap->max_count) - return -EINVAL; - - __set_bit(*id_num, bmap->bitmap); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n", - bmap->name, *id_num); - return 0; } -static void qed_bmap_set_id(struct qed_hwfn *p_hwfn, - struct qed_bmap *bmap, u32 id_num) -{ - if (id_num >= bmap->max_count) - return; - - __set_bit(id_num, bmap->bitmap); -} - -static void qed_bmap_release_id(struct qed_hwfn *p_hwfn, - struct qed_bmap *bmap, u32 id_num) -{ - bool b_acquired; - - if (id_num >= bmap->max_count) - return; - - b_acquired = test_and_clear_bit(id_num, bmap->bitmap); - if (!b_acquired) { - DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n", - bmap->name, id_num); - return; - } - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n", - bmap->name, id_num); -} - -static int qed_bmap_test_id(struct qed_hwfn *p_hwfn, - struct qed_bmap *bmap, u32 id_num) -{ - if (id_num >= bmap->max_count) - return -1; - - return test_bit(id_num, bmap->bitmap); -} - -static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) -{ - /* First sb id for RoCE is after all the l2 sb */ - return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; -} - -static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_rdma_start_in_params *params) -{ - struct qed_rdma_info *p_rdma_info; - u32 num_cons, num_tasks; - int rc = -ENOMEM; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); - - /* Allocate a struct with current pf rdma info */ - p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); - if (!p_rdma_info) { - DP_NOTICE(p_hwfn, - "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n", - rc); - return rc; - } - - p_hwfn->p_rdma_info = p_rdma_info; - p_rdma_info->proto = PROTOCOLID_ROCE; - - num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, - NULL); - - p_rdma_info->num_qps = num_cons / 2; - - num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); - - /* Each MR uses a single task */ - p_rdma_info->num_mrs = num_tasks; - - /* Queue zone lines are shared between RoCE and L2 in such a way that - * they can be used by each without obstructing the other. - */ - p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); - p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE); - - /* Allocate a struct with device params and fill it */ - p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); - if (!p_rdma_info->dev) { - DP_NOTICE(p_hwfn, - "qed rdma alloc failed: cannot allocate memory (rdma info dev). 
rc = %d\n", - rc); - goto free_rdma_info; - } - - /* Allocate a struct with port params and fill it */ - p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); - if (!p_rdma_info->port) { - DP_NOTICE(p_hwfn, - "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n", - rc); - goto free_rdma_dev; - } - - /* Allocate bit map for pd's */ - rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS, - "PD"); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Failed to allocate pd_map, rc = %d\n", - rc); - goto free_rdma_port; - } - - /* Allocate DPI bitmap */ - rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, - p_hwfn->dpi_count, "DPI"); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Failed to allocate DPI bitmap, rc = %d\n", rc); - goto free_pd_map; - } - - /* Allocate bitmap for cq's. The maximum number of CQs is bounded to - * twice the number of QPs. - */ - rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, - p_rdma_info->num_qps * 2, "CQ"); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Failed to allocate cq bitmap, rc = %d\n", rc); - goto free_dpi_map; - } - - /* Allocate bitmap for toggle bit for cq icids - * We toggle the bit every time we create or resize cq for a given icid. - * The maximum number of CQs is bounded to twice the number of QPs. - */ - rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, - p_rdma_info->num_qps * 2, "Toggle"); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Failed to allocate toogle bits, rc = %d\n", rc); - goto free_cq_map; - } - - /* Allocate bitmap for itids */ - rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map, - p_rdma_info->num_mrs, "MR"); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Failed to allocate itids bitmaps, rc = %d\n", rc); - goto free_toggle_map; - } - - /* Allocate bitmap for cids used for qps. */ - rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons, - "CID"); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Failed to allocate cid bitmap, rc = %d\n", rc); - goto free_tid_map; - } - - /* Allocate bitmap for cids used for responders/requesters. 
*/ - rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons, - "REAL_CID"); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Failed to allocate real cid bitmap, rc = %d\n", rc); - goto free_cid_map; - } - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); - return 0; - -free_cid_map: - kfree(p_rdma_info->cid_map.bitmap); -free_tid_map: - kfree(p_rdma_info->tid_map.bitmap); -free_toggle_map: - kfree(p_rdma_info->toggle_bits.bitmap); -free_cq_map: - kfree(p_rdma_info->cq_map.bitmap); -free_dpi_map: - kfree(p_rdma_info->dpi_map.bitmap); -free_pd_map: - kfree(p_rdma_info->pd_map.bitmap); -free_rdma_port: - kfree(p_rdma_info->port); -free_rdma_dev: - kfree(p_rdma_info->dev); -free_rdma_info: - kfree(p_rdma_info); - - return rc; -} - -static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, - struct qed_bmap *bmap, bool check) -{ - int weight = bitmap_weight(bmap->bitmap, bmap->max_count); - int last_line = bmap->max_count / (64 * 8); - int last_item = last_line * 8 + - DIV_ROUND_UP(bmap->max_count % (64 * 8), 64); - u64 *pmap = (u64 *)bmap->bitmap; - int line, item, offset; - u8 str_last_line[200] = { 0 }; - - if (!weight || !check) - goto end; - - DP_NOTICE(p_hwfn, - "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n", - bmap->name, bmap->max_count, weight); - - /* print aligned non-zero lines, if any */ - for (item = 0, line = 0; line < last_line; line++, item += 8) - if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8)) - DP_NOTICE(p_hwfn, - "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", - line, - pmap[item], - pmap[item + 1], - pmap[item + 2], - pmap[item + 3], - pmap[item + 4], - pmap[item + 5], - pmap[item + 6], pmap[item + 7]); - - /* print last unaligned non-zero line, if any */ - if ((bmap->max_count % (64 * 8)) && - (bitmap_weight((unsigned long *)&pmap[item], - bmap->max_count - item * 64))) { - offset = sprintf(str_last_line, "line 0x%04x: ", line); - for (; item < last_item; item++) - offset += sprintf(str_last_line + offset, - "0x%016llx ", pmap[item]); - DP_NOTICE(p_hwfn, "%s\n", str_last_line); - } - -end: - kfree(bmap->bitmap); - bmap->bitmap = NULL; -} - -static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) +void qed_roce_stop(struct qed_hwfn *p_hwfn) { struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map; - struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; int wait_count = 0; /* when destroying a_RoCE QP the control is returned to the user after @@ -383,780 +102,7 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) break; } } - - qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1); - qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1); - qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1); - qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); - qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); - qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); - - kfree(p_rdma_info->port); - kfree(p_rdma_info->dev); - - kfree(p_rdma_info); -} - -static void qed_rdma_free(struct qed_hwfn *p_hwfn) -{ - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); - - qed_rdma_resc_free(p_hwfn); -} - -static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid) -{ - guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2; - guid[1] = p_hwfn->hw_info.hw_mac_addr[1]; - guid[2] = p_hwfn->hw_info.hw_mac_addr[2]; - guid[3] = 0xff; - guid[4] = 0xfe; - guid[5] = p_hwfn->hw_info.hw_mac_addr[3]; - guid[6] = p_hwfn->hw_info.hw_mac_addr[4]; - 
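/* Aside: the guid[] assignments around this point implement the standard
 * MAC-to-EUI-64 mapping used for RDMA node GUIDs: flip the universal/local
 * bit (0x02) of the first MAC octet and splice 0xff, 0xfe between the OUI
 * and the device-specific bytes of the MAC.
 */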
guid[7] = p_hwfn->hw_info.hw_mac_addr[5]; -} - -static void qed_rdma_init_events(struct qed_hwfn *p_hwfn, - struct qed_rdma_start_in_params *params) -{ - struct qed_rdma_events *events; - - events = &p_hwfn->p_rdma_info->events; - - events->unaffiliated_event = params->events->unaffiliated_event; - events->affiliated_event = params->events->affiliated_event; - events->context = params->events->context; -} - -static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, - struct qed_rdma_start_in_params *params) -{ - struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; - struct qed_dev *cdev = p_hwfn->cdev; - u32 pci_status_control; - u32 num_qps; - - /* Vendor specific information */ - dev->vendor_id = cdev->vendor_id; - dev->vendor_part_id = cdev->device_id; - dev->hw_ver = 0; - dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | - (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); - - qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid); - dev->node_guid = dev->sys_image_guid; - - dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE, - RDMA_MAX_SGE_PER_RQ_WQE); - - if (cdev->rdma_max_sge) - dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); - - dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; - - dev->max_inline = (cdev->rdma_max_inline) ? - min_t(u32, cdev->rdma_max_inline, dev->max_inline) : - dev->max_inline; - - dev->max_wqe = QED_RDMA_MAX_WQE; - dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ); - - /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because - * it is up-aligned to 16 and then to ILT page size within qed cxt. - * This is OK in terms of ILT but we don't want to configure the FW - * above its abilities - */ - num_qps = ROCE_MAX_QPS; - num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps); - dev->max_qp = num_qps; - - /* CQs uses the same icids that QPs use hence they are limited by the - * number of icids. There are two icids per QP. - */ - dev->max_cq = num_qps * 2; - - /* The number of mrs is smaller by 1 since the first is reserved */ - dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1; - dev->max_mr_size = QED_RDMA_MAX_MR_SIZE; - - /* The maximum CQE capacity per CQ supported. 
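
The max_mr_mw_fmr_pbl sizing a few lines below assumes a two-level page-buffer list: a root page of 8-byte pointers, each referencing a leaf page of 8-byte pointers to data pages. With 4 KiB pages that gives 512 * 512 = 262144 data pages, i.e. a 1 GiB ceiling per MR. A minimal sketch of the arithmetic (the EX_ names are illustrative, not qed API):

	#include <linux/types.h>

	#define EX_PBL_PTR_SIZE	8	/* bytes per PBL pointer */

	static u64 ex_two_level_pbl_bytes(u32 page_size)
	{
		u64 ptrs = page_size / EX_PBL_PTR_SIZE;	/* 4096 -> 512 */

		/* 512 leaf pages * 512 data pages * 4096 bytes = 1 GiB */
		return ptrs * ptrs * page_size;
	}
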
- * max number of cqes will be in two layer pbl,
-	 * 8 is the pointer size in bytes
-	 * 32 is the size of cq element in bytes
-	 */
-	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
-		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
-	else
-		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
-
-	dev->max_mw = 0;
-	dev->max_fmr = QED_RDMA_MAX_FMR;
-	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
-	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
-	dev->max_pkey = QED_RDMA_MAX_P_KEY;
-
-	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
-					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
-	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
-					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
-	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
-					   p_hwfn->p_rdma_info->num_qps;
-	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
-	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
-	dev->max_pd = RDMA_MAX_PDS;
-	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
-	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
-
-	/* Set capabilities */
-	dev->dev_caps = 0;
-	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
-	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
-	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
-	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
-	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
-	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
-	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
-	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
-
-	/* Check atomic operations support in PCI configuration space. */
-	pci_read_config_dword(cdev->pdev,
-			      cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
-			      &pci_status_control);
-
-	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
-		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
-}
-
-static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
-{
-	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
-	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
-
-	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
-			   QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
-
-	port->max_msg_size = min_t(u64,
-				   (dev->max_mr_mw_fmr_size *
-				    p_hwfn->cdev->rdma_max_sge),
-				   BIT(31));
-
-	port->pkey_bad_counter = 0;
-}
-
-static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
-	u32 ll2_ethertype_en;
-
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
-	p_hwfn->b_rdma_enabled_in_prs = false;
-
-	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
-
-	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
-
-	/* We delay writing to this reg until first cid is allocated.
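
The capability probe above derives QED_RDMA_DEV_CAP_ATOMIC_OP from PCIe Device Control 2; as written it tests the LTR-enable bit, PCI_EXP_DEVCTL2_LTR_EN. One way to read the same register through the kernel's generic PCIe capability accessors, as a sketch (the helper name is hypothetical):

	#include <linux/pci.h>

	static bool ex_pcie_ltr_enabled(struct pci_dev *pdev)
	{
		u16 devctl2 = 0;

		/* resolves the capability offset internally */
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &devctl2);

		return !!(devctl2 & PCI_EXP_DEVCTL2_LTR_EN);
	}
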
See - * qed_cxt_dynamic_ilt_alloc function for more details - */ - ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); - qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, - (ll2_ethertype_en | 0x01)); - - if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) { - DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n"); - return -EINVAL; - } - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n"); - return 0; -} - -static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, - struct qed_rdma_start_in_params *params, - struct qed_ptt *p_ptt) -{ - struct rdma_init_func_ramrod_data *p_ramrod; - struct qed_rdma_cnq_params *p_cnq_pbl_list; - struct rdma_init_func_hdr *p_params_header; - struct rdma_cnq_params *p_cnq_params; - struct qed_sp_init_data init_data; - struct qed_spq_entry *p_ent; - u32 cnq_id, sb_id; - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n"); - - /* Save the number of cnqs for the function close ramrod */ - p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq; - - /* Get SPQ entry */ - memset(&init_data, 0, sizeof(init_data)); - init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; - init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - - rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT, - p_hwfn->p_rdma_info->proto, &init_data); - if (rc) - return rc; - - p_ramrod = &p_ent->ramrod.roce_init_func.rdma; - - p_params_header = &p_ramrod->params_header; - p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, - QED_RDMA_CNQ_RAM); - p_params_header->num_cnqs = params->desired_cnq; - - if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS) - p_params_header->cq_ring_mode = 1; - else - p_params_header->cq_ring_mode = 0; - - for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) { - sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id); - p_cnq_params = &p_ramrod->cnq_params[cnq_id]; - p_cnq_pbl_list = ¶ms->cnq_pbl_list[cnq_id]; - p_cnq_params->sb_num = - cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id); - - p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi; - p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages; - - DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr, - p_cnq_pbl_list->pbl_ptr); - - /* we assume here that cnq_id and qz_offset are the same */ - p_cnq_params->queue_zone_num = - cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base + - cnq_id); - } - - return qed_spq_post(p_hwfn, p_ent, NULL); -} - -static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); - - spin_lock_bh(&p_hwfn->p_rdma_info->lock); - rc = qed_rdma_bmap_alloc_id(p_hwfn, - &p_hwfn->p_rdma_info->tid_map, itid); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); - if (rc) - goto out; - - rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); -out: - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); - return rc; -} - -static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) -{ - struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; - - /* The first DPI is reserved for the Kernel */ - __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap); - - /* Tid 0 will be used as the key for "reserved MR". - * The driver should allocate memory for it so it can be loaded but no - * ramrod should be passed on it. 
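
qed_rdma_alloc_tid() above takes the rdma_info spinlock, pulls a free index out of the TID bitmap, and only then performs the dynamic ILT allocation. The bitmap step reduces to the classic find-first-zero-bit allocator; a simplified sketch without the qed_bmap wrapper:

	#include <linux/bitmap.h>

	static int ex_bmap_alloc_id(unsigned long *bitmap, u32 max_count, u32 *id)
	{
		u32 bit = find_first_zero_bit(bitmap, max_count);

		if (bit >= max_count)
			return -EINVAL;	/* map exhausted */

		__set_bit(bit, bitmap);	/* caller holds the map's lock */
		*id = bit;

		return 0;
	}
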
- */ - qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey); - if (dev->reserved_lkey != RDMA_RESERVED_LKEY) { - DP_NOTICE(p_hwfn, - "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n"); - return -EINVAL; - } - - return 0; -} - -static int qed_rdma_setup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_rdma_start_in_params *params) -{ - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); - - spin_lock_init(&p_hwfn->p_rdma_info->lock); - - qed_rdma_init_devinfo(p_hwfn, params); - qed_rdma_init_port(p_hwfn); - qed_rdma_init_events(p_hwfn, params); - - rc = qed_rdma_reserve_lkey(p_hwfn); - if (rc) - return rc; - - rc = qed_rdma_init_hw(p_hwfn, p_ptt); - if (rc) - return rc; - - return qed_rdma_start_fw(p_hwfn, params, p_ptt); -} - -static int qed_rdma_stop(void *rdma_cxt) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - struct rdma_close_func_ramrod_data *p_ramrod; - struct qed_sp_init_data init_data; - struct qed_spq_entry *p_ent; - struct qed_ptt *p_ptt; - u32 ll2_ethertype_en; - int rc = -EBUSY; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n"); - - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n"); - return rc; - } - - /* Disable RoCE search */ - qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); - p_hwfn->b_rdma_enabled_in_prs = false; - - qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); - - ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); - - qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, - (ll2_ethertype_en & 0xFFFE)); - - qed_ptt_release(p_hwfn, p_ptt); - - /* Get SPQ entry */ - memset(&init_data, 0, sizeof(init_data)); - init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; - init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - - /* Stop RoCE */ - rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE, - p_hwfn->p_rdma_info->proto, &init_data); - if (rc) - goto out; - - p_ramrod = &p_ent->ramrod.rdma_close_func; - - p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs; - p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM); - - rc = qed_spq_post(p_hwfn, p_ent, NULL); - -out: - qed_rdma_free(p_hwfn); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc); - return rc; -} - -static int qed_rdma_add_user(void *rdma_cxt, - struct qed_rdma_add_user_out_params *out_params) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - u32 dpi_start_offset; - u32 returned_id = 0; - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n"); - - /* Allocate DPI */ - spin_lock_bh(&p_hwfn->p_rdma_info->lock); - rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, - &returned_id); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); - - out_params->dpi = (u16)returned_id; - - /* Calculate the corresponding DPI address */ - dpi_start_offset = p_hwfn->dpi_start_offset; - - out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells + - dpi_start_offset + - ((out_params->dpi) * p_hwfn->dpi_size)); - - out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr + - dpi_start_offset + - ((out_params->dpi) * p_hwfn->dpi_size); - - out_params->dpi_size = p_hwfn->dpi_size; - out_params->wid_count = p_hwfn->wid_count; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc); - return rc; -} - -static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA 
Query port\n"); - - /* Link may have changed */ - p_port->port_state = p_hwfn->mcp_info->link_output.link_up ? - QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; - - p_port->link_speed = p_hwfn->mcp_info->link_output.speed; - - p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE; - - return p_port; -} - -static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n"); - - /* Return struct with device parameters */ - return p_hwfn->p_rdma_info->dev; -} - -static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); - - spin_lock_bh(&p_hwfn->p_rdma_info->lock); - qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); -} - -static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) -{ - struct qed_hwfn *p_hwfn; - u16 qz_num; - u32 addr; - - p_hwfn = (struct qed_hwfn *)rdma_cxt; - - if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) { - DP_NOTICE(p_hwfn, - "queue zone offset %d is too large (max is %d)\n", - qz_offset, p_hwfn->p_rdma_info->max_queue_zones); - return; - } - - qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; - addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num); - - REG_WR16(p_hwfn, addr, prod); - - /* keep prod updates ordered */ - wmb(); -} - -static int qed_fill_rdma_dev_info(struct qed_dev *cdev, - struct qed_dev_rdma_info *info) -{ - struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); - - memset(info, 0, sizeof(*info)); - - info->rdma_type = QED_RDMA_TYPE_ROCE; - info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0); - - qed_fill_dev_info(cdev, &info->common); - - return 0; -} - -static int qed_rdma_get_sb_start(struct qed_dev *cdev) -{ - int feat_num; - - if (cdev->num_hwfns > 1) - feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE); - else - feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) * - cdev->num_hwfns; - - return feat_num; -} - -static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev) -{ - int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ); - int n_msix = cdev->int_params.rdma_msix_cnt; - - return min_t(int, n_cnq, n_msix); -} - -static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt) -{ - int limit = 0; - - /* Mark the fastpath as free/used */ - cdev->int_params.fp_initialized = cnt ? 
true : false; - - if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) { - DP_ERR(cdev, - "qed roce supports only MSI-X interrupts (detected %d).\n", - cdev->int_params.out.int_mode); - return -EINVAL; - } else if (cdev->int_params.fp_msix_cnt) { - limit = cdev->int_params.rdma_msix_cnt; - } - - if (!limit) - return -ENOMEM; - - return min_t(int, cnt, limit); -} - -static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info) -{ - memset(info, 0, sizeof(*info)); - - if (!cdev->int_params.fp_initialized) { - DP_INFO(cdev, - "Protocol driver requested interrupt information, but its support is not yet configured\n"); - return -EINVAL; - } - - if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { - int msix_base = cdev->int_params.rdma_msix_base; - - info->msix_cnt = cdev->int_params.rdma_msix_cnt; - info->msix = &cdev->int_params.msix_table[msix_base]; - - DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n", - info->msix_cnt, msix_base); - } - - return 0; -} - -static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - u32 returned_id; - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n"); - - /* Allocates an unused protection domain */ - spin_lock_bh(&p_hwfn->p_rdma_info->lock); - rc = qed_rdma_bmap_alloc_id(p_hwfn, - &p_hwfn->p_rdma_info->pd_map, &returned_id); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); - - *pd = (u16)returned_id; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc); - return rc; -} - -static void qed_rdma_free_pd(void *rdma_cxt, u16 pd) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd); - - /* Returns a previously allocated protection domain for reuse */ - spin_lock_bh(&p_hwfn->p_rdma_info->lock); - qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); -} - -static enum qed_rdma_toggle_bit -qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid) -{ - struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; - enum qed_rdma_toggle_bit toggle_bit; - u32 bmap_id; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid); - - /* the function toggle the bit that is related to a given icid - * and returns the new toggle bit's value - */ - bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto); - - spin_lock_bh(&p_info->lock); - toggle_bit = !test_and_change_bit(bmap_id, - p_info->toggle_bits.bitmap); - spin_unlock_bh(&p_info->lock); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n", - toggle_bit); - - return toggle_bit; -} - -static int qed_rdma_create_cq(void *rdma_cxt, - struct qed_rdma_create_cq_in_params *params, - u16 *icid) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; - struct rdma_create_cq_ramrod_data *p_ramrod; - enum qed_rdma_toggle_bit toggle_bit; - struct qed_sp_init_data init_data; - struct qed_spq_entry *p_ent; - u32 returned_id, start_cid; - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n", - params->cq_handle_hi, params->cq_handle_lo); - - /* Allocate icid */ - spin_lock_bh(&p_info->lock); - rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id); - spin_unlock_bh(&p_info->lock); - - if (rc) { - DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc); - return rc; - } - - start_cid = qed_cxt_get_proto_cid_start(p_hwfn, - p_info->proto); - *icid = returned_id + start_cid; - - /* Check if icid 
requires a page allocation */ - rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid); - if (rc) - goto err; - - /* Get SPQ entry */ - memset(&init_data, 0, sizeof(init_data)); - init_data.cid = *icid; - init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; - init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - - /* Send create CQ ramrod */ - rc = qed_sp_init_request(p_hwfn, &p_ent, - RDMA_RAMROD_CREATE_CQ, - p_info->proto, &init_data); - if (rc) - goto err; - - p_ramrod = &p_ent->ramrod.rdma_create_cq; - - p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi); - p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo); - p_ramrod->dpi = cpu_to_le16(params->dpi); - p_ramrod->is_two_level_pbl = params->pbl_two_level; - p_ramrod->max_cqes = cpu_to_le32(params->cq_size); - DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr); - p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages); - p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) + - params->cnq_id; - p_ramrod->int_timeout = params->int_timeout; - - /* toggle the bit for every resize or create cq for a given icid */ - toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); - - p_ramrod->toggle_bit = toggle_bit; - - rc = qed_spq_post(p_hwfn, p_ent, NULL); - if (rc) { - /* restore toggle bit */ - qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); - goto err; - } - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc); - return rc; - -err: - /* release allocated icid */ - spin_lock_bh(&p_info->lock); - qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id); - spin_unlock_bh(&p_info->lock); - DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc); - - return rc; -} - -static int -qed_rdma_destroy_cq(void *rdma_cxt, - struct qed_rdma_destroy_cq_in_params *in_params, - struct qed_rdma_destroy_cq_out_params *out_params) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - struct rdma_destroy_cq_output_params *p_ramrod_res; - struct rdma_destroy_cq_ramrod_data *p_ramrod; - struct qed_sp_init_data init_data; - struct qed_spq_entry *p_ent; - dma_addr_t ramrod_res_phys; - int rc = -ENOMEM; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid); - - p_ramrod_res = - (struct rdma_destroy_cq_output_params *) - dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(struct rdma_destroy_cq_output_params), - &ramrod_res_phys, GFP_KERNEL); - if (!p_ramrod_res) { - DP_NOTICE(p_hwfn, - "qed destroy cq failed: cannot allocate memory (ramrod)\n"); - return rc; - } - - /* Get SPQ entry */ - memset(&init_data, 0, sizeof(init_data)); - init_data.cid = in_params->icid; - init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; - init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - - /* Send destroy CQ ramrod */ - rc = qed_sp_init_request(p_hwfn, &p_ent, - RDMA_RAMROD_DESTROY_CQ, - p_hwfn->p_rdma_info->proto, &init_data); - if (rc) - goto err; - - p_ramrod = &p_ent->ramrod.rdma_destroy_cq; - DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); - - rc = qed_spq_post(p_hwfn, p_ent, NULL); - if (rc) - goto err; - - out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num); - - dma_free_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(struct rdma_destroy_cq_output_params), - p_ramrod_res, ramrod_res_phys); - - /* Free icid */ - spin_lock_bh(&p_hwfn->p_rdma_info->lock); - - qed_bmap_release_id(p_hwfn, - &p_hwfn->p_rdma_info->cq_map, - (in_params->icid - - qed_cxt_get_proto_cid_start(p_hwfn, - p_hwfn-> - p_rdma_info->proto))); - - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 
"Destroyed CQ, rc = %d\n", rc); - return rc; - -err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(struct rdma_destroy_cq_output_params), - p_ramrod_res, ramrod_res_phys); - - return rc; -} - -static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac) -{ - p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]); - p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]); - p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]); + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE); } static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, @@ -1210,7 +156,7 @@ void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } -static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) +int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; u32 responder_icid; @@ -1870,9 +816,9 @@ err: return rc; } -static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, - struct qed_rdma_qp *qp, - struct qed_rdma_query_qp_out_params *out_params) +int qed_roce_query_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) { struct roce_query_qp_resp_output_params *p_resp_ramrod_res; struct roce_query_qp_req_output_params *p_req_ramrod_res; @@ -2011,7 +957,7 @@ err_resp: return rc; } -static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { u32 num_invalidated_mw = 0; u32 num_bound_mw = 0; @@ -2050,138 +996,10 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) return 0; } -static int qed_rdma_query_qp(void *rdma_cxt, - struct qed_rdma_qp *qp, - struct qed_rdma_query_qp_out_params *out_params) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); - - /* The following fields are filled in from qp and not FW as they can't - * be modified by FW - */ - out_params->mtu = qp->mtu; - out_params->dest_qp = qp->dest_qp; - out_params->incoming_atomic_en = qp->incoming_atomic_en; - out_params->e2e_flow_control_en = qp->e2e_flow_control_en; - out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en; - out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en; - out_params->dgid = qp->dgid; - out_params->flow_label = qp->flow_label; - out_params->hop_limit_ttl = qp->hop_limit_ttl; - out_params->traffic_class_tos = qp->traffic_class_tos; - out_params->timeout = qp->ack_timeout; - out_params->rnr_retry = qp->rnr_retry_cnt; - out_params->retry_cnt = qp->retry_cnt; - out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer; - out_params->pkey_index = 0; - out_params->max_rd_atomic = qp->max_rd_atomic_req; - out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp; - out_params->sqd_async = qp->sqd_async; - - rc = qed_roce_query_qp(p_hwfn, qp, out_params); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc); - return rc; -} - -static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - int rc = 0; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); - - rc = qed_roce_destroy_qp(p_hwfn, qp); - - /* free qp params struct */ - kfree(qp); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n"); - return rc; -} - -static struct qed_rdma_qp * -qed_rdma_create_qp(void *rdma_cxt, - struct qed_rdma_create_qp_in_params *in_params, - 
struct qed_rdma_create_qp_out_params *out_params) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - struct qed_rdma_qp *qp; - u8 max_stats_queues; - int rc; - - if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) { - DP_ERR(p_hwfn->cdev, - "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", - rdma_cxt, in_params, out_params); - return NULL; - } - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "qed rdma create qp called with qp_handle = %08x%08x\n", - in_params->qp_handle_hi, in_params->qp_handle_lo); - - /* Some sanity checks... */ - max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues; - if (in_params->stats_queue >= max_stats_queues) { - DP_ERR(p_hwfn->cdev, - "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n", - in_params->stats_queue, max_stats_queues); - return NULL; - } - - qp = kzalloc(sizeof(*qp), GFP_KERNEL); - if (!qp) { - DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n"); - return NULL; - } - - rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); - qp->qpid = ((0xFF << 16) | qp->icid); - - DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid); - - if (rc) { - kfree(qp); - return NULL; - } - - qp->cur_state = QED_ROCE_QP_STATE_RESET; - qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi); - qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo); - qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi); - qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo); - qp->use_srq = in_params->use_srq; - qp->signal_all = in_params->signal_all; - qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey; - qp->pd = in_params->pd; - qp->dpi = in_params->dpi; - qp->sq_cq_id = in_params->sq_cq_id; - qp->sq_num_pages = in_params->sq_num_pages; - qp->sq_pbl_ptr = in_params->sq_pbl_ptr; - qp->rq_cq_id = in_params->rq_cq_id; - qp->rq_num_pages = in_params->rq_num_pages; - qp->rq_pbl_ptr = in_params->rq_pbl_ptr; - qp->srq_id = in_params->srq_id; - qp->req_offloaded = false; - qp->resp_offloaded = false; - qp->e2e_flow_control_en = qp->use_srq ? 
false : true; - qp->stats_queue = in_params->stats_queue; - - out_params->icid = qp->icid; - out_params->qp_id = qp->qpid; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc); - return qp; -} - -static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, - struct qed_rdma_qp *qp, - enum qed_roce_qp_state prev_state, - struct qed_rdma_modify_qp_in_params *params) +int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + enum qed_roce_qp_state prev_state, + struct qed_rdma_modify_qp_in_params *params) { u32 num_invalidated_mw = 0, num_bound_mw = 0; int rc = 0; @@ -2286,331 +1104,6 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, return rc; } -static int qed_rdma_modify_qp(void *rdma_cxt, - struct qed_rdma_qp *qp, - struct qed_rdma_modify_qp_in_params *params) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - enum qed_roce_qp_state prev_state; - int rc = 0; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n", - qp->icid, params->new_state); - - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); - return rc; - } - - if (GET_FIELD(params->modify_flags, - QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) { - qp->incoming_rdma_read_en = params->incoming_rdma_read_en; - qp->incoming_rdma_write_en = params->incoming_rdma_write_en; - qp->incoming_atomic_en = params->incoming_atomic_en; - } - - /* Update QP structure with the updated values */ - if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE)) - qp->roce_mode = params->roce_mode; - if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)) - qp->pkey = params->pkey; - if (GET_FIELD(params->modify_flags, - QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN)) - qp->e2e_flow_control_en = params->e2e_flow_control_en; - if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP)) - qp->dest_qp = params->dest_qp; - if (GET_FIELD(params->modify_flags, - QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) { - /* Indicates that the following parameters have changed: - * Traffic class, flow label, hop limit, source GID, - * destination GID, loopback indicator - */ - qp->traffic_class_tos = params->traffic_class_tos; - qp->flow_label = params->flow_label; - qp->hop_limit_ttl = params->hop_limit_ttl; - - qp->sgid = params->sgid; - qp->dgid = params->dgid; - qp->udp_src_port = 0; - qp->vlan_id = params->vlan_id; - qp->mtu = params->mtu; - qp->lb_indication = params->lb_indication; - memcpy((u8 *)&qp->remote_mac_addr[0], - (u8 *)¶ms->remote_mac_addr[0], ETH_ALEN); - if (params->use_local_mac) { - memcpy((u8 *)&qp->local_mac_addr[0], - (u8 *)¶ms->local_mac_addr[0], ETH_ALEN); - } else { - memcpy((u8 *)&qp->local_mac_addr[0], - (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); - } - } - if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN)) - qp->rq_psn = params->rq_psn; - if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN)) - qp->sq_psn = params->sq_psn; - if (GET_FIELD(params->modify_flags, - QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)) - qp->max_rd_atomic_req = params->max_rd_atomic_req; - if (GET_FIELD(params->modify_flags, - QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)) - qp->max_rd_atomic_resp = params->max_rd_atomic_resp; - if (GET_FIELD(params->modify_flags, - QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)) - qp->ack_timeout = params->ack_timeout; - if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)) - qp->retry_cnt = params->retry_cnt; - if (GET_FIELD(params->modify_flags, - QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)) - 
qp->rnr_retry_cnt = params->rnr_retry_cnt; - if (GET_FIELD(params->modify_flags, - QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)) - qp->min_rnr_nak_timer = params->min_rnr_nak_timer; - - qp->sqd_async = params->sqd_async; - - prev_state = qp->cur_state; - if (GET_FIELD(params->modify_flags, - QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) { - qp->cur_state = params->new_state; - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n", - qp->cur_state); - } - - rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc); - return rc; -} - -static int -qed_rdma_register_tid(void *rdma_cxt, - struct qed_rdma_register_tid_in_params *params) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - struct rdma_register_tid_ramrod_data *p_ramrod; - struct qed_sp_init_data init_data; - struct qed_spq_entry *p_ent; - enum rdma_tid_type tid_type; - u8 fw_return_code; - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid); - - /* Get SPQ entry */ - memset(&init_data, 0, sizeof(init_data)); - init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; - init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - - rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR, - p_hwfn->p_rdma_info->proto, &init_data); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); - return rc; - } - - if (p_hwfn->p_rdma_info->last_tid < params->itid) - p_hwfn->p_rdma_info->last_tid = params->itid; - - p_ramrod = &p_ent->ramrod.rdma_register_tid; - - p_ramrod->flags = 0; - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, - params->pbl_two_level); - - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva); - - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); - - /* Don't initialize D/C field, as it may override other bits. 
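
The register-TID ramrod packs its flags words with SET_FIELD(), which pairs each field name with `name##_MASK`/`name##_SHIFT` constants, the mask typically right-aligned at bit 0. An illustrative reimplementation of the idiom (the EX_ names are hypothetical, not qed definitions):

	#define EX_TID_TYPE_MASK	0x3
	#define EX_TID_TYPE_SHIFT	4

	#define EX_SET_FIELD(value, name, flag)					\
		do {								\
			(value) &= ~((name ## _MASK) << (name ## _SHIFT));	\
			(value) |= (((flag) & (name ## _MASK)) << (name ## _SHIFT)); \
		} while (0)

	/* usage: u8 flags = 0; EX_SET_FIELD(flags, EX_TID_TYPE, 2); -> flags == 0x20 */

Clearing the field before OR-ing the new value is what makes repeated calls on the same word safe.
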
*/ - if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr)) - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, - params->page_size_log - 12); - - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID, - p_hwfn->p_rdma_info->last_tid); - - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, - params->remote_read); - - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, - params->remote_write); - - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, - params->remote_atomic); - - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, - params->local_write); - - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read); - - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, - params->mw_bind); - - SET_FIELD(p_ramrod->flags1, - RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, - params->pbl_page_size_log - 12); - - SET_FIELD(p_ramrod->flags2, - RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr); - - switch (params->tid_type) { - case QED_RDMA_TID_REGISTERED_MR: - tid_type = RDMA_TID_REGISTERED_MR; - break; - case QED_RDMA_TID_FMR: - tid_type = RDMA_TID_FMR; - break; - case QED_RDMA_TID_MW_TYPE1: - tid_type = RDMA_TID_MW_TYPE1; - break; - case QED_RDMA_TID_MW_TYPE2A: - tid_type = RDMA_TID_MW_TYPE2A; - break; - default: - rc = -EINVAL; - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); - return rc; - } - SET_FIELD(p_ramrod->flags1, - RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type); - - p_ramrod->itid = cpu_to_le32(params->itid); - p_ramrod->key = params->key; - p_ramrod->pd = cpu_to_le16(params->pd); - p_ramrod->length_hi = (u8)(params->length >> 32); - p_ramrod->length_lo = DMA_LO_LE(params->length); - if (params->zbva) { - /* Lower 32 bits of the registered MR address. 
- * In case of zero based MR, will hold FBO - */ - p_ramrod->va.hi = 0; - p_ramrod->va.lo = cpu_to_le32(params->fbo); - } else { - DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); - } - DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr); - - /* DIF */ - if (params->dif_enabled) { - SET_FIELD(p_ramrod->flags2, - RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); - DMA_REGPAIR_LE(p_ramrod->dif_error_addr, - params->dif_error_addr); - DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr); - } - - rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); - if (rc) - return rc; - - if (fw_return_code != RDMA_RETURN_OK) { - DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); - return -EINVAL; - } - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc); - return rc; -} - -static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - struct rdma_deregister_tid_ramrod_data *p_ramrod; - struct qed_sp_init_data init_data; - struct qed_spq_entry *p_ent; - struct qed_ptt *p_ptt; - u8 fw_return_code; - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); - - /* Get SPQ entry */ - memset(&init_data, 0, sizeof(init_data)); - init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; - init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - - rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR, - p_hwfn->p_rdma_info->proto, &init_data); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); - return rc; - } - - p_ramrod = &p_ent->ramrod.rdma_deregister_tid; - p_ramrod->itid = cpu_to_le32(itid); - - rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); - return rc; - } - - if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) { - DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); - return -EINVAL; - } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) { - /* Bit indicating that the TID is in use and a nig drain is - * required before sending the ramrod again - */ - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) { - rc = -EBUSY; - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Failed to acquire PTT\n"); - return rc; - } - - rc = qed_mcp_drain(p_hwfn, p_ptt); - if (rc) { - qed_ptt_release(p_hwfn, p_ptt); - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Drain failed\n"); - return rc; - } - - qed_ptt_release(p_hwfn, p_ptt); - - /* Resend the ramrod */ - rc = qed_sp_init_request(p_hwfn, &p_ent, - RDMA_RAMROD_DEREGISTER_MR, - p_hwfn->p_rdma_info->proto, - &init_data); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Failed to init sp-element\n"); - return rc; - } - - rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); - if (rc) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Ramrod failed\n"); - return rc; - } - - if (fw_return_code != RDMA_RETURN_OK) { - DP_NOTICE(p_hwfn, "fw_return_code = %d\n", - fw_return_code); - return rc; - } - } - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc); - return rc; -} - static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; @@ -2636,414 +1129,43 @@ static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid) spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } -static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) +void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - return QED_LEADING_HWFN(cdev); -} + u8 val; -static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) -{ - u32 val; - 
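
qed_roce_dpm_dcbx(), whose body continues just below, disables enhanced doorbell posting (EDPM) whenever QPs predate the latest DCBx update, by feeding the result back through dcbx_no_edpm. The effective decision combines that flag with the doorbell-BAR veto; a minimal sketch over the qed_hwfn fields involved:

	/* EDPM stays on only if neither the DCBx state nor the doorbell
	 * BAR sizing has vetoed it.
	 */
	static bool ex_edpm_enabled(const struct qed_hwfn *p_hwfn)
	{
		return !p_hwfn->dcbx_no_edpm && !p_hwfn->db_bar_no_edpm;
	}
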
- val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1; - - qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); - DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), - "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", - val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); -} - -void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) -{ - p_hwfn->db_bar_no_edpm = true; + /* if any QPs are already active, we want to disable DPM, since their + * context information contains information from before the latest DCBx + * update. Otherwise enable it. + */ + val = qed_rdma_allocated_qps(p_hwfn) ? true : false; + p_hwfn->dcbx_no_edpm = (u8)val; qed_rdma_dpm_conf(p_hwfn, p_ptt); } -static int qed_rdma_start(void *rdma_cxt, - struct qed_rdma_start_in_params *params) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - struct qed_ptt *p_ptt; - int rc = -EBUSY; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "desired_cnq = %08x\n", params->desired_cnq); - - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) - goto err; - - rc = qed_rdma_alloc(p_hwfn, p_ptt, params); - if (rc) - goto err1; - - rc = qed_rdma_setup(p_hwfn, p_ptt, params); - if (rc) - goto err2; - - qed_ptt_release(p_hwfn, p_ptt); - - return rc; - -err2: - qed_rdma_free(p_hwfn); -err1: - qed_ptt_release(p_hwfn, p_ptt); -err: - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc); - return rc; -} - -static int qed_rdma_init(struct qed_dev *cdev, - struct qed_rdma_start_in_params *params) +int qed_roce_setup(struct qed_hwfn *p_hwfn) { - return qed_rdma_start(QED_LEADING_HWFN(cdev), params); -} - -static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi); - - spin_lock_bh(&p_hwfn->p_rdma_info->lock); - qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); -} - -void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, bool b_last_packet) -{ - struct qed_roce_ll2_packet *packet = cookie; - struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2; - - roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet); + return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE, + qed_roce_async_event); } -void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, bool b_last_packet) +int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle, - cookie, first_frag_addr, - b_last_fragment, b_last_packet); -} - -void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t rx_buf_addr, - u16 data_length, - u8 data_length_error, - u16 parse_flags, - u16 vlan, - u32 src_mac_addr_hi, - u16 src_mac_addr_lo, bool b_last_packet) -{ - struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2; - struct qed_roce_ll2_rx_params params; - struct qed_dev *cdev = p_hwfn->cdev; - struct qed_roce_ll2_packet pkt; - - DP_VERBOSE(cdev, - QED_MSG_LL2, - "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n", - (void *)(uintptr_t)rx_buf_addr, - data_length, data_length_error); - - memset(&pkt, 0, sizeof(pkt)); - pkt.n_seg = 1; - pkt.payload[0].baddr = rx_buf_addr; - pkt.payload[0].len = data_length; - - memset(¶ms, 0, sizeof(params)); - params.vlan_id = vlan; - *((u32 
*)¶ms.smac[0]) = ntohl(src_mac_addr_hi); - *((u16 *)¶ms.smac[4]) = ntohs(src_mac_addr_lo); - - if (data_length_error) { - DP_ERR(cdev, - "roce ll2 rx complete: data length error %d, length=%d\n", - data_length_error, data_length); - params.rc = -EINVAL; - } - - roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, ¶ms); -} - -static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, - u8 *old_mac_address, - u8 *new_mac_address) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_ptt *p_ptt; - int rc = 0; - - if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) { - DP_ERR(cdev, - "qed roce mac filter failed - roce_info/ll2 NULL\n"); - return -EINVAL; - } - - p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); - if (!p_ptt) { - DP_ERR(cdev, - "qed roce ll2 mac filter set: failed to acquire PTT\n"); - return -EINVAL; - } - - mutex_lock(&hwfn->ll2->lock); - if (old_mac_address) - qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt, - old_mac_address); - if (new_mac_address) - rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt, - new_mac_address); - mutex_unlock(&hwfn->ll2->lock); - - qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); - - if (rc) - DP_ERR(cdev, - "qed roce ll2 mac filter set: failed to add mac filter\n"); - - return rc; -} - -static int qed_roce_ll2_start(struct qed_dev *cdev, - struct qed_roce_ll2_params *params) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_roce_ll2_info *roce_ll2; - struct qed_ll2_conn ll2_params; - int rc; - - if (!params) { - DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n"); - return -EINVAL; - } - if (!params->cbs.tx_cb || !params->cbs.rx_cb) { - DP_ERR(cdev, - "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n", - params->cbs.tx_cb, params->cbs.rx_cb); - return -EINVAL; - } - if (!is_valid_ether_addr(params->mac_address)) { - DP_ERR(cdev, - "qed roce ll2 start: failed due to invalid Ethernet address %pM\n", - params->mac_address); - return -EINVAL; - } - - /* Initialize */ - roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC); - if (!roce_ll2) { - DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n"); - return -ENOMEM; - } - roce_ll2->handle = QED_LL2_UNUSED_HANDLE; - roce_ll2->cbs = params->cbs; - roce_ll2->cb_cookie = params->cb_cookie; - mutex_init(&roce_ll2->lock); - - memset(&ll2_params, 0, sizeof(ll2_params)); - ll2_params.conn_type = QED_LL2_TYPE_ROCE; - ll2_params.mtu = params->mtu; - ll2_params.rx_drop_ttl0_flg = true; - ll2_params.rx_vlan_removal_en = false; - ll2_params.tx_dest = CORE_TX_DEST_NW; - ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET; - ll2_params.ai_err_no_buf = LL2_DROP_PACKET; - ll2_params.gsi_enable = true; - - rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params, - params->max_rx_buffers, - params->max_tx_buffers, - &roce_ll2->handle); - if (rc) { - DP_ERR(cdev, - "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n", - rc); - goto err; - } - - rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev), - roce_ll2->handle); - if (rc) { - DP_ERR(cdev, - "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n", - rc); - goto err1; - } - - hwfn->ll2 = roce_ll2; - - rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address); - if (rc) { - hwfn->ll2 = NULL; - goto err2; - } - ether_addr_copy(roce_ll2->mac_address, params->mac_address); - - return 0; - -err2: - qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle); -err1: - qed_ll2_release_connection(QED_LEADING_HWFN(cdev), 
roce_ll2->handle); -err: - kfree(roce_ll2); - return rc; -} - -static int qed_roce_ll2_stop(struct qed_dev *cdev) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; - int rc; - - if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) { - DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n"); - return -EINVAL; - } - - /* remove LL2 MAC address filter */ - rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL); - eth_zero_addr(roce_ll2->mac_address); - - rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), - roce_ll2->handle); - if (rc) - DP_ERR(cdev, - "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n", - rc); - - qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle); - - roce_ll2->handle = QED_LL2_UNUSED_HANDLE; + u32 ll2_ethertype_en; - kfree(roce_ll2); + qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); - return rc; -} + p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE; -static int qed_roce_ll2_tx(struct qed_dev *cdev, - struct qed_roce_ll2_packet *pkt, - struct qed_roce_ll2_tx_params *params) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; - enum qed_ll2_roce_flavor_type qed_roce_flavor; - u8 flags = 0; - int rc; - int i; + ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); + qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, + (ll2_ethertype_en | 0x01)); - if (!pkt || !params) { - DP_ERR(cdev, - "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n", - cdev, pkt, params); + if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) { + DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n"); return -EINVAL; } - qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? 
QED_LL2_ROCE - : QED_LL2_RROCE; - - if (pkt->roce_mode == ROCE_V2_IPV4) - flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT); - - /* Tx header */ - rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle, - 1 + pkt->n_seg, 0, flags, 0, - QED_LL2_TX_DEST_NW, - qed_roce_flavor, pkt->header.baddr, - pkt->header.len, pkt, 1); - if (rc) { - DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc); - return QED_ROCE_TX_HEAD_FAILURE; - } - - /* Tx payload */ - for (i = 0; i < pkt->n_seg; i++) { - rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev), - roce_ll2->handle, - pkt->payload[i].baddr, - pkt->payload[i].len); - if (rc) { - /* If failed not much to do here, partial packet has - * been posted * we can't free memory, will need to wait - * for completion - */ - DP_ERR(cdev, - "roce ll2 tx: payload failed (rc=%d)\n", rc); - return QED_ROCE_TX_FRAG_FAILURE; - } - } - + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n"); return 0; } - -static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev, - struct qed_roce_ll2_buffer *buf, - u64 cookie, u8 notify_fw) -{ - return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), - QED_LEADING_HWFN(cdev)->ll2->handle, - buf->baddr, buf->len, - (void *)(uintptr_t)cookie, notify_fw); -} - -static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; - - return qed_ll2_get_stats(QED_LEADING_HWFN(cdev), - roce_ll2->handle, stats); -} - -static const struct qed_rdma_ops qed_rdma_ops_pass = { - .common = &qed_common_ops_pass, - .fill_dev_info = &qed_fill_rdma_dev_info, - .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, - .rdma_init = &qed_rdma_init, - .rdma_add_user = &qed_rdma_add_user, - .rdma_remove_user = &qed_rdma_remove_user, - .rdma_stop = &qed_rdma_stop, - .rdma_query_port = &qed_rdma_query_port, - .rdma_query_device = &qed_rdma_query_device, - .rdma_get_start_sb = &qed_rdma_get_sb_start, - .rdma_get_rdma_int = &qed_rdma_get_int, - .rdma_set_rdma_int = &qed_rdma_set_int, - .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, - .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, - .rdma_alloc_pd = &qed_rdma_alloc_pd, - .rdma_dealloc_pd = &qed_rdma_free_pd, - .rdma_create_cq = &qed_rdma_create_cq, - .rdma_destroy_cq = &qed_rdma_destroy_cq, - .rdma_create_qp = &qed_rdma_create_qp, - .rdma_modify_qp = &qed_rdma_modify_qp, - .rdma_query_qp = &qed_rdma_query_qp, - .rdma_destroy_qp = &qed_rdma_destroy_qp, - .rdma_alloc_tid = &qed_rdma_alloc_tid, - .rdma_free_tid = &qed_rdma_free_tid, - .rdma_register_tid = &qed_rdma_register_tid, - .rdma_deregister_tid = &qed_rdma_deregister_tid, - .roce_ll2_start = &qed_roce_ll2_start, - .roce_ll2_stop = &qed_roce_ll2_stop, - .roce_ll2_tx = &qed_roce_ll2_tx, - .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer, - .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, - .roce_ll2_stats = &qed_roce_ll2_stats, -}; - -const struct qed_rdma_ops *qed_get_rdma_ops(void) -{ - return &qed_rdma_ops_pass; -} -EXPORT_SYMBOL(qed_get_rdma_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 9742af516183..f801f39fde61 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -32,191 +32,28 @@ #ifndef _QED_ROCE_H #define _QED_ROCE_H #include <linux/types.h> -#include <linux/bitops.h> -#include <linux/kernel.h> -#include <linux/list.h> #include <linux/slab.h> -#include <linux/spinlock.h> 
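
The rewritten header that follows keeps only the RoCE-facing prototypes and stubs them out when the driver is built without RDMA support. IS_ENABLED() evaluates true for both =y and =m, so the inline no-ops are compiled only when the option is fully off; the general pattern, with a hypothetical CONFIG_FOO:

	#if IS_ENABLED(CONFIG_FOO)
	int foo_init(void);
	#else
	static inline int foo_init(void) { return 0; }
	#endif

Callers can then invoke foo_init() unconditionally, with no #ifdef at the call site.
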
-#include <linux/qed/qed_if.h> -#include <linux/qed/qed_roce_if.h> -#include "qed.h" -#include "qed_dev_api.h" -#include "qed_hsi.h" -#include "qed_ll2.h" -#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS) -#define QED_RDMA_MAX_P_KEY (1) -#define QED_RDMA_MAX_WQE (0x7FFF) -#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF) -#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000) -#define QED_RDMA_ACK_DELAY (15) -#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL) -#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS) -#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS) -/* Add 1 for header element */ -#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1) -#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE) -#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16) -#define QED_RDMA_MAX_SRQS (32 * 1024) - -#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1) -#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1) - -enum qed_rdma_toggle_bit { - QED_RDMA_TOGGLE_BIT_CLEAR = 0, - QED_RDMA_TOGGLE_BIT_SET = 1 -}; - -#define QED_RDMA_MAX_BMAP_NAME (10) -struct qed_bmap { - unsigned long *bitmap; - u32 max_count; - char name[QED_RDMA_MAX_BMAP_NAME]; -}; - -struct qed_rdma_info { - /* spin lock to protect bitmaps */ - spinlock_t lock; - - struct qed_bmap cq_map; - struct qed_bmap pd_map; - struct qed_bmap tid_map; - struct qed_bmap qp_map; - struct qed_bmap srq_map; - struct qed_bmap cid_map; - struct qed_bmap real_cid_map; - struct qed_bmap dpi_map; - struct qed_bmap toggle_bits; - struct qed_rdma_events events; - struct qed_rdma_device *dev; - struct qed_rdma_port *port; - u32 last_tid; - u8 num_cnqs; - u32 num_qps; - u32 num_mrs; - u16 queue_zone_base; - u16 max_queue_zones; - enum protocol_type proto; -}; - -struct qed_rdma_qp { - struct regpair qp_handle; - struct regpair qp_handle_async; - u32 qpid; - u16 icid; - enum qed_roce_qp_state cur_state; - bool use_srq; - bool signal_all; - bool fmr_and_reserved_lkey; - - bool incoming_rdma_read_en; - bool incoming_rdma_write_en; - bool incoming_atomic_en; - bool e2e_flow_control_en; - - u16 pd; - u16 pkey; - u32 dest_qp; - u16 mtu; - u16 srq_id; - u8 traffic_class_tos; - u8 hop_limit_ttl; - u16 dpi; - u32 flow_label; - bool lb_indication; - u16 vlan_id; - u32 ack_timeout; - u8 retry_cnt; - u8 rnr_retry_cnt; - u8 min_rnr_nak_timer; - bool sqd_async; - union qed_gid sgid; - union qed_gid dgid; - enum roce_mode roce_mode; - u16 udp_src_port; - u8 stats_queue; - - /* requeseter */ - u8 max_rd_atomic_req; - u32 sq_psn; - u16 sq_cq_id; - u16 sq_num_pages; - dma_addr_t sq_pbl_ptr; - void *orq; - dma_addr_t orq_phys_addr; - u8 orq_num_pages; - bool req_offloaded; +#if IS_ENABLED(CONFIG_QED_RDMA) +void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +#else +static inline void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) {} +#endif - /* responder */ - u8 max_rd_atomic_resp; - u32 rq_psn; - u16 rq_cq_id; - u16 rq_num_pages; - dma_addr_t rq_pbl_ptr; - void *irq; - dma_addr_t irq_phys_addr; - u8 irq_num_pages; - bool resp_offloaded; - u32 cq_prod; +int qed_roce_setup(struct qed_hwfn *p_hwfn); +void qed_roce_stop(struct qed_hwfn *p_hwfn); +int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid); +int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp); - u8 remote_mac_addr[6]; - u8 local_mac_addr[6]; +int qed_roce_query_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params); - void *shared_queue; - dma_addr_t shared_queue_phys_addr; -}; +int 
qed_roce_modify_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + enum qed_roce_qp_state prev_state, + struct qed_rdma_modify_qp_in_params *params); -#if IS_ENABLED(CONFIG_QED_RDMA) -void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -void qed_roce_async_event(struct qed_hwfn *p_hwfn, - u8 fw_event_code, union rdma_eqe_data *rdma_data); -void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, bool b_last_packet); -void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, bool b_last_packet); -void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t rx_buf_addr, - u16 data_length, - u8 data_length_error, - u16 parse_flags, - u16 vlan, - u32 src_mac_addr_hi, - u16 src_mac_addr_lo, bool b_last_packet); -#else -static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} -static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn, - u8 fw_event_code, - union rdma_eqe_data *rdma_data) {} -static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, - bool b_last_packet) {} -static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, - bool b_last_packet) {} -static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t rx_buf_addr, - u16 data_length, - u8 data_length_error, - u16 parse_flags, - u16 vlan, - u32 src_mac_addr_hi, - u16 src_mac_addr_lo, - bool b_last_packet) {} -#endif #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 3357bbefa445..ab4ad8a1e2a5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -104,12 +104,17 @@ union ramrod_data { struct roce_query_qp_req_ramrod_data roce_query_qp_req; struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; + struct roce_init_func_ramrod_data roce_init_func; struct rdma_create_cq_ramrod_data rdma_create_cq; struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; struct rdma_srq_create_ramrod_data rdma_create_srq; struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; struct rdma_srq_modify_ramrod_data rdma_modify_srq; - struct roce_init_func_ramrod_data roce_init_func; + struct iwarp_create_qp_ramrod_data iwarp_create_qp; + struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload; + struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload; + struct iwarp_modify_qp_ramrod_data iwarp_modify_qp; + struct iwarp_init_func_ramrod_data iwarp_init_func; struct fcoe_init_ramrod_params fcoe_init; struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld; struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate; @@ -120,6 +125,7 @@ union ramrod_data { struct iscsi_spe_func_dstry iscsi_destroy; struct iscsi_spe_conn_offload iscsi_conn_offload; struct iscsi_conn_update_ramrod_params iscsi_conn_update; + struct iscsi_spe_conn_mac_update iscsi_conn_mac_update; struct iscsi_spe_conn_termination iscsi_conn_terminate; struct vf_start_ramrod_data vf_start; @@ -173,6 +179,22 @@ struct qed_consq { struct qed_chain 
chain; }; +typedef int +(*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, + u8 opcode, + u16 echo, + union event_ring_data *data, + u8 fw_return_code); + +int +qed_spq_register_async_cb(struct qed_hwfn *p_hwfn, + enum protocol_type protocol_id, + qed_spq_async_comp_cb cb); + +void +qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn, + enum protocol_type protocol_id); + struct qed_spq { spinlock_t lock; /* SPQ lock */ @@ -202,6 +224,7 @@ struct qed_spq { u32 comp_count; u32 cid; + qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE]; }; /** @@ -270,28 +293,23 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn, * @param p_hwfn * @param num_elem number of elements in the eq * - * @return struct qed_eq* - a newly allocated structure; NULL upon error. + * @return int */ -struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, - u16 num_elem); +int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem); /** - * @brief qed_eq_setup - Reset the SPQ to its start state. + * @brief qed_eq_setup - Reset the EQ to its start state. * * @param p_hwfn - * @param p_eq */ -void qed_eq_setup(struct qed_hwfn *p_hwfn, - struct qed_eq *p_eq); +void qed_eq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_deallocate - deallocates the given EQ struct. + * @brief qed_eq_free - deallocates the given EQ struct. * * @param p_hwfn - * @param p_eq */ -void qed_eq_free(struct qed_hwfn *p_hwfn, - struct qed_eq *p_eq); +void qed_eq_free(struct qed_hwfn *p_hwfn); /** * @brief qed_eq_prod_update - update the FW with default EQ producer @@ -342,28 +360,23 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn); * * @param p_hwfn * - * @return struct qed_eq* - a newly allocated structure; NULL upon error. + * @return int */ -struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn); +int qed_consq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_setup - Reset the ConsQ to its start - * state. + * @brief qed_consq_setup - Reset the ConsQ to its start state. * * @param p_hwfn - * @param p_eq */ -void qed_consq_setup(struct qed_hwfn *p_hwfn, - struct qed_consq *p_consq); +void qed_consq_setup(struct qed_hwfn *p_hwfn); /** * @brief qed_consq_free - deallocates the given ConsQ struct. * * @param p_hwfn - * @param p_eq */ -void qed_consq_free(struct qed_hwfn *p_hwfn, - struct qed_consq *p_consq); +void qed_consq_free(struct qed_hwfn *p_hwfn); /** * @file @@ -401,6 +414,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * to the internal RAM of the UStorm by the Function Start Ramrod. * * @param p_hwfn + * @param p_ptt * @param p_tunn * @param mode * @param allow_npar_tx_switch @@ -409,6 +423,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, enum qed_mf_mode mode, bool allow_npar_tx_switch); @@ -426,6 +441,15 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_update(struct qed_hwfn *p_hwfn); /** + * @brief qed_sp_pf_update_stag - Update firmware of new outer tag + * + * @param p_hwfn + * + * @return int + */ +int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); + +/** * @brief qed_sp_pf_stop - PF Function Stop Ramrod * * This ramrod is sent to close a Physical Function (PF). 
It is the last ramrod @@ -442,6 +466,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn); int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index bc3694e91b85..46d0c3cb83a5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -185,22 +185,20 @@ static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun, } static void -__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas, +__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, struct qed_tunn_update_type *tun_type) { *p_tunn_cls = tun_type->tun_cls; - - if (tun_type->b_mode_enabled) - *p_enable_tx_clas = 1; } static void -qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas, +qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, struct qed_tunn_update_type *tun_type, - u8 *p_update_port, __le16 *p_port, + u8 *p_update_port, + __le16 *p_port, struct qed_tunn_update_udp_port *p_udp_port) { - __qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type); + __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type); if (p_udp_port->b_update_port) { *p_update_port = 1; *p_port = cpu_to_le16(p_udp_port->port); @@ -219,33 +217,27 @@ qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn, qed_set_tunn_ports(p_tun, p_src); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, - &p_tunn_cfg->tx_enable_vxlan, &p_tun->vxlan, &p_tunn_cfg->set_vxlan_udp_port_flg, &p_tunn_cfg->vxlan_udp_port, &p_tun->vxlan_port); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, - &p_tunn_cfg->tx_enable_l2geneve, &p_tun->l2_geneve, &p_tunn_cfg->set_geneve_udp_port_flg, &p_tunn_cfg->geneve_udp_port, &p_tun->geneve_port); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, - &p_tunn_cfg->tx_enable_ipgeneve, &p_tun->ip_geneve); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, - &p_tunn_cfg->tx_enable_l2gre, &p_tun->l2_gre); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, - &p_tunn_cfg->tx_enable_ipgre, &p_tun->ip_gre); p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls; - p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls; } static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn, @@ -261,17 +253,18 @@ static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn, } static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn) { if (p_tunn->vxlan_port.b_update_port) - qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt, + qed_set_vxlan_dest_port(p_hwfn, p_ptt, p_tunn->vxlan_port.port); if (p_tunn->geneve_port.b_update_port) - qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt, + qed_set_geneve_dest_port(p_hwfn, p_ptt, p_tunn->geneve_port.port); - qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn); + qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn); } static void @@ -289,33 +282,29 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, qed_set_tunn_ports(p_tun, p_src); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, - &p_tunn_cfg->tx_enable_vxlan, &p_tun->vxlan, &p_tunn_cfg->set_vxlan_udp_port_flg, &p_tunn_cfg->vxlan_udp_port, &p_tun->vxlan_port); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, - &p_tunn_cfg->tx_enable_l2geneve, &p_tun->l2_geneve, 
&p_tunn_cfg->set_geneve_udp_port_flg, &p_tunn_cfg->geneve_udp_port, &p_tun->geneve_port); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, - &p_tunn_cfg->tx_enable_ipgeneve, &p_tun->ip_geneve); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, - &p_tunn_cfg->tx_enable_l2gre, &p_tun->l2_gre); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, - &p_tunn_cfg->tx_enable_ipgre, &p_tun->ip_gre); } int qed_sp_pf_start(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, enum qed_mf_mode mode, bool allow_npar_tx_switch) { @@ -412,7 +401,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, rc = qed_spq_post(p_hwfn, p_ent, NULL); if (p_tunn) - qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel); + qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, + &p_hwfn->cdev->tunnel); return rc; } @@ -443,6 +433,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn) /* Set pf update ramrod command params */ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) @@ -477,7 +468,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, if (rc) return rc; - qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel); + qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel); return rc; } @@ -523,3 +514,27 @@ int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn) return qed_spq_post(p_hwfn, p_ent, NULL); } + +int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + p_ent->ramrod.pf_update.update_mf_vlan_flag = true; + p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index f6423a139ca0..be48d9abd001 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -54,7 +54,7 @@ #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" -#include "qed_roce.h" +#include "qed_rdma.h" /*************************************************************************** * Structures & Definitions @@ -302,32 +302,16 @@ static int qed_async_event_completion(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) { - switch (p_eqe->protocol_id) { -#if IS_ENABLED(CONFIG_QED_RDMA) - case PROTOCOLID_ROCE: - qed_roce_async_event(p_hwfn, p_eqe->opcode, - &p_eqe->data.rdma_data); - return 0; -#endif - case PROTOCOLID_COMMON: - return qed_sriov_eqe_event(p_hwfn, - p_eqe->opcode, - p_eqe->echo, &p_eqe->data); - case PROTOCOLID_ISCSI: - if (!IS_ENABLED(CONFIG_QED_ISCSI)) - return -EINVAL; + qed_spq_async_comp_cb cb; - if (p_hwfn->p_iscsi_info->event_cb) { - struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; + if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE)) + return -EINVAL; - return p_iscsi->event_cb(p_iscsi->event_context, - p_eqe->opcode, &p_eqe->data); - } else { - DP_NOTICE(p_hwfn, - "iSCSI async completion is not set\n"); - return -EINVAL; - } - default: + cb = 
p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id]; + if (cb) { + return cb(p_hwfn, p_eqe->opcode, p_eqe->echo, + &p_eqe->data, p_eqe->fw_return_code); + } else { DP_NOTICE(p_hwfn, "Unknown Async completion for protocol: %d\n", p_eqe->protocol_id); @@ -335,6 +319,28 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, } } +int +qed_spq_register_async_cb(struct qed_hwfn *p_hwfn, + enum protocol_type protocol_id, + qed_spq_async_comp_cb cb) +{ + if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) + return -EINVAL; + + p_hwfn->p_spq->async_comp_cb[protocol_id] = cb; + return 0; +} + +void +qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn, + enum protocol_type protocol_id) +{ + if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) + return; + + p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL; +} + /*************************************************************************** * EQ API ***************************************************************************/ @@ -403,14 +409,14 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) return rc; } -struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) +int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) { struct qed_eq *p_eq; /* Allocate EQ struct */ p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL); if (!p_eq) - return NULL; + return -ENOMEM; /* Allocate and initialize EQ chain*/ if (qed_chain_alloc(p_hwfn->cdev, @@ -419,31 +425,35 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) QED_CHAIN_CNT_TYPE_U16, num_elem, sizeof(union event_ring_element), - &p_eq->chain)) + &p_eq->chain, NULL)) goto eq_allocate_fail; /* register EQ completion on the SP SB */ qed_int_register_cb(p_hwfn, qed_eq_completion, p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons); - return p_eq; + p_hwfn->p_eq = p_eq; + return 0; eq_allocate_fail: - qed_eq_free(p_hwfn, p_eq); - return NULL; + kfree(p_eq); + return -ENOMEM; } -void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq) +void qed_eq_setup(struct qed_hwfn *p_hwfn) { - qed_chain_reset(&p_eq->chain); + qed_chain_reset(&p_hwfn->p_eq->chain); } -void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq) +void qed_eq_free(struct qed_hwfn *p_hwfn) { - if (!p_eq) + if (!p_hwfn->p_eq) return; - qed_chain_free(p_hwfn->cdev, &p_eq->chain); - kfree(p_eq); + + qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain); + + kfree(p_hwfn->p_eq); + p_hwfn->p_eq = NULL; } /*************************************************************************** @@ -543,7 +553,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) QED_CHAIN_CNT_TYPE_U16, 0, /* N/A when the mode is SINGLE */ sizeof(struct slow_path_element), - &p_spq->chain)) + &p_spq->chain, NULL)) goto spq_allocate_fail; /* allocate and fill the SPQ elements (incl. 
ramrod data list) */ @@ -583,8 +593,8 @@ void qed_spq_free(struct qed_hwfn *p_hwfn) } qed_chain_free(p_hwfn->cdev, &p_spq->chain); - ; kfree(p_spq); + p_hwfn->p_spq = NULL; } int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent) @@ -934,14 +944,14 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, return rc; } -struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn) +int qed_consq_alloc(struct qed_hwfn *p_hwfn) { struct qed_consq *p_consq; /* Allocate ConsQ struct */ p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL); if (!p_consq) - return NULL; + return -ENOMEM; /* Allocate and initialize EQ chain*/ if (qed_chain_alloc(p_hwfn->cdev, @@ -949,25 +959,29 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn) QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, QED_CHAIN_PAGE_SIZE / 0x80, - 0x80, &p_consq->chain)) + 0x80, &p_consq->chain, NULL)) goto consq_allocate_fail; - return p_consq; + p_hwfn->p_consq = p_consq; + return 0; consq_allocate_fail: - qed_consq_free(p_hwfn, p_consq); - return NULL; + kfree(p_consq); + return -ENOMEM; } -void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq) +void qed_consq_setup(struct qed_hwfn *p_hwfn) { - qed_chain_reset(&p_consq->chain); + qed_chain_reset(&p_hwfn->p_consq->chain); } -void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq) +void qed_consq_free(struct qed_hwfn *p_hwfn) { - if (!p_consq) + if (!p_hwfn->p_consq) return; - qed_chain_free(p_hwfn->cdev, &p_consq->chain); - kfree(p_consq); + + qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain); + + kfree(p_hwfn->p_consq); + p_hwfn->p_consq = NULL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index f5ed54d611ec..2cfd3bd9a031 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -44,6 +44,26 @@ #include "qed_sp.h" #include "qed_sriov.h" #include "qed_vf.h" +static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, u8 fw_return_code); + + +static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) +{ + u8 legacy = 0; + + if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) + legacy |= QED_QCID_LEGACY_VF_RX_PROD; + + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) + legacy |= QED_QCID_LEGACY_VF_CID; + + return legacy; +} /* IOV ramrods */ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) @@ -178,6 +198,19 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, return vf; } +static struct qed_queue_cid * +qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue) +{ + int i; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx) + return p_queue->cids[i].p_cid; + } + + return NULL; +} + enum qed_iov_validate_q_mode { QED_IOV_VALIDATE_Q_NA, QED_IOV_VALIDATE_Q_ENABLE, @@ -190,12 +223,24 @@ static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn, enum qed_iov_validate_q_mode mode, bool b_is_tx) { + int i; + if (mode == QED_IOV_VALIDATE_Q_NA) return true; - if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) || - (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid)) + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + struct qed_vf_queue_cid *p_qcid; + + p_qcid = &p_vf->vf_queues[qid].cids[i]; + + if (!p_qcid->p_cid) + continue; + + if (p_qcid->b_is_tx != b_is_tx) + continue; + return mode == QED_IOV_VALIDATE_Q_ENABLE; + } /* In case 
we haven't found any valid cid, then its disabled */ return mode == QED_IOV_VALIDATE_Q_DISABLE; @@ -378,33 +423,6 @@ static int qed_iov_pci_cfg_info(struct qed_dev *cdev) return 0; } -static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) -{ - struct qed_igu_block *p_sb; - u16 sb_id; - u32 val; - - if (!p_hwfn->hw_info.p_igu_info) { - DP_ERR(p_hwfn, - "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n"); - return; - } - - for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); - sb_id++) { - p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; - if ((p_sb->status & QED_IGU_STATUS_FREE) && - !(p_sb->status & QED_IGU_STATUS_PF)) { - val = qed_rd(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + sb_id * 4); - SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); - qed_wr(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + 4 * sb_id, val); - } - } -} - static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; @@ -552,20 +570,24 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn) p_hwfn->pf_iov_info = p_sriov; + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, + qed_sriov_eqe_event); + return qed_iov_allocate_vfdb(p_hwfn); } -void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +void qed_iov_setup(struct qed_hwfn *p_hwfn) { if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) return; qed_iov_setup_vfdb(p_hwfn); - qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt); } void qed_iov_free(struct qed_hwfn *p_hwfn) { + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); + if (IS_PF_SRIOV_ALLOC(p_hwfn)) { qed_iov_free_vfdb(p_hwfn); kfree(p_hwfn->pf_iov_info); @@ -747,6 +769,35 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); } +static int +qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs) +{ + u8 current_max = 0; + int i; + + /* For AH onward, configuration is per-PF. Find maximum of all + * the currently enabled child VFs, and set the number to be that. 
+ */ + if (!QED_IS_BB(p_hwfn->cdev)) { + qed_for_each_vf(p_hwfn, i) { + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true); + if (!p_vf) + continue; + + current_max = max_t(u8, current_max, p_vf->num_sbs); + } + } + + if (num_sbs > current_max) + return qed_mcp_config_vf_msix(p_hwfn, p_ptt, + abs_vf_id, num_sbs); + + return 0; +} + static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) @@ -771,7 +822,8 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); - rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); + rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt, + vf->abs_vf_id, vf->num_sbs); if (rc) return rc; @@ -838,45 +890,36 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, u16 num_rx_queues) { - struct qed_igu_block *igu_blocks; - int qid = 0, igu_id = 0; + struct qed_igu_block *p_block; + struct cau_sb_entry sb_entry; + int qid = 0; u32 val = 0; - igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks; - - if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) - num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; - p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; + if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) + num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); - while ((qid < num_rx_queues) && - (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) { - if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) { - struct cau_sb_entry sb_entry; - - vf->igu_sbs[qid] = (u16)igu_id; - igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE; - - SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); - - qed_wr(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id, - val); - - /* Configure igu sb in CAU which were marked valid */ - qed_init_cau_sb_entry(p_hwfn, &sb_entry, - p_hwfn->rel_pf_id, - vf->abs_vf_id, 1); - qed_dmae_host2grc(p_hwfn, p_ptt, - (u64)(uintptr_t)&sb_entry, - CAU_REG_SB_VAR_MEMORY + - igu_id * sizeof(u64), 2, 0); - qid++; - } - igu_id++; + for (qid = 0; qid < num_rx_queues; qid++) { + p_block = qed_get_igu_free_sb(p_hwfn, false); + vf->igu_sbs[qid] = p_block->igu_sb_id; + p_block->status &= ~QED_IGU_STATUS_FREE; + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); + + qed_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * p_block->igu_sb_id, val); + + /* Configure igu sb in CAU which were marked valid */ + qed_init_cau_sb_entry(p_hwfn, &sb_entry, + p_hwfn->rel_pf_id, vf->abs_vf_id, 1); + qed_dmae_host2grc(p_hwfn, p_ptt, + (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + p_block->igu_sb_id * sizeof(u64), 2, 0); } vf->num_sbs = (u8) num_rx_queues; @@ -901,10 +944,8 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); qed_wr(p_hwfn, p_ptt, addr, val); - p_info->igu_map.igu_blocks[igu_id].status |= - QED_IGU_STATUS_FREE; - - p_hwfn->hw_info.p_igu_info->free_blks++; + p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; } vf->num_sbs = 0; @@ -1028,20 +1069,15 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, vf->num_txqs = num_of_vf_avaiable_chains; for (i = 0; i < 
vf->num_rxqs; i++) { - struct qed_vf_q_info *p_queue = &vf->vf_queues[i]; + struct qed_vf_queue *p_queue = &vf->vf_queues[i]; p_queue->fw_rx_qid = p_params->req_rx_queue[i]; p_queue->fw_tx_qid = p_params->req_tx_queue[i]; - /* CIDs are per-VF, so no problem having them 0-based. */ - p_queue->fw_cid = i; - DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n", - vf->relative_vf_id, - i, vf->igu_sbs[i], - p_queue->fw_rx_qid, - p_queue->fw_tx_qid, p_queue->fw_cid); + "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n", + vf->relative_vf_id, i, vf->igu_sbs[i], + p_queue->fw_rx_qid, p_queue->fw_tx_qid); } /* Update the link configuration in bulletin */ @@ -1328,7 +1364,7 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { - u32 i; + u32 i, j; p_vf->vf_bulletin = 0; p_vf->vport_instance = 0; @@ -1341,16 +1377,15 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, p_vf->num_active_rxqs = 0; for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { - struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i]; + struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; - if (p_queue->p_rx_cid) { - qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid); - p_queue->p_rx_cid = NULL; - } + for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) { + if (!p_queue->cids[j].p_cid) + continue; - if (p_queue->p_tx_cid) { - qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid); - p_queue->p_tx_cid = NULL; + qed_eth_queue_cid_release(p_hwfn, + p_queue->cids[j].p_cid); + p_queue->cids[j].p_cid = NULL; } } @@ -1359,13 +1394,67 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); } +/* Returns either 0, or log(size) */ +static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE); + + if (val) + return val + 11; + return 0; +} + +static void +qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons; + u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) - + qed_db_addr_vf(0, DQ_DEMS_LEGACY); + u32 bar_size; + + p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons); + + /* If VF didn't bother asking for QIDs then don't bother limiting + * number of CIDs. The VF doesn't care about the number, and this + * has the likely result of causing an additional acquisition. + */ + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) + return; + + /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount + * that would make sure doorbells for all CIDs fall within the bar. + * If it doesn't, make sure regview window is sufficient.
+ */ + if (p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PHYSICAL_BAR) { + bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); + if (bar_size) + bar_size = 1 << bar_size; + + if (p_hwfn->cdev->num_hwfns > 1) + bar_size /= 2; + } else { + bar_size = PXP_VF_BAR0_DQ_LENGTH; + } + + if (bar_size / db_size < 256) + p_resp->num_cids = min_t(u8, p_resp->num_cids, + (u8)(bar_size / db_size)); +} + static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *p_vf, struct vf_pf_resc_request *p_req, struct pf_vf_resc *p_resp) { - int i; + u8 i; /* Queue related information */ p_resp->num_rxqs = p_vf->num_rxqs; @@ -1383,7 +1472,7 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, for (i = 0; i < p_resp->num_rxqs; i++) { qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, (u16 *)&p_resp->hw_qid[i]); - p_resp->cid[i] = p_vf->vf_queues[i].fw_cid; + p_resp->cid[i] = i; } /* Filter related information */ @@ -1392,6 +1481,8 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, p_req->num_vlan_filters); + qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp); + /* This isn't really needed/enforced, but some legacy VFs might depend * on the correct filling of this field. */ @@ -1403,10 +1494,11 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_resp->num_sbs < p_req->num_sbs || p_resp->num_mac_filters < p_req->num_mac_filters || p_resp->num_vlan_filters < p_req->num_vlan_filters || - p_resp->num_mc_filters < p_req->num_mc_filters) { + p_resp->num_mc_filters < p_req->num_mc_filters || + p_resp->num_cids < p_req->num_cids) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n", + "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n", p_vf->abs_vf_id, p_req->num_rxqs, p_resp->num_rxqs, @@ -1418,7 +1510,9 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_resp->num_mac_filters, p_req->num_vlan_filters, p_resp->num_vlan_filters, - p_req->num_mc_filters, p_resp->num_mc_filters); + p_req->num_mc_filters, + p_resp->num_mc_filters, + p_req->num_cids, p_resp->num_cids); /* Some legacy OSes are incapable of correctly handling this * failure. @@ -1534,6 +1628,15 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->num_hwfns > 1) pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; + /* Share our ability to use multiple queue-ids only with VFs + * that request it. + */ + if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) + pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; + + /* Share the sizes of the bars with VF */ + resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); + qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); @@ -1758,9 +1861,11 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, /* Update all the Rx queues */ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { - struct qed_queue_cid *p_cid; + struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; + struct qed_queue_cid *p_cid = NULL; - p_cid = p_vf->vf_queues[i].p_rx_cid; + /* There can be at most 1 Rx queue on qzone. 
Find it */ + p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); if (!p_cid) continue; @@ -1951,16 +2056,55 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } +static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, bool b_is_tx) +{ + struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; + struct vfpf_qid_tlv *p_qid_tlv; + + /* Search for the qid if the VF published it's going to provide it */ + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { + if (b_is_tx) + return QED_IOV_LEGACY_QID_TX; + else + return QED_IOV_LEGACY_QID_RX; + } + + p_qid_tlv = (struct vfpf_qid_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + CHANNEL_TLV_QID); + if (!p_qid_tlv) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%2x]: Failed to provide qid\n", + p_vf->relative_vf_id); + + return QED_IOV_QID_INVALID; + } + + if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%02x]: Provided qid out-of-bounds %02x\n", + p_vf->relative_vf_id, p_qid_tlv->qid); + return QED_IOV_QID_INVALID; + } + + return p_qid_tlv->qid; +} + static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_queue_start_common_params params; + struct qed_queue_cid_vf_params vf_params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_NO_RESOURCE; - struct qed_vf_q_info *p_queue; + u8 qid_usage_idx, vf_legacy = 0; struct vfpf_start_rxq_tlv *req; - bool b_legacy_vf = false; + struct qed_vf_queue *p_queue; + struct qed_queue_cid *p_cid; + struct qed_sb_info sb_dummy; int rc; req = &mbx->req_virt->start_rxq; @@ -1970,53 +2114,64 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; - /* Acquire a new queue-cid */ + qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + p_queue = &vf->vf_queues[req->rx_qid]; + if (p_queue->cids[qid_usage_idx].p_cid) + goto out; + + vf_legacy = qed_vf_calculate_legacy(vf); + /* Acquire a new queue-cid */ memset(&params, 0, sizeof(params)); params.queue_id = p_queue->fw_rx_qid; params.vport_id = vf->vport_id; params.stats_id = vf->abs_vf_id + 0x10; - params.sb = req->hw_sb; + /* Since IGU index is passed via sb_info, construct a dummy one */ + memset(&sb_dummy, 0, sizeof(sb_dummy)); + sb_dummy.igu_sb_id = req->hw_sb; + params.p_sb = &sb_dummy; params.sb_idx = req->sb_index; - p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn, - vf->opaque_fid, - p_queue->fw_cid, - req->rx_qid, &params); - if (!p_queue->p_rx_cid) + memset(&vf_params, 0, sizeof(vf_params)); + vf_params.vfid = vf->relative_vf_id; + vf_params.vf_qid = (u8)req->rx_qid; + vf_params.vf_legacy = vf_legacy; + vf_params.qid_usage_idx = qid_usage_idx; + p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, + &params, true, &vf_params); + if (!p_cid) goto out; /* Legacy VFs have their Producers in a different location, which they * calculate on their own and clean the producer prior to this.
*/ - if (vf->acquire.vfdev_info.eth_fp_hsi_minor == - ETH_HSI_VER_NO_PKT_LEN_TUNN) { - b_legacy_vf = true; - } else { + if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)) REG_WR(p_hwfn, GTT_BAR0_MAP_REG_MSDM_RAM + MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), 0); - } - p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf; - rc = qed_eth_rxq_start_ramrod(p_hwfn, - p_queue->p_rx_cid, + rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid, req->bd_max_bytes, req->rxq_addr, req->cqe_pbl_addr, req->cqe_pbl_size); if (rc) { status = PFVF_STATUS_FAILURE; - qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid); - p_queue->p_rx_cid = NULL; + qed_eth_queue_cid_release(p_hwfn, p_cid); } else { + p_queue->cids[qid_usage_idx].p_cid = p_cid; + p_queue->cids[qid_usage_idx].b_is_tx = false; status = PFVF_STATUS_SUCCESS; vf->num_active_rxqs++; } out: - qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf); + qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, + !!(vf_legacy & + QED_QCID_LEGACY_VF_RX_PROD)); } static void @@ -2209,7 +2364,7 @@ static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, if (b_update_required) { u16 geneve_port; - rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn, + rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn, QED_SPQ_MODE_EBLOCK, NULL); if (rc) status = PFVF_STATUS_FAILURE; @@ -2235,7 +2390,8 @@ send_resp: static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct qed_vf_info *p_vf, u8 status) + struct qed_vf_info *p_vf, + u32 cid, u8 status) { struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; struct pfvf_start_queue_resp_tlv *p_tlv; @@ -2263,12 +2419,8 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, sizeof(struct channel_list_end_tlv)); /* Update the TLV with the response */ - if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { - u16 qid = mbx->req_virt->start_txq.tx_qid; - - p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid, - DQ_DEMS_LEGACY); - } + if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) + p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); } @@ -2278,10 +2430,15 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf) { struct qed_queue_start_common_params params; + struct qed_queue_cid_vf_params vf_params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_NO_RESOURCE; struct vfpf_start_txq_tlv *req; - struct qed_vf_q_info *p_queue; + struct qed_vf_queue *p_queue; + struct qed_queue_cid *p_cid; + struct qed_sb_info sb_dummy; + u8 qid_usage_idx, vf_legacy; + u32 cid = 0; int rc; u16 pq; @@ -2289,89 +2446,126 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, req = &mbx->req_virt->start_txq; if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, - QED_IOV_VALIDATE_Q_DISABLE) || + QED_IOV_VALIDATE_Q_NA) || !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; - /* Acquire a new queue-cid */ + qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + p_queue = &vf->vf_queues[req->tx_qid]; + if (p_queue->cids[qid_usage_idx].p_cid) + goto out; + + vf_legacy = qed_vf_calculate_legacy(vf); + /* Acquire a new queue-cid */ params.queue_id = p_queue->fw_tx_qid; params.vport_id = vf->vport_id; params.stats_id = vf->abs_vf_id + 0x10; - params.sb = req->hw_sb; + + /* Since IGU index is passed via sb_info, construct a dummy one */ + memset(&sb_dummy, 0, sizeof(sb_dummy)); + sb_dummy.igu_sb_id = req->hw_sb; + 
params.p_sb = &sb_dummy; params.sb_idx = req->sb_index; - p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn, - vf->opaque_fid, - p_queue->fw_cid, - req->tx_qid, &params); - if (!p_queue->p_tx_cid) + memset(&vf_params, 0, sizeof(vf_params)); + vf_params.vfid = vf->relative_vf_id; + vf_params.vf_qid = (u8)req->tx_qid; + vf_params.vf_legacy = vf_legacy; + vf_params.qid_usage_idx = qid_usage_idx; + + p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, + &params, false, &vf_params); + if (!p_cid) goto out; pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id); - rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid, + rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, req->pbl_addr, req->pbl_size, pq); if (rc) { status = PFVF_STATUS_FAILURE; - qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid); - p_queue->p_tx_cid = NULL; + qed_eth_queue_cid_release(p_hwfn, p_cid); } else { status = PFVF_STATUS_SUCCESS; + p_queue->cids[qid_usage_idx].p_cid = p_cid; + p_queue->cids[qid_usage_idx].b_is_tx = true; + cid = p_cid->cid; } out: - qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status); + qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status); } static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, - u16 rxq_id, bool cqe_completion) + u16 rxq_id, + u8 qid_usage_idx, bool cqe_completion) { - struct qed_vf_q_info *p_queue; + struct qed_vf_queue *p_queue; int rc = 0; - if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, - QED_IOV_VALIDATE_Q_ENABLE)) { + if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%d] Tried Closing Rx 0x%04x which is inactive\n", - vf->relative_vf_id, rxq_id); + "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n", - vf->relative_vf_id, rxq_id); + vf->relative_vf_id, rxq_id, qid_usage_idx); return -EINVAL; } p_queue = &vf->vf_queues[rxq_id]; + /* We've validated the index and the existence of the active RXQ - + * now we need to make sure that it's using the correct qid.
+ */ + if (!p_queue->cids[qid_usage_idx].p_cid || + p_queue->cids[qid_usage_idx].b_is_tx) { + struct qed_queue_cid *p_cid; + + p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", + vf->relative_vf_id, + rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx); + return -EINVAL; + } + + /* Now that we know we have a valid Rx-queue - close it */ rc = qed_eth_rx_queue_stop(p_hwfn, - p_queue->p_rx_cid, + p_queue->cids[qid_usage_idx].p_cid, false, cqe_completion); if (rc) return rc; - p_queue->p_rx_cid = NULL; + p_queue->cids[qid_usage_idx].p_cid = NULL; vf->num_active_rxqs--; return 0; } static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, - struct qed_vf_info *vf, u16 txq_id) + struct qed_vf_info *vf, + u16 txq_id, u8 qid_usage_idx) { - struct qed_vf_q_info *p_queue; + struct qed_vf_queue *p_queue; int rc = 0; - if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, - QED_IOV_VALIDATE_Q_ENABLE)) + if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA)) return -EINVAL; p_queue = &vf->vf_queues[txq_id]; + if (!p_queue->cids[qid_usage_idx].p_cid || + !p_queue->cids[qid_usage_idx].b_is_tx) + return -EINVAL; - rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid); + rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid); if (rc) return rc; - p_queue->p_tx_cid = NULL; - + p_queue->cids[qid_usage_idx].p_cid = NULL; return 0; } @@ -2383,6 +2577,7 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_FAILURE; struct vfpf_stop_rxqs_tlv *req; + u8 qid_usage_idx; int rc; /* There has never been an official driver that used this interface @@ -2398,8 +2593,13 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, goto out; } + /* Find which qid-index is associated with the queue */ + qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, - req->cqe_completion); + qid_usage_idx, req->cqe_completion); if (!rc) status = PFVF_STATUS_SUCCESS; out: @@ -2415,6 +2615,7 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_FAILURE; struct vfpf_stop_txqs_tlv *req; + u8 qid_usage_idx; int rc; /* There has never been an official driver that used this interface @@ -2429,7 +2630,13 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, status = PFVF_STATUS_NOT_SUPPORTED; goto out; } - rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid); + + /* Find which qid-index is associated with the queue */ + qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + + rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx); if (!rc) status = PFVF_STATUS_SUCCESS; @@ -2449,7 +2656,7 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, u8 status = PFVF_STATUS_FAILURE; u8 complete_event_flg; u8 complete_cqe_flg; - u16 qid; + u8 qid_usage_idx; int rc; u8 i; @@ -2457,19 +2664,42 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); - /* Validate inputs */ - for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) + qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + 
goto out; + + /* There shouldn't exist a VF that uses queue-qids yet uses this + * API with multiple Rx queues. Validate this. + */ + if ((vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] supports QIDs but sends multiple queues\n", + vf->relative_vf_id); + goto out; + } + + /* Validate inputs - for the legacy case this is still true since + * qid_usage_idx for each Rx queue would be LEGACY_QID_RX. + */ + for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) { if (!qed_iov_validate_rxq(p_hwfn, vf, i, - QED_IOV_VALIDATE_Q_ENABLE)) { - DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", - vf->relative_vf_id, req->rx_qid, req->num_rxqs); + QED_IOV_VALIDATE_Q_NA) || + !vf->vf_queues[i].cids[qid_usage_idx].p_cid || + vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", + vf->relative_vf_id, req->rx_qid, + req->num_rxqs); goto out; } + } /* Prepare the handlers */ for (i = 0; i < req->num_rxqs; i++) { - qid = req->rx_qid + i; - handlers[i] = vf->vf_queues[qid].p_rx_cid; + u16 qid = req->rx_qid + i; + + handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid; } rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers, @@ -2683,6 +2913,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, (1 << p_rss_tlv->rss_table_size_log)); for (i = 0; i < table_size; i++) { + struct qed_queue_cid *p_cid; + q_idx = p_rss_tlv->rss_ind_table[i]; if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx, QED_IOV_VALIDATE_Q_ENABLE)) { @@ -2694,7 +2926,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, goto out; } - p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid; + p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]); + p_rss->rss_ind_table[i] = p_cid; } p_data->rss_params = p_rss; @@ -3610,8 +3843,10 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, } } -int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, - u8 opcode, __le16 echo, union event_ring_data *data) +static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, u8 fw_return_code) { switch (opcode) { case COMMON_EVENT_VF_PF_CHANNEL: diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 81a497ce6585..c2e44bce398c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -149,12 +149,21 @@ struct qed_iov_vf_mbx { struct vfpf_first_tlv first_tlv; }; -struct qed_vf_q_info { +#define QED_IOV_LEGACY_QID_RX (0) +#define QED_IOV_LEGACY_QID_TX (1) +#define QED_IOV_QID_INVALID (0xFE) + +struct qed_vf_queue_cid { + bool b_is_tx; + struct qed_queue_cid *p_cid; +}; + +/* Describes a qzone associated with the VF */ +struct qed_vf_queue { u16 fw_rx_qid; - struct qed_queue_cid *p_rx_cid; u16 fw_tx_qid; - struct qed_queue_cid *p_tx_cid; - u8 fw_cid; + + struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE]; }; enum vf_state { @@ -212,7 +221,8 @@ struct qed_vf_info { u8 num_mac_filters; u8 num_vlan_filters; - struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF]; + + struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF]; u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF]; u8 num_active_rxqs; struct qed_public_vf_info p_vf_info; @@ -316,9 +326,8 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn); * @brief qed_iov_setup - setup sriov related resources * * @param p_hwfn - * @param p_ptt */ -void qed_iov_setup(struct qed_hwfn *p_hwfn, 
struct qed_ptt *p_ptt); +void qed_iov_setup(struct qed_hwfn *p_hwfn); /** * @brief qed_iov_free - free sriov related resources @@ -335,17 +344,6 @@ void qed_iov_free(struct qed_hwfn *p_hwfn); void qed_iov_free_hw_info(struct qed_dev *cdev); /** - * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe. - * - * @param p_hwfn - * @param opcode - * @param echo - * @param data - */ -int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, - u8 opcode, __le16 echo, union event_ring_data *data); - -/** * @brief Mark structs of vfs that have been FLR-ed. * * @param p_hwfn @@ -397,7 +395,7 @@ static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn) return 0; } -static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +static inline void qed_iov_setup(struct qed_hwfn *p_hwfn) { } @@ -409,13 +407,6 @@ static inline void qed_iov_free_hw_info(struct qed_dev *cdev) { } -static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, - u8 opcode, - __le16 echo, union event_ring_data *data) -{ - return -EINVAL; -} - static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 11d71e5eea14..1926d1ed439f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -153,6 +153,77 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) return rc; } +static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_qid_tlv *p_qid_tlv; + + /* Only add QIDs for the queue if it was negotiated with PF */ + if (!(p_iov->acquire_resp.pfdev_info.capabilities & + PFVF_ACQUIRE_CAP_QUEUE_QIDS)) + return; + + p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_QID, sizeof(*p_qid_tlv)); + p_qid_tlv->qid = p_cid->qid_usage_idx; +} + +int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp; + struct vfpf_first_tlv *req; + u32 size; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + + if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS) + rc = -EAGAIN; + + qed_vf_pf_req_end(p_hwfn, rc); + if (!b_final) + return rc; + + p_hwfn->b_int_enabled = 0; + + if (p_iov->vf2pf_request) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), + p_iov->vf2pf_request, + p_iov->vf2pf_request_phys); + if (p_iov->pf2vf_reply) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); + + if (p_iov->bulletin.p_virt) { + size = sizeof(struct qed_bulletin_content); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + size, + p_iov->bulletin.p_virt, p_iov->bulletin.phys); + } + + kfree(p_hwfn->vf_iov_info); + p_hwfn->vf_iov_info = NULL; + + return rc; +} + +int qed_vf_pf_release(struct qed_hwfn *p_hwfn) +{ + return _qed_vf_pf_release(p_hwfn, true); +} + #define VF_ACQUIRE_THRESH 3 static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, struct vf_pf_resc_request *p_req, @@ -160,7 +231,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn 
*p_hwfn, { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n", + "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n", p_req->num_rxqs, p_resp->num_rxqs, p_req->num_rxqs, @@ -171,7 +242,8 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, p_resp->num_mac_filters, p_req->num_vlan_filters, p_resp->num_vlan_filters, - p_req->num_mc_filters, p_resp->num_mc_filters); + p_req->num_mc_filters, + p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids); /* humble our request */ p_req->num_txqs = p_resp->num_txqs; @@ -180,6 +252,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, p_req->num_mac_filters = p_resp->num_mac_filters; p_req->num_vlan_filters = p_resp->num_vlan_filters; p_req->num_mc_filters = p_resp->num_mc_filters; + p_req->num_cids = p_resp->num_cids; } static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) @@ -204,6 +277,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF; p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS; req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; req->vfdev_info.fw_major = FW_MAJOR_VERSION; @@ -216,6 +290,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) /* Fill capability field with any non-deprecated config we support */ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; + /* If we've mapped the doorbell bar, try using queue qids */ + if (p_iov->b_doorbell_bar) { + req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR | + VFPF_ACQUIRE_CAP_QUEUE_QIDS; + p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS; + } + /* pf 2 vf bulletin board address */ req->bulletin_addr = p_iov->bulletin.phys; req->bulletin_size = p_iov->bulletin.size; @@ -307,6 +388,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI) p_iov->b_pre_fp_hsi = true; + /* In case PF doesn't support multi-queue Tx, update the number of + * CIDs to reflect the number of queues [older PFs didn't fill that + * field]. + */ + if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS)) + resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs; + /* Update bulletin board size with response from PF */ p_iov->bulletin.size = resp->bulletin_size; @@ -338,10 +426,27 @@ exit: return rc; } +u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) +{ + u32 bar_size; + + /* Regview size is fixed */ + if (bar_id == BAR_ID_0) + return 1 << 17; + + /* Doorbell is received from PF */ + bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size; + if (bar_size) + return 1 << bar_size; + return 0; +} + int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) { + struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev); struct qed_vf_iov *p_iov; u32 reg; + int rc; /* Set number of hwfns - might be overriden once leading hwfn learns * actual configuration from PF. @@ -349,10 +454,6 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) if (IS_LEAD_HWFN(p_hwfn)) p_hwfn->cdev->num_hwfns = 1; - /* Set the doorbell bar.
Assumption: regview is set */ - p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview + - PXP_VF_BAR0_START_DQ; - reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS; p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg); @@ -364,6 +465,30 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) if (!p_iov) return -ENOMEM; + /* Doorbells are tricky; Upper-layer has already set the hwfn doorbell + * value, but there are several incompatibility scenarios where that + * would be incorrect and we'd need to override it. + */ + if (!p_hwfn->doorbells) { + p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview + + PXP_VF_BAR0_START_DQ; + } else if (p_hwfn == p_lead) { + /* For leading hw-function, value is always correct, but need + * to handle scenario where legacy PF would not support 100g + * mapped bars later. + */ + p_iov->b_doorbell_bar = true; + } else { + /* here, value would be correct ONLY if the leading hwfn + * received indication that mapped-bars are supported. + */ + if (p_lead->vf_iov_info->b_doorbell_bar) + p_iov->b_doorbell_bar = true; + else + p_hwfn->doorbells = (u8 __iomem *) + p_hwfn->regview + PXP_VF_BAR0_START_DQ; + } + /* Allocate vf2pf msg */ p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union vfpf_tlvs), @@ -403,7 +528,33 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) p_hwfn->hw_info.personality = QED_PCI_ETH; - return qed_vf_pf_acquire(p_hwfn); + rc = qed_vf_pf_acquire(p_hwfn); + + /* If VF is 100g using a mapped bar and PF is too old to support that, + * acquisition would succeed - but the VF would have no way of knowing + * the size of the doorbell bar configured in HW and thus will not + * know how to split it for 2nd hw-function. + * In this case we re-try without the indication of the mapped + * doorbell. + */ + if (!rc && p_iov->b_doorbell_bar && + !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) && + (p_hwfn->cdev->num_hwfns > 1)) { + rc = _qed_vf_pf_release(p_hwfn, false); + if (rc) + return rc; + + p_iov->b_doorbell_bar = false; + p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview + + PXP_VF_BAR0_START_DQ; + rc = qed_vf_pf_acquire(p_hwfn); + } + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n", + p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells); + + return rc; free_vf2pf_request: dma_free_coherent(&p_hwfn->cdev->pdev->dev, @@ -588,8 +739,8 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, req->cqe_pbl_addr = cqe_pbl_addr; req->cqe_pbl_size = cqe_pbl_size; req->rxq_addr = bd_chain_phys_addr; - req->hw_sb = p_cid->rel.sb; - req->sb_index = p_cid->rel.sb_idx; + req->hw_sb = p_cid->sb_igu_id; + req->sb_index = p_cid->sb_idx; req->bd_max_bytes = bd_max_bytes; req->stat_id = -1; @@ -609,6 +760,9 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)(&init_prod_val)); } + + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -657,6 +811,8 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, req->num_rxqs = 1; req->cqe_completion = cqe_completion; + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -697,8 +853,10 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, /* Tx */ req->pbl_addr = pbl_addr; req->pbl_size = pbl_size; - req->hw_sb = p_cid->rel.sb; - req->sb_index = p_cid->rel.sb_idx; + req->hw_sb = p_cid->sb_igu_id; + req->sb_index = p_cid->sb_idx; +
qed_vf_pf_add_qid(p_hwfn, p_cid); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, @@ -728,8 +886,8 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, } DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", - qid, *pp_doorbell, resp->offset); + "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n", + qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset); exit: qed_vf_pf_req_end(p_hwfn, rc); @@ -749,6 +907,8 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) req->tx_qid = p_cid->rel.queue_id; req->num_txqs = 1; + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -792,9 +952,12 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, req->only_untagged = only_untagged; /* status blocks */ - for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) - if (p_hwfn->sbs_info[i]) - req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys; + for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) { + struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i]; + + if (p_sb) + req->sb_addr[i] = p_sb->sb_phys; + } /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, @@ -1095,54 +1258,6 @@ exit: return rc; } -int qed_vf_pf_release(struct qed_hwfn *p_hwfn) -{ - struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; - struct pfvf_def_resp_tlv *resp; - struct vfpf_first_tlv *req; - u32 size; - int rc; - - /* clear mailbox and prep first tlv */ - req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req)); - - /* add list termination tlv */ - qed_add_tlv(p_hwfn, &p_iov->offset, - CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); - - resp = &p_iov->pf2vf_reply->default_resp; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); - - if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS) - rc = -EAGAIN; - - qed_vf_pf_req_end(p_hwfn, rc); - - p_hwfn->b_int_enabled = 0; - - if (p_iov->vf2pf_request) - dma_free_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(union vfpf_tlvs), - p_iov->vf2pf_request, - p_iov->vf2pf_request_phys); - if (p_iov->pf2vf_reply) - dma_free_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(union pfvf_tlvs), - p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); - - if (p_iov->bulletin.p_virt) { - size = sizeof(struct qed_bulletin_content); - dma_free_coherent(&p_hwfn->cdev->pdev->dev, - size, - p_iov->bulletin.p_virt, p_iov->bulletin.phys); - } - - kfree(p_hwfn->vf_iov_info); - p_hwfn->vf_iov_info = NULL; - - return rc; -} - void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, struct qed_filter_mcast *p_filter_cmd) { @@ -1240,6 +1355,24 @@ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; } +void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, + u16 sb_id, struct qed_sb_info *p_sb) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + + if (!p_iov) { + DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n"); + return; + } + + if (sb_id >= PFVF_MAX_SBS_PER_VF) { + DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id); + return; + } + + p_iov->sbs_info[sb_id] = p_sb; +} + int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; @@ -1342,6 +1475,16 @@ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; } +void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs) +{ + *num_txqs = 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 34ac70b0e5fe..34d9b882a780 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -46,7 +46,8 @@ struct vf_pf_resc_request {
 	u8 num_mac_filters;
 	u8 num_vlan_filters;
 	u8 num_mc_filters;
-	u16 padding;
+	u8 num_cids;
+	u8 padding;
 };
 
 struct hw_sb_info {
@@ -113,6 +114,17 @@ struct vfpf_acquire_tlv {
 struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI	(1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
+	/* A requirement for supporting multiple Tx queues on a single
+	 * queue-zone: the VF would pass qids as additional information
+	 * whenever passing queue references.
+	 */
+#define VFPF_ACQUIRE_CAP_QUEUE_QIDS	BIT(2)
+
+	/* The VF is using the physical bar. While this is mostly internal
+	 * to the VF, it might affect the number of CIDs supported, assuming
+	 * QUEUE_QIDS is set.
+	 */
+#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR	BIT(3)
 	u64 capabilities;
 	u8 fw_major;
 	u8 fw_minor;
@@ -185,6 +197,9 @@ struct pfvf_acquire_resp_tlv {
 	 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)
 
+	/* PF expects queues to be received with additional qids */
+#define PFVF_ACQUIRE_CAP_QUEUE_QIDS		BIT(3)
+
 	u16 db_size;
 	u8 indices_per_sb;
 	u8 os_type;
@@ -193,7 +208,8 @@ struct pfvf_acquire_resp_tlv {
 	u16 chip_rev;
 	u8 dev_type;
 
-	u8 padding;
+	/* Doorbell bar size configured in HW: log(size) or 0 */
+	u8 bar_size;
 
 	struct pfvf_stats_info stats_info;
 
@@ -221,7 +237,8 @@ struct pfvf_acquire_resp_tlv {
 		u8 num_mac_filters;
 		u8 num_vlan_filters;
 		u8 num_mc_filters;
-		u8 padding[2];
+		u8 num_cids;
+		u8 padding;
 	} resc;
 
 	u32 bulletin_size;
@@ -234,6 +251,16 @@ struct pfvf_start_queue_resp_tlv {
 	u8 padding[4];
 };
 
+/* Extended queue information - an additional index for reference inside
+ * the qzone. If communicated between VF/PF, each TLV relating to queues
+ * should be extended by one such [or have a future base TLV that already
+ * contains the info].
+ */
+struct vfpf_qid_tlv {
+	struct channel_tlv tl;
+	u8 qid;
+	u8 padding[3];
+};
+
 /* Setup Queue */
 struct vfpf_start_rxq_tlv {
 	struct vfpf_first_tlv first_tlv;
@@ -597,6 +624,8 @@ enum {
 	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
 	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
 	CHANNEL_TLV_UPDATE_TUNN_PARAM,
+	CHANNEL_TLV_RESERVED,
+	CHANNEL_TLV_QID,
 	CHANNEL_TLV_MAX,
 
 	/* Required for iterating over vport-update tlvs.
@@ -605,6 +634,12 @@ enum {
 	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
 };
 
+/* Default number of CIDs [total of both Rx and Tx] to be requested,
+ * and the maximum possible number.
+ */
+#define QED_ETH_VF_DEFAULT_NUM_CIDS	(32)
+#define QED_ETH_VF_MAX_NUM_CIDS		(250)
+
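[Editorial note] The diff defines these CID bounds but not the policy that consumes them. One plausible policy, as a sketch only - the helper name and the one-CID-per-queue assumption are editorial, and max_t/min_t come from linux/kernel.h:

/* Editorial sketch - size the acquire-time CID request against the
 * default/maximum bounds defined above.
 */
static u8 example_num_cids_to_request(u8 num_rxqs, u8 num_txqs)
{
	u16 num_cids = (u16)num_rxqs + (u16)num_txqs;

	/* Ask for at least the default so spare CIDs remain available */
	num_cids = max_t(u16, num_cids, QED_ETH_VF_DEFAULT_NUM_CIDS);

	/* The channel carries this as a u8, and the PF caps it anyway */
	return (u8)min_t(u16, num_cids, QED_ETH_VF_MAX_NUM_CIDS);
}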
 /* This data is held in the qed_hwfn structure for VFs only. */
 struct qed_vf_iov {
 	union vfpf_tlvs *vf2pf_request;
@@ -627,6 +662,19 @@ struct qed_vf_iov {
 	 * this has to be propagated as it affects the fastpath.
 	 */
 	bool b_pre_fp_hsi;
+
+	/* Current-day VFs pass the SBs' physical address on vport start,
+	 * and as they lack an IGU mapping they need to store the addresses
+	 * of previously registered SBs.
+	 * Even if we were to change the configuration flow, due to backward
+	 * compatibility [with older PFs] we'd still need to store these.
+	 */
+	struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
+
+	/* Determines whether the VF utilizes doorbells via the limited
+	 * register bar or via the doorbell bar.
+	 */
+	bool b_doorbell_bar;
 };
 
 #ifdef CONFIG_QED_SRIOV
@@ -676,6 +724,22 @@ void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
 void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
 
 /**
+ * @brief Get number of Tx queues allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_txqs - allocated Tx queues
+ */
+void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);
+
+/**
+ * @brief Get number of available connections [both Rx and Tx] for VF
+ *
+ * @param p_hwfn
+ * @param num_cids - allocated number of connections
+ */
+void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
+
+/**
  * @brief Get port mac address for VF
  *
  * @param p_hwfn
@@ -837,6 +901,16 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
 u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
 
 /**
+ * @brief Stores [or removes] a configured sb_info.
+ *
+ * @param p_hwfn
+ * @param sb_id - zero-based SB index [for fastpath]
+ * @param p_sb - may be NULL [during removal].
+ */
+void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
+			u16 sb_id, struct qed_sb_info *p_sb);
+
+/**
  * @brief qed_vf_pf_vport_start - perform vport start for VF.
  *
  * @param p_hwfn
@@ -917,6 +991,8 @@ void qed_iov_vf_task(struct work_struct *work);
 void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
 int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
 				  struct qed_tunnel_info *p_tunn);
+
+u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
 #else
 static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
 					  struct qed_mcp_link_params *params)
@@ -938,6 +1014,14 @@ static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
 {
 }
 
+static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
+{
+}
+
+static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
+{
+}
+
 static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
 {
 }
@@ -1021,6 +1105,11 @@ static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
 	return 0;
 }
 
+static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id,
+				      struct qed_sb_info *p_sb)
+{
+}
+
 static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
 					u8 vport_id,
 					u16 mtu,
@@ -1089,6 +1178,13 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
 {
 	return -EINVAL;
 }
+
+static inline u32
+qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
+		   enum BAR_ID bar_id)
+{
+	return 0;
+}
 #endif
 
 #endif
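[Editorial note] qed_vf_hw_bar_size() is declared above but implemented elsewhere. Given that pfdev_info.bar_size is documented as "log(size) or 0", a VF-side implementation might look like the following sketch - the function name is hypothetical and the field path into the acquire response is an assumption:

/* Editorial sketch - assumes bar_size carries log2(bytes) of the
 * doorbell bar, with 0 denoting a legacy PF that never reported it.
 */
static u32 example_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
				  enum BAR_ID bar_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	/* Only the doorbell bar size is communicated over the channel */
	if (bar_id != BAR_ID_1 || !p_iov)
		return 0;

	if (!p_iov->acquire_resp.pfdev_info.bar_size)
		return 0;	/* legacy PF - size unknown */

	return 1U << p_iov->acquire_resp.pfdev_info.bar_size;
}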