Diffstat (limited to 'drivers/net/ethernet/qlogic')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h             |    8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c         |   47
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c        |    1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c         |  169
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h         | 1438
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c          |  137
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c    |   95
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c         |  132
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c          |  255
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h          |    7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c        |  162
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c         |  157
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h         |   50
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h    |    6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c |   15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c         |   95
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c       |  179
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c          |  231
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h          |    7
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h           |   39
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c   |  290
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c      |  475
22 files changed, 2362 insertions, 1633 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 45ab74676573..2d67469eb8f6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -26,7 +26,7 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.1.20"
+#define DRV_MODULE_VERSION "8.10.9.20"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -42,6 +42,8 @@ enum qed_coalescing_mode {
struct qed_eth_cb_ops;
struct qed_dev_info;
+union qed_mcp_protocol_stats;
+enum qed_mcp_protocol_type;
/* helpers */
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
@@ -606,7 +608,9 @@ void qed_link_update(struct qed_hwfn *hwfn);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
-
+void qed_get_protocol_stats(struct qed_dev *cdev,
+ enum qed_mcp_protocol_type type,
+ union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
#endif /* _QED_H */
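
The declaration added above lets the MCP layer pull per-protocol statistics through the common layer. A minimal caller sketch, assuming only the signature from this hunk; the QED_MCP_LAN_STATS enumerator is an assumption not shown in this diff:

/* Illustrative caller -- only qed_get_protocol_stats() itself is from
 * the patch; the stats type used here is assumed.
 */
union qed_mcp_protocol_stats stats;

memset(&stats, 0, sizeof(stats));
qed_get_protocol_stats(cdev, QED_MCP_LAN_STATS, &stats);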
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 1c35f376143e..547692759d06 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -377,9 +377,8 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
}
}
-u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *vf_cid)
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid)
{
if (vf_cid)
*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
@@ -405,10 +404,10 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
return cnt;
}
-static void
-qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
- enum protocol_type proto,
- u8 seg, u8 seg_type, u32 count, bool has_fl)
+static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg,
+ u8 seg_type, u32 count, bool has_fl)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
@@ -420,8 +419,7 @@ qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
struct qed_ilt_cli_blk *p_blk,
- u32 start_line, u32 total_size,
- u32 elem_size)
+ u32 start_line, u32 total_size, u32 elem_size)
{
u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
@@ -448,8 +446,7 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
p_cli->first.val = *p_line;
p_cli->active = true;
- *p_line += DIV_ROUND_UP(p_blk->total_size,
- p_blk->real_size_in_page);
+ *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
p_cli->last.val = *p_line - 1;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -926,12 +923,9 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
void *p_virt;
u32 size;
- size = min_t(u32, sz_left,
- p_blk->real_size_in_page);
+ size = min_t(u32, sz_left, p_blk->real_size_in_page);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- size,
- &p_phys,
- GFP_KERNEL);
+ size, &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
memset(p_virt, 0, size);
@@ -976,7 +970,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
p_blk = &clients[i].pf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
- if (rc != 0)
+ if (rc)
goto ilt_shadow_fail;
}
for (k = 0; k < p_mngr->vf_count; k++) {
@@ -985,7 +979,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
p_blk = &clients[i].vf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
- if (rc != 0)
+ if (rc)
goto ilt_shadow_fail;
}
}
@@ -1672,7 +1666,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
- active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
tm_offset += tm_iids.pf_tids[i];
}
@@ -1702,8 +1696,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *p_cid)
+ enum protocol_type type, u32 *p_cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 rel_cid;
@@ -1717,8 +1710,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
p_mngr->acquired[type].max_count);
if (rel_cid >= p_mngr->acquired[type].max_count) {
- DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
- type);
+ DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
return -EINVAL;
}
@@ -1730,8 +1722,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
}
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
- u32 cid,
- enum protocol_type *p_type)
+ u32 cid, enum protocol_type *p_type)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map;
@@ -1763,8 +1754,7 @@ static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
return true;
}
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
- u32 cid)
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
enum protocol_type type;
@@ -1781,8 +1771,7 @@ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
}
-int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
- struct qed_cxt_info *p_info)
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
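
The acquire/release pair above hands out CIDs from a per-protocol bitmap: find_first_zero_bit() plus __set_bit() on acquire, __clear_bit() on release. A self-contained sketch of the same allocator pattern, with all names hypothetical:

/* Standalone sketch of the bitmap ID allocator pattern used by
 * qed_cxt_acquire_cid()/qed_cxt_release_cid(). Names are illustrative.
 */
#include <limits.h>

#define MAX_IDS		128
#define WORD_BITS	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long id_map[MAX_IDS / WORD_BITS + 1];

static int id_acquire(void)
{
	for (int i = 0; i < MAX_IDS; i++) {
		unsigned long mask = 1UL << (i % WORD_BITS);

		if (!(id_map[i / WORD_BITS] & mask)) {
			id_map[i / WORD_BITS] |= mask;	/* claim the slot */
			return i;
		}
	}
	return -1;	/* mirrors the -EINVAL path above */
}

static void id_release(int id)
{
	id_map[id / WORD_BITS] &= ~(1UL << (id % WORD_BITS));
}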
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 226cb08cc055..b900dfbb57ff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1968,6 +1968,7 @@ static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
if (!dcbx_info->operational.ieee) {
DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0e4f4a9306b5..5ae27f2d2fa5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -35,8 +35,7 @@
#include "qed_sriov.h"
#include "qed_vf.h"
-static spinlock_t qm_lock;
-static bool qm_lock_init = false;
+static DEFINE_SPINLOCK(qm_lock);
/* API common to all protocols */
enum BAR_ID {
@@ -44,8 +43,7 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
- enum BAR_ID bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -70,8 +68,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
}
}
-void qed_init_dp(struct qed_dev *cdev,
- u32 dp_module, u8 dp_level)
+void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
{
u32 i;
@@ -543,8 +540,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
if (!cdev->reset_stats) {
DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
- rc = -ENOMEM;
- goto alloc_err;
+ goto alloc_no_mem;
}
return 0;
@@ -605,9 +601,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
/* Make sure notification is not set before initiating final cleanup */
if (REG_RD(p_hwfn, addr)) {
- DP_NOTICE(
- p_hwfn,
- "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+ DP_NOTICE(p_hwfn,
+ "Unexpected; Found final cleanup notification before initiating final cleanup\n");
REG_WR(p_hwfn, addr, 0);
}
@@ -701,17 +696,14 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
continue;
qed_init_cau_sb_entry(p_hwfn, &sb_entry,
- p_block->function_id,
- 0, 0);
- STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
- sb_entry);
+ p_block->function_id, 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
}
}
}
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int hw_mode)
+ struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
@@ -759,7 +751,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_port_unpretend(p_hwfn, p_ptt);
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
- if (rc != 0)
+ if (rc)
return rc;
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
@@ -780,6 +772,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
+ qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
+ qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
}
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
@@ -788,37 +783,10 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
}
static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int hw_mode)
+ struct qed_ptt *p_ptt, int hw_mode)
{
- int rc = 0;
-
- rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
- if (rc != 0)
- return rc;
-
- if (hw_mode & (1 << MODE_MF_SI)) {
- u8 pf_id = 0;
-
- if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
- "PF[%08x] is first eth on engine\n", pf_id);
-
- /* We should have configured BIT for ppfid, i.e., the
- * relative function number in the port. But there's a
- * bug in LLH in BB where the ppfid is actually engine
- * based, so we need to take this into account.
- */
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
- }
-
- /* Take the protocol-based hit vector if there is a hit,
- * otherwise take the other vector.
- */
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
- }
- return rc;
+ return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
+ p_hwfn->port_id, hw_mode);
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
@@ -848,7 +816,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_init_rt(p_hwfn);
/* Set VLAN in NIG if needed */
- if (hw_mode & (1 << MODE_MF_SD)) {
+ if (hw_mode & BIT(MODE_MF_SD)) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
@@ -856,7 +824,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
}
/* Enable classification by MAC if needed */
- if (hw_mode & (1 << MODE_MF_SI)) {
+ if (hw_mode & BIT(MODE_MF_SI)) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"Configuring TAGMAC_CLS_TYPE\n");
STORE_RT_REG(p_hwfn,
@@ -871,7 +839,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Cleanup chip from previous driver if such remains exist */
rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
- if (rc != 0)
+ if (rc)
return rc;
/* PF Init sequence */
@@ -887,21 +855,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
- if (hw_mode & (1 << MODE_MF_SI)) {
- u8 pf_id = 0;
- u32 val = 0;
-
- if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
- if (p_hwfn->rel_pf_id == pf_id) {
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
- "PF[%d] is first ETH on engine\n",
- pf_id);
- val = 1;
- }
- qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
- }
- }
-
if (b_hw_start) {
/* enable interrupts */
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -950,8 +903,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
/* Read shadow of current MFW mailbox */
qed_mcp_read_mb(p_hwfn, p_main_ptt);
memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
- p_hwfn->mcp_info->mfw_mb_cur,
- p_hwfn->mcp_info->mfw_mb_length);
+ p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}
int qed_hw_init(struct qed_dev *cdev,
@@ -971,7 +923,7 @@ int qed_hw_init(struct qed_dev *cdev,
if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data);
- if (rc != 0)
+ if (rc)
return rc;
}
@@ -988,8 +940,7 @@ int qed_hw_init(struct qed_dev *cdev,
qed_calc_hw_mode(p_hwfn);
- rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
- &load_code);
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
if (rc) {
DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
return rc;
@@ -1004,11 +955,6 @@ int qed_hw_init(struct qed_dev *cdev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
- if (!qm_lock_init) {
- spin_lock_init(&qm_lock);
- qm_lock_init = true;
- }
-
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -1071,9 +1017,8 @@ int qed_hw_init(struct qed_dev *cdev,
}
#define QED_HW_STOP_RETRY_LIMIT (10)
-static inline void qed_hw_timers_stop(struct qed_dev *cdev,
- struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static void qed_hw_timers_stop(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int i;
@@ -1084,8 +1029,7 @@ static inline void qed_hw_timers_stop(struct qed_dev *cdev,
for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
if ((!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK)))
+ (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
break;
/* Dependent on number of connection/tasks, possibly
@@ -1190,8 +1134,7 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
}
DP_VERBOSE(p_hwfn,
- NETIF_MSG_IFDOWN,
- "Shutting down the fastpath\n");
+ NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1219,14 +1162,13 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
-static int qed_reg_assert(struct qed_hwfn *hwfn,
- struct qed_ptt *ptt, u32 reg,
- bool expected)
+static int qed_reg_assert(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 reg, bool expected)
{
- u32 assert_val = qed_rd(hwfn, ptt, reg);
+ u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
if (assert_val != expected) {
- DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+ DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
reg, expected);
return -EINVAL;
}
@@ -1306,8 +1248,7 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
/* Clean Previous errors if such exist */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
- 1 << p_hwfn->abs_pf_id);
+ PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
/* enable internal target-read */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1317,7 +1258,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
static void get_function_id(struct qed_hwfn *p_hwfn)
{
/* ME Register */
- p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+ p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
@@ -1326,6 +1268,10 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
PXP_CONCRETE_FID_PFID);
p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
PXP_CONCRETE_FID_PORT);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+ p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
}
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
@@ -1417,8 +1363,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
return 0;
}
-static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
@@ -1472,8 +1417,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
default:
- DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
- core_cfg);
+ DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
break;
}
@@ -1484,11 +1428,11 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
offsetof(struct nvm_cfg1_port, speed_cap_mask));
- link->speed.advertised_speeds =
- link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link->speed.advertised_speeds = link_temp;
- p_hwfn->mcp_info->link_capabilities.speed_capabilities =
- link->speed.advertised_speeds;
+ link_temp = link->speed.advertised_speeds;
+ p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
@@ -1517,8 +1461,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
link->speed.forced_speed = 100000;
break;
default:
- DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
- link_temp);
+ DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
}
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
@@ -1628,10 +1571,10 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+ "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
p_hwfn->rel_pf_id,
p_hwfn->abs_pf_id,
- p_hwfn->num_funcs_on_engine);
+ p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
static int
@@ -1703,10 +1646,9 @@ static int qed_get_dev_info(struct qed_dev *cdev)
u32 tmp;
/* Read Vendor Id / Device Id */
- pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
- &cdev->vendor_id);
- pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
- &cdev->device_id);
+ pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
+ pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
+
cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM);
cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
@@ -1782,7 +1724,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
rc = qed_get_dev_info(p_hwfn->cdev);
- if (rc != 0)
+ if (rc)
goto err1;
}
@@ -2183,8 +2125,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
return 0;
}
-int qed_fw_vport(struct qed_hwfn *p_hwfn,
- u8 src_id, u8 *dst_id)
+int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
u8 min, max;
@@ -2203,8 +2144,7 @@ int qed_fw_vport(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
- u8 src_id, u8 *dst_id)
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
u8 min, max;
@@ -2386,8 +2326,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
* 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
*/
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
- u16 vport_id, u32 req_rate,
- u32 min_pf_rate)
+ u16 vport_id, u32 req_rate, u32 min_pf_rate)
{
u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
int non_requested_count = 0, req_count = 0, i, num_vports;
@@ -2471,7 +2410,7 @@ static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
- if (rc == 0)
+ if (!rc)
qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
p_link->min_pf_rate);
else
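
The first hunk of this file replaces a runtime spin_lock_init() gated by the unsynchronized qm_lock_init flag with a compile-time DEFINE_SPINLOCK(). A sketch of the resulting pattern; the lock and function names are illustrative:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* initialized at compile time */

static void example_qm_update(void)
{
	spin_lock(&example_lock);
	/* ... mutate shared QM state; no one-time init flag needed ... */
	spin_unlock(&example_lock);
}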
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 6f9d3b831a2a..a67b3554aabd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -536,6 +536,244 @@ struct core_conn_context {
struct regpair ustorm_st_padding[2];
};
+enum core_error_handle {
+ LL2_DROP_PACKET,
+ LL2_DO_NOTHING,
+ LL2_ASSERT,
+ MAX_CORE_ERROR_HANDLE
+};
+
+enum core_event_opcode {
+ CORE_EVENT_TX_QUEUE_START,
+ CORE_EVENT_TX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_START,
+ CORE_EVENT_RX_QUEUE_STOP,
+ MAX_CORE_EVENT_OPCODE
+};
+
+enum core_l4_pseudo_checksum_mode {
+ CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+ CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct core_ll2_port_stats {
+ struct regpair gsi_invalid_hdr;
+ struct regpair gsi_invalid_pkt_length;
+ struct regpair gsi_unsupported_pkt_typ;
+ struct regpair gsi_crcchksm_error;
+};
+
+struct core_ll2_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+};
+
+struct core_ll2_rx_prod {
+ __le16 bd_prod;
+ __le16 cqe_prod;
+ __le32 reserved;
+};
+
+struct core_ll2_tstorm_per_queue_stat {
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+};
+
+struct core_ll2_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+enum core_ramrod_cmd_id {
+ CORE_RAMROD_UNUSED,
+ CORE_RAMROD_RX_QUEUE_START,
+ CORE_RAMROD_TX_QUEUE_START,
+ CORE_RAMROD_RX_QUEUE_STOP,
+ CORE_RAMROD_TX_QUEUE_STOP,
+ MAX_CORE_RAMROD_CMD_ID
+};
+
+enum core_roce_flavor_type {
+ CORE_ROCE,
+ CORE_RROCE,
+ MAX_CORE_ROCE_FLAVOR_TYPE
+};
+
+struct core_rx_action_on_error {
+ u8 error_type;
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+};
+
+struct core_rx_bd {
+ struct regpair addr;
+ __le16 reserved[4];
+};
+
+struct core_rx_bd_with_buff_len {
+ struct regpair addr;
+ __le16 buff_length;
+ __le16 reserved[3];
+};
+
+union core_rx_bd_union {
+ struct core_rx_bd rx_bd;
+ struct core_rx_bd_with_buff_len rx_bd_with_len;
+};
+
+struct core_rx_cqe_opaque_data {
+ __le32 data[2];
+};
+
+enum core_rx_cqe_type {
+ CORE_RX_CQE_ILLIGAL_TYPE,
+ CORE_RX_CQE_TYPE_REGULAR,
+ CORE_RX_CQE_TYPE_GSI_OFFLOAD,
+ CORE_RX_CQE_TYPE_SLOW_PATH,
+ MAX_CORE_RX_CQE_TYPE
+};
+
+struct core_rx_fast_path_cqe {
+ u8 type;
+ u8 placement_offset;
+ struct parsing_and_err_flags parse_flags;
+ __le16 packet_length;
+ __le16 vlan;
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved[4];
+};
+
+struct core_rx_gsi_offload_cqe {
+ u8 type;
+ u8 data_length_error;
+ struct parsing_and_err_flags parse_flags;
+ __le16 data_length;
+ __le16 vlan;
+ __le32 src_mac_addrhi;
+ __le16 src_mac_addrlo;
+ u8 reserved1[2];
+ __le32 gid_dst[4];
+};
+
+struct core_rx_slow_path_cqe {
+ u8 type;
+ u8 ramrod_cmd_id;
+ __le16 echo;
+ __le32 reserved1[7];
+};
+
+union core_rx_cqe_union {
+ struct core_rx_fast_path_cqe rx_cqe_fp;
+ struct core_rx_gsi_offload_cqe rx_cqe_gsi;
+ struct core_rx_slow_path_cqe rx_cqe_sp;
+};
+
+struct core_rx_start_ramrod_data {
+ struct regpair bd_base;
+ struct regpair cqe_pbl_addr;
+ __le16 mtu;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 drop_ttl0_flg;
+ __le16 num_of_pbl_pages;
+ u8 inner_vlan_removal_en;
+ u8 queue_id;
+ u8 main_func_queue;
+ u8 mf_si_bcast_accept_all;
+ u8 mf_si_mcast_accept_all;
+ struct core_rx_action_on_error action_on_error;
+ u8 gsi_offload_flag;
+ u8 reserved[7];
+};
+
+struct core_rx_stop_ramrod_data {
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 queue_id;
+ u8 reserved1;
+ __le16 reserved2[2];
+};
+
+struct core_tx_bd_flags {
+ u8 as_bitfield;
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
+#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
+#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+};
+
+struct core_tx_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ __le16 nw_vlan_or_lb_echo;
+ u8 bitfield0;
+#define CORE_TX_BD_NBDS_MASK 0xF
+#define CORE_TX_BD_NBDS_SHIFT 0
+#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
+#define CORE_TX_BD_RESERVED0_MASK 0x7
+#define CORE_TX_BD_RESERVED0_SHIFT 5
+ struct core_tx_bd_flags bd_flags;
+ __le16 bitfield1;
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+#define CORE_TX_BD_TX_DST_MASK 0x1
+#define CORE_TX_BD_TX_DST_SHIFT 14
+#define CORE_TX_BD_RESERVED1_MASK 0x1
+#define CORE_TX_BD_RESERVED1_SHIFT 15
+};
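
/* Illustrative sketch: the MASK/SHIFT pairs above are consumed through the
 * driver's generic field helpers (GET_FIELD appears in the qed_dev.c hunk
 * earlier; SET_FIELD is its counterpart). Packing a TX BD this way, with
 * made-up field values:
 */
struct core_tx_bd bd = { 0 };
u16 bitfield1 = 0;

SET_FIELD(bd.bitfield0, CORE_TX_BD_NBDS, 1);	/* single-BD packet */
SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, CORE_TX_DEST_NW);
bd.bitfield1 = cpu_to_le16(bitfield1);
SET_FIELD(bd.bd_flags.as_bitfield, CORE_TX_BD_FLAGS_START_BD, 0x1);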
+
+enum core_tx_dest {
+ CORE_TX_DEST_NW,
+ CORE_TX_DEST_LB,
+ MAX_CORE_TX_DEST
+};
+
+struct core_tx_start_ramrod_data {
+ struct regpair pbl_base_addr;
+ __le16 mtu;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 stats_en;
+ u8 stats_id;
+ u8 conn_type;
+ __le16 pbl_size;
+ __le16 qm_pq_id;
+ u8 gsi_offload_flag;
+ u8 resrved[3];
+};
+
+struct core_tx_stop_ramrod_data {
+ __le32 reserved0[2];
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -636,9 +874,33 @@ struct hsi_fp_ver_struct {
};
/* Mstorm non-triggering VF zone */
+enum malicious_vf_error_id {
+ MALICIOUS_VF_NO_ERROR,
+ VF_PF_CHANNEL_NOT_READY,
+ VF_ZONE_MSG_NOT_VALID,
+ VF_ZONE_FUNC_NOT_ENABLED,
+ ETH_PACKET_TOO_SMALL,
+ ETH_ILLEGAL_VLAN_MODE,
+ ETH_MTU_VIOLATION,
+ ETH_ILLEGAL_INBAND_TAGS,
+ ETH_VLAN_INSERT_AND_INBAND_VLAN,
+ ETH_ILLEGAL_NBDS,
+ ETH_FIRST_BD_WO_SOP,
+ ETH_INSUFFICIENT_BDS,
+ ETH_ILLEGAL_LSO_HDR_NBDS,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_ZERO_SIZE_BD,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_INSUFFICIENT_PAYLOAD,
+ ETH_EDPM_OUT_OF_SYNC,
+ ETH_TUNN_IPV6_EXT_NBD_ERR,
+ ETH_CONTROL_PACKET_VIOLATION,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
+
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
- struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF];
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
};
/* Mstorm VF zone */
@@ -705,13 +967,17 @@ struct pf_start_ramrod_data {
struct protocol_dcb_data {
u8 dcb_enable_flag;
+ u8 reserved_a;
u8 dcb_priority;
u8 dcb_tc;
- u8 reserved;
+ u8 reserved_b;
+ u8 reserved0;
};
struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
+ u8 update_rx_def_ucast_clss;
+ u8 update_rx_def_non_ucast_clss;
u8 update_tx_pf_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
@@ -727,7 +993,7 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgre;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
- __le16 reserved[3];
+ __le16 reserved[2];
};
struct pf_update_ramrod_data {
@@ -736,16 +1002,17 @@ struct pf_update_ramrod_data {
u8 update_fcoe_dcb_data_flag;
u8 update_iscsi_dcb_data_flag;
u8 update_roce_dcb_data_flag;
+ u8 update_rroce_dcb_data_flag;
u8 update_iwarp_dcb_data_flag;
u8 update_mf_vlan_flag;
- u8 reserved;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
struct protocol_dcb_data iscsi_dcb_data;
struct protocol_dcb_data roce_dcb_data;
+ struct protocol_dcb_data rroce_dcb_data;
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan;
- __le16 reserved2;
+ __le16 reserved;
struct pf_update_tunnel_config tunnel_config;
};
@@ -766,10 +1033,14 @@ enum protocol_version_array_key {
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
-/* Pstorm non-triggering VF zone */
+struct rdma_sent_stats {
+ struct regpair sent_bytes;
+ struct regpair sent_pkts;
+};
+
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
- struct regpair reserved[2];
+ struct rdma_sent_stats rdma_stats;
};
/* Pstorm VF zone */
@@ -786,7 +1057,11 @@ struct ramrod_header {
__le16 echo;
};
-/* Slowpath Element (SPQE) */
+struct rdma_rcv_stats {
+ struct regpair rcv_bytes;
+ struct regpair rcv_pkts;
+};
+
struct slow_path_element {
struct ramrod_header hdr;
struct regpair data_ptr;
@@ -794,7 +1069,7 @@ struct slow_path_element {
/* Tstorm non-triggering VF zone */
struct tstorm_non_trigger_vf_zone {
- struct regpair reserved[2];
+ struct rdma_rcv_stats rdma_stats;
};
struct tstorm_per_port_stat {
@@ -802,9 +1077,14 @@ struct tstorm_per_port_stat {
struct regpair mac_error_discard;
struct regpair mftag_filter_discard;
struct regpair eth_mac_filter_discard;
- struct regpair reserved[5];
+ struct regpair ll2_mac_filter_discard;
+ struct regpair ll2_conn_disabled_discard;
+ struct regpair iscsi_irregular_pkt;
+ struct regpair reserved;
+ struct regpair roce_irregular_pkt;
struct regpair eth_irregular_pkt;
- struct regpair reserved1[2];
+ struct regpair reserved1;
+ struct regpair preroce_irregular_pkt;
struct regpair eth_gre_tunn_filter_discard;
struct regpair eth_vxlan_tunn_filter_discard;
struct regpair eth_geneve_tunn_filter_discard;
@@ -870,7 +1150,13 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
-/* Attentions status block */
+enum vf_zone_size_mode {
+ VF_ZONE_SIZE_MODE_DEFAULT,
+ VF_ZONE_SIZE_MODE_DOUBLE,
+ VF_ZONE_SIZE_MODE_QUAD,
+ MAX_VF_ZONE_SIZE_MODE
+};
+
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -1579,6 +1865,7 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
MAX_DBG_STATUS
};
@@ -1589,7 +1876,41 @@ enum dbg_status {
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
-/* QM per-port init parameters */
+struct init_brb_ram_req {
+ __le32 guranteed_per_tc;
+ __le32 headroom_per_tc;
+ __le32 min_pkt_size;
+ __le32 max_ports_per_engine;
+ u8 num_active_tcs[MAX_NUM_PORTS];
+};
+
+struct init_ets_tc_req {
+ u8 use_sp;
+ u8 use_wfq;
+ __le16 weight;
+};
+
+struct init_ets_req {
+ __le32 mtu;
+ struct init_ets_tc_req tc_req[NUM_OF_TCS];
+};
+
+struct init_nig_lb_rl_req {
+ __le16 lb_mac_rate;
+ __le16 lb_rate;
+ __le32 mtu;
+ __le16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+struct init_nig_pri_tc_map_entry {
+ u8 tc_id;
+ u8 valid;
+};
+
+struct init_nig_pri_tc_map_req {
+ struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
struct init_qm_port_params {
u8 active;
u8 active_phys_tcs;
@@ -1619,7 +1940,7 @@ struct init_qm_vport_params {
/* Width of GRC address in bits (addresses are specified in dwords) */
#define GRC_ADDR_BITS 23
-#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+#define MAX_GRC_ADDR (BIT(GRC_ADDR_BITS) - 1)
/* indicates an init that should be applied to any phase ID */
#define ANY_PHASE_ID 0xffff
@@ -1674,11 +1995,11 @@ struct bin_buffer_hdr {
/* binary init buffer types */
enum bin_init_buffer_type {
- BIN_BUF_FW_VER_INFO,
+ BIN_BUF_INIT_FW_VER_INFO,
BIN_BUF_INIT_CMD,
BIN_BUF_INIT_VAL,
BIN_BUF_INIT_MODE_TREE,
- BIN_BUF_IRO,
+ BIN_BUF_INIT_IRO,
MAX_BIN_INIT_BUFFER_TYPE
};
@@ -1918,44 +2239,34 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
#define MAX_NAME_LEN 16
/* Win 2 */
-#define GTT_BAR0_MAP_REG_IGU_CMD \
- 0x00f000UL
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
/* Win 3 */
-#define GTT_BAR0_MAP_REG_TSDM_RAM \
- 0x010000UL
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
/* Win 4 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM \
- 0x011000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
/* Win 5 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
- 0x012000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-#define GTT_BAR0_MAP_REG_USDM_RAM \
- 0x013000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
/* Win 7 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
- 0x014000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
/* Win 8 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
- 0x015000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
/* Win 9 */
-#define GTT_BAR0_MAP_REG_XSDM_RAM \
- 0x016000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
/* Win 10 */
-#define GTT_BAR0_MAP_REG_YSDM_RAM \
- 0x017000UL
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
/* Win 11 */
-#define GTT_BAR0_MAP_REG_PSDM_RAM \
- 0x018000UL
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
/**
* @brief qed_qm_pf_mem_size - prepare QM ILT sizes
@@ -2003,7 +2314,7 @@ struct qed_qm_pf_rt_init_params {
u16 num_vf_pqs;
u8 start_vport;
u8 num_vports;
- u8 pf_wfq;
+ u16 pf_wfq;
u32 pf_rl;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
@@ -2138,6 +2449,9 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define TSTORM_PORT_STAT_OFFSET(port_id) \
(IRO[1].base + ((port_id) * IRO[1].m1))
#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[2].base + ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
(IRO[3].base + ((vf_id) * IRO[3].m1))
#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
@@ -2153,42 +2467,90 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
+ (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
(IRO[19].base + ((queue_id) * IRO[19].m1))
#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size)
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+ (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[21].base + ((pf_id) * IRO[21].m1))
+ (IRO[22].base + ((pf_id) * IRO[22].m1))
#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[22].size)
+ (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[23].base + ((pf_id) * IRO[23].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size)
+ (IRO[24].base + ((pf_id) * IRO[24].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size)
+ (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[25].base + ((pf_id) * IRO[25].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size)
+ (IRO[26].base + ((pf_id) * IRO[26].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
- (IRO[26].base + ((ethtype) * IRO[26].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size)
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size)
+ (IRO[27].base + ((ethtype) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[28].base + ((pf_id) * IRO[28].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size)
+ (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[29].base + ((queue_id) * IRO[29].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size)
-
-static const struct iro iro_arr[46] = {
+ (IRO[30].base + ((queue_id) * IRO[30].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+ (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+
+static const struct iro iro_arr[47] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb0, 0x78, 0x0, 0x0, 0x78},
{0x6318, 0x20, 0x0, 0x0, 0x20},
@@ -2201,20 +2563,21 @@ static const struct iro iro_arr[46] = {
{0x3df0, 0x0, 0x0, 0x0, 0x78},
{0x29b0, 0x0, 0x0, 0x0, 0x78},
{0x4c38, 0x0, 0x0, 0x0, 0x78},
- {0x4a48, 0x0, 0x0, 0x0, 0x78},
+ {0x4990, 0x0, 0x0, 0x0, 0x78},
{0x7e48, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
{0x60f8, 0x10, 0x0, 0x0, 0x10},
{0xb820, 0x30, 0x0, 0x0, 0x30},
{0x95b8, 0x30, 0x0, 0x0, 0x30},
- {0x4c18, 0x80, 0x0, 0x0, 0x40},
+ {0x4b60, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xc9a8, 0x0, 0x0, 0x0, 0x4},
- {0x4c58, 0x80, 0x0, 0x0, 0x20},
+ {0x53a0, 0x80, 0x4, 0x0, 0x4},
+ {0xc8f0, 0x0, 0x0, 0x0, 0x4},
+ {0x4ba0, 0x80, 0x0, 0x0, 0x20},
{0x8050, 0x40, 0x0, 0x0, 0x30},
{0xe770, 0x60, 0x0, 0x0, 0x60},
{0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xdf88, 0x78, 0x0, 0x0, 0x78},
+ {0xf188, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
{0xacf0, 0x0, 0x0, 0x0, 0xf0},
{0xade0, 0x8, 0x0, 0x0, 0x8},
@@ -2226,455 +2589,457 @@ static const struct iro iro_arr[46] = {
{0x200, 0x10, 0x8, 0x0, 0x8},
{0xb78, 0x10, 0x8, 0x0, 0x2},
{0xd888, 0x38, 0x0, 0x0, 0x24},
- {0x12120, 0x10, 0x0, 0x0, 0x8},
- {0x11b20, 0x38, 0x0, 0x0, 0x18},
+ {0x12c38, 0x10, 0x0, 0x0, 0x8},
+ {0x11aa0, 0x38, 0x0, 0x0, 0x18},
{0xa8c0, 0x30, 0x0, 0x0, 0x10},
{0x86f8, 0x28, 0x0, 0x0, 0x18},
- {0xeff8, 0x10, 0x0, 0x0, 0x10},
+ {0x101f8, 0x10, 0x0, 0x0, 0x10},
{0xdd08, 0x48, 0x0, 0x0, 0x38},
- {0xf460, 0x20, 0x0, 0x0, 0x20},
+ {0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
{0x5000, 0x10, 0x0, 0x0, 0x10},
};
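
/* Worked example of how the *_OFFSET macros above decode an iro_arr entry
 * {base, m1, m2, m3, size}. IRO[1] = {0x4cb0, 0x78, 0x0, 0x0, 0x78} backs
 * TSTORM_PORT_STAT_OFFSET(port_id):
 *
 *	offset(port_id) = 0x4cb0 + port_id * 0x78
 *	offset(2)       = 0x4cb0 + 0xf0 = 0x4da0
 *
 * Two-index macros such as MSTORM_ETH_VF_PRODS_OFFSET add a second stride:
 * base + vf_id * m1 + vf_queue_id * m2.
 */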
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30799
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30815
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30849
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31523
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32547
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33059
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
-
-#define RUNTIME_ARRAY_SIZE 33925
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30817
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30851
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
+
+#define RUNTIME_ARRAY_SIZE 33927
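/* Illustrative sketch, not from this patch: every *_RT_OFFSET above indexes
 * a host-side shadow of the device's runtime array (bounded by
 * RUNTIME_ARRAY_SIZE), which the init code later flushes to the chip. A
 * minimal example of filling one offset/size pair through
 * qed_init_store_rt_reg(), whose reflowed definition appears further down
 * in this patch; the buffer name is hypothetical:
 */
static void example_store_cau_var_memory(struct qed_hwfn *p_hwfn,
					 const u32 *vals)
{
	u32 i;

	/* CAU_REG_SB_VAR_MEMORY_RT_SIZE consecutive dwords, starting at
	 * CAU_REG_SB_VAR_MEMORY_RT_OFFSET
	 */
	for (i = 0; i < CAU_REG_SB_VAR_MEMORY_RT_SIZE; i++)
		qed_init_store_rt_reg(p_hwfn,
				      CAU_REG_SB_VAR_MEMORY_RT_OFFSET + i,
				      vals[i]);
}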
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -3201,7 +3566,31 @@ struct eth_conn_context {
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
-/* opcodes for the event ring */
+enum eth_error_code {
+ ETH_OK = 0x00,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2,
+ ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC,
+ ETH_FILTERS_VLAN_ADD_FAIL_FULL,
+ ETH_FILTERS_VLAN_ADD_FAIL_DUP,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1,
+ ETH_FILTERS_PAIR_ADD_FAIL_DUP,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1,
+ ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
+ ETH_FILTERS_VNI_ADD_FAIL_FULL,
+ ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ MAX_ETH_ERROR_CODE
+};
+
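/* Illustrative sketch, not from this patch: eth_error_code values are
 * firmware completion statuses, with ETH_OK the only success value. A
 * hypothetical helper mapping a few of them to log strings:
 */
static const char *example_eth_err_str(enum eth_error_code err)
{
	switch (err) {
	case ETH_OK:
		return "success";
	case ETH_FILTERS_MAC_ADD_FAIL_FULL:
		return "MAC filter add failed - table full";
	case ETH_FILTERS_VLAN_ADD_FAIL_DUP:
		return "VLAN filter add failed - duplicate entry";
	default:
		return "unclassified ETH error";
	}
}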
enum eth_event_opcode {
ETH_EVENT_UNUSED,
ETH_EVENT_VPORT_START,
@@ -3269,7 +3658,13 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
-/* Ethernet Ramrod Command IDs */
+enum eth_ipv4_frag_type {
+ ETH_IPV4_NOT_FRAG,
+ ETH_IPV4_FIRST_FRAG,
+ ETH_IPV4_NON_FIRST_FRAG,
+ MAX_ETH_IPV4_FRAG_TYPE
+};
+
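/* Illustrative sketch, not from this patch: a 3-way IPv4 fragment
 * classification like the one above conventionally derives from the
 * fragment-offset field and the More-Fragments (MF) flag. A hypothetical
 * classifier over the kernel's struct iphdr:
 */
static enum eth_ipv4_frag_type example_classify_frag(const struct iphdr *iph)
{
	u16 frag_off = ntohs(iph->frag_off);

	if (!(frag_off & IP_OFFSET))		/* first (or only) fragment */
		return (frag_off & IP_MF) ? ETH_IPV4_FIRST_FRAG
					  : ETH_IPV4_NOT_FRAG;
	return ETH_IPV4_NON_FIRST_FRAG;		/* non-zero offset */
}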
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -3451,8 +3846,8 @@ struct rx_queue_start_ramrod_data {
u8 toggle_val;
u8 vf_rx_prod_index;
-
- u8 reserved[6];
+ u8 vf_rx_prod_use_zone_a;
+ u8 reserved[5];
__le16 reserved1;
struct regpair cqe_pbl_addr;
struct regpair bd_base;
@@ -3526,10 +3921,11 @@ struct tx_queue_start_ramrod_data {
__le16 pxp_st_index;
__le16 comp_agg_size;
__le16 queue_zone_id;
- __le16 test_dup_count;
+ __le16 reserved2;
__le16 pbl_size;
__le16 tx_queue_id;
-
+ __le16 same_as_last_id;
+ __le16 reserved[3];
struct regpair pbl_base_addr;
struct regpair bd_cons_address;
};
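/* Illustrative sketch, not from this patch: carving a named field out of a
 * reserved[] run - as vf_rx_prod_use_zone_a is carved out of the old
 * reserved[6] above - must not move the fields the firmware expects after
 * it. A hypothetical compile-time guard for the layout shown in that hunk:
 */
static inline void example_check_rx_ramrod_layout(void)
{
	BUILD_BUG_ON(offsetof(struct rx_queue_start_ramrod_data, reserved) !=
		     offsetof(struct rx_queue_start_ramrod_data,
			      vf_rx_prod_use_zone_a) + 1);
}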
@@ -4926,8 +5322,8 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
@@ -4988,6 +5384,10 @@ enum roce_event_opcode {
MAX_ROCE_EVENT_OPCODE
};
+struct roce_init_func_ramrod_data {
+ struct rdma_init_func_ramrod_data rdma;
+};
+
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
@@ -7239,6 +7639,12 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MCP_RESET 0x00090000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+#define DRV_MSG_CODE_GET_STATS 0x00130000
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
@@ -7315,10 +7721,10 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_RESERVED4,
MFW_DRV_MSG_BW_UPDATE,
MFW_DRV_MSG_BW_UPDATE5,
- MFW_DRV_MSG_BW_UPDATE6,
- MFW_DRV_MSG_BW_UPDATE7,
- MFW_DRV_MSG_BW_UPDATE8,
- MFW_DRV_MSG_BW_UPDATE9,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11,
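/* Illustrative sketch, not from this patch: the four new
 * MFW_DRV_MSG_GET_*_STATS events line up naturally with the new
 * DRV_MSG_CODE_STATS_TYPE_* parameters of DRV_MSG_CODE_GET_STATS above;
 * a hypothetical mapping helper (0 stands in for "no stats request"):
 */
static u32 example_mfw_msg_to_stats_type(enum MFW_DRV_MSG_TYPE msg)
{
	switch (msg) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		return DRV_MSG_CODE_STATS_TYPE_LAN;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		return DRV_MSG_CODE_STATS_TYPE_FCOE;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		return DRV_MSG_CODE_STATS_TYPE_ISCSI;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		return DRV_MSG_CODE_STATS_TYPE_RDMA;
	default:
		return 0;
	}
}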
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index e17885321faf..8ebdc79b3850 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -44,8 +44,7 @@ struct qed_ptt_pool {
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
- struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
- GFP_KERNEL);
+ struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
int i;
if (!p_pool)
@@ -113,16 +112,14 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
return NULL;
}
-void qed_ptt_release(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}
-u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
return le32_to_cpu(p_ptt->pxp.offset) << 2;
@@ -141,8 +138,7 @@ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
}
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 new_hw_addr)
+ struct qed_ptt *p_ptt, u32 new_hw_addr)
{
u32 prev_hw_addr;
@@ -166,8 +162,7 @@ void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
}
static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 hw_addr)
+ struct qed_ptt *p_ptt, u32 hw_addr)
{
u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
u32 offset;
@@ -224,10 +219,7 @@ u32 qed_rd(struct qed_hwfn *p_hwfn,
static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- void *addr,
- u32 hw_addr,
- size_t n,
- bool to_device)
+ void *addr, u32 hw_addr, size_t n, bool to_device)
{
u32 dw_count, *host_addr, hw_offset;
size_t quota, done = 0;
@@ -259,8 +251,7 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
}
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- void *dest, u32 hw_addr, size_t n)
+ struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
@@ -270,8 +261,7 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
}
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 hw_addr, void *src, size_t n)
+ struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
@@ -280,9 +270,7 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
-void qed_fid_pretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 fid)
+void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
u16 control = 0;
@@ -309,8 +297,7 @@ void qed_fid_pretend(struct qed_hwfn *p_hwfn,
}
void qed_port_pretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 port_id)
+ struct qed_ptt *p_ptt, u8 port_id)
{
u16 control = 0;
@@ -326,8 +313,7 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
*(u32 *)&p_ptt->pxp.pretend);
}
-void qed_port_unpretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u16 control = 0;
@@ -429,28 +415,27 @@ u32 qed_dmae_idx_to_go_cmd(u8 idx)
return DMAE_REG_GO_C0 + (idx << 2);
}
-static int
-qed_dmae_post_command(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
- struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+ struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
u8 idx_cmd = p_hwfn->dmae_info.channel, i;
int qed_status = 0;
/* verify address is not NULL */
- if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
- ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+ if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+ ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
DP_NOTICE(p_hwfn,
"source or destination address 0 idx_cmd=%d\n"
"opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
- idx_cmd,
- le32_to_cpu(command->opcode),
- le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length_dw),
- le32_to_cpu(command->src_addr_hi),
- le32_to_cpu(command->src_addr_lo),
- le32_to_cpu(command->dst_addr_hi),
- le32_to_cpu(command->dst_addr_lo));
+ idx_cmd,
+ le32_to_cpu(p_command->opcode),
+ le16_to_cpu(p_command->opcode_b),
+ le16_to_cpu(p_command->length_dw),
+ le32_to_cpu(p_command->src_addr_hi),
+ le32_to_cpu(p_command->src_addr_lo),
+ le32_to_cpu(p_command->dst_addr_hi),
+ le32_to_cpu(p_command->dst_addr_lo));
return -EINVAL;
}
@@ -459,13 +444,13 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
NETIF_MSG_HW,
"Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
idx_cmd,
- le32_to_cpu(command->opcode),
- le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length_dw),
- le32_to_cpu(command->src_addr_hi),
- le32_to_cpu(command->src_addr_lo),
- le32_to_cpu(command->dst_addr_hi),
- le32_to_cpu(command->dst_addr_lo));
+ le32_to_cpu(p_command->opcode),
+ le16_to_cpu(p_command->opcode_b),
+ le16_to_cpu(p_command->length_dw),
+ le32_to_cpu(p_command->src_addr_hi),
+ le32_to_cpu(p_command->src_addr_lo),
+ le32_to_cpu(p_command->dst_addr_hi),
+ le32_to_cpu(p_command->dst_addr_lo));
	/* Copy the command to DMAE - need to do it before every call
	 * since the source/dest addresses are not reset.
@@ -475,7 +460,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
*/
for (i = 0; i < DMAE_CMD_SIZE; i++) {
u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
- *(((u32 *)command) + i) : 0;
+ *(((u32 *)p_command) + i) : 0;
qed_wr(p_hwfn, p_ptt,
DMAE_REG_CMD_MEM +
@@ -483,9 +468,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
(i * sizeof(u32)), data);
}
- qed_wr(p_hwfn, p_ptt,
- qed_dmae_idx_to_go_cmd(idx_cmd),
- DMAE_GO_VALUE);
+ qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
return qed_status;
}
@@ -498,9 +481,7 @@ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(u32),
- p_addr,
- GFP_KERNEL);
+ sizeof(u32), p_addr, GFP_KERNEL);
if (!*p_comp) {
DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
goto err;
@@ -543,8 +524,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32),
- p_hwfn->dmae_info.p_completion_word,
- p_phys);
+ p_hwfn->dmae_info.p_completion_word, p_phys);
p_hwfn->dmae_info.p_completion_word = NULL;
}
@@ -552,8 +532,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct dmae_cmd),
- p_hwfn->dmae_info.p_dmae_cmd,
- p_phys);
+ p_hwfn->dmae_info.p_dmae_cmd, p_phys);
p_hwfn->dmae_info.p_dmae_cmd = NULL;
}
@@ -571,9 +550,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
- u32 wait_cnt = 0;
- u32 wait_cnt_limit = 10000;
-
+ u32 wait_cnt_limit = 10000, wait_cnt = 0;
int qed_status = 0;
barrier();
@@ -606,7 +583,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
u64 dst_addr,
u8 src_type,
u8 dst_type,
- u32 length)
+ u32 length_dw)
{
dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
@@ -624,7 +601,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
(void *)(uintptr_t)src_addr,
- length * sizeof(u32));
+ length_dw * sizeof(u32));
break;
default:
return -EINVAL;
@@ -645,7 +622,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- cmd->length_dw = cpu_to_le16((u16)length);
+ cmd->length_dw = cpu_to_le16((u16)length_dw);
qed_dmae_post_command(p_hwfn, p_ptt);
@@ -654,16 +631,14 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
if (qed_status) {
DP_NOTICE(p_hwfn,
"qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
- src_addr,
- dst_addr,
- length);
+ src_addr, dst_addr, length_dw);
return qed_status;
}
if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
memcpy((void *)(uintptr_t)(dst_addr),
&p_hwfn->dmae_info.p_intermediate_buffer[0],
- length * sizeof(u32));
+ length_dw * sizeof(u32));
return 0;
}
@@ -730,10 +705,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
if (qed_status) {
DP_NOTICE(p_hwfn,
"qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
- qed_status,
- src_addr,
- dst_addr,
- length_cur);
+ qed_status, src_addr, dst_addr, length_cur);
break;
}
}
@@ -743,10 +715,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u64 source_addr,
- u32 grc_addr,
- u32 size_in_dwords,
- u32 flags)
+ u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct qed_dmae_params params;
@@ -768,9 +737,10 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc;
}
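/* Illustrative usage sketch, not from this patch: with the reflowed
 * qed_dmae_host2grc() signature above, DMA-ing a host-resident table into
 * GRC space takes a host virtual address cast through uintptr_t (the same
 * pattern qed_init_rt() uses below) and a length in dwords; flags = 0
 * keeps the default DMAE behaviour. Names here are hypothetical:
 */
static int example_dma_table_to_grc(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u32 *table, u32 grc_addr, u32 num_dwords)
{
	return qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)table,
				 grc_addr, num_dwords, 0);
}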
-int
-qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
- dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct qed_dmae_params params;
@@ -791,12 +761,11 @@ qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
return rc;
}
-int
-qed_dmae_host2host(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- dma_addr_t source_addr,
- dma_addr_t dest_addr,
- u32 size_in_dwords, struct qed_dmae_params *p_params)
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params)
{
int rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 9866a20d2128..8ce8564061d5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -59,17 +59,14 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
p_hwfn->rt_data.b_valid[i] = false;
}
-void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
- u32 rt_offset,
- u32 val)
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
- u32 rt_offset, u32 *p_val,
- size_t size)
+ u32 rt_offset, u32 *p_val, size_t size)
{
size_t i;
@@ -81,10 +78,7 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
static int qed_init_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u16 rt_offset,
- u16 size,
- bool b_must_dmae)
+ u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
@@ -102,8 +96,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
* simply write the data instead of using dmae.
*/
if (!b_must_dmae) {
- qed_wr(p_hwfn, p_ptt, addr + (i << 2),
- p_init_val[i]);
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
continue;
}
@@ -115,7 +108,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(p_init_val + i),
addr + (i << 2), segment, 0);
- if (rc != 0)
+ if (rc)
return rc;
/* Jump over the entire segment, including invalid entry */
@@ -182,9 +175,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
@@ -199,15 +190,12 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
- addr, fill_count,
- QED_DMAE_FLAG_RW_REPL_SRC);
+ addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
}
static void qed_init_fill(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill, u32 fill_count)
{
u32 i;
@@ -218,12 +206,12 @@ static void qed_init_fill(struct qed_hwfn *p_hwfn,
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_write_op *cmd,
- bool b_must_dmae,
- bool b_can_dmae)
+ bool b_must_dmae, bool b_can_dmae)
{
+ u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
u32 data = le32_to_cpu(cmd->data);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
- u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+
u32 offset, output_len, input_len, max_size;
struct qed_dev *cdev = p_hwfn->cdev;
union init_array_hdr *hdr;
@@ -233,8 +221,7 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
array_data = cdev->fw_data->arr_data;
- hdr = (union init_array_hdr *)(array_data +
- dmae_array_offset);
+ hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
data = le32_to_cpu(hdr->raw.data);
switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
case INIT_ARR_ZIPPED:
@@ -290,13 +277,12 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct init_write_op *cmd,
- bool b_can_dmae)
+ struct init_write_op *p_cmd, bool b_can_dmae)
{
- u32 data = le32_to_cpu(cmd->data);
- u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ u32 data = le32_to_cpu(p_cmd->data);
bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
- union init_write_args *arg = &cmd->args;
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ union init_write_args *arg = &p_cmd->args;
int rc = 0;
/* Sanitize */
@@ -309,20 +295,18 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
case INIT_SRC_INLINE:
- qed_wr(p_hwfn, p_ptt, addr,
- le32_to_cpu(arg->inline_val));
+ data = le32_to_cpu(p_cmd->args.inline_val);
+ qed_wr(p_hwfn, p_ptt, addr, data);
break;
case INIT_SRC_ZEROS:
- if (b_must_dmae ||
- (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
- rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
- le32_to_cpu(arg->zeros_count));
+ data = le32_to_cpu(p_cmd->args.zeros_count);
+ if (b_must_dmae || (b_can_dmae && (data >= 64)))
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
else
- qed_init_fill(p_hwfn, p_ptt, addr, 0,
- le32_to_cpu(arg->zeros_count));
+ qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
case INIT_SRC_ARRAY:
- rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+ rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
@@ -353,8 +337,7 @@ static inline bool comp_or(u32 val, u32 expected_val)
/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct init_read_op *cmd)
+ struct qed_ptt *p_ptt, struct init_read_op *cmd)
{
bool (*comp_check)(u32 val, u32 expected_val);
u32 delay = QED_INIT_POLL_PERIOD_US, val;
@@ -412,35 +395,33 @@ static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
}
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
- u16 *offset,
- int modes)
+ u16 *p_offset, int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
const u8 *modes_tree_buf;
u8 arg1, arg2, tree_val;
modes_tree_buf = cdev->fw_data->modes_tree_buf;
- tree_val = modes_tree_buf[(*offset)++];
+ tree_val = modes_tree_buf[(*p_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
- return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+ return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
case INIT_MODE_OP_OR:
- arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
- arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 | arg2;
case INIT_MODE_OP_AND:
- arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
- arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 & arg2;
default:
tree_val -= MAX_INIT_MODE_OPS;
- return (modes & (1 << tree_val)) ? 1 : 0;
+ return (modes & BIT(tree_val)) ? 1 : 0;
}
}
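/* Illustrative worked example, not from this patch: the modes tree walked
 * above is a prefix-encoded boolean expression. Operator tokens
 * (INIT_MODE_OP_NOT/OR/AND) recursively consume the tokens that follow;
 * any token >= MAX_INIT_MODE_OPS is a leaf storing (MAX_INIT_MODE_OPS + bit)
 * and testing (modes & BIT(bit)). With hypothetical mode bits A = 0, B = 1,
 * the stream [AND, leaf(A), NOT, leaf(B)] evaluates (A && !B); for
 * modes = BIT(0) that is 1 & !0 = 1, i.e. the init command is executed.
 */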
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
- struct init_if_mode_op *p_cmd,
- int modes)
+ struct init_if_mode_op *p_cmd, int modes)
{
u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
@@ -453,8 +434,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
struct init_if_phase_op *p_cmd,
- u32 phase,
- u32 phase_id)
+ u32 phase, u32 phase_id)
{
u32 data = le32_to_cpu(p_cmd->phase_data);
u32 op_data = le32_to_cpu(p_cmd->op_data);
@@ -468,10 +448,7 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
}
int qed_init_run(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int phase,
- int phase_id,
- int modes)
+ struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
@@ -557,7 +534,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
/* First Dword contains metadata and should be skipped */
buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
- offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+ offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 8fa50fa23c8d..61ec973a06c7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1775,10 +1775,9 @@ struct qed_sb_attn_info {
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
- struct qed_sb_attn_info *p_sb_desc)
+ struct qed_sb_attn_info *p_sb_desc)
{
- u16 rc = 0;
- u16 index;
+ u16 rc = 0, index;
	/* Make certain HW write took effect */
mmiowb();
@@ -1802,15 +1801,13 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
* @param asserted_bits newly asserted bits
* @return int
*/
-static int qed_int_assertion(struct qed_hwfn *p_hwfn,
- u16 asserted_bits)
+static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
u32 igu_mask;
/* Mask the source of the attention in the IGU */
- igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- IGU_REG_ATTENTION_ENABLE);
+ igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
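/* Illustrative sketch, not from this patch: the mask update above in
 * isolation - each newly asserted source that is maskable is dropped from
 * the IGU attention-enable mask so it cannot re-fire before deassertion:
 */
static u32 example_igu_mask_on_assert(u32 igu_mask, u16 asserted_bits)
{
	return igu_mask & ~((u32)asserted_bits & ATTN_BITS_MASKABLE);
}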
@@ -2041,7 +2038,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
if ((p_bit->flags & ATTENTION_PARITY) &&
- !!(parities & (1 << bit_idx)))
+ !!(parities & BIT(bit_idx)))
qed_int_deassertion_parity(p_hwfn, p_bit,
bit_idx);
@@ -2114,8 +2111,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
~((u32)deasserted_bits));
/* Unmask deasserted attentions in IGU */
- aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- IGU_REG_ATTENTION_ENABLE);
+ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
@@ -2160,8 +2156,7 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
index, attn_bits, attn_acks, asserted_bits,
deasserted_bits, p_sb_attn_sw->known_attn);
} else if (asserted_bits == 0x100) {
- DP_INFO(p_hwfn,
- "MFW indication via attention\n");
+ DP_INFO(p_hwfn, "MFW indication via attention\n");
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"MFW indication [deassertion]\n");
@@ -2173,18 +2168,14 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
return rc;
}
- if (deasserted_bits) {
+ if (deasserted_bits)
rc = qed_int_deassertion(p_hwfn, deasserted_bits);
- if (rc)
- return rc;
- }
return rc;
}
static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
- void __iomem *igu_addr,
- u32 ack_cons)
+ void __iomem *igu_addr, u32 ack_cons)
{
struct igu_prod_cons_update igu_ack = { 0 };
@@ -2242,9 +2233,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
/* Gather Interrupts/Attentions information */
if (!sb_info->sb_virt) {
- DP_ERR(
- p_hwfn->cdev,
- "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+ DP_ERR(p_hwfn->cdev,
+ "Interrupt Status block is NULL - cannot check for new interrupts!\n");
} else {
u32 tmp_index = sb_info->sb_ack;
@@ -2255,9 +2245,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
}
if (!sb_attn || !sb_attn->sb_attn) {
- DP_ERR(
- p_hwfn->cdev,
- "Attentions Status block is NULL - cannot check for new attentions!\n");
+ DP_ERR(p_hwfn->cdev,
+ "Attentions Status block is NULL - cannot check for new attentions!\n");
} else {
u16 tmp_index = sb_attn->index;
@@ -2313,8 +2302,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
if (p_sb->sb_attn)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
SB_ATTN_ALIGNED_SIZE(p_hwfn),
- p_sb->sb_attn,
- p_sb->sb_phys);
+ p_sb->sb_attn, p_sb->sb_phys);
kfree(p_sb);
}
@@ -2337,8 +2325,7 @@ static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
int i, j, k;
@@ -2378,8 +2365,8 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
{
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_sb_attn_info *p_sb;
- void *p_virt;
dma_addr_t p_phys = 0;
+ void *p_virt;
/* SB struct */
p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
@@ -2412,9 +2399,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
- u8 pf_id,
- u16 vf_number,
- u8 vf_valid)
+ u8 pf_id, u16 vf_number, u8 vf_valid)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cau_state;
@@ -2428,12 +2413,6 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
- /* setting the time resultion to a fixed value ( = 1) */
- SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
- QED_CAU_DEF_RX_TIMER_RES);
- SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
- QED_CAU_DEF_TX_TIMER_RES);
-
cau_state = CAU_HC_DISABLE_STATE;
if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
@@ -2468,9 +2447,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t sb_phys,
- u16 igu_sb_id,
- u16 vf_number,
- u8 vf_valid)
+ u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
struct cau_sb_entry sb_entry;
@@ -2514,8 +2491,7 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
timer_res = 2;
timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
- QED_COAL_RX_STATE_MACHINE,
- timeset);
+ QED_COAL_RX_STATE_MACHINE, timeset);
if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
timer_res = 0;
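/* Illustrative worked example, not from this patch: timeset must fit the
 * CAU's 7-bit field, so the coalescing interval is right-shifted by
 * timer_res until it does (res 0 is visibly chosen for values <= 0x7F,
 * a larger res otherwise). E.g. a hypothetical rx_coalesce_usecs = 0x180
 * with timer_res = 2 gives timeset = 0x180 >> 2 = 0x60, back under 0x7F.
 */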
@@ -2541,8 +2517,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u8 timeset)
{
struct cau_pi_entry pi_entry;
- u32 sb_offset;
- u32 pi_offset;
+ u32 sb_offset, pi_offset;
if (IS_VF(p_hwfn->cdev))
return;
@@ -2569,8 +2544,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
}
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_sb_info *sb_info)
+ struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
/* zero status block and ack counter */
sb_info->sb_ack = 0;
@@ -2590,8 +2564,7 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
*
* @return u16
*/
-static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
- u16 sb_id)
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
u16 igu_sb_id;
@@ -2603,8 +2576,12 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
else
igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
- (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+ if (sb_id == QED_SP_SB_ID)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+ else
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
return igu_sb_id;
}
@@ -2612,9 +2589,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr,
- u16 sb_id)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
sb_info->sb_virt = sb_virt_addr;
sb_info->sb_phys = sb_phy_addr;
@@ -2650,8 +2625,7 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
}
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
if (sb_id == QED_SP_SB_ID) {
DP_ERR(p_hwfn, "Do Not free sp sb using this function");
@@ -2685,8 +2659,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
kfree(p_sb);
}
-static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_sb_sp_info *p_sb;
dma_addr_t p_phys = 0;
@@ -2721,9 +2694,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
- void *cookie,
- u8 *sb_idx,
- __le16 **p_fw_cons)
+ void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
int rc = -ENOMEM;
@@ -2764,8 +2735,7 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
}
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_int_mode int_mode)
+ struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
@@ -2809,7 +2779,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
rc = qed_slowpath_irq_req(p_hwfn);
- if (rc != 0) {
+ if (rc) {
DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
return -EINVAL;
}
@@ -2822,8 +2792,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
-void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
p_hwfn->b_int_enabled = 0;
@@ -2950,13 +2919,11 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.opaque_fid, b_set);
}
-static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 sb_id)
+static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 sb_id)
{
u32 val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY +
- sizeof(u32) * sb_id);
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
struct qed_igu_block *p_block;
p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2983,8 +2950,7 @@ out:
return val;
}
-int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
u32 val, min_vf = 0, max_vf = 0;
@@ -3104,22 +3070,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
*/
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
- u32 igu_pf_conf = 0;
-
- igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
- u64 intr_status = 0;
- u32 intr_status_lo = 0;
- u32 intr_status_hi = 0;
u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
IGU_CMD_INT_ACK_BASE;
u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
IGU_CMD_INT_ACK_BASE;
+ u32 intr_status_hi = 0, intr_status_lo = 0;
+ u64 intr_status = 0;
intr_status_lo = REG_RD(p_hwfn,
GTT_BAR0_MAP_REG_IGU_CMD +
@@ -3153,8 +3116,7 @@ static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
kfree(p_hwfn->sp_dpc);
}
-int qed_int_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int rc = 0;
@@ -3169,10 +3131,9 @@ int qed_int_alloc(struct qed_hwfn *p_hwfn,
return rc;
}
rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
- if (rc) {
+ if (rc)
DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
- return rc;
- }
+
return rc;
}
@@ -3183,8 +3144,7 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
qed_int_sp_dpc_free(p_hwfn);
}
-void qed_int_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
qed_int_sb_attn_setup(p_hwfn, p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 401e738543b5..4409ea3f7d40 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -52,7 +52,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
u16 rx_mode = 0;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -80,8 +80,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
/* TPA related fields */
- memset(&p_ramrod->tpa_param, 0,
- sizeof(struct eth_vport_tpa_param));
+ memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
@@ -102,6 +101,9 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->tx_switching_en = p_params->tx_switching;
+ p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
+ p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
p_params->concrete_fid);
@@ -306,14 +308,14 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
memset(&p_ramrod->approx_mcast.bins, 0,
sizeof(p_ramrod->approx_mcast.bins));
- if (p_params->update_approx_mcast_flg) {
- p_ramrod->common.update_approx_mcast_flg = 1;
- for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
- u32 *p_bins = (u32 *)p_params->bins;
- __le32 val = cpu_to_le32(p_bins[i]);
+ if (!p_params->update_approx_mcast_flg)
+ return;
- p_ramrod->approx_mcast.bins[i] = val;
- }
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)p_params->bins;
+
+ p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
}
}
@@ -336,7 +338,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
}
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -361,8 +363,8 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
p_cmn->accept_any_vlan = p_params->accept_any_vlan;
- p_cmn->update_accept_any_vlan_flg =
- p_params->update_accept_any_vlan_flg;
+ val = p_params->update_accept_any_vlan_flg;
+ p_cmn->update_accept_any_vlan_flg = val;
p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
val = p_params->update_inner_vlan_removal_flg;
@@ -411,7 +413,7 @@ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
return qed_vf_pf_vport_stop(p_hwfn);
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -476,7 +478,7 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data);
- if (rc != 0) {
+ if (rc) {
DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
return rc;
}
@@ -511,11 +513,12 @@ static int qed_sp_release_queue_cid(
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
- struct qed_queue_start_common_params *params,
+ struct qed_queue_start_common_params *p_params,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, bool b_use_zone_a_prod)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -526,23 +529,23 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
int rc = -EINVAL;
/* Store information for the stop */
- p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
- p_rx_cid->cid = cid;
- p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = params->vport_id;
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+ p_rx_cid->cid = cid;
+ p_rx_cid->opaque_fid = opaque_fid;
+ p_rx_cid->vport_id = p_params->vport_id;
- rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
- if (rc != 0)
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
return rc;
- rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
- if (rc != 0)
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
+ if (rc)
return rc;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, cid, params->queue_id, params->vport_id,
- params->sb);
+ opaque_fid,
+ cid, p_params->queue_id, p_params->vport_id, p_params->sb);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -558,24 +561,28 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = cpu_to_le16(params->sb);
- p_ramrod->sb_index = params->sb_idx;
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = stats_id;
- p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
- p_ramrod->complete_cqe_flg = 0;
- p_ramrod->complete_event_flg = 1;
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
- p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
+ p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
- p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- p_ramrod->vf_rx_prod_index = params->vf_qid;
- if (params->vf_qid)
+ if (p_params->vf_qid || b_use_zone_a_prod) {
+ p_ramrod->vf_rx_prod_index = p_params->vf_qid;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
+ "Queue%s is meant for VF rxq[%02x]\n",
+ b_use_zone_a_prod ? " [legacy]" : "",
+ p_params->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+ }
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -583,7 +590,7 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
- struct qed_queue_start_common_params *params,
+ struct qed_queue_start_common_params *p_params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
@@ -597,20 +604,20 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_rxq_start(p_hwfn,
- params->queue_id,
- params->sb,
- params->sb_idx,
+ p_params->queue_id,
+ p_params->sb,
+ (u8)p_params->sb_idx,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr, cqe_pbl_size, pp_prod);
}
- rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
- if (rc != 0)
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
+ if (rc)
return rc;
- rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
- if (rc != 0)
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+ if (rc)
return rc;
*pp_prod = (u8 __iomem *)p_hwfn->regview +
@@ -622,9 +629,8 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
(u32 *)(&init_prod_val));
/* Allocate a CID for the queue */
- p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_rx_cid->cid);
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return rc;
@@ -634,14 +640,13 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
opaque_fid,
p_rx_cid->cid,
- params,
+ p_params,
abs_stats_id,
bd_max_bytes,
bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size);
+ cqe_pbl_addr, cqe_pbl_size, false);
- if (rc != 0)
+ if (rc)
qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
return rc;
@@ -788,21 +793,20 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_ramrod = &p_ent->ramrod.tx_queue_start;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = abs_vport_id;
- p_ramrod->sb_id = cpu_to_le16(p_params->sb);
- p_ramrod->sb_index = p_params->sb_idx;
- p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->stats_counter_id = stats_id;
- p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
- p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+ p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
+
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
- pq_id = qed_get_qm_pq(p_hwfn,
- PROTOCOLID_ETH,
- p_pq_params);
- p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+ pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -836,8 +840,7 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
memset(&pq_params, 0, sizeof(pq_params));
/* Allocate a CID for the queue */
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_tx_cid->cid);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return rc;
@@ -896,8 +899,7 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}
-static enum eth_filter_action
-qed_filter_action(enum qed_filter_opcode opcode)
+static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
@@ -1033,19 +1035,19 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
- p_second_filter->type = p_first_filter->type;
- p_second_filter->mac_msb = p_first_filter->mac_msb;
- p_second_filter->mac_mid = p_first_filter->mac_mid;
- p_second_filter->mac_lsb = p_first_filter->mac_lsb;
- p_second_filter->vlan_id = p_first_filter->vlan_id;
- p_second_filter->vni = p_first_filter->vni;
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
p_first_filter->vport_id = vport_to_remove_from;
- p_second_filter->action = ETH_FILTER_ACTION_ADD;
- p_second_filter->vport_id = vport_to_add_to;
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
p_first_filter->vport_id = vport_to_add_to;
memcpy(p_second_filter, p_first_filter,
@@ -1086,7 +1088,7 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
&p_ramrod, &p_ent,
comp_mode, p_comp_data);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
return rc;
}
@@ -1094,10 +1096,8 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
p_header->assert_on_error = p_filter_cmd->assert_on_error;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc != 0) {
- DP_ERR(p_hwfn,
- "Unicast filter ADD command failed %d\n",
- rc);
+ if (rc) {
+ DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
return rc;
}
@@ -1136,15 +1136,10 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
* Return:
******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
- u32 crc32_length,
- u32 crc32_seed,
- u8 complement)
+ u32 crc32_length, u32 crc32_seed, u8 complement)
{
- u32 byte = 0;
- u32 bit = 0;
- u8 msb = 0;
- u8 current_byte = 0;
- u32 crc32_result = crc32_seed;
+ u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+ u8 msb = 0, current_byte = 0;
if ((!crc32_packet) ||
(crc32_length == 0) ||
@@ -1164,9 +1159,7 @@ static u32 qed_calc_crc32c(u8 *crc32_packet,
return crc32_result;
}
-static inline u32 qed_crc32c_le(u32 seed,
- u8 *mac,
- u32 len)
+static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
u32 packet_buf[2] = { 0 };
@@ -1196,17 +1189,14 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0;
int rc, i;
- if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ if (p_filter_cmd->opcode == QED_FILTER_ADD)
rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
&abs_vport_id);
- if (rc)
- return rc;
- } else {
+ else
rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
&abs_vport_id);
- if (rc)
- return rc;
- }
+ if (rc)
+ return rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -1244,11 +1234,11 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
/* Convert to correct endianity */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ struct vport_update_ramrod_mcast *p_ramrod_bins;
u32 *p_bins = (u32 *)bins;
- struct vport_update_ramrod_mcast *approx_mcast;
- approx_mcast = &p_ramrod->approx_mcast;
- approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+ p_ramrod_bins = &p_ramrod->approx_mcast;
+ p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
}
}
@@ -1286,8 +1276,7 @@ static int qed_filter_mcast_cmd(struct qed_dev *cdev,
rc = qed_sp_eth_filter_mcast(p_hwfn,
opaque_fid,
p_filter_cmd,
- comp_mode,
- p_comp_data);
+ comp_mode, p_comp_data);
}
return rc;
}
@@ -1314,9 +1303,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
rc = qed_sp_eth_filter_ucast(p_hwfn,
opaque_fid,
p_filter_cmd,
- comp_mode,
- p_comp_data);
- if (rc != 0)
+ comp_mode, p_comp_data);
+ if (rc)
break;
}
@@ -1590,8 +1578,7 @@ out:
}
}
-void qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
u32 i;
@@ -1698,6 +1685,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
&info->num_vlan_filters);
qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+
+ info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
}
qed_fill_dev_info(cdev, &info->common);
@@ -1766,8 +1755,7 @@ static int qed_start_vport(struct qed_dev *cdev,
return 0;
}
-static int qed_stop_vport(struct qed_dev *cdev,
- u8 vport_id)
+static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
int rc, i;
@@ -1775,8 +1763,7 @@ static int qed_stop_vport(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
rc = qed_sp_vport_stop(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- vport_id);
+ p_hwfn->hw_info.opaque_fid, vport_id);
if (rc) {
DP_ERR(cdev, "Failed to stop VPORT\n");
@@ -1801,10 +1788,8 @@ static int qed_update_vport(struct qed_dev *cdev,
/* Translate protocol params into sp params */
sp_params.vport_id = params->vport_id;
- sp_params.update_vport_active_rx_flg =
- params->update_vport_active_flg;
- sp_params.update_vport_active_tx_flg =
- params->update_vport_active_flg;
+ sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
@@ -1817,8 +1802,7 @@ static int qed_update_vport(struct qed_dev *cdev,
* We need to re-fix the rss values per engine for CMT.
*/
if (cdev->num_hwfns > 1 && params->update_rss_flg) {
- struct qed_update_vport_rss_params *rss =
- &params->rss_params;
+ struct qed_update_vport_rss_params *rss = &params->rss_params;
int k, max = 0;
/* Find largest entry, since it's possible RSS needs to
@@ -1861,8 +1845,8 @@ static int qed_update_vport(struct qed_dev *cdev,
QED_RSS_IND_TABLE_SIZE * sizeof(u16));
memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
QED_RSS_KEY_SIZE * sizeof(u32));
+ sp_params.rss_params = &sp_rss_params;
}
- sp_params.rss_params = &sp_rss_params;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1893,8 +1877,8 @@ static int qed_start_rxq(struct qed_dev *cdev,
u16 cqe_pbl_size,
void __iomem **pp_prod)
{
- int rc, hwfn_index;
struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
hwfn_index = params->rss_id % cdev->num_hwfns;
p_hwfn = &cdev->hwfns[hwfn_index];
@@ -1935,8 +1919,7 @@ static int qed_stop_rxq(struct qed_dev *cdev,
rc = qed_sp_eth_rx_queue_stop(p_hwfn,
params->rx_queue_id / cdev->num_hwfns,
- params->eq_completion_only,
- false);
+ params->eq_completion_only, false);
if (rc) {
DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
return rc;
@@ -2047,11 +2030,11 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
memset(&accept_flags, 0, sizeof(accept_flags));
- accept_flags.update_rx_mode_config = 1;
- accept_flags.update_tx_mode_config = 1;
- accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
- QED_ACCEPT_MCAST_MATCHED |
- QED_ACCEPT_BCAST;
+ accept_flags.update_rx_mode_config = 1;
+ accept_flags.update_tx_mode_config = 1;
+ accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
QED_ACCEPT_MCAST_MATCHED |
QED_ACCEPT_BCAST;
@@ -2072,9 +2055,8 @@ static int qed_configure_filter_ucast(struct qed_dev *cdev,
struct qed_filter_ucast ucast;
if (!params->vlan_valid && !params->mac_valid) {
- DP_NOTICE(
- cdev,
- "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
+ DP_NOTICE(cdev,
+ "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
return -EINVAL;
}
@@ -2135,8 +2117,7 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev,
for (i = 0; i < mcast.num_mc_addrs; i++)
ether_addr_copy(mcast.mac[i], params->mac[i]);
- return qed_filter_mcast_cmd(cdev, &mcast,
- QED_SPQ_MODE_CB, NULL);
+ return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}
static int qed_configure_filter(struct qed_dev *cdev,
@@ -2153,15 +2134,13 @@ static int qed_configure_filter(struct qed_dev *cdev,
accept_flags = params->filter.accept_flags;
return qed_configure_filter_rx_mode(cdev, accept_flags);
default:
- DP_NOTICE(cdev, "Unknown filter type %d\n",
- (int)params->type);
+ DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
return -EINVAL;
}
}
static int qed_fp_cqe_completion(struct qed_dev *dev,
- u8 rss_id,
- struct eth_slow_path_rx_cqe *cqe)
+ u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
cqe);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 002114543451..e495d62fcc03 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -102,6 +102,8 @@ struct qed_sp_vport_start_params {
u16 opaque_fid;
u8 vport_id;
u16 mtu;
+ bool check_mac;
+ bool check_ethtype;
};
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
@@ -213,6 +215,8 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
+
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
@@ -223,7 +227,8 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, bool b_use_zone_a_prod);
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c7dc34bfdd0a..32f71ee57191 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -51,8 +51,6 @@ MODULE_FIRMWARE(QED_FW_FILE_NAME);
static int __init qed_init(void)
{
- pr_notice("qed_init called\n");
-
pr_info("%s", version);
return 0;
@@ -106,8 +104,7 @@ static void qed_free_pci(struct qed_dev *cdev)
/* Performs PCI initializations as well as initializing PCI-related parameters
* in the device structure. Returns 0 in case of success.
*/
-static int qed_init_pci(struct qed_dev *cdev,
- struct pci_dev *pdev)
+static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
u8 rev_id;
int rc;
@@ -263,8 +260,7 @@ static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
}
/* Sets the requested power state */
-static int qed_set_power_state(struct qed_dev *cdev,
- pci_power_t state)
+static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
if (!cdev)
return -ENODEV;
@@ -366,8 +362,8 @@ static int qed_enable_msix(struct qed_dev *cdev,
DP_NOTICE(cdev,
"Trying to enable MSI-X with less vectors (%d out of %d)\n",
cnt, int_params->in.num_vectors);
- rc = pci_enable_msix_exact(cdev->pdev,
- int_params->msix_table, cnt);
+ rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
+ cnt);
if (!rc)
rc = cnt;
}
@@ -439,6 +435,11 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
}
out:
+ if (!rc)
+ DP_INFO(cdev, "Using %s interrupts\n",
+ int_params->out.int_mode == QED_INT_MODE_INTA ?
+ "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
+ "MSI" : "MSIX");
cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
return rc;
@@ -514,19 +515,18 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
struct qed_dev *cdev = hwfn->cdev;
+ u32 int_mode;
int rc = 0;
u8 id;
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX) {
id = hwfn->my_id;
snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
id, cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
rc = request_irq(cdev->int_params.msix_table[id].vector,
qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
- if (!rc)
- DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
- "Requested slowpath MSI-X\n");
} else {
unsigned long flags = 0;
@@ -541,6 +541,13 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
flags, cdev->name, cdev);
}
+ if (rc)
+ DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
+ else
+ DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
+ "Requested slowpath %s\n",
+ (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
+
return rc;
}
@@ -974,8 +981,7 @@ static u32 qed_sb_init(struct qed_dev *cdev,
}
static u32 qed_sb_release(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
struct qed_hwfn *p_hwfn;
int hwfn_index;
@@ -1025,20 +1031,23 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
link_params->speed.autoneg = params->autoneg;
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
link_params->speed.advertised_speeds = 0;
- if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
- (params->adv_speeds & SUPPORTED_1000baseT_Full))
+ if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
+ (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
- if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
- if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
- if (params->adv_speeds & 0)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
- if (params->adv_speeds & 0)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+ if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
@@ -1168,50 +1177,56 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->link_up = true;
/* TODO - at the moment assume supported and advertised speed equal */
- if_link->supported_caps = SUPPORTED_FIBRE;
+ if_link->supported_caps = QED_LM_FIBRE_BIT;
if (params.speed.autoneg)
- if_link->supported_caps |= SUPPORTED_Autoneg;
+ if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
(params.pause.forced_rx && params.pause.forced_tx))
- if_link->supported_caps |= SUPPORTED_Asym_Pause;
+ if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
if (params.pause.autoneg || params.pause.forced_rx ||
params.pause.forced_tx)
- if_link->supported_caps |= SUPPORTED_Pause;
+ if_link->supported_caps |= QED_LM_Pause_BIT;
if_link->advertised_caps = if_link->supported_caps;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full;
+ if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
+ QED_LM_1000baseT_Full_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+ if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->advertised_caps |= 0;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->advertised_caps |= 0;
+ if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->supported_caps |= SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full;
+ if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
+ QED_LM_1000baseT_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+ if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->supported_caps |= 0;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->supported_caps |= 0;
+ if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link.link_up)
if_link->speed = link.speed;
@@ -1231,33 +1246,29 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
/* Link partner capabilities */
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_1G_HD)
- if_link->lp_caps |= SUPPORTED_1000baseT_Half;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_1G_FD)
- if_link->lp_caps |= SUPPORTED_1000baseT_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_10G)
- if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_40G)
- if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_50G)
- if_link->lp_caps |= 0;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_100G)
- if_link->lp_caps |= 0;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
+ if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
+ if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
+ if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
+ if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
+ if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
+ if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
+ if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link.an_complete)
- if_link->lp_caps |= SUPPORTED_Autoneg;
+ if_link->lp_caps |= QED_LM_Autoneg_BIT;
if (link.partner_adv_pause)
- if_link->lp_caps |= SUPPORTED_Pause;
+ if_link->lp_caps |= QED_LM_Pause_BIT;
if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
- if_link->lp_caps |= SUPPORTED_Asym_Pause;
+ if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
}
static void qed_get_current_link(struct qed_dev *cdev,
@@ -1391,3 +1402,24 @@ const struct qed_common_ops qed_common_ops_pass = {
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
};
+
+void qed_get_protocol_stats(struct qed_dev *cdev,
+ enum qed_mcp_protocol_type type,
+ union qed_mcp_protocol_stats *stats)
+{
+ struct qed_eth_stats eth_stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ switch (type) {
+ case QED_MCP_LAN_STATS:
+ qed_get_vport_stats(cdev, &eth_stats);
+ stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+ stats->lan_stats.fcs_err = -1;
+ break;
+ default:
+ DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+ return;
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index a240f26344a4..4c212667b482 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -54,8 +54,7 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
return true;
}
-void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_PORT);
@@ -68,8 +67,7 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
-void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
u32 tmp, i;
@@ -99,8 +97,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
return 0;
}
-static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
u32 drv_mb_offsize, mfw_mb_offsize;
@@ -143,8 +140,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info;
u32 size;
@@ -165,9 +161,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
- p_info->mfw_mb_shadow =
- kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
- p_info->mfw_mb_length), GFP_KERNEL);
+ p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
@@ -189,8 +183,7 @@ err:
* access is achieved by setting a blocking flag, which will fail other
* competing contexts to send their mailboxes.
*/
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
- u32 cmd)
+static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
spin_lock_bh(&p_hwfn->mcp_info->lock);
@@ -221,15 +214,13 @@ static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn,
- u32 cmd)
+static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
{
if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
-int qed_mcp_reset(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
u8 delay = CHIP_MCP_RESP_ITER_US;
@@ -326,7 +317,8 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
} else {
/* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond!\n");
+ DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
+ cmd, param);
*o_mcp_resp = 0;
rc = -EAGAIN;
}
@@ -342,7 +334,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
}
@@ -399,8 +391,7 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
}
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *p_load_code)
+ struct qed_ptt *p_ptt, u32 *p_load_code)
{
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_mcp_mb_params mb_params;
@@ -527,8 +518,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
"Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
transceiver_state,
(u32)(p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- transceiver_data)));
+ offsetof(struct public_port, transceiver_data)));
transceiver_state = GET_FIELD(transceiver_state,
ETH_TRANSCEIVER_STATE);
@@ -540,8 +530,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
}
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool b_reset)
+ struct qed_ptt *p_ptt, bool b_reset)
{
struct qed_mcp_link_state *p_link;
u8 max_bw, min_bw;
@@ -557,8 +546,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
"Received link update [0x%08x] from mfw [Addr 0x%x]\n",
status,
(u32)(p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- link_status)));
+ offsetof(struct public_port, link_status)));
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link indications\n");
@@ -635,6 +623,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
QED_LINK_PARTNER_SPEED_20G : 0;
p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_25G : 0;
+ p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
QED_LINK_PARTNER_SPEED_40G : 0;
p_link->partner_adv_speed |=
@@ -722,6 +713,48 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
return 0;
}
+static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum MFW_DRV_MSG_TYPE type)
+{
+ enum qed_mcp_protocol_type stats_type;
+ union qed_mcp_protocol_stats stats;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ u32 hsi_param;
+
+ switch (type) {
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ stats_type = QED_MCP_LAN_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+ break;
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ stats_type = QED_MCP_FCOE_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
+ break;
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ stats_type = QED_MCP_ISCSI_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
+ break;
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ stats_type = QED_MCP_RDMA_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
+ return;
+ }
+
+ qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_STATS;
+ mb_params.param = hsi_param;
+ memcpy(&union_data, &stats, sizeof(stats));
+ mb_params.p_data_src = &union_data;
+ qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
struct public_func *p_shmem_info)
{
@@ -752,8 +785,7 @@ static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
+ struct public_func *p_data, int pfid)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_FUNC);
@@ -763,51 +795,20 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
memset(p_data, 0, sizeof(*p_data));
- size = min_t(u32, sizeof(*p_data),
- QED_SECTION_SIZE(mfw_path_offsize));
+ size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
for (i = 0; i < size / sizeof(u32); i++)
((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
func_addr + (i << 2));
return size;
}
-int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_pf)
-{
- struct public_func shmem_info;
- int i;
-
- /* Find first Ethernet interface in port */
- for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
- i += p_hwfn->cdev->num_ports_in_engines) {
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID_BY_REL(p_hwfn, i));
-
- if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
- continue;
-
- if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
- FUNC_MF_CFG_PROTOCOL_ETHERNET) {
- *p_pf = (u8)i;
- return 0;
- }
- }
-
- DP_NOTICE(p_hwfn,
- "Failed to find on port an ethernet interface in MF_SI mode\n");
-
- return -EINVAL;
-}
-
-static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_function_info *p_info;
struct public_func shmem_info;
u32 resp = 0, param = 0;
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID(p_hwfn));
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
qed_read_pf_bandwidth(p_hwfn, &shmem_info);
@@ -867,6 +868,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+ break;
case MFW_DRV_MSG_BW_UPDATE:
qed_mcp_update_bw(p_hwfn, p_ptt);
break;
@@ -940,8 +947,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_get_media_type(struct qed_dev *cdev,
- u32 *p_media_type)
+int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
@@ -950,7 +956,7 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
}
@@ -1003,15 +1009,13 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_mcp_function_info *info;
struct public_func shmem_info;
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID(p_hwfn));
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
info = &p_hwfn->mcp_info->func_info;
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
- if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
- &info->protocol)) {
+ if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return -EINVAL;
@@ -1072,15 +1076,13 @@ struct qed_mcp_link_capabilities
return &p_hwfn->mcp_info->link_capabilities;
}
-int qed_mcp_drain(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 resp = 0, param = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NIG_DRAIN, 1000,
- &resp, &param);
+ DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
/* Wait for the drain to complete before returning */
msleep(1020);
@@ -1089,8 +1091,7 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
}
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *p_flash_size)
+ struct qed_ptt *p_ptt, u32 *p_flash_size)
{
u32 flash_size;
@@ -1168,8 +1169,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_led_mode mode)
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
u32 resp = 0, param = 0, drv_mb_param;
int rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 7f319aa1b229..c6372fa574b7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -60,9 +60,10 @@ struct qed_mcp_link_state {
#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
#define QED_LINK_PARTNER_SPEED_10G BIT(2)
#define QED_LINK_PARTNER_SPEED_20G BIT(3)
-#define QED_LINK_PARTNER_SPEED_40G BIT(4)
-#define QED_LINK_PARTNER_SPEED_50G BIT(5)
-#define QED_LINK_PARTNER_SPEED_100G BIT(6)
+#define QED_LINK_PARTNER_SPEED_25G BIT(4)
+#define QED_LINK_PARTNER_SPEED_40G BIT(5)
+#define QED_LINK_PARTNER_SPEED_50G BIT(6)
+#define QED_LINK_PARTNER_SPEED_100G BIT(7)
u32 partner_adv_speed;
bool partner_tx_flow_ctrl_en;
@@ -105,6 +106,47 @@ struct qed_mcp_drv_version {
u8 name[MCP_DRV_VER_STR_SIZE - 4];
};
+struct qed_mcp_lan_stats {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+};
+
+struct qed_mcp_fcoe_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct qed_mcp_iscsi_stats {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct qed_mcp_rdma_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+enum qed_mcp_protocol_type {
+ QED_MCP_LAN_STATS,
+ QED_MCP_FCOE_STATS,
+ QED_MCP_ISCSI_STATS,
+ QED_MCP_RDMA_STATS
+};
+
+union qed_mcp_protocol_stats {
+ struct qed_mcp_lan_stats lan_stats;
+ struct qed_mcp_fcoe_stats fcoe_stats;
+ struct qed_mcp_iscsi_stats iscsi_stats;
+ struct qed_mcp_rdma_stats rdma_stats;
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -458,6 +500,4 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
u8 min_bw);
-int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_pf);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index f6b86ca1ff79..b49d47f3de71 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -116,8 +116,14 @@
0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
+#define TCFC_REG_WEAK_ENABLE_VF \
+ 0x2d0704UL
#define TCFC_REG_STRONG_ENABLE_PF \
0x2d0708UL
+#define TCFC_REG_STRONG_ENABLE_VF \
+ 0x2d070cUL
+#define CCFC_REG_WEAK_ENABLE_VF \
+ 0x2e0704UL
#define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL
#define PGLUE_B_REG_PGL_ADDR_88_F0 \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index a52f3fc051f5..2888eb0628f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -25,9 +25,7 @@
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
- u8 cmd,
- u8 protocol,
- struct qed_sp_init_data *p_data)
+ u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct qed_spq_entry *p_ent = NULL;
@@ -38,7 +36,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
rc = qed_spq_get_entry(p_hwfn, pp_ent);
- if (rc != 0)
+ if (rc)
return rc;
p_ent = *pp_ent;
@@ -321,8 +319,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_START,
- PROTOCOLID_COMMON,
- &init_data);
+ PROTOCOLID_COMMON, &init_data);
if (rc)
return rc;
@@ -356,8 +353,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
- qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
- &p_ramrod->tunnel_config);
+ qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
@@ -389,8 +385,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
- sb, sb_index,
- p_ramrod->outer_tag);
+ sb, sb_index, p_ramrod->outer_tag);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index d73456eab1d7..0265a32c8681 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -41,8 +41,7 @@
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
void *cookie,
- union event_ring_data *data,
- u8 fw_return_code)
+ union event_ring_data *data, u8 fw_return_code)
{
struct qed_spq_comp_done *comp_done;
@@ -109,9 +108,8 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
-static int
-qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent)
+static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
{
p_ent->flags = 0;
@@ -189,8 +187,7 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
- struct qed_spq *p_spq,
- struct qed_spq_entry *p_ent)
+ struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
u16 echo = qed_chain_get_prod_idx(p_chain);
@@ -255,8 +252,7 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
/***************************************************************************
* EQ API
***************************************************************************/
-void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
- u16 prod)
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
@@ -267,9 +263,7 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
mmiowb();
}
-int qed_eq_completion(struct qed_hwfn *p_hwfn,
- void *cookie)
-
+int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
struct qed_eq *p_eq = cookie;
struct qed_chain *p_chain = &p_eq->chain;
@@ -323,8 +317,7 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn,
return rc;
}
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
- u16 num_elem)
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
struct qed_eq *p_eq;
@@ -348,11 +341,8 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
}
/* register EQ completion on the SP SB */
- qed_int_register_cb(p_hwfn,
- qed_eq_completion,
- p_eq,
- &p_eq->eq_sb_index,
- &p_eq->p_fw_cons);
+ qed_int_register_cb(p_hwfn, qed_eq_completion,
+ p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
return p_eq;
@@ -361,14 +351,12 @@ eq_allocate_fail:
return NULL;
}
-void qed_eq_setup(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq)
+void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
qed_chain_reset(&p_eq->chain);
}
-void qed_eq_free(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq)
+void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
if (!p_eq)
return;
@@ -379,10 +367,9 @@ void qed_eq_free(struct qed_hwfn *p_hwfn,
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
-static int qed_cqe_completion(
- struct qed_hwfn *p_hwfn,
- struct eth_slow_path_rx_cqe *cqe,
- enum protocol_type protocol)
+static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe,
+ enum protocol_type protocol)
{
if (IS_VF(p_hwfn->cdev))
return 0;
@@ -463,8 +450,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
u32 capacity;
/* SPQ struct */
- p_spq =
- kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+ p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
if (!p_spq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
return -ENOMEM;
@@ -525,9 +511,7 @@ void qed_spq_free(struct qed_hwfn *p_hwfn)
kfree(p_spq);
}
-int
-qed_spq_get_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry **pp_ent)
+int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
@@ -538,14 +522,15 @@ qed_spq_get_entry(struct qed_hwfn *p_hwfn,
if (list_empty(&p_spq->free_pool)) {
p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
if (!p_ent) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate an SPQ entry for a pending ramrod\n");
rc = -ENOMEM;
goto out_unlock;
}
p_ent->queue = &p_spq->unlimited_pending;
} else {
p_ent = list_first_entry(&p_spq->free_pool,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
list_del(&p_ent->list);
p_ent->queue = &p_spq->pending;
}
@@ -564,8 +549,7 @@ static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
-void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent)
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
spin_lock_bh(&p_hwfn->p_spq->lock);
__qed_spq_return_entry(p_hwfn, p_ent);
@@ -586,10 +570,9 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
*
* @return int
*/
-static int
-qed_spq_add_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent,
- enum spq_priority priority)
+static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ enum spq_priority priority)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
@@ -604,8 +587,7 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_en2;
p_en2 = list_first_entry(&p_spq->free_pool,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
list_del(&p_en2->list);
/* Copy the ring element physical pointer to the new
@@ -655,8 +637,7 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
- struct list_head *head,
- u32 keep_reserve)
+ struct list_head *head, u32 keep_reserve)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
int rc;
@@ -690,8 +671,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
break;
p_ent = list_first_entry(&p_spq->unlimited_pending,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
if (!p_ent)
return -EINVAL;
@@ -705,8 +685,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
}
int qed_spq_post(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent,
- u8 *fw_return_code)
+ struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
@@ -803,8 +782,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
return -EINVAL;
spin_lock_bh(&p_spq->lock);
- list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
- list) {
+ list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
if (p_ent->elem.hdr.echo == echo) {
u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
@@ -846,15 +824,22 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
if (!found) {
DP_NOTICE(p_hwfn,
- "Failed to find an entry this EQE completes\n");
+ "Failed to find an entry this EQE [echo %04x] completes\n",
+ le16_to_cpu(echo));
return -EEXIST;
}
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Complete EQE [echo %04x]: func %p cookie %p)\n",
+ le16_to_cpu(echo),
p_ent->comp_cb.function, p_ent->comp_cb.cookie);
if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
+ else
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Got a completion without a callback function\n");
if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
(found->queue == &p_spq->unlimited_pending))
@@ -901,14 +886,12 @@ consq_allocate_fail:
return NULL;
}
-void qed_consq_setup(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq)
+void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
qed_chain_reset(&p_consq->chain);
}
-void qed_consq_free(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq)
+void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
if (!p_consq)
return;
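/* The qed_spq.c hunks above tighten the echo-based completion path: every
 * posted ramrod carries a 16-bit echo, and an EQE is matched to its pending
 * entry by comparing echoes. A minimal standalone model of that lookup is
 * sketched below; SPQ_RING_SIZE and the struct layout are illustrative
 * assumptions, not the driver's actual definitions.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SPQ_RING_SIZE 256 /* hypothetical ring depth */

struct spq_entry_model {
	uint16_t echo;   /* identifier carried by the ramrod and its EQE */
	int pending;     /* entry sits on the completion-pending list */
};

/* Walk the pending entries and return the one this echo completes. */
static struct spq_entry_model *
spq_find_completion(struct spq_entry_model *ring, size_t n, uint16_t echo)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (ring[i].pending && ring[i].echo == echo)
			return &ring[i];
	return NULL;
}

int main(void)
{
	struct spq_entry_model ring[SPQ_RING_SIZE] = { { 0 } };
	uint16_t echo = 5;

	ring[5].echo = echo;
	ring[5].pending = 1;

	if (spq_find_completion(ring, SPQ_RING_SIZE, echo))
		printf("Complete EQE [echo %04x]\n", echo);
	else
		printf("Failed to find an entry this EQE [echo %04x] completes\n",
		       echo);
	return 0;
}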
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 15399da268d9..cb68674640f9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -60,7 +60,8 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
}
fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
- if (fp_minor > ETH_HSI_VER_MINOR) {
+ if (fp_minor > ETH_HSI_VER_MINOR &&
+ fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
@@ -699,7 +700,7 @@ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
&qzone_id);
reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
- val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
qed_wr(p_hwfn, p_ptt, reg_addr, val);
}
}
@@ -1090,13 +1091,13 @@ static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
- if (!(tlvs_mask & (1 << i)))
+ if (!(tlvs_mask & BIT(i)))
continue;
resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
qed_iov_vport_to_tlv(p_hwfn, i), size);
- if (tlvs_accepted & (1 << i))
+ if (tlvs_accepted & BIT(i))
resp->hdr.status = status;
else
resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
@@ -1241,6 +1242,16 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* Some legacy OSes are incapable of correctly handling this
+ * failure.
+ */
+ if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+ (p_vf->acquire.vfdev_info.os_type ==
+ VFPF_ACQUIRE_OS_WINDOWS))
+ return PFVF_STATUS_SUCCESS;
+
return PFVF_STATUS_NO_RESOURCE;
}
@@ -1280,22 +1291,42 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
memset(resp, 0, sizeof(*resp));
+ /* Write the PF version so that the VF knows which version is
+ * supported - it might later be overridden. This guarantees that
+ * a VF can recognize a legacy PF by the lack of versions in the reply.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+ if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+ vf->abs_vf_id, vf->state);
+ goto out;
+ }
+
/* Validate FW compatibility */
if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
- DP_INFO(p_hwfn,
- "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
- vf->abs_vf_id,
- req->vfdev_info.eth_fp_hsi_major,
- req->vfdev_info.eth_fp_hsi_minor,
- ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
-
- /* Write the PF version so that VF would know which version
- * is supported.
- */
- pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
- pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
- goto out;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] is pre-fastpath HSI\n",
+ vf->abs_vf_id);
+ p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+ vf->abs_vf_id,
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ goto out;
+ }
}
/* On 100g PFs, prevent old VFs from loading */
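/* The acquire flow above now distinguishes three outcomes when a VF's
 * fastpath HSI is examined: a matching major is accepted, a pre-fp-HSI VF
 * advertising VFPF_ACQUIRE_CAP_PRE_FP_HSI is accepted as legacy, and
 * anything else is rejected. A hedged standalone model of that decision;
 * the constants below are illustrative, not the driver's values.
 */
#include <stdio.h>
#include <stdint.h>

#define ETH_HSI_VER_MAJOR_M 3           /* assumed PF major version */
#define CAP_PRE_FP_HSI      (1ULL << 0) /* mirrors VFPF_ACQUIRE_CAP_PRE_FP_HSI */

enum acquire_verdict { ACQ_OK, ACQ_OK_LEGACY, ACQ_REJECT };

static enum acquire_verdict check_vf_hsi(uint8_t vf_major, uint64_t caps)
{
	if (vf_major == ETH_HSI_VER_MAJOR_M)
		return ACQ_OK;
	if (caps & CAP_PRE_FP_HSI)
		return ACQ_OK_LEGACY; /* pre-fastpath-HSI VF: force legacy minor */
	return ACQ_REJECT;            /* incompatible fastpath HSI */
}

int main(void)
{
	printf("%d %d %d\n",
	       check_vf_hsi(3, 0),              /* ACQ_OK */
	       check_vf_hsi(1, CAP_PRE_FP_HSI), /* ACQ_OK_LEGACY */
	       check_vf_hsi(1, 0));             /* ACQ_REJECT */
	return 0;
}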
@@ -1334,8 +1365,11 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
pfdev_info->fw_minor = FW_MINOR_VERSION;
pfdev_info->fw_rev = FW_REVISION_VERSION;
pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
- pfdev_info->minor_fp_hsi = min_t(u8,
- ETH_HSI_VER_MINOR,
+
+ /* Incorrect for a legacy VF, but that doesn't matter since a legacy
+ * VF doesn't read this field.
+ */
+ pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
req->vfdev_info.eth_fp_hsi_minor);
pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
@@ -1438,14 +1472,11 @@ static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
filter.type = QED_FILTER_VLAN;
filter.vlan = p_vf->shadow_config.vlans[i].vid;
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
filter.vlan, p_vf->relative_vf_id);
- rc = qed_sp_eth_filter_ucast(p_hwfn,
- p_vf->opaque_fid,
- &filter,
- QED_SPQ_MODE_CB, NULL);
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to configure VLAN [%04x] to VF [%04x]\n",
@@ -1463,7 +1494,7 @@ qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
{
int rc = 0;
- if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ if ((events & BIT(VLAN_ADDR_FORCED)) &&
!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
@@ -1479,7 +1510,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (!p_vf->vport_instance)
return -EINVAL;
- if (events & (1 << MAC_ADDR_FORCED)) {
+ if (events & BIT(MAC_ADDR_FORCED)) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1502,7 +1533,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
}
- if (events & (1 << VLAN_ADDR_FORCED)) {
+ if (events & BIT(VLAN_ADDR_FORCED)) {
struct qed_sp_vport_update_params vport_update;
u8 removal;
int i;
@@ -1572,7 +1603,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (filter.vlan)
p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
else
- p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
}
/* If forced features are terminated, we need to configure the shadow
@@ -1619,8 +1650,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
qed_int_cau_conf_sb(p_hwfn, p_ptt,
start->sb_addr[sb_id],
- vf->igu_sbs[sb_id],
- vf->abs_vf_id, 1);
+ vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
}
qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
@@ -1632,7 +1662,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
* vfs that would still be fine, since they passed '0' as padding].
*/
p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
- if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
u8 vf_req = start->only_untagged;
vf_info->bulletin.p_virt->default_only_untagged = vf_req;
@@ -1650,9 +1680,10 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
+ params.check_mac = true;
rc = qed_sp_eth_vport_start(p_hwfn, &params);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn,
"qed_iov_vf_mbx_start_vport returned error %d\n", rc);
status = PFVF_STATUS_FAILURE;
@@ -1679,7 +1710,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->spoof_chk = false;
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
rc);
status = PFVF_STATUS_FAILURE;
@@ -1695,21 +1726,32 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_vf_info *vf, u8 status)
+ struct qed_vf_info *vf,
+ u8 status, bool b_legacy)
{
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
struct vfpf_start_rxq_tlv *req;
+ u16 length;
mbx->offset = (u8 *)mbx->reply_virt;
+ /* Taking a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one which we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
- sizeof(*p_tlv));
+ length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if (status == PFVF_STATUS_SUCCESS) {
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
req = &mbx->req_virt->start_rxq;
p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
offsetof(struct mstorm_vf_zone,
@@ -1717,7 +1759,7 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
sizeof(struct eth_rx_prod_data) * req->rx_qid;
}
- qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+ qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
@@ -1728,6 +1770,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_rxq_tlv *req;
+ bool b_legacy_vf = false;
int rc;
memset(&params, 0, sizeof(params));
@@ -1743,13 +1786,27 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
+ /* Legacy VFs have their producers in a different location, which
+ * they calculate on their own; they also clean the producer before
+ * reaching this point.
+ */
+ if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+ b_legacy_vf = true;
+ } else {
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+ 0);
+ }
+
rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
vf->vf_queues[req->rx_qid].fw_cid,
&params,
vf->abs_vf_id + 0x10,
req->bd_max_bytes,
req->rxq_addr,
- req->cqe_pbl_addr, req->cqe_pbl_size);
+ req->cqe_pbl_addr, req->cqe_pbl_size,
+ b_legacy_vf);
if (rc) {
status = PFVF_STATUS_FAILURE;
@@ -1760,7 +1817,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
}
out:
- qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
@@ -1769,23 +1826,38 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
{
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
+ bool b_legacy = false;
+ u16 length;
mbx->offset = (u8 *)mbx->reply_virt;
+ /* Taking a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one which we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy = true;
+
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
- sizeof(*p_tlv));
+ length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if (status == PFVF_STATUS_SUCCESS) {
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
u16 qid = mbx->req_virt->start_txq.tx_qid;
p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
DQ_DEMS_LEGACY);
}
- qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
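/* Both queue-start responses above pick their TLV length by client age:
 * a legacy VF gets the old, shorter default response it expects, while a
 * modern VF gets the full start-queue response carrying the offset. A
 * small standalone model of the size choice; the struct shapes below are
 * stand-ins, not the real pfvf_* TLV layouts.
 */
#include <stdio.h>
#include <stdint.h>

struct def_resp_model { uint8_t hdr[8]; };                    /* old reply */
struct queue_resp_model { uint8_t hdr[8]; uint32_t offset; }; /* new reply */

static size_t resp_length(int b_legacy)
{
	return b_legacy ? sizeof(struct def_resp_model)
			: sizeof(struct queue_resp_model);
}

int main(void)
{
	printf("legacy=%zu modern=%zu\n", resp_length(1), resp_length(0));
	return 0;
}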
@@ -2045,7 +2117,7 @@ qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
/* Ignore the VF request if we're forcing a vlan */
- if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
p_data->update_inner_vlan_removal_flg = 1;
p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
}
@@ -2340,7 +2412,7 @@ static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
/* In forced mode, we're willing to remove entries - but we don't add
* new ones.
*/
- if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
return 0;
if (p_params->opcode == QED_FILTER_ADD ||
@@ -2374,7 +2446,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
int i;
/* If we're in forced-mode, we don't allow any change */
- if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
return 0;
/* First remove entries and then add new ones */
@@ -2509,7 +2581,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
}
/* Determine if the unicast filtering is acceptable by PF */
- if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
/* Once VLAN is forced or PVID is set, do not allow
@@ -2521,7 +2593,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
goto out;
}
- if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
(params.type == QED_FILTER_MAC ||
params.type == QED_FILTER_MAC_VLAN)) {
if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
@@ -2749,7 +2821,7 @@ cleanup:
/* Mark VF for ack and clean pending state */
if (p_vf->state == VF_RESET)
p_vf->state = VF_STOPPED;
- ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ ack_vfs[vfid / 32] |= BIT((vfid % 32));
p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
~(1ULL << (rel_vf_id % 64));
p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
@@ -2805,7 +2877,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
continue;
vfid = p_vf->abs_vf_id;
- if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
u16 rel_vf_id = p_vf->relative_vf_id;
@@ -3064,8 +3136,7 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
vf_info->bulletin.p_virt->valid_bitmap |= feature;
/* Forced MAC will disable MAC_ADDR */
- vf_info->bulletin.p_virt->valid_bitmap &=
- ~(1 << VFPF_BULLETIN_MAC_ADDR);
+ vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
@@ -3163,7 +3234,7 @@ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
if (!p_vf || !p_vf->bulletin.p_virt)
return NULL;
- if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
return NULL;
return p_vf->bulletin.p_virt->mac;
@@ -3177,7 +3248,7 @@ u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
if (!p_vf || !p_vf->bulletin.p_virt)
return 0;
- if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
return 0;
return p_vf->bulletin.p_virt->pvid;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 9b780b31b15c..3c9071de5472 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -46,6 +46,17 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
return p_tlv;
}
+static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
+{
+ union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF request status = 0x%x, PF reply status = 0x%x\n",
+ req_status, resp->default_resp.hdr.status);
+
+ mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+}
+
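/* qed_vf_pf_req_end() above centralizes the tail of every VF->PF request:
 * log both status codes, then release the channel mutex the request path
 * took. The callers below are reworked to reach it through a single "exit"
 * label instead of returning early with the lock held. A hedged pthread
 * model of the same shape; all names are hypothetical.
 */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t channel_lock = PTHREAD_MUTEX_INITIALIZER;

static void req_end(int req_status, int pf_status)
{
	printf("VF request status = 0x%x, PF reply status = 0x%x\n",
	       req_status, pf_status);
	pthread_mutex_unlock(&channel_lock);
}

static int do_request(int simulate_timeout)
{
	int rc = 0, pf_status = 1;

	pthread_mutex_lock(&channel_lock);
	if (simulate_timeout) {
		rc = -16; /* -EBUSY */
		goto exit;
	}
	/* ... interpret the PF reply here ... */
exit:
	req_end(rc, pf_status); /* always logs and unlocks, on every path */
	return rc;
}

int main(void)
{
	do_request(0);
	do_request(1);
	return 0;
}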
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
@@ -103,16 +114,12 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
"VF <-- PF Timeout [Type %d]\n",
p_req->first_tlv.tl.type);
rc = -EBUSY;
- goto exit;
} else {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"PF response: %d [Type %d]\n",
*done, p_req->first_tlv.tl.type);
}
-exit:
- mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
-
return rc;
}
@@ -191,6 +198,9 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn,
QED_MSG_IOV, "attempting to acquire resources\n");
+ /* Clear response buffer, as this might be a re-send */
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
@@ -205,9 +215,12 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* PF agrees to allocate our resources */
if (!(resp->pfdev_info.capabilities &
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
- DP_INFO(p_hwfn,
- "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
- return -EINVAL;
+ /* It's possible a legacy PF mistakenly accepted;
+ * we don't care - simply mark it as
+ * legacy and continue.
+ */
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
resources_acquired = true;
@@ -215,27 +228,55 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
attempts < VF_ACQUIRE_THRESH) {
qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
&resp->resc);
+ } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
+ if (pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn,
+ "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR,
+ ETH_HSI_VER_MINOR,
+ pfdev_info->major_fp_hsi);
+ rc = -EINVAL;
+ goto exit;
+ }
- /* Clear response buffer */
- memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
- } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
- pfdev_info->major_fp_hsi &&
- (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
- DP_NOTICE(p_hwfn,
- "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
- pfdev_info->major_fp_hsi,
- pfdev_info->minor_fp_hsi,
- ETH_HSI_VER_MAJOR,
- ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
- return -EINVAL;
+ if (!pfdev_info->major_fp_hsi) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ DP_NOTICE(p_hwfn,
+ "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
+ rc = -EINVAL;
+ goto exit;
+ } else {
+ DP_INFO(p_hwfn,
+ "PF is old - try re-acquire to see if it supports FW-version override\n");
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ continue;
+ }
+ }
+
+ /* If PF/VF are using the same Major, the PF must have
+ * had its reasons. Simply fail.
+ */
+ DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
+ rc = -EINVAL;
+ goto exit;
} else {
DP_ERR(p_hwfn,
"PF returned error %d to VF acquisition request\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto exit;
}
}
+ /* Mark the PF as legacy, if needed */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
+ p_iov->b_pre_fp_hsi = true;
+
/* Update bulletin board size with response from PF */
p_iov->bulletin.size = resp->bulletin_size;
@@ -253,14 +294,18 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
}
- if (ETH_HSI_VER_MINOR &&
+ if (!p_iov->b_pre_fp_hsi &&
+ ETH_HSI_VER_MINOR &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
DP_INFO(p_hwfn,
"PF is using older fastpath HSI; %02x.%02x is configured\n",
ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
}
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
@@ -347,6 +392,9 @@ free_p_iov:
return -ENOMEM;
}
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_qid,
@@ -374,6 +422,21 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1;
+ /* If PF is legacy, we'll need to calculate producers ourselves
+ * as well as clean them.
+ */
+ if (pp_prod && p_iov->b_pre_fp_hsi) {
+ u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview +
+ MSTORM_QZONE_START(p_hwfn->cdev) +
+ hw_qid * MSTORM_QZONE_SIZE;
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)(&init_prod_val));
+ }
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -381,13 +444,15 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->queue_start;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
/* Learn the address of the producer from the response */
- if (pp_prod) {
+ if (pp_prod && !p_iov->b_pre_fp_hsi) {
u32 init_prod_val = 0;
*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
@@ -399,6 +464,8 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)&init_prod_val);
}
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
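/* For a legacy PF, the VF above derives its own Rx producer address:
 * MSTORM_QZONE_START(cdev) plus hw_qid * MSTORM_QZONE_SIZE into BAR0,
 * then zeroes the producers. A standalone sketch of the arithmetic; the
 * zone constants below are made-up placeholders, not the HSI values.
 */
#include <stdio.h>
#include <stdint.h>

#define TSTORM_QZONE_START_M 0x0000u /* hypothetical */
#define TSTORM_QZONE_SIZE_M  0x80u   /* hypothetical */
#define MSTORM_QZONE_SIZE_M  0x80u   /* hypothetical */
#define NUM_L2_QUEUES_M      64u     /* hypothetical */

static uint32_t legacy_rx_prod_offset(uint8_t hw_qid)
{
	uint32_t mstorm_qzone_start = TSTORM_QZONE_START_M +
				      TSTORM_QZONE_SIZE_M * NUM_L2_QUEUES_M;

	return mstorm_qzone_start + hw_qid * MSTORM_QZONE_SIZE_M;
}

int main(void)
{
	/* The VF would write a 32-bit 0 at regview + this offset. */
	printf("qid 3 -> BAR0 offset 0x%x\n",
	       (unsigned)legacy_rx_prod_offset(3));
	return 0;
}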
@@ -424,10 +491,15 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -470,13 +542,27 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
}
if (pp_doorbell) {
- *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
+ /* Modern PFs provide the actual offsets, while legacy
+ * PFs provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+ u32 db_addr;
+
+ db_addr = qed_db_addr(cid, DQ_DEMS_LEGACY);
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ db_addr;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
tx_queue_id, *pp_doorbell, resp->offset);
}
exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
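/* The Tx-queue start above picks the doorbell address by PF age: a modern
 * PF returns the offset in its reply, while for a legacy PF the VF derives
 * it from the queue's CID. A hedged sketch of the branch; db_addr_from_cid()
 * is a placeholder, not the real qed_db_addr() computation.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t db_addr_from_cid(uint32_t cid)
{
	return cid * 0x20; /* placeholder formula, for illustration only */
}

static uint32_t doorbell_offset(int pf_is_legacy, uint32_t resp_offset,
				uint32_t cid)
{
	if (!pf_is_legacy)
		return resp_offset;   /* modern PF: trust the reply */
	return db_addr_from_cid(cid); /* legacy PF: compute locally */
}

int main(void)
{
	printf("modern=0x%x legacy=0x%x\n",
	       (unsigned)doorbell_offset(0, 0x1000, 7),
	       (unsigned)doorbell_offset(1, 0, 7));
	return 0;
}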
@@ -501,10 +587,15 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -543,10 +634,15 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -567,10 +663,15 @@ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -770,13 +871,18 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
return rc;
}
@@ -797,14 +903,19 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EAGAIN;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EAGAIN;
+ goto exit;
+ }
p_hwfn->b_int_enabled = 0;
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
@@ -828,6 +939,8 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
rc = -EAGAIN;
+ qed_vf_pf_req_end(p_hwfn, rc);
+
p_hwfn->b_int_enabled = 0;
if (p_iov->vf2pf_request)
@@ -896,12 +1009,17 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EAGAIN;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EAGAIN;
+ goto exit;
+ }
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
@@ -920,12 +1038,17 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
- return 0;
+ return rc;
}
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index b23ce58e932f..35db7a28aa13 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -86,7 +86,7 @@ struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
-#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
u64 capabilities;
u8 fw_major;
@@ -551,6 +551,11 @@ struct qed_vf_iov {
/* we set aside a copy of the acquire response */
struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* In case the PF predates the fp-hsi version comparison,
+ * this has to be propagated as it affects the fastpath.
+ */
+ bool b_pre_fp_hsi;
};
#ifdef CONFIG_QED_SRIOV
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 02b06d4e40ae..e01adce4a966 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -25,7 +25,7 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
-#define QEDE_REVISION_VERSION 1
+#define QEDE_REVISION_VERSION 9
#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
@@ -36,6 +36,8 @@
struct qede_stats {
u64 no_buff_discards;
+ u64 packet_too_big_discard;
+ u64 ttl0_discard;
u64 rx_ucast_bytes;
u64 rx_mcast_bytes;
u64 rx_bcast_bytes;
@@ -124,16 +126,22 @@ struct qede_dev {
(edev)->dev_info.num_tc)
struct qede_fastpath *fp_array;
- u16 req_rss;
- u16 num_rss;
+ u8 req_num_tx;
+ u8 fp_num_tx;
+ u8 req_num_rx;
+ u8 fp_num_rx;
+ u16 req_queues;
+ u16 num_queues;
u8 num_tc;
-#define QEDE_RSS_CNT(edev) ((edev)->num_rss)
-#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \
- (edev)->num_tc)
-#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss)
-#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss)
+#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
+#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
+#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \
+ (edev)->num_tc)
+#define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + (txqidx) % \
+ QEDE_TSS_COUNT(edev))
+#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev))
#define QEDE_TX_QUEUE(edev, txqidx) \
- (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+ (&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\
(edev), (txqidx))])
struct qed_int_info int_info;
@@ -235,6 +243,7 @@ struct qede_rx_queue {
u16 num_rx_buffers;
u16 rxq_id;
+ u64 rcv_pkts;
u64 rx_hw_errors;
u64 rx_alloc_errors;
u64 rx_ip_frags;
@@ -263,6 +272,10 @@ struct qede_tx_queue {
union db_prod tx_db;
u16 num_tx_buffers;
+ u64 xmit_pkts;
+ u64 stopped_cnt;
+
+ bool is_legacy;
};
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -277,7 +290,11 @@ struct qede_tx_queue {
struct qede_fastpath {
struct qede_dev *edev;
- u8 rss_id;
+#define QEDE_FASTPATH_TX BIT(0)
+#define QEDE_FASTPATH_RX BIT(1)
+#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
+ u8 type;
+ u8 id;
struct napi_struct napi;
struct qed_sb_info *sb_info;
struct qede_rx_queue *rxq;
@@ -337,6 +354,6 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
#define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256
-#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#endif /* _QEDE_H_ */
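/* qede.h now splits num_queues into dedicated Rx, dedicated Tx and
 * combined fastpaths, and the RSS/TSS macros above fall out of that
 * split. A standalone worked example of the same arithmetic:
 */
#include <stdio.h>

int main(void)
{
	int num_queues = 8, fp_num_rx = 2, fp_num_tx = 2, num_tc = 1;

	int rss_count = num_queues - fp_num_tx;            /* Rx-capable: 6 */
	int tss_count = (num_queues - fp_num_rx) * num_tc; /* Tx-capable: 6 */
	int combined = num_queues - fp_num_rx - fp_num_tx; /* both: 4 */

	printf("rss=%d tss=%d combined=%d\n", rss_count, tss_count, combined);
	return 0;
}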
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index f8492cac9290..4d45945bc34c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -35,6 +35,7 @@ static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
+ QEDE_RQSTAT(rcv_pkts),
QEDE_RQSTAT(rx_hw_errors),
QEDE_RQSTAT(rx_alloc_errors),
QEDE_RQSTAT(rx_ip_frags),
@@ -44,6 +45,24 @@ static const struct {
#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
(*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
qede_rqstats_arr[(sindex)].offset)))
+#define QEDE_TQSTAT_OFFSET(stat_name) \
+ (offsetof(struct qede_tx_queue, stat_name))
+#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_TQSTAT(stat_name) \
+ {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)}
+#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr)
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+} qede_tqstats_arr[] = {
+ QEDE_TQSTAT(xmit_pkts),
+ QEDE_TQSTAT(stopped_cnt),
+};
+
+#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \
+ (*((u64 *)(((u64)(&dev->fp_array[tssid].txqs[tcid])) +\
+ qede_tqstats_arr[(sindex)].offset)))
+
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
@@ -107,6 +126,8 @@ static const struct {
QEDE_PF_STAT(mftag_filter_discards),
QEDE_PF_STAT(mac_filter_discards),
QEDE_STAT(tx_err_drop_pkts),
+ QEDE_STAT(ttl0_discard),
+ QEDE_STAT(packet_too_big_discard),
QEDE_STAT(coalesced_pkts),
QEDE_STAT(coalesced_events),
@@ -151,17 +172,29 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
int i, j, k;
+ for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+ int tc;
+
+ for (j = 0; j < QEDE_NUM_RQSTATS; j++)
+ sprintf(buf + (k + j) * ETH_GSTRING_LEN,
+ "%d: %s", i, qede_rqstats_arr[j].string);
+ k += QEDE_NUM_RQSTATS;
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+ sprintf(buf + (k + j) * ETH_GSTRING_LEN,
+ "%d.%d: %s", i, tc,
+ qede_tqstats_arr[j].string);
+ k += QEDE_NUM_TQSTATS;
+ }
+ }
+
for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
if (IS_VF(edev) && qede_stats_arr[i].pf_only)
continue;
- strcpy(buf + j * ETH_GSTRING_LEN,
+ strcpy(buf + (k + j) * ETH_GSTRING_LEN,
qede_stats_arr[i].string);
j++;
}
-
- for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
- strcpy(buf + j * ETH_GSTRING_LEN,
- qede_rqstats_arr[k].string);
}
static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -197,19 +230,30 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock);
+ for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) {
+ int tc;
+
+ if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
+ buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
+ }
+
+ if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
+ buf[cnt++] = QEDE_TQSTATS_DATA(edev,
+ sidx,
+ qid, tc);
+ }
+ }
+ }
+
for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
continue;
buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
}
- for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
- buf[cnt] = 0;
- for (qid = 0; qid < edev->num_rss; qid++)
- buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
- cnt++;
- }
-
mutex_unlock(&edev->qede_lock);
}
@@ -227,7 +271,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
if (qede_stats_arr[i].pf_only)
num_stats--;
}
- return num_stats + QEDE_NUM_RQSTATS;
+ return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS +
+ QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc;
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST:
@@ -249,78 +294,150 @@ static u32 qede_get_priv_flags(struct net_device *dev)
return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
}
-static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+struct qede_link_mode_mapping {
+ u32 qed_link_mode;
+ u32 ethtool_link_mode;
+};
+
+static const struct qede_link_mode_mapping qed_lm_map[] = {
+ {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
+ {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
+ {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
+ {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
+ {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
+ {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
+ {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+ {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
+ {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
+ {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
+ {QED_LM_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
+};
+
+#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < QED_LM_COUNT; i++) { \
+ if ((caps) & (qed_lm_map[i].qed_link_mode)) \
+ __set_bit(qed_lm_map[i].ethtool_link_mode,\
+ lk_ksettings->link_modes.name); \
+ } \
+}
+
+#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < QED_LM_COUNT; i++) { \
+ if (test_bit(qed_lm_map[i].ethtool_link_mode, \
+ lk_ksettings->link_modes.name)) \
+ caps |= qed_lm_map[i].qed_link_mode; \
+ } \
+}
+
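/* qed_lm_map and the two macros above translate between the driver's
 * link-mode mask and ethtool's link-mode bitmap in both directions. A
 * hedged standalone model of the same table walk, using a two-entry
 * stand-in table instead of the real QED_LM_ and ETHTOOL_LINK_MODE_ bits:
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct lm_map { uint32_t drv_bit; unsigned int ethtool_bit; };

static const struct lm_map map[] = {
	{ 1u << 0, 0 }, /* e.g. FIBRE; bit positions are illustrative */
	{ 1u << 1, 6 }, /* e.g. Autoneg */
};
#define LM_COUNT (sizeof(map) / sizeof(map[0]))

/* driver caps -> ethtool bitmap (mirrors QEDE_DRV_TO_ETHTOOL_CAPS) */
static uint64_t drv_to_ethtool(uint32_t caps)
{
	uint64_t modes = 0;
	size_t i;

	for (i = 0; i < LM_COUNT; i++)
		if (caps & map[i].drv_bit)
			modes |= 1ULL << map[i].ethtool_bit;
	return modes;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)drv_to_ethtool(0x3));
	return 0;
}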
+static int qede_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
+ struct ethtool_link_settings *base = &cmd->base;
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
memset(&current_link, 0, sizeof(current_link));
edev->ops->common->get_link(edev->cdev, &current_link);
- cmd->supported = current_link.supported_caps;
- cmd->advertising = current_link.advertised_caps;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported)
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising)
+
+ ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+ QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising)
+
if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
- ethtool_cmd_speed_set(cmd, current_link.speed);
- cmd->duplex = current_link.duplex;
+ base->speed = current_link.speed;
+ base->duplex = current_link.duplex;
} else {
- cmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
}
- cmd->port = current_link.port;
- cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
- AUTONEG_DISABLE;
- cmd->lp_advertising = current_link.lp_caps;
+ base->port = current_link.port;
+ base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
return 0;
}
-static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int qede_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
+ const struct ethtool_link_settings *base = &cmd->base;
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
struct qed_link_params params;
- u32 speed;
if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
- DP_INFO(edev,
- "Link settings are not allowed to be changed\n");
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
-
memset(&current_link, 0, sizeof(current_link));
memset(&params, 0, sizeof(params));
edev->ops->common->get_link(edev->cdev, &current_link);
- speed = ethtool_cmd_speed(cmd);
params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (base->autoneg == AUTONEG_ENABLE) {
params.autoneg = true;
params.forced_speed = 0;
- params.adv_speeds = cmd->advertising;
- } else { /* forced speed */
+ QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising)
+ } else { /* forced speed */
params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
params.autoneg = false;
- params.forced_speed = speed;
- switch (speed) {
+ params.forced_speed = base->speed;
+ switch (base->speed) {
case SPEED_10000:
if (!(current_link.supported_caps &
- SUPPORTED_10000baseKR_Full)) {
+ QED_LM_10000baseKR_Full_BIT)) {
DP_INFO(edev, "10G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = SUPPORTED_10000baseKR_Full;
+ params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
+ break;
+ case SPEED_25000:
+ if (!(current_link.supported_caps &
+ QED_LM_25000baseKR_Full_BIT)) {
+ DP_INFO(edev, "25G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_25000baseKR_Full_BIT;
break;
case SPEED_40000:
if (!(current_link.supported_caps &
- SUPPORTED_40000baseLR4_Full)) {
+ QED_LM_40000baseLR4_Full_BIT)) {
DP_INFO(edev, "40G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = SUPPORTED_40000baseLR4_Full;
+ params.adv_speeds = QED_LM_40000baseLR4_Full_BIT;
+ break;
+ case SPEED_50000:
+ if (!(current_link.supported_caps &
+ QED_LM_50000baseKR2_Full_BIT)) {
+ DP_INFO(edev, "50G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_50000baseKR2_Full_BIT;
+ break;
+ case SPEED_100000:
+ if (!(current_link.supported_caps &
+ QED_LM_100000baseKR4_Full_BIT)) {
+ DP_INFO(edev, "100G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_100000baseKR4_Full_BIT;
break;
default:
- DP_INFO(edev, "Unsupported speed %u\n", speed);
+ DP_INFO(edev, "Unsupported speed %u\n", base->speed);
return -EINVAL;
}
}
@@ -368,8 +485,7 @@ static u32 qede_get_msglevel(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
- return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
- edev->dp_module;
+ return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
}
static void qede_set_msglevel(struct net_device *ndev, u32 level)
@@ -393,8 +509,7 @@ static int qede_nway_reset(struct net_device *dev)
struct qed_link_params link_params;
if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
- DP_INFO(edev,
- "Link settings are not allowed to be changed\n");
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -467,7 +582,7 @@ static int qede_set_coalesce(struct net_device *dev,
rxc = (u16)coal->rx_coalesce_usecs;
txc = (u16)coal->tx_coalesce_usecs;
- for_each_rss(i) {
+ for_each_queue(i) {
sb_id = edev->fp_array[i].sb_info->igu_sb_id;
rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
(u8)i, sb_id);
@@ -563,7 +678,7 @@ static int qede_set_pauseparam(struct net_device *dev,
memset(&params, 0, sizeof(params));
params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
if (epause->autoneg) {
- if (!(current_link.supported_caps & SUPPORTED_Autoneg)) {
+ if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
DP_INFO(edev, "autoneg not supported\n");
return -EINVAL;
}
@@ -619,45 +734,70 @@ static void qede_get_channels(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
- channels->combined_count = QEDE_RSS_CNT(edev);
+ channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
+ edev->fp_num_rx;
+ channels->tx_count = edev->fp_num_tx;
+ channels->rx_count = edev->fp_num_rx;
}
static int qede_set_channels(struct net_device *dev,
struct ethtool_channels *channels)
{
struct qede_dev *edev = netdev_priv(dev);
+ u32 count;
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
channels->rx_count, channels->tx_count,
channels->other_count, channels->combined_count);
- /* We don't support separate rx / tx, nor `other' channels. */
- if (channels->rx_count || channels->tx_count ||
- channels->other_count || (channels->combined_count == 0) ||
- (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
+ count = channels->rx_count + channels->tx_count +
+ channels->combined_count;
+
+ /* We don't support `other' channels */
+ if (channels->other_count) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"command parameters not supported\n");
return -EINVAL;
}
+ if (!(channels->combined_count || (channels->rx_count &&
+ channels->tx_count))) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "need to request at least one transmit and one receive channel\n");
+ return -EINVAL;
+ }
+
+ if (count > QEDE_MAX_RSS_CNT(edev)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "requested channels = %d max supported channels = %d\n",
+ count, QEDE_MAX_RSS_CNT(edev));
+ return -EINVAL;
+ }
+
/* Check if there was a change in the active parameters */
- if (channels->combined_count == QEDE_RSS_CNT(edev)) {
+ if ((count == QEDE_QUEUE_CNT(edev)) &&
+ (channels->tx_count == edev->fp_num_tx) &&
+ (channels->rx_count == edev->fp_num_rx)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"No change in active parameters\n");
return 0;
}
/* We need the number of queues to be divisible between the hwfns */
- if (channels->combined_count % edev->dev_info.common.num_hwfns) {
+ if ((count % edev->dev_info.common.num_hwfns) ||
+ (channels->tx_count % edev->dev_info.common.num_hwfns) ||
+ (channels->rx_count % edev->dev_info.common.num_hwfns)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
- "Number of channels must be divisable by %04x\n",
+ "Number of channels must be divisible by %04x\n",
edev->dev_info.common.num_hwfns);
return -EINVAL;
}
/* Set number of queues and reload if necessary */
- edev->req_rss = channels->combined_count;
+ edev->req_queues = count;
+ edev->req_num_tx = channels->tx_count;
+ edev->req_num_rx = channels->rx_count;
if (netif_running(dev))
qede_reload(edev, NULL, NULL);
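/* qede_set_channels() above accepts any mix of dedicated and combined
 * channels, as long as something can transmit and receive, the total fits
 * the device, and every count divides evenly between the hwfns (e.g. two
 * on a 100G device). A standalone model of those checks:
 */
#include <stdio.h>

static int channels_valid(unsigned rx, unsigned tx, unsigned combined,
			  unsigned max_cnt, unsigned num_hwfns)
{
	unsigned count = rx + tx + combined;

	if (!(combined || (rx && tx)))
		return 0; /* need at least one Tx and one Rx path */
	if (count > max_cnt)
		return 0; /* more channels than the device supports */
	if ((count % num_hwfns) || (tx % num_hwfns) || (rx % num_hwfns))
		return 0; /* must divide evenly between hwfns */
	return 1;
}

int main(void)
{
	printf("%d %d\n",
	       channels_valid(2, 2, 4, 16, 2),  /* 1: valid */
	       channels_valid(1, 2, 4, 16, 2)); /* 0: rx not divisible */
	return 0;
}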
@@ -727,7 +867,7 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
- info->data = edev->num_rss;
+ info->data = QEDE_RSS_COUNT(edev);
return 0;
case ETHTOOL_GRXFH:
return qede_get_rss_flags(edev, info);
@@ -930,7 +1070,7 @@ static void qede_netif_start(struct qede_dev *edev)
if (!netif_running(edev->ndev))
return;
- for_each_rss(i) {
+ for_each_queue(i) {
/* Update and reenable interrupts */
qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
napi_enable(&edev->fp_array[i].napi);
@@ -942,7 +1082,7 @@ static void qede_netif_stop(struct qede_dev *edev)
{
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
napi_disable(&edev->fp_array[i].napi);
/* Disable interrupts */
qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
@@ -952,11 +1092,23 @@ static void qede_netif_stop(struct qede_dev *edev)
static int qede_selftest_transmit_traffic(struct qede_dev *edev,
struct sk_buff *skb)
{
- struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+ struct qede_tx_queue *txq = NULL;
struct eth_tx_1st_bd *first_bd;
dma_addr_t mapping;
int i, idx, val;
+ for_each_queue(i) {
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ txq = edev->fp_array[i].txqs;
+ break;
+ }
+ }
+
+ if (!txq) {
+ DP_NOTICE(edev, "Tx path is not available\n");
+ return -1;
+ }
+
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
txq->sw_tx_ring[idx].skb = skb;
@@ -1020,14 +1172,26 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
- struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
u8 *data_ptr;
int i;
+ for_each_queue(i) {
+ if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ rxq = edev->fp_array[i].rxq;
+ break;
+ }
+ }
+
+ if (!rxq) {
+ DP_NOTICE(edev, "Rx path is not available\n");
+ return -1;
+ }
+
/* The packet is expected to be received on rx-queue 0 even though RSS
* is enabled. This is because queue 0 is configured as the default
* queue and the loopback traffic is not IP.
@@ -1228,8 +1392,8 @@ static int qede_get_tunable(struct net_device *dev,
}
static const struct ethtool_ops qede_ethtool_ops = {
- .get_settings = qede_get_settings,
- .set_settings = qede_set_settings,
+ .get_link_ksettings = qede_get_link_ksettings,
+ .set_link_ksettings = qede_set_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
@@ -1260,7 +1424,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
- .get_settings = qede_get_settings,
+ .get_link_ksettings = qede_get_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a6eb6af8cbe8..b4a56e61631a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -222,7 +222,7 @@ int __init qede_init(void)
{
int ret;
- pr_notice("qede_init: %s\n", version);
+ pr_info("qede_init: %s\n", version);
qed_ops = qed_get_eth_ops();
if (!qed_ops) {
@@ -253,7 +253,8 @@ int __init qede_init(void)
static void __exit qede_cleanup(void)
{
- pr_notice("qede_cleanup called\n");
+ if (debug & QED_LOG_INFO_MASK)
+ pr_info("qede_cleanup called\n");
unregister_netdevice_notifier(&qede_netdev_notifier);
pci_unregister_driver(&qede_pci_driver);
@@ -270,8 +271,7 @@ module_exit(qede_cleanup);
/* Unmap the data and free skb */
static int qede_free_tx_pkt(struct qede_dev *edev,
- struct qede_tx_queue *txq,
- int *len)
+ struct qede_tx_queue *txq, int *len)
{
u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
@@ -329,8 +329,7 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
static void qede_free_failed_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq,
struct eth_tx_1st_bd *first_bd,
- int nbd,
- bool data_split)
+ int nbd, bool data_split)
{
u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
@@ -339,8 +338,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
/* Return prod to its position before this skb was handled */
qed_chain_set_prod(&txq->tx_pbl,
- le16_to_cpu(txq->tx_db.data.bd_prod),
- first_bd);
+ le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
@@ -366,8 +364,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
/* Return again prod to its position before this skb was handled */
qed_chain_set_prod(&txq->tx_pbl,
- le16_to_cpu(txq->tx_db.data.bd_prod),
- first_bd);
+ le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
/* Free skb */
dev_kfree_skb_any(skb);
@@ -376,8 +373,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
}
static u32 qede_xmit_type(struct qede_dev *edev,
- struct sk_buff *skb,
- int *ipv6_ext)
+ struct sk_buff *skb, int *ipv6_ext)
{
u32 rc = XMIT_L4_CSUM;
__be16 l3_proto;
@@ -434,15 +430,13 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
}
static int map_frag_to_bd(struct qede_dev *edev,
- skb_frag_t *frag,
- struct eth_tx_bd *bd)
+ skb_frag_t *frag, struct eth_tx_bd *bd)
{
dma_addr_t mapping;
/* Map skb non-linear frag data for DMA */
mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
return -ENOMEM;
@@ -504,9 +498,8 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
}
/* Main transmit function */
-static
-netdev_tx_t qede_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
+static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
struct netdev_queue *netdev_txq;
@@ -526,12 +519,11 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb);
- WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+ WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
txq = QEDE_TX_QUEUE(edev, txq_index);
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
- WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
- (MAX_SKB_FRAGS + 1));
+ WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
@@ -606,6 +598,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
}
+ /* Legacy FW had flipped behavior with regard to this bit -
+ * i.e., it needed to be set to prevent the FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy))
+ first_bd->data.bitfields ^=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass few params, since the device cracker doesn't
* support parsing IPv6 with extension header/s.
@@ -731,6 +731,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
qede_update_tx_producer(txq);
netif_tx_stop_queue(netdev_txq);
+ txq->stopped_cnt++;
DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
"Stop queue was called\n");
/* paired memory barrier is in qede_tx_int(), we have to keep
@@ -764,8 +765,7 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}
-static int qede_tx_int(struct qede_dev *edev,
- struct qede_tx_queue *txq)
+static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
u16 hw_bd_cons;
@@ -791,6 +791,7 @@ static int qede_tx_int(struct qede_dev *edev,
bytes_compl += len;
pkts_compl++;
txq->sw_tx_cons++;
+ txq->xmit_pkts++;
}
netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
@@ -963,8 +964,7 @@ static inline void qede_update_rx_prod(struct qede_dev *edev,
static u32 qede_get_rxhash(struct qede_dev *edev,
u8 bitfields,
- __le32 rss_hash,
- enum pkt_hash_types *rxhash_type)
+ __le32 rss_hash, enum pkt_hash_types *rxhash_type)
{
enum rss_hash_type htype;
@@ -993,12 +993,10 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
static inline void qede_skb_receive(struct qede_dev *edev,
struct qede_fastpath *fp,
- struct sk_buff *skb,
- u16 vlan_tag)
+ struct sk_buff *skb, u16 vlan_tag)
{
if (vlan_tag)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
napi_gro_receive(&fp->napi, skb);
}
@@ -1021,8 +1019,7 @@ static void qede_set_gro_params(struct qede_dev *edev,
static int qede_fill_frag_skb(struct qede_dev *edev,
struct qede_rx_queue *rxq,
- u8 tpa_agg_index,
- u16 len_on_bd)
+ u8 tpa_agg_index, u16 len_on_bd)
{
struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
NUM_RX_BDS_MAX];
@@ -1209,7 +1206,7 @@ static void qede_gro_receive(struct qede_dev *edev,
#endif
send_skb:
- skb_record_rx_queue(skb, fp->rss_id);
+ skb_record_rx_queue(skb, fp->rxq->rxq_id);
qede_skb_receive(edev, fp, skb, vlan_tag);
}
@@ -1413,7 +1410,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
edev->ops->eth_cqe_completion(
- edev->cdev, fp->rss_id,
+ edev->cdev, fp->id,
(struct eth_slow_path_rx_cqe *)cqe);
goto next_cqe;
}
@@ -1470,7 +1467,7 @@ alloc_skb:
skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
if (unlikely(!skb)) {
DP_NOTICE(edev,
- "Build_skb failed, dropping incoming packet\n");
+ "skb allocation failed, dropping incoming packet\n");
qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
rxq->rx_alloc_errors++;
goto next_cqe;
@@ -1578,14 +1575,13 @@ alloc_skb:
skb->protocol = eth_type_trans(skb, edev->ndev);
rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
- fp_cqe->rss_hash,
- &rxhash_type);
+ fp_cqe->rss_hash, &rxhash_type);
skb_set_hash(skb, rx_hash, rxhash_type);
qede_set_skb_csum(skb, csum_flag);
- skb_record_rx_queue(skb, fp->rss_id);
+ skb_record_rx_queue(skb, fp->rxq->rxq_id);
qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
next_rx_only:
@@ -1604,6 +1600,8 @@ next_cqe: /* don't consume bd rx buffer */
/* Update producers */
qede_update_rx_prod(edev, rxq);
+ rxq->rcv_pkts += rx_pkt;
+
return rx_pkt;
}
@@ -1616,10 +1614,12 @@ static int qede_poll(struct napi_struct *napi, int budget)
u8 tc;
for (tc = 0; tc < edev->num_tc; tc++)
- if (qede_txq_has_work(&fp->txqs[tc]))
+ if (likely(fp->type & QEDE_FASTPATH_TX) &&
+ qede_txq_has_work(&fp->txqs[tc]))
qede_tx_int(edev, &fp->txqs[tc]);
- rx_work_done = qede_has_rx_work(fp->rxq) ?
+ rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
+ qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
if (rx_work_done < budget) {
qed_sb_update_sb_idx(fp->sb_info);
@@ -1639,8 +1639,10 @@ static int qede_poll(struct napi_struct *napi, int budget)
rmb();
/* Fall out from the NAPI loop if needed */
- if (!(qede_has_rx_work(fp->rxq) ||
- qede_has_tx_work(fp))) {
+ if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
+ qede_has_rx_work(fp->rxq)) ||
+ (likely(fp->type & QEDE_FASTPATH_TX) &&
+ qede_has_tx_work(fp)))) {
napi_complete(napi);
/* Update and reenable interrupts */
@@ -1711,6 +1713,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->ops->get_vport_stats(edev->cdev, &stats);
edev->stats.no_buff_discards = stats.no_buff_discards;
+ edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
+ edev->stats.ttl0_discard = stats.ttl0_discard;
edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
@@ -1790,9 +1794,9 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
}
-static struct rtnl_link_stats64 *qede_get_stats64(
- struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static
+struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -2106,8 +2110,7 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
}
DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
- "marked vlan %d as non-configured\n",
- vlan->vid);
+ "marked vlan %d as non-configured\n", vlan->vid);
}
edev->accept_any_vlan = false;
@@ -2149,7 +2152,7 @@ static void qede_udp_tunnel_add(struct net_device *dev,
edev->vxlan_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
t_port);
set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
@@ -2160,7 +2163,7 @@ static void qede_udp_tunnel_add(struct net_device *dev,
edev->geneve_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
t_port);
set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
@@ -2184,7 +2187,7 @@ static void qede_udp_tunnel_del(struct net_device *dev,
edev->vxlan_dst_port = 0;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
t_port);
set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
@@ -2195,7 +2198,7 @@ static void qede_udp_tunnel_del(struct net_device *dev,
edev->geneve_dst_port = 0;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
t_port);
set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
@@ -2240,15 +2243,13 @@ static const struct net_device_ops qede_netdev_ops = {
static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
struct pci_dev *pdev,
struct qed_dev_eth_info *info,
- u32 dp_module,
- u8 dp_level)
+ u32 dp_module, u8 dp_level)
{
struct net_device *ndev;
struct qede_dev *edev;
ndev = alloc_etherdev_mqs(sizeof(*edev),
- info->num_queues,
- info->num_queues);
+ info->num_queues, info->num_queues);
if (!ndev) {
pr_err("etherdev allocation failed\n");
return NULL;
@@ -2264,6 +2265,9 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+ DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
+ info->num_queues, info->num_queues);
+
SET_NETDEV_DEV(ndev, &pdev->dev);
memset(&edev->stats, 0, sizeof(edev->stats));
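For reference, alloc_etherdev_mqs() reserves the driver-private area and the
Tx/Rx queue slots in a single call; the DP_INFO added above only makes the
chosen counts visible. A hedged kernel-style sketch of the pattern (kernel
context assumed, error handling trimmed, num_queues hypothetical):

	struct net_device *ndev;

	/* private area plus num_queues Tx and num_queues Rx queue slots */
	ndev = alloc_etherdev_mqs(sizeof(struct qede_dev),
				  num_queues, num_queues);
	if (!ndev)
		return NULL;
	/* ... on teardown ... */
	free_netdev(ndev);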
@@ -2352,7 +2356,7 @@ static void qede_free_fp_array(struct qede_dev *edev)
struct qede_fastpath *fp;
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
fp = &edev->fp_array[i];
kfree(fp->sb_info);
@@ -2361,22 +2365,33 @@ static void qede_free_fp_array(struct qede_dev *edev)
}
kfree(edev->fp_array);
}
- edev->num_rss = 0;
+
+ edev->num_queues = 0;
+ edev->fp_num_tx = 0;
+ edev->fp_num_rx = 0;
}
static int qede_alloc_fp_array(struct qede_dev *edev)
{
+ u8 fp_combined, fp_rx = edev->fp_num_rx;
struct qede_fastpath *fp;
int i;
- edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+ edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
sizeof(*edev->fp_array), GFP_KERNEL);
if (!edev->fp_array) {
DP_NOTICE(edev, "fp array allocation failed\n");
goto err;
}
- for_each_rss(i) {
+ fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
+
+	/* Allocate the FP elements for Rx queues first, followed by the
+	 * combined queues and then the Tx-only ones. This ordering must be
+	 * maintained so that queues of the same type sit together in the
+	 * fastpath array and their associated ids are sequential.
+	 */
+ for_each_queue(i) {
fp = &edev->fp_array[i];
fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
@@ -2385,16 +2400,33 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
goto err;
}
- fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
- if (!fp->rxq) {
- DP_NOTICE(edev, "RXQ struct allocation failed\n");
- goto err;
+ if (fp_rx) {
+ fp->type = QEDE_FASTPATH_RX;
+ fp_rx--;
+ } else if (fp_combined) {
+ fp->type = QEDE_FASTPATH_COMBINED;
+ fp_combined--;
+ } else {
+ fp->type = QEDE_FASTPATH_TX;
}
- fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
- if (!fp->txqs) {
- DP_NOTICE(edev, "TXQ array allocation failed\n");
- goto err;
+ if (fp->type & QEDE_FASTPATH_TX) {
+ fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
+ GFP_KERNEL);
+ if (!fp->txqs) {
+ DP_NOTICE(edev,
+ "TXQ array allocation failed\n");
+ goto err;
+ }
+ }
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq) {
+ DP_NOTICE(edev,
+ "RXQ struct allocation failed\n");
+ goto err;
+ }
}
}
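The allocation order promised by the comment above can be checked in
isolation. A standalone model of the Rx-first, combined-next, Tx-last
assignment (the counts are made up; only the QEDE_FASTPATH_* names come from
this diff):

#include <stdio.h>

#define QEDE_FASTPATH_TX	0x1
#define QEDE_FASTPATH_RX	0x2
#define QEDE_FASTPATH_COMBINED	(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)

int main(void)
{
	int total = 6, fp_rx = 2, fp_tx = 1;
	int fp_combined = total - fp_rx - fp_tx;
	int i;

	for (i = 0; i < total; i++) {
		unsigned int type;

		if (fp_rx) {
			type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			type = QEDE_FASTPATH_TX;
		}
		printf("fp[%d] = %s\n", i,
		       type == QEDE_FASTPATH_RX ? "rx" :
		       type == QEDE_FASTPATH_TX ? "tx" : "combined");
	}
	return 0;	/* prints rx, rx, combined, combined, combined, tx */
}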
@@ -2456,7 +2488,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
bool is_vf, enum qede_probe_mode mode)
{
struct qed_probe_params probe_params;
- struct qed_slowpath_params params;
+ struct qed_slowpath_params sp_params;
struct qed_dev_eth_info dev_info;
struct qede_dev *edev;
struct qed_dev *cdev;
@@ -2479,14 +2511,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_update_pf_params(cdev);
/* Start the Slowpath-process */
- memset(&params, 0, sizeof(struct qed_slowpath_params));
- params.int_mode = QED_INT_MODE_MSIX;
- params.drv_major = QEDE_MAJOR_VERSION;
- params.drv_minor = QEDE_MINOR_VERSION;
- params.drv_rev = QEDE_REVISION_VERSION;
- params.drv_eng = QEDE_ENGINEERING_VERSION;
- strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
- rc = qed_ops->common->slowpath_start(cdev, &params);
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.int_mode = QED_INT_MODE_MSIX;
+ sp_params.drv_major = QEDE_MAJOR_VERSION;
+ sp_params.drv_minor = QEDE_MINOR_VERSION;
+ sp_params.drv_rev = QEDE_REVISION_VERSION;
+ sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
+ strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ rc = qed_ops->common->slowpath_start(cdev, &sp_params);
if (rc) {
pr_notice("Cannot start slowpath\n");
goto err1;
@@ -2589,7 +2621,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qed_ops->common->slowpath_stop(cdev);
qed_ops->common->remove(cdev);
- pr_notice("Ending successfully qede_remove\n");
+ dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
static void qede_remove(struct pci_dev *pdev)
@@ -2608,8 +2640,8 @@ static int qede_set_num_queues(struct qede_dev *edev)
u16 rss_num;
/* Set up queues according to available resources */
- if (edev->req_rss)
- rss_num = edev->req_rss;
+ if (edev->req_queues)
+ rss_num = edev->req_queues;
else
rss_num = netif_get_num_default_rss_queues() *
edev->dev_info.common.num_hwfns;
@@ -2619,11 +2651,15 @@ static int qede_set_num_queues(struct qede_dev *edev)
rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
if (rc > 0) {
/* Managed to request interrupts for our queues */
- edev->num_rss = rc;
+ edev->num_queues = rc;
DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
- QEDE_RSS_CNT(edev), rss_num);
+ QEDE_QUEUE_CNT(edev), rss_num);
rc = 0;
}
+
+ edev->fp_num_tx = edev->req_num_tx;
+ edev->fp_num_rx = edev->req_num_rx;
+
return rc;
}
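The arithmetic in qede_set_num_queues() is: take the explicit request if one
was made, otherwise the stack default scaled by the number of hw-functions,
then accept however many MSI-X vectors qed actually grants. A small model with
made-up numbers (both helpers below are hypothetical stand-ins for
netif_get_num_default_rss_queues() and set_fp_int()):

#include <stdio.h>

static int default_rss_queues(void) { return 8; }

/* pretend the qed core can only provide 12 vectors */
static int vectors_granted(int requested)
{
	return requested < 12 ? requested : 12;
}

int main(void)
{
	int num_hwfns = 2, req_queues = 0;	/* 0 = no explicit request */
	int want = req_queues ? req_queues
			      : default_rss_queues() * num_hwfns;
	int num_queues = vectors_granted(want);

	printf("managed %d of %d queues\n", num_queues, want);	/* 12 of 16 */
	return 0;
}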
@@ -2637,16 +2673,14 @@ static void qede_free_mem_sb(struct qede_dev *edev,
/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
sb_virt = dma_alloc_coherent(&edev->pdev->dev,
- sizeof(*sb_virt),
- &sb_phys, GFP_KERNEL);
+ sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
DP_ERR(edev, "Status block allocation failed\n");
return -ENOMEM;
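The status block is a small structure shared coherently with the device:
dma_alloc_coherent() hands back both the CPU virtual address and the bus
address the hardware is given. A hedged kernel-style sketch of the pattern
(not the driver's exact code; assumes a struct device *dev in scope):

	struct status_block *sb_virt;
	dma_addr_t sb_phys;

	sb_virt = dma_alloc_coherent(dev, sizeof(*sb_virt), &sb_phys,
				     GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;
	/* ... program sb_phys into the device, use sb_virt from the CPU ... */
	dma_free_coherent(dev, sizeof(*sb_virt), sb_virt, sb_phys);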
@@ -2678,16 +2712,15 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
data = rx_buf->data;
dma_unmap_page(&edev->pdev->dev,
- rx_buf->mapping,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
rx_buf->data = NULL;
__free_page(data);
}
}
-static void qede_free_sge_mem(struct qede_dev *edev,
- struct qede_rx_queue *rxq) {
+static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
int i;
if (edev->gro_disable)
@@ -2706,8 +2739,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
}
}
-static void qede_free_mem_rxq(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
qede_free_sge_mem(edev, rxq);
@@ -2729,9 +2761,6 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct eth_rx_bd *rx_bd;
dma_addr_t mapping;
struct page *data;
- u16 rx_buf_size;
-
- rx_buf_size = rxq->rx_buf_size;
data = alloc_pages(GFP_ATOMIC, 0);
if (unlikely(!data)) {
@@ -2766,8 +2795,7 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
return 0;
}
-static int qede_alloc_sge_mem(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
dma_addr_t mapping;
int i;
@@ -2814,15 +2842,14 @@ err:
}
/* This function allocates all memory needed per Rx queue */
-static int qede_alloc_mem_rxq(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
int i, rc, size;
rxq->num_rx_buffers = edev->q_num_rx_buffers;
- rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
- edev->ndev->mtu;
+ rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+
if (rxq->rx_buf_size > PAGE_SIZE)
rxq->rx_buf_size = PAGE_SIZE;
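The Rx buffer sizing above is a straight clamp: alignment headroom plus
Ethernet overhead plus MTU, capped at one page because each Rx buffer is a
single page. Worked standalone (the ETH_OVERHEAD value here is illustrative;
the driver's macro differs):

#include <stdio.h>

#define NET_IP_ALIGN	2
#define ETH_OVERHEAD	32	/* illustrative value */
#define PAGE_SIZE	4096

static int rx_buf_size(int mtu)
{
	int size = NET_IP_ALIGN + ETH_OVERHEAD + mtu;

	return size > PAGE_SIZE ? PAGE_SIZE : size;
}

int main(void)
{
	printf("mtu 1500 -> %d\n", rx_buf_size(1500));	/* 1534 */
	printf("mtu 9000 -> %d\n", rx_buf_size(9000));	/* 4096 */
	return 0;
}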
@@ -2876,8 +2903,7 @@ err:
return rc;
}
-static void qede_free_mem_txq(struct qede_dev *edev,
- struct qede_tx_queue *txq)
+static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
/* Free the parallel SW ring */
kfree(txq->sw_tx_ring);
@@ -2887,8 +2913,7 @@ static void qede_free_mem_txq(struct qede_dev *edev,
}
/* This function allocates all memory needed per Tx queue */
-static int qede_alloc_mem_txq(struct qede_dev *edev,
- struct qede_tx_queue *txq)
+static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
int size, rc;
union eth_tx_bd_types *p_virt;
@@ -2920,41 +2945,45 @@ err:
}
/* This function frees all memory of a single fp */
-static void qede_free_mem_fp(struct qede_dev *edev,
- struct qede_fastpath *fp)
+static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
int tc;
qede_free_mem_sb(edev, fp->sb_info);
- qede_free_mem_rxq(edev, fp->rxq);
+ if (fp->type & QEDE_FASTPATH_RX)
+ qede_free_mem_rxq(edev, fp->rxq);
- for (tc = 0; tc < edev->num_tc; tc++)
- qede_free_mem_txq(edev, &fp->txqs[tc]);
+ if (fp->type & QEDE_FASTPATH_TX)
+ for (tc = 0; tc < edev->num_tc; tc++)
+ qede_free_mem_txq(edev, &fp->txqs[tc]);
}
/* This function allocates all memory needed for a single fp (i.e. an entity
- * which contains status block, one rx queue and multiple per-TC tx queues.
+ * which contains status block, one rx queue and/or multiple per-TC tx queues).
*/
-static int qede_alloc_mem_fp(struct qede_dev *edev,
- struct qede_fastpath *fp)
+static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
int rc, tc;
- rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
- if (rc)
- goto err;
-
- rc = qede_alloc_mem_rxq(edev, fp->rxq);
+ rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
if (rc)
goto err;
- for (tc = 0; tc < edev->num_tc; tc++) {
- rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+ if (fp->type & QEDE_FASTPATH_RX) {
+ rc = qede_alloc_mem_rxq(edev, fp->rxq);
if (rc)
goto err;
}
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+ if (rc)
+ goto err;
+ }
+ }
+
return 0;
err:
return rc;
@@ -2964,7 +2993,7 @@ static void qede_free_mem_load(struct qede_dev *edev)
{
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
qede_free_mem_fp(edev, fp);
@@ -2974,16 +3003,16 @@ static void qede_free_mem_load(struct qede_dev *edev)
/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
- int rc = 0, rss_id;
+ int rc = 0, queue_id;
- for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
- struct qede_fastpath *fp = &edev->fp_array[rss_id];
+ for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
+ struct qede_fastpath *fp = &edev->fp_array[queue_id];
rc = qede_alloc_mem_fp(edev, fp);
if (rc) {
DP_ERR(edev,
"Failed to allocate memory for fastpath - rss id = %d\n",
- rss_id);
+ queue_id);
qede_free_mem_load(edev);
return rc;
}
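qede_alloc_mem_load() keeps the usual all-or-nothing contract: if any
per-queue allocation fails, everything allocated so far is released before the
error is propagated. The same unwind shape in a self-contained form
(hypothetical helpers, not driver code):

#include <stdlib.h>

#define NQ 4

static void *bufs[NQ];

static void free_all(void)
{
	int i;

	for (i = 0; i < NQ; i++) {
		free(bufs[i]);
		bufs[i] = NULL;
	}
}

/* returns 0 on success, -1 after releasing any partial allocations */
static int alloc_all(void)
{
	int i;

	for (i = 0; i < NQ; i++) {
		bufs[i] = malloc(256);
		if (!bufs[i]) {
			free_all();
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return alloc_all() ? 1 : 0;
}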
@@ -2995,30 +3024,38 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
- int rss_id, txq_index, tc;
+ int queue_id, rxq_index = 0, txq_index = 0, tc;
struct qede_fastpath *fp;
- for_each_rss(rss_id) {
- fp = &edev->fp_array[rss_id];
+ for_each_queue(queue_id) {
+ fp = &edev->fp_array[queue_id];
fp->edev = edev;
- fp->rss_id = rss_id;
+ fp->id = queue_id;
memset((void *)&fp->napi, 0, sizeof(fp->napi));
memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
- memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
- fp->rxq->rxq_id = rss_id;
+ if (fp->type & QEDE_FASTPATH_RX) {
+ memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+ fp->rxq->rxq_id = rxq_index++;
+ }
- memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
- for (tc = 0; tc < edev->num_tc; tc++) {
- txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
- fp->txqs[tc].index = txq_index;
+ if (fp->type & QEDE_FASTPATH_TX) {
+ memset((void *)fp->txqs, 0,
+ (edev->num_tc * sizeof(*fp->txqs)));
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ fp->txqs[tc].index = txq_index +
+ tc * QEDE_TSS_COUNT(edev);
+ if (edev->dev_info.is_legacy)
+ fp->txqs[tc].is_legacy = true;
+ }
+ txq_index++;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
- edev->ndev->name, rss_id);
+ edev->ndev->name, queue_id);
}
edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
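The Tx index computation above lays the stack-visible queues out tc-major: a
fastpath's queue for traffic class tc lands at
txq_index + tc * QEDE_TSS_COUNT(edev), so all TC-0 queues come first, then all
TC-1 queues, and so on. A quick model of the mapping (counts are made up):

#include <stdio.h>

int main(void)
{
	int num_tc = 2, tss_count = 4;	/* hypothetical */
	int txq_index, tc;

	for (txq_index = 0; txq_index < tss_count; txq_index++)
		for (tc = 0; tc < num_tc; tc++)
			printf("fp tx slot %d, tc %d -> queue %d\n",
			       txq_index, tc, txq_index + tc * tss_count);
	return 0;
}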
@@ -3028,12 +3065,13 @@ static int qede_set_real_num_queues(struct qede_dev *edev)
{
int rc = 0;
- rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+ rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
if (rc) {
DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
return rc;
}
- rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+
+ rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
if (rc) {
DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
return rc;
@@ -3046,7 +3084,7 @@ static void qede_napi_disable_remove(struct qede_dev *edev)
{
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
napi_disable(&edev->fp_array[i].napi);
netif_napi_del(&edev->fp_array[i].napi);
@@ -3058,7 +3096,7 @@ static void qede_napi_add_enable(struct qede_dev *edev)
int i;
/* Add NAPI objects */
- for_each_rss(i) {
+ for_each_queue(i) {
netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
qede_poll, NAPI_POLL_WEIGHT);
napi_enable(&edev->fp_array[i].napi);
@@ -3087,14 +3125,14 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
int i, rc;
/* Sanitize number of interrupts == number of prepared RSS queues */
- if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+ if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
DP_ERR(edev,
"Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
- QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+ QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
return -EINVAL;
}
- for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
rc = request_irq(edev->int_info.msix[i].vector,
qede_msix_fp_int, 0, edev->fp_array[i].name,
&edev->fp_array[i]);
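Each fastpath owns one MSI-X vector; the fastpath pointer passed as the last
request_irq() argument comes back to qede_msix_fp_int() as dev_id, and the
per-fastpath name filled in by qede_init_fp() is what appears in
/proc/interrupts. Annotated sketch of a single registration (kernel context,
unwind path only indicated):

	rc = request_irq(edev->int_info.msix[i].vector,
			 qede_msix_fp_int,	/* handler gets dev_id back */
			 0,			/* exclusive MSI-X vector */
			 edev->fp_array[i].name,
			 &edev->fp_array[i]);	/* dev_id = this fastpath */
	if (rc)
		goto err;			/* free vectors 0..i-1 */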
@@ -3139,18 +3177,17 @@ static int qede_setup_irqs(struct qede_dev *edev)
/* qed should learn to receive the RSS ids and callbacks */
ops = edev->ops->common;
- for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
ops->simd_handler_config(edev->cdev,
&edev->fp_array[i], i,
qede_simd_fp_handler);
- edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+ edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
}
return 0;
}
static int qede_drain_txq(struct qede_dev *edev,
- struct qede_tx_queue *txq,
- bool allow_drain)
+ struct qede_tx_queue *txq, bool allow_drain)
{
int rc, cnt = 1000;
@@ -3202,45 +3239,53 @@ static int qede_stop_queues(struct qede_dev *edev)
}
/* Flush Tx queues. If needed, request drain from MCP */
- for_each_rss(i) {
+ for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qede_tx_queue *txq = &fp->txqs[tc];
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
- rc = qede_drain_txq(edev, txq, true);
- if (rc)
- return rc;
+ rc = qede_drain_txq(edev, txq, true);
+ if (rc)
+ return rc;
+ }
}
}
- /* Stop all Queues in reverse order*/
- for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+ /* Stop all Queues in reverse order */
+ for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
struct qed_stop_rxq_params rx_params;
- /* Stop the Tx Queue(s)*/
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qed_stop_txq_params tx_params;
-
- tx_params.rss_id = i;
- tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
- rc = edev->ops->q_tx_stop(cdev, &tx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ #%d\n",
- tx_params.tx_queue_id);
- return rc;
+ /* Stop the Tx Queue(s) */
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qed_stop_txq_params tx_params;
+ u8 val;
+
+ tx_params.rss_id = i;
+ val = edev->fp_array[i].txqs[tc].index;
+ tx_params.tx_queue_id = val;
+ rc = edev->ops->q_tx_stop(cdev, &tx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ #%d\n",
+ tx_params.tx_queue_id);
+ return rc;
+ }
}
}
- /* Stop the Rx Queue*/
- memset(&rx_params, 0, sizeof(rx_params));
- rx_params.rss_id = i;
- rx_params.rx_queue_id = i;
+ /* Stop the Rx Queue */
+ if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ memset(&rx_params, 0, sizeof(rx_params));
+ rx_params.rss_id = i;
+ rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;
- rc = edev->ops->q_rx_stop(cdev, &rx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
- return rc;
+ rc = edev->ops->q_rx_stop(cdev, &rx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
}
}
@@ -3263,7 +3308,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
struct qed_start_vport_params start = {0};
bool reset_rss_indir = false;
- if (!edev->num_rss) {
+ if (!edev->num_queues) {
DP_ERR(edev,
"Cannot update V-VPORT as active as there are no Rx queues\n");
return -EINVAL;
@@ -3287,50 +3332,66 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
"Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
- for_each_rss(i) {
+ for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
- dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
-
- memset(&q_params, 0, sizeof(q_params));
- q_params.rss_id = i;
- q_params.queue_id = i;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = RX_PI;
-
- rc = edev->ops->q_rx_start(cdev, &q_params,
- fp->rxq->rx_buf_size,
- fp->rxq->rx_bd_ring.p_phys_addr,
- phys_table,
- fp->rxq->rx_comp_ring.page_cnt,
- &fp->rxq->hw_rxq_prod_addr);
- if (rc) {
- DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
- return rc;
- }
+ dma_addr_t p_phys_table;
+ u32 page_cnt;
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ struct qede_rx_queue *rxq = fp->rxq;
+ __le16 *val;
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = rxq->rxq_id;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = RX_PI;
+
+ p_phys_table =
+ qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
+ page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
+
+ rc = edev->ops->q_rx_start(cdev, &q_params,
+ rxq->rx_buf_size,
+ rxq->rx_bd_ring.p_phys_addr,
+ p_phys_table,
+ page_cnt,
+ &rxq->hw_rxq_prod_addr);
+ if (rc) {
+ DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
+ rc);
+ return rc;
+ }
- fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ val = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ rxq->hw_cons_ptr = val;
- qede_update_rx_prod(edev, fp->rxq);
+ qede_update_rx_prod(edev, rxq);
+ }
+
+ if (!(fp->type & QEDE_FASTPATH_TX))
+ continue;
for (tc = 0; tc < edev->num_tc; tc++) {
struct qede_tx_queue *txq = &fp->txqs[tc];
- int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+ p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
+ page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
memset(&q_params, 0, sizeof(q_params));
q_params.rss_id = i;
- q_params.queue_id = txq_index;
+ q_params.queue_id = txq->index;
q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id;
q_params.sb_idx = TX_PI(tc);
rc = edev->ops->q_tx_start(cdev, &q_params,
- txq->tx_pbl.pbl.p_phys_table,
- txq->tx_pbl.page_cnt,
+ p_phys_table, page_cnt,
&txq->doorbell_addr);
if (rc) {
DP_ERR(edev, "Start TXQ #%d failed %d\n",
- txq_index, rc);
+ txq->index, rc);
return rc;
}
@@ -3361,13 +3422,13 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
}
/* Fill struct with RSS params */
- if (QEDE_RSS_CNT(edev) > 1) {
+ if (QEDE_RSS_COUNT(edev) > 1) {
vport_update_params.update_rss_flg = 1;
/* Need to validate current RSS config uses valid entries */
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
if (edev->rss_params.rss_ind_table[i] >=
- edev->num_rss) {
+ QEDE_RSS_COUNT(edev)) {
reset_rss_indir = true;
break;
}
@@ -3380,7 +3441,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
u16 indir_val;
- val = QEDE_RSS_CNT(edev);
+ val = QEDE_RSS_COUNT(edev);
indir_val = ethtool_rxfh_indir_default(i, val);
edev->rss_params.rss_ind_table[i] = indir_val;
}
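The rebuild above matters now that the Rx count can shrink independently of
the total: a stale indirection entry could point at a queue that no longer
exists. ethtool_rxfh_indir_default() simply spreads table entries round-robin
across the Rx rings, i.e. it is equivalent to:

#include <stdio.h>

/* same behaviour as ethtool_rxfh_indir_default(): index % n_rx_rings */
static unsigned int indir_default(unsigned int index, unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned int i, n_rx = 4;	/* hypothetical Rx queue count */

	for (i = 0; i < 8; i++)
		printf("ind_table[%u] = %u\n", i, indir_default(i, n_rx));
	return 0;
}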
@@ -3509,7 +3570,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
if (rc)
goto err1;
DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
- QEDE_RSS_CNT(edev), edev->num_tc);
+ QEDE_QUEUE_CNT(edev), edev->num_tc);
rc = qede_set_real_num_queues(edev);
if (rc)
@@ -3562,7 +3623,9 @@ err2:
err1:
edev->ops->common->set_fp_int(edev->cdev, 0);
qede_free_fp_array(edev);
- edev->num_rss = 0;
+ edev->num_queues = 0;
+ edev->fp_num_tx = 0;
+ edev->fp_num_rx = 0;
err0:
return rc;
}