Diffstat (limited to 'drivers/net/ethernet/qlogic')
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c  |   10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h                 |   51
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c             |    5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c             |  519
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h         |   11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h             | 2619
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c              |    2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c   |   22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c        |  155
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c             | 2221
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h             |    6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c              |  586
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c            |   46
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c             |  334
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h             |   14
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h        |   60
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h              |   13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c     |   81
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c             |   15
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h               |   40
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c       |    6
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c          |  990
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h           |    3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c   |   24
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c   |    3
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h               |    2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c          |   11
27 files changed, 5399 insertions, 2450 deletions
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 6409a06bbdf6..fd362b6923f4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2891,7 +2891,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
u64 qmdata;
@@ -2919,7 +2919,7 @@ netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
u64 qmdata;
@@ -2960,7 +2960,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u64 data;
int ret;
@@ -2981,7 +2981,7 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u64 data;
int ret;
@@ -3018,7 +3018,7 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
struct net_device *netdev = adapter->netdev;
struct netxen_dimm_cfg dimm;
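The kobj_to_dev() conversions above are mechanical: the helper (from <linux/device.h>) is a typed wrapper around the very container_of() expression it replaces, so behavior is unchanged while the intent becomes explicit. For reference, the upstream helper has this shape:

static inline struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}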
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 1292c360390c..fcb8e9ba51d9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -26,7 +26,7 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.4.0.0"
+#define DRV_MODULE_VERSION "8.7.0.0"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -70,8 +70,8 @@ struct qed_sb_sp_info;
struct qed_mcp_info;
struct qed_rt_data {
- u32 init_val;
- bool b_valid;
+ u32 *init_val;
+ bool *b_valid;
};
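With qed_rt_data now holding an array of init values plus a parallel validity array (instead of one value/flag pair per instance), runtime-array entries are staged per offset. A minimal sketch of the staging step, assuming both arrays are sized to the runtime-array length; the helper name is illustrative, the driver's real accessor lives in qed_init_ops.c:

static void qed_rt_store(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	/* Both arrays are indexed by the same runtime offset */
	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}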
/* The PCI personality is not quite synonymous to protocol ID:
@@ -120,6 +120,10 @@ enum QED_PORT_MODE {
QED_PORT_MODE_DE_1X25G
};
+enum qed_dev_cap {
+ QED_DEV_CAP_ETH,
+};
+
struct qed_hw_info {
/* PCI personality */
enum qed_pci_personality personality;
@@ -142,15 +146,13 @@ struct qed_hw_info {
u16 ovlan;
u32 part_num[4];
- u32 vendor_id;
- u32 device_id;
-
unsigned char hw_mac_addr[ETH_ALEN];
struct qed_igu_info *p_igu_info;
u32 port_mode;
u32 hw_mode;
+ unsigned long device_capabilities;
};
struct qed_hw_cid_data {
@@ -267,7 +269,7 @@ struct qed_hwfn {
struct qed_hw_info hw_info;
/* rt_array (for init-tool) */
- struct qed_rt_data *rt_data;
+ struct qed_rt_data rt_data;
/* SPQ */
struct qed_spq *p_spq;
@@ -301,6 +303,9 @@ struct qed_hwfn {
bool b_int_enabled;
bool b_int_requested;
+ /* True if the driver has requested the link */
+ bool b_drv_link_init;
+
struct qed_mcp_info *mcp_info;
struct qed_hw_cid_data *p_tx_cids;
@@ -350,9 +355,20 @@ struct qed_dev {
char name[NAME_SIZE];
u8 type;
-#define QED_DEV_TYPE_BB_A0 (0 << 0)
-#define QED_DEV_TYPE_MASK (0x3)
-#define QED_DEV_TYPE_SHIFT (0)
+#define QED_DEV_TYPE_BB (0 << 0)
+#define QED_DEV_TYPE_AH BIT(0)
+/* Translate type/revision combo into the proper conditions */
+#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
+#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
+ CHIP_REV_IS_A0(dev))
+#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
+ CHIP_REV_IS_B0(dev))
+
+#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
+ QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
+
+ u16 vendor_id;
+ u16 device_id;
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
@@ -361,6 +377,8 @@ struct qed_dev {
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
+#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev)
+#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
u16 chip_metal;
#define CHIP_METAL_MASK 0xff
@@ -375,10 +393,10 @@ struct qed_dev {
u8 num_funcs_in_port;
u8 path_id;
- enum mf_mode mf_mode;
-#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF)
-#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
-#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
+ enum qed_mf_mode mf_mode;
+#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
+#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)
int pcie_width;
int pcie_speed;
@@ -441,11 +459,6 @@ struct qed_dev {
const struct firmware *firmware;
};
-#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \
- QED_DEV_TYPE_SHIFT)
-#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
-#define QED_IS_BB(dev) (QED_IS_BB_A0(dev))
-
#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
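Under the reworked identity macros above, chip type and revision live in separate fields: cdev->type distinguishes BB from AH, CHIP_REV_IS_A0()/CHIP_REV_IS_B0() test cdev->chip_rev, and QED_GET_TYPE() folds both into a chip_ids value. A hedged illustration of how the ternary chain resolves:

/* For a BB device whose chip_rev field reads 1: */
if (QED_IS_BB_B0(cdev))			/* QED_IS_BB() && CHIP_REV_IS_B0() */
	id = QED_GET_TYPE(cdev);	/* resolves to CHIP_BB_B0 */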
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7ccdb46c6764..fc767c07a264 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -448,7 +448,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr;
u32 i;
- p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
+ p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
if (!p_mngr) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
return -ENOMEM;
@@ -581,7 +581,8 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.num_pf_cids = iids.cids;
params.start_pq = qm_info->start_pq;
params.num_pf_pqs = qm_info->num_pqs;
- params.start_vport = qm_info->num_vports;
+ params.start_vport = qm_info->start_vport;
+ params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl;
params.pq_params = qm_info->qm_pq_params;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 817bbd5476ff..b7d100f6bd6f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -32,6 +32,33 @@
#include "qed_sp.h"
/* API common to all protocols */
+enum BAR_ID {
+ BAR_ID_0, /* used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+ enum BAR_ID bar_id)
+{
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+
+ if (val)
+ return 1 << (val + 15);
+
+ /* Old MFW initialized the above registers only conditionally */
+ if (p_hwfn->cdev->num_hwfns > 1) {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
+ return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+ } else {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
+ return 512 * 1024;
+ }
+}
+
void qed_init_dp(struct qed_dev *cdev,
u32 dp_module, u8 dp_level)
{
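The new qed_hw_bar_size() decodes PGLUE_B_REG_PF_BAR0/1_SIZE as a power-of-two exponent: a non-zero register value val maps to 1 << (val + 15) bytes (val = 1 is 64kB, val = 4 is 512kB), while 0 means the management firmware never configured the size, so the logged defaults apply. A minimal sketch of the decode step alone:

/* Decode a PF BAR size register value into bytes; 0 = not configured */
static u32 bar_reg_to_bytes(u32 val)
{
	return val ? 1U << (val + 15) : 0;	/* e.g. 4 -> 512 * 1024 */
}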
@@ -134,17 +161,17 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
*/
qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
- num_pqs, GFP_ATOMIC);
+ num_pqs, GFP_KERNEL);
if (!qm_info->qm_pq_params)
goto alloc_err;
qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
- num_vports, GFP_ATOMIC);
+ num_vports, GFP_KERNEL);
if (!qm_info->qm_vport_params)
goto alloc_err;
qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- MAX_NUM_PORTS, GFP_ATOMIC);
+ MAX_NUM_PORTS, GFP_KERNEL);
if (!qm_info->qm_port_params)
goto alloc_err;
@@ -341,11 +368,6 @@ void qed_resc_setup(struct qed_dev *cdev)
}
}
-#define FINAL_CLEANUP_CMD_OFFSET (0)
-#define FINAL_CLEANUP_CMD (0x1)
-#define FINAL_CLEANUP_VALID_OFFSET (6)
-#define FINAL_CLEANUP_VFPF_ID_SHIFT (7)
-#define FINAL_CLEANUP_COMP (0x2)
#define FINAL_CLEANUP_POLL_CNT (100)
#define FINAL_CLEANUP_POLL_TIME (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
@@ -355,12 +377,14 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
- addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
- command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
- command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
- command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
- command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
+ command |= X_FINAL_CLEANUP_AGG_INT <<
+ SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+ command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+ command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+ command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
/* Make sure notification is not set before initiating final cleanup */
if (REG_RD(p_hwfn, addr)) {
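The rewritten command above drops the private FINAL_CLEANUP_* constants in favor of the generic SDM aggregated-interrupt layout (struct sdm_agg_int_comp_params, added to qed_hsi.h further down): the aggregated-interrupt index, a vector-enable bit and the per-VF/PF vector bit are packed into one dword together with the SDM_COMP_TYPE_AGG_INT completion type. These headers follow a shift-then-mask read convention, so a field can be recovered from such a dword like this (sketch only):

/* Recover the VF/PF vector bit from a packed command dword */
u16 vector_bit = (command >> SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT) &
		 SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK;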
@@ -396,7 +420,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
int hw_mode = 0;
- hw_mode = (1 << MODE_BB_A0);
+ hw_mode = (1 << MODE_BB_B0);
switch (p_hwfn->cdev->num_ports_in_engines) {
case 1:
@@ -415,18 +439,16 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
}
switch (p_hwfn->cdev->mf_mode) {
- case SF:
- hw_mode |= 1 << MODE_SF;
+ case QED_MF_DEFAULT:
+ case QED_MF_NPAR:
+ hw_mode |= 1 << MODE_MF_SI;
break;
- case MF_OVLAN:
+ case QED_MF_OVLAN:
hw_mode |= 1 << MODE_MF_SD;
break;
- case MF_NPAR:
- hw_mode |= 1 << MODE_MF_SI;
- break;
default:
- DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
- hw_mode |= 1 << MODE_SF;
+ DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+ hw_mode |= 1 << MODE_MF_SI;
}
hw_mode |= 1 << MODE_ASIC;
@@ -655,10 +677,8 @@ int qed_hw_init(struct qed_dev *cdev,
bool allow_npar_tx_switch,
const u8 *bin_fw_data)
{
- struct qed_storm_stats *p_stat;
- u32 load_code, param, *p_address;
+ u32 load_code, param;
int rc, mfw_rc, i;
- u8 fw_vport = 0;
rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0)
@@ -667,10 +687,6 @@ int qed_hw_init(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
- if (rc != 0)
- return rc;
-
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
@@ -734,35 +750,60 @@ int qed_hw_init(struct qed_dev *cdev,
}
p_hwfn->hw_init_done = true;
+ }
+
+ return 0;
+}
- /* init PF stats */
- p_stat = &p_hwfn->storm_stats;
- p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
- MSTORM_QUEUE_STAT_OFFSET(fw_vport);
- p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static inline void qed_hw_timers_stop(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ int i;
- p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
- USTORM_QUEUE_STAT_OFFSET(fw_vport);
- p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+ /* close timers */
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
- PSTORM_QUEUE_STAT_OFFSET(fw_vport);
- p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
- p_address = &p_stat->tstats.address;
- *p_address = BAR0_MAP_REG_TSDM_RAM +
- TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
- p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
+ /* Depending on the number of connections/tasks, a 1ms
+ * sleep may be required between polls
+ */
+ usleep_range(1000, 2000);
}
- return 0;
+ if (i < QED_HW_STOP_RETRY_LIMIT)
+ return;
+
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
+}
+
+void qed_hw_timers_stop_all(struct qed_dev *cdev)
+{
+ int j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
+ }
}
-#define QED_HW_STOP_RETRY_LIMIT (10)
int qed_hw_stop(struct qed_dev *cdev)
{
int rc = 0, t_rc;
- int i, j;
+ int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
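The hunk above factors the timers shutdown out of qed_hw_stop()/qed_hw_stop_fastpath() into qed_hw_timers_stop(), which uses a bounded-poll idiom: read the two scan-active registers up to QED_HW_STOP_RETRY_LIMIT times with a ~1ms sleep between attempts, then warn if the scans never drained. A generic sketch of that idiom (illustrative helper, not one the driver defines):

static bool qed_poll_reg_clear(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			       u32 reg, int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (!qed_rd(p_hwfn, p_ptt, reg))
			return true;		/* register drained */
		usleep_range(1000, 2000);	/* give the scan time */
	}
	return false;				/* caller should warn */
}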
@@ -775,7 +816,8 @@ int qed_hw_stop(struct qed_dev *cdev)
rc = qed_sp_pf_stop(p_hwfn);
if (rc)
- return rc;
+ DP_NOTICE(p_hwfn,
+ "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -786,24 +828,7 @@ int qed_hw_stop(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
- if ((!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK)))
- break;
-
- usleep_range(1000, 2000);
- }
- if (i == QED_HW_STOP_RETRY_LIMIT)
- DP_NOTICE(p_hwfn,
- "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN),
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK));
+ qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
/* Disable Attention Generation */
qed_int_igu_disable_int(p_hwfn, p_ptt);
@@ -832,7 +857,7 @@ int qed_hw_stop(struct qed_dev *cdev)
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
- int i, j;
+ int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
@@ -851,25 +876,6 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
- if ((!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK)))
- break;
-
- usleep_range(1000, 2000);
- }
- if (i == QED_HW_STOP_RETRY_LIMIT)
- DP_NOTICE(p_hwfn,
- "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN),
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK));
-
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
/* Need to wait 1ms to guarantee SBs are cleared */
@@ -954,18 +960,8 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
}
/* Setup bar access */
-static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
- int rc;
-
- /* Allocate PTT pool */
- rc = qed_ptt_pool_alloc(p_hwfn);
- if (rc)
- return rc;
-
- /* Allocate the main PTT */
- p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
-
/* clear indirect access */
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
@@ -980,8 +976,6 @@ static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
/* enable internal target-read */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
-
- return 0;
}
static void get_function_id(struct qed_hwfn *p_hwfn)
@@ -1016,14 +1010,17 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
u32 *resc_start = p_hwfn->hw_info.resc_start;
u32 *resc_num = p_hwfn->hw_info.resc_num;
+ struct qed_sb_cnt_info sb_cnt_info;
int num_funcs, i;
- num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
- : p_hwfn->cdev->num_ports_in_engines;
+ num_funcs = MAX_NUM_PFS_BB;
+
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
resc_num[QED_SB] = min_t(u32,
(MAX_SB_PER_PATH_BB / num_funcs),
- qed_int_get_num_sbs(p_hwfn, NULL));
+ sb_cnt_info.sb_cnt);
resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
@@ -1071,7 +1068,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
- u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
+ u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
struct qed_mcp_link_params *link;
/* Read global nvm_cfg address */
@@ -1086,13 +1083,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
- /* Read Vendor Id / Device Id */
- addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- offsetof(struct nvm_cfg1, glob) +
- offsetof(struct nvm_cfg1_glob, pci_id);
- p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
- NVM_CFG1_GLOB_VENDOR_ID_MASK;
-
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, core_cfg);
@@ -1134,21 +1124,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
break;
}
- addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
- offsetof(struct nvm_cfg1_func, device_id);
- val = qed_rd(p_hwfn, p_ptt, addr);
-
- if (IS_MF(p_hwfn)) {
- p_hwfn->hw_info.device_id =
- (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
- NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
- } else {
- p_hwfn->hw_info.device_id =
- (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
- NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
- }
-
/* Read default link configuration */
link = &p_hwfn->mcp_info->link_input;
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -1220,18 +1195,28 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
switch (mf_mode) {
case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
- p_hwfn->cdev->mf_mode = MF_OVLAN;
+ p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
- p_hwfn->cdev->mf_mode = MF_NPAR;
+ p_hwfn->cdev->mf_mode = QED_MF_NPAR;
break;
- case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
- p_hwfn->cdev->mf_mode = SF;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
break;
}
DP_INFO(p_hwfn, "Multi function mode is %08x\n",
p_hwfn->cdev->mf_mode);
+ /* Read device capabilities information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, device_capabilities);
+
+ device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+ __set_bit(QED_DEV_CAP_ETH,
+ &p_hwfn->hw_info.device_capabilities);
+
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
@@ -1291,31 +1276,38 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
return rc;
}
-static void qed_get_dev_info(struct qed_dev *cdev)
+static int qed_get_dev_info(struct qed_dev *cdev)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
u32 tmp;
- cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ /* Read Vendor Id / Device Id */
+ pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
+ &cdev->vendor_id);
+ pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
+ &cdev->device_id);
+ cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM);
- cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_REV);
MASK_FIELD(CHIP_REV, cdev->chip_rev);
+ cdev->type = QED_DEV_TYPE_BB;
/* Learn number of HW-functions */
- tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CMT_ENABLED_FOR_PAIR);
- if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
+ if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
cdev->num_hwfns = 2;
} else {
cdev->num_hwfns = 1;
}
- cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_TEST_REG) >> 4;
MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
- cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, cdev->chip_metal);
@@ -1323,6 +1315,14 @@ static void qed_get_dev_info(struct qed_dev *cdev)
"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
+
+ if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
+ DP_NOTICE(cdev->hwfns,
+ "The chip type/rev (BB A0) is not supported!\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
@@ -1345,15 +1345,24 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
get_function_id(p_hwfn);
- rc = qed_hw_hwfn_prepare(p_hwfn);
+ /* Allocate PTT pool */
+ rc = qed_ptt_pool_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
goto err0;
}
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
/* First hwfn learns basic information, e.g., number of hwfns */
- if (!p_hwfn->my_id)
- qed_get_dev_info(p_hwfn->cdev);
+ if (!p_hwfn->my_id) {
+ rc = qed_get_dev_info(p_hwfn->cdev);
+ if (rc != 0)
+ goto err1;
+ }
+
+ qed_hw_hwfn_prepare(p_hwfn);
/* Initialize MCP structure */
rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
@@ -1385,17 +1394,6 @@ err0:
return rc;
}
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
- u8 bar_id)
-{
- u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
- : PGLUE_B_REG_PF_BAR1_SIZE);
- u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
-
- /* Get the BAR size(in KB) from hardware given val */
- return 1 << (val + 15);
-}
-
int qed_hw_prepare(struct qed_dev *cdev,
int personality)
{
@@ -1420,11 +1418,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
u8 __iomem *addr;
/* adjust bar offset for second engine */
- addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
+ addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
p_regview = addr;
/* adjust doorbell bar offset for second engine */
- addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
+ addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
p_doorbell = addr;
/* prepare second hw function */
@@ -1536,223 +1534,6 @@ void qed_chain_free(struct qed_dev *cdev,
p_chain->p_phys_addr);
}
-static void __qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
-{
- int i, j;
-
- memset(stats, 0, sizeof(*stats));
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct eth_mstorm_per_queue_stat mstats;
- struct eth_ustorm_per_queue_stat ustats;
- struct eth_pstorm_per_queue_stat pstats;
- struct tstorm_per_port_stat tstats;
- struct port_stats port_stats;
- struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
-
- if (!p_ptt) {
- DP_ERR(p_hwfn, "Failed to acquire ptt\n");
- continue;
- }
-
- memset(&mstats, 0, sizeof(mstats));
- qed_memcpy_from(p_hwfn, p_ptt, &mstats,
- p_hwfn->storm_stats.mstats.address,
- p_hwfn->storm_stats.mstats.len);
-
- memset(&ustats, 0, sizeof(ustats));
- qed_memcpy_from(p_hwfn, p_ptt, &ustats,
- p_hwfn->storm_stats.ustats.address,
- p_hwfn->storm_stats.ustats.len);
-
- memset(&pstats, 0, sizeof(pstats));
- qed_memcpy_from(p_hwfn, p_ptt, &pstats,
- p_hwfn->storm_stats.pstats.address,
- p_hwfn->storm_stats.pstats.len);
-
- memset(&tstats, 0, sizeof(tstats));
- qed_memcpy_from(p_hwfn, p_ptt, &tstats,
- p_hwfn->storm_stats.tstats.address,
- p_hwfn->storm_stats.tstats.len);
-
- memset(&port_stats, 0, sizeof(port_stats));
-
- if (p_hwfn->mcp_info)
- qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
- p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port, stats),
- sizeof(port_stats));
- qed_ptt_release(p_hwfn, p_ptt);
-
- stats->no_buff_discards +=
- HILO_64_REGPAIR(mstats.no_buff_discard);
- stats->packet_too_big_discard +=
- HILO_64_REGPAIR(mstats.packet_too_big_discard);
- stats->ttl0_discard +=
- HILO_64_REGPAIR(mstats.ttl0_discard);
- stats->tpa_coalesced_pkts +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
- stats->tpa_coalesced_events +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- stats->tpa_aborts_num +=
- HILO_64_REGPAIR(mstats.tpa_aborts_num);
- stats->tpa_coalesced_bytes +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
-
- stats->rx_ucast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- stats->rx_mcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- stats->rx_bcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- stats->rx_ucast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- stats->rx_mcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- stats->rx_bcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
-
- stats->mftag_filter_discards +=
- HILO_64_REGPAIR(tstats.mftag_filter_discard);
- stats->mac_filter_discards +=
- HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
-
- stats->tx_ucast_bytes +=
- HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- stats->tx_mcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- stats->tx_bcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- stats->tx_ucast_pkts +=
- HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- stats->tx_mcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- stats->tx_bcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- stats->tx_err_drop_pkts +=
- HILO_64_REGPAIR(pstats.error_drop_pkts);
- stats->rx_64_byte_packets += port_stats.pmm.r64;
- stats->rx_127_byte_packets += port_stats.pmm.r127;
- stats->rx_255_byte_packets += port_stats.pmm.r255;
- stats->rx_511_byte_packets += port_stats.pmm.r511;
- stats->rx_1023_byte_packets += port_stats.pmm.r1023;
- stats->rx_1518_byte_packets += port_stats.pmm.r1518;
- stats->rx_1522_byte_packets += port_stats.pmm.r1522;
- stats->rx_2047_byte_packets += port_stats.pmm.r2047;
- stats->rx_4095_byte_packets += port_stats.pmm.r4095;
- stats->rx_9216_byte_packets += port_stats.pmm.r9216;
- stats->rx_16383_byte_packets += port_stats.pmm.r16383;
- stats->rx_crc_errors += port_stats.pmm.rfcs;
- stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
- stats->rx_pause_frames += port_stats.pmm.rxpf;
- stats->rx_pfc_frames += port_stats.pmm.rxpp;
- stats->rx_align_errors += port_stats.pmm.raln;
- stats->rx_carrier_errors += port_stats.pmm.rfcr;
- stats->rx_oversize_packets += port_stats.pmm.rovr;
- stats->rx_jabbers += port_stats.pmm.rjbr;
- stats->rx_undersize_packets += port_stats.pmm.rund;
- stats->rx_fragments += port_stats.pmm.rfrg;
- stats->tx_64_byte_packets += port_stats.pmm.t64;
- stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
- stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
- stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
- stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
- stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
- stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
- stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
- stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
- stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
- stats->tx_pause_frames += port_stats.pmm.txpf;
- stats->tx_pfc_frames += port_stats.pmm.txpp;
- stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
- stats->tx_total_collisions += port_stats.pmm.tncl;
- stats->rx_mac_bytes += port_stats.pmm.rbyte;
- stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
- stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
- stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
- stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
- stats->tx_mac_bytes += port_stats.pmm.tbyte;
- stats->tx_mac_uc_packets += port_stats.pmm.txuca;
- stats->tx_mac_mc_packets += port_stats.pmm.txmca;
- stats->tx_mac_bc_packets += port_stats.pmm.txbca;
- stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
-
- for (j = 0; j < 8; j++) {
- stats->brb_truncates += port_stats.brb.brb_truncate[j];
- stats->brb_discards += port_stats.brb.brb_discard[j];
- }
- }
-}
-
-void qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
-{
- u32 i;
-
- if (!cdev) {
- memset(stats, 0, sizeof(*stats));
- return;
- }
-
- __qed_get_vport_stats(cdev, stats);
-
- if (!cdev->reset_stats)
- return;
-
- /* Reduce the statistics baseline */
- for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
- ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
-}
-
-/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
-void qed_reset_vport_stats(struct qed_dev *cdev)
-{
- int i;
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct eth_mstorm_per_queue_stat mstats;
- struct eth_ustorm_per_queue_stat ustats;
- struct eth_pstorm_per_queue_stat pstats;
- struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
-
- if (!p_ptt) {
- DP_ERR(p_hwfn, "Failed to acquire ptt\n");
- continue;
- }
-
- memset(&mstats, 0, sizeof(mstats));
- qed_memcpy_to(p_hwfn, p_ptt,
- p_hwfn->storm_stats.mstats.address,
- &mstats,
- p_hwfn->storm_stats.mstats.len);
-
- memset(&ustats, 0, sizeof(ustats));
- qed_memcpy_to(p_hwfn, p_ptt,
- p_hwfn->storm_stats.ustats.address,
- &ustats,
- p_hwfn->storm_stats.ustats.len);
-
- memset(&pstats, 0, sizeof(pstats));
- qed_memcpy_to(p_hwfn, p_ptt,
- p_hwfn->storm_stats.pstats.address,
- &pstats,
- p_hwfn->storm_stats.pstats.len);
-
- qed_ptt_release(p_hwfn, p_ptt);
- }
-
- /* PORT statistics are not necessarily reset, so we need to
- * read and create a baseline for future statistics.
- */
- if (!cdev->reset_stats)
- DP_INFO(cdev, "Reset stats not allocated\n");
- else
- __qed_get_vport_stats(cdev, cdev->reset_stats);
-}
-
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
u16 src_id, u16 *dst_id)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index e29a3ba6c8b0..d6c7ddf4f4d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -78,6 +78,15 @@ int qed_hw_init(struct qed_dev *cdev,
const u8 *bin_fw_data);
/**
+ * @brief qed_hw_timers_stop_all - stop the timers HW block
+ *
+ * @param cdev
+ *
+ * @return void
+ */
+void qed_hw_timers_stop_all(struct qed_dev *cdev);
+
+/**
* @brief qed_hw_stop -
*
* @param cdev
@@ -156,8 +165,6 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
*/
void qed_ptt_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
-void qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
enum qed_dmae_address_type_t {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 264e954675d1..a368f5e71d95 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -34,6 +34,8 @@ enum common_event_opcode {
COMMON_EVENT_RESERVED3,
COMMON_EVENT_RESERVED4,
COMMON_EVENT_RESERVED5,
+ COMMON_EVENT_RESERVED6,
+ COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
};
@@ -45,6 +47,7 @@ enum common_ramrod_cmd_id {
COMMON_RAMROD_RESERVED,
COMMON_RAMROD_RESERVED2,
COMMON_RAMROD_RESERVED3,
+ COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
};
@@ -331,6 +334,179 @@ struct xstorm_core_conn_ag_ctx {
__le16 word15 /* word15 */;
};
+struct tstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_core_conn_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 word1 /* word1 */;
+ __le32 rx_producers /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+};
+
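The relocated tstorm/ustorm aggregative-context structures keep the HSI convention used throughout this header: each sub-byte field in a flags byte is described by a *_MASK/*_SHIFT pair and read by shifting right, then masking. A hedged sketch of the accessor shape (qed wraps this pattern in token-pasting GET_FIELD()/SET_FIELD() macros; the macro name below is illustrative):

#define HSI_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

/* e.g. read the timer0 completion-flag field of a tstorm context */
u8 cf0 = HSI_GET_FIELD(ctx->flags0, TSTORM_CORE_CONN_AG_CTX_CF0);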
/* The core storm context for the Mstorm */
struct mstorm_core_conn_st_ctx {
__le32 reserved[24];
@@ -349,8 +525,9 @@ struct core_conn_context {
struct regpair pstorm_st_padding[2];
struct xstorm_core_conn_st_ctx xstorm_st_context;
struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
struct mstorm_core_conn_st_ctx mstorm_st_context;
- struct regpair mstorm_st_padding[2];
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2] /* padding */;
};
@@ -397,10 +574,12 @@ union event_ring_element {
};
enum personality_type {
+ BAD_PERSONALITY_TYP,
PERSONALITY_RESERVED,
PERSONALITY_RESERVED2,
PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */,
PERSONALITY_RESERVED3,
+ PERSONALITY_CORE,
PERSONALITY_ETH /* Ethernet */,
PERSONALITY_RESERVED4,
MAX_PERSONALITY_TYPE
@@ -570,7 +749,7 @@ enum block_addr {
GRCBASE_NWM = 0x800000,
GRCBASE_NWS = 0x700000,
GRCBASE_MS = 0x6a0000,
- GRCBASE_PHY_PCIE = 0x618000,
+ GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -789,19 +968,19 @@ struct igu_msix_vector {
enum init_modes {
MODE_BB_A0,
- MODE_RESERVED,
+ MODE_BB_B0,
MODE_RESERVED2,
MODE_ASIC,
MODE_RESERVED3,
MODE_RESERVED4,
MODE_RESERVED5,
+ MODE_RESERVED6,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
MODE_PORTS_PER_ENG_1,
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
- MODE_40G,
MODE_100G,
MODE_EAGLE_ENG1_WORKAROUND,
MAX_INIT_MODES
@@ -816,43 +995,6 @@ enum init_phases {
MAX_INIT_PHASES
};
-struct mstorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0 /* word0 */;
- __le16 word1 /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
-};
-
/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {
u8 flags;
@@ -945,6 +1087,17 @@ struct qm_rf_pq_map {
#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
};
+/* Completion params for aggregated interrupt completion */
+struct sdm_agg_int_comp_params {
+ __le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+};
+
/* SDM operation gen command (generate aggregative interrupt) */
struct sdm_op_gen {
__le32 command;
@@ -956,223 +1109,6 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
-struct tstorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
- u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
- u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
- u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
- u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* reg5 */;
- __le32 reg6 /* reg6 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* word0 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- __le16 word1 /* word1 */;
- __le16 word2 /* conn_dpi */;
- __le16 word3 /* word3 */;
- __le32 reg9 /* reg9 */;
- __le32 reg10 /* reg10 */;
-};
-
-struct ustorm_core_conn_ag_ctx {
- u8 reserved /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
- u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 word1 /* word1 */;
- __le32 rx_producers /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le16 word2 /* word2 */;
- __le16 word3 /* word3 */;
-};
-
-struct ystorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* word0 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le16 word1 /* word1 */;
- __le16 word2 /* word2 */;
- __le16 word3 /* word3 */;
- __le16 word4 /* word4 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
-};
-
/*********************************** Init ************************************/
/* Width of GRC address in bits (addresses are specified in dwords) */
@@ -1274,13 +1210,6 @@ enum chip_ids {
MAX_CHIP_IDS
};
-enum idle_chk_severity_types {
- IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
- IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
- IDLE_CHK_SEVERITY_WARNING,
- MAX_IDLE_CHK_SEVERITY_TYPES
-};
-
struct init_array_raw_hdr {
__le32 data;
#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
@@ -1340,14 +1269,6 @@ struct init_callback_op {
__le16 block_id /* Blocks ID */;
};
-/* init comparison types */
-enum init_comparison_types {
- INIT_COMPARISON_EQ /* init value is included in the init command */,
- INIT_COMPARISON_OR /* init value is all zeros */,
- INIT_COMPARISON_AND /* init value is an array of values */,
- MAX_INIT_COMPARISON_TYPES
-};
-
/* init operation: delay */
struct init_delay_op {
__le32 op_data;
@@ -1444,12 +1365,10 @@ struct init_read_op {
__le32 op_data;
#define INIT_READ_OP_OP_MASK 0xF
#define INIT_READ_OP_OP_SHIFT 0
-#define INIT_READ_OP_POLL_COMP_MASK 0x7
-#define INIT_READ_OP_POLL_COMP_SHIFT 4
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
#define INIT_READ_OP_RESERVED_MASK 0x1
-#define INIT_READ_OP_RESERVED_SHIFT 7
-#define INIT_READ_OP_POLL_MASK 0x1
-#define INIT_READ_OP_POLL_SHIFT 8
+#define INIT_READ_OP_RESERVED_SHIFT 8
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT 9
__le32 expected_val;
@@ -1477,6 +1396,14 @@ enum init_op_types {
MAX_INIT_OP_TYPES
};
+enum init_poll_types {
+ INIT_POLL_NONE /* No polling */,
+ INIT_POLL_EQ /* poll until the read value equals the expected value */,
+ INIT_POLL_OR /* poll using a bitwise OR comparison */,
+ INIT_POLL_AND /* poll using a bitwise AND comparison */,
+ MAX_INIT_POLL_TYPES
+};
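The widened POLL_TYPE field replaces the old 3-bit POLL_COMP field plus its separate POLL flag in init_read_op; the poll variant is now selected by writing one init_poll_types value into bits [7:4] of op_data. Sketch of the encoding step:

/* Request an equality poll in a read-op header (sketch) */
op_data |= INIT_POLL_EQ << INIT_READ_OP_POLL_TYPE_SHIFT;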
+
/* init source types */
enum init_source_types {
INIT_SRC_INLINE /* init value is included in the init command */,
@@ -1677,175 +1604,213 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
u16 num_pqs);
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + \
- ((port_id) * \
- IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[2].base + ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[2].base + \
- ((vf_id) * \
- IRO[2].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[2].size)
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+ (IRO[3].base + ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET (IRO[3].base)
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[3].size)
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[4].base + \
- ((pf_id) * \
- IRO[4].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[4].size)
-/* Ustorm Completion ring consumer */
-#define USTORM_CQ_CONS_OFFSET(global_queue_id) (IRO[5].base + \
- ((global_queue_id) * \
- IRO[5].m1))
-#define USTORM_CQ_CONS_SIZE (IRO[5].size)
+#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
+ (IRO[6].base + ((global_queue_id) * IRO[6].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[6].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[6].size)
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[12].base + \
- ((core_rx_queue_id) * \
- IRO[12].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[12].size)
-/* Tstorm LiteL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \
- ((core_rx_q_id) * \
- IRO[13].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[13].size)
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
+ (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size)
+/* Tstorm LightL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \
- ((core_rx_q_id) * \
- IRO[14].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
/* Pstorm LiteL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \
- ((core_txst_id) * \
- IRO[15].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \
- ((stat_counter_id) * \
- IRO[16].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[16].size)
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[17].base + ((stat_counter_id) * IRO[17].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size)
/* Mstorm producers */
-#define MSTORM_PRODS_OFFSET(queue_id) (IRO[17].base + \
- ((queue_id) * \
- IRO[17].m1))
-#define MSTORM_PRODS_SIZE (IRO[17].size)
+#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1))
+#define MSTORM_PRODS_SIZE (IRO[18].size)
/* TPA aggregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[18].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[18].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size)
/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[19].base + \
- ((stat_counter_id) * \
- IRO[19].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[19].size)
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[20].base + ((stat_counter_id) * IRO[20].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
/* Ustorm queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[20].base + \
- ((queue_id) * \
- IRO[20].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[20].size)
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+ (IRO[21].base + ((queue_id) * IRO[21].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size)
/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[21].base + \
- ((stat_counter_id) * \
- IRO[21].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[21].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size)
/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id) (IRO[22].base + \
- ((pf_id) * \
- IRO[22].m1))
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[22].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size)
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size)
/* Ystorm queue zone */
-#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[23].base + \
- ((queue_id) * \
- IRO[23].m1))
-#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[23].size)
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+ (IRO[25].base + ((queue_id) * IRO[25].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size)
/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[24].base + \
- ((rss_id) * \
- IRO[24].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[24].size)
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[26].base + ((rss_id) * IRO[26].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size)
/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[25].base + \
- ((rss_id) * \
- IRO[25].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[25].size)
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[27].base + ((rss_id) * IRO[27].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size)
/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[26].base + \
- ((pf_id) * \
- IRO[26].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[26].size)
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[28].base + ((pf_id) * IRO[28].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size)
/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[27].base + \
- ((cmdq_queue_id) * \
- IRO[27].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[27].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+ (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size)
/* Mstorm rq-cons of given queue-id */
-#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) (IRO[28].base + \
- ((rq_queue_id) * \
- IRO[28].m1))
-#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[28].size)
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
+ (IRO[30].base + ((rq_queue_id) * IRO[30].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size)
+/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size)
+/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BDqueue-id */
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size)
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size)
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[34].base + ((pf_id) * IRO[34].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size)
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[35].base + ((pf_id) * IRO[35].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size)
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[36].base + ((pf_id) * IRO[36].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size)
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size)
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size)
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size)
+/* Mstorm FCoE RX stats */
+#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size)
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size)
/* Pstorm RoCE statistics */
-#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[29].base + \
- ((stat_counter_id) * \
- IRO[29].m1))
-#define PSTORM_ROCE_STAT_SIZE (IRO[29].size)
+#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
+ (IRO[42].base + ((stat_counter_id) * IRO[42].m1))
+#define PSTORM_ROCE_STAT_SIZE (IRO[42].size)
/* Tstorm RoCE statistics */
-#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \
- ((stat_counter_id) * \
- IRO[30].m1))
-#define TSTORM_ROCE_STAT_SIZE (IRO[30].size)
-
-static const struct iro iro_arr[31] = {
- { 0x10, 0x0, 0x0, 0x0, 0x8 },
- { 0x4448, 0x60, 0x0, 0x0, 0x60 },
- { 0x498, 0x8, 0x0, 0x0, 0x4 },
- { 0x494, 0x0, 0x0, 0x0, 0x4 },
- { 0x10, 0x8, 0x0, 0x0, 0x2 },
- { 0x90, 0x8, 0x0, 0x0, 0x2 },
- { 0x4540, 0x0, 0x0, 0x0, 0xf8 },
- { 0x39e0, 0x0, 0x0, 0x0, 0xf8 },
- { 0x2598, 0x0, 0x0, 0x0, 0xf8 },
- { 0x4350, 0x0, 0x0, 0x0, 0xf8 },
- { 0x52d0, 0x0, 0x0, 0x0, 0xf8 },
- { 0x7a48, 0x0, 0x0, 0x0, 0xf8 },
- { 0x100, 0x8, 0x0, 0x0, 0x8 },
- { 0x5808, 0x10, 0x0, 0x0, 0x10 },
- { 0xb100, 0x30, 0x0, 0x0, 0x30 },
- { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
- { 0x54f8, 0x40, 0x0, 0x0, 0x40 },
- { 0x200, 0x10, 0x0, 0x0, 0x8 },
- { 0x9e70, 0x0, 0x0, 0x0, 0x4 },
- { 0x7ca0, 0x40, 0x0, 0x0, 0x30 },
- { 0xd00, 0x8, 0x0, 0x0, 0x8 },
- { 0x2790, 0x80, 0x0, 0x0, 0x38 },
- { 0xa520, 0xf0, 0x0, 0x0, 0xf0 },
- { 0x80, 0x8, 0x0, 0x0, 0x8 },
- { 0xac0, 0x8, 0x0, 0x0, 0x8 },
- { 0x2580, 0x8, 0x0, 0x0, 0x8 },
- { 0x2500, 0x8, 0x0, 0x0, 0x8 },
- { 0x440, 0x8, 0x0, 0x0, 0x2 },
- { 0x1800, 0x8, 0x0, 0x0, 0x2 },
- { 0x27c8, 0x80, 0x0, 0x0, 0x10 },
- { 0x4710, 0x10, 0x0, 0x0, 0x10 },
+#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
+ (IRO[43].base + ((stat_counter_id) * IRO[43].m1))
+#define TSTORM_ROCE_STAT_SIZE (IRO[43].size)
+
+static const struct iro iro_arr[44] = {
+ { 0x10, 0x0, 0x0, 0x0, 0x8 },
+ { 0x47c8, 0x60, 0x0, 0x0, 0x60 },
+ { 0x5e30, 0x20, 0x0, 0x0, 0x20 },
+ { 0x510, 0x8, 0x0, 0x0, 0x4 },
+ { 0x490, 0x8, 0x0, 0x0, 0x4 },
+ { 0x10, 0x8, 0x0, 0x0, 0x2 },
+ { 0x90, 0x8, 0x0, 0x0, 0x2 },
+ { 0x4940, 0x0, 0x0, 0x0, 0x78 },
+ { 0x3de0, 0x0, 0x0, 0x0, 0x78 },
+ { 0x2998, 0x0, 0x0, 0x0, 0x78 },
+ { 0x4750, 0x0, 0x0, 0x0, 0x78 },
+ { 0x56d0, 0x0, 0x0, 0x0, 0x78 },
+ { 0x7e50, 0x0, 0x0, 0x0, 0x78 },
+ { 0x100, 0x8, 0x0, 0x0, 0x8 },
+ { 0x5c10, 0x10, 0x0, 0x0, 0x10 },
+ { 0xb508, 0x30, 0x0, 0x0, 0x30 },
+ { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
+ { 0x58a0, 0x40, 0x0, 0x0, 0x40 },
+ { 0x200, 0x10, 0x0, 0x0, 0x8 },
+ { 0xa230, 0x0, 0x0, 0x0, 0x4 },
+ { 0x8058, 0x40, 0x0, 0x0, 0x30 },
+ { 0xd00, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2b30, 0x80, 0x0, 0x0, 0x38 },
+ { 0xa808, 0x0, 0x0, 0x0, 0xf0 },
+ { 0xa8f8, 0x8, 0x0, 0x0, 0x8 },
+ { 0x80, 0x8, 0x0, 0x0, 0x8 },
+ { 0xac0, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2580, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2500, 0x8, 0x0, 0x0, 0x8 },
+ { 0x440, 0x8, 0x0, 0x0, 0x2 },
+ { 0x1800, 0x8, 0x0, 0x0, 0x2 },
+ { 0x1a00, 0x10, 0x8, 0x0, 0x2 },
+ { 0x640, 0x10, 0x8, 0x0, 0x2 },
+ { 0xd9b8, 0x38, 0x0, 0x0, 0x24 },
+ { 0x11048, 0x10, 0x0, 0x0, 0x8 },
+ { 0x11678, 0x38, 0x0, 0x0, 0x18 },
+ { 0xaec0, 0x30, 0x0, 0x0, 0x10 },
+ { 0x8700, 0x28, 0x0, 0x0, 0x18 },
+ { 0xec00, 0x10, 0x0, 0x0, 0x10 },
+ { 0xde38, 0x40, 0x0, 0x0, 0x30 },
+ { 0x121a8, 0x38, 0x0, 0x0, 0x8 },
+ { 0xf068, 0x20, 0x0, 0x0, 0x20 },
+ { 0x2b68, 0x80, 0x0, 0x0, 0x10 },
+ { 0x4ab8, 0x10, 0x0, 0x0, 0x10 },
};
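
Each accessor above resolves one IRO table entry to a byte offset inside the
corresponding storm RAM. The sketch below shows a struct layout consistent
with the five-value initializers above (base, m1, m2 and size are the fields
the macros actually reference; the fourth column is a further multiplier
unused by the macros shown here), plus a worked example against the updated
table. The read path noted in the comment follows the pattern in qed_l2.c but
is not restated verbatim.

struct iro {
	u32 base;	/* RAM-relative base of the data item */
	u16 m1;		/* stride for the first index */
	u16 m2;		/* stride for the second index */
	u16 m3;		/* further stride (unused by the macros above) */
	u16 size;	/* byte size of one element */
};

/* Worked example against entry 17 of the table above:
 *   MSTORM_QUEUE_STAT_OFFSET(5) = IRO[17].base + 5 * IRO[17].m1
 *                               = 0x58a0 + 5 * 0x40 = 0x59e0
 * The result is used relative to the Mstorm RAM window; the driver copies
 * the statistics out through BAR0 with qed_memcpy_from(), as in qed_l2.c.
 */
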
/* Runtime array offsets */
@@ -1866,426 +1831,427 @@ static const struct iro iro_arr[31] = {
#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 17
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 20
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 21
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 22
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 23
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1496
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2232
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6648
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6652
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6653
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6654
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6655
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6657
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6658
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6661
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6662
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6663
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6665
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6667
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6668
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6669
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6670
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6674
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6675
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6676
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6686
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6687
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6694
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6695
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6696
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6697
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6698
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6699
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6700
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6701
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28701
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28702
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28703
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28704
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28705
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28706
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28707
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28708
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28709
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28710
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28711
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29127
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29639
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29640
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29641
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29705
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29834
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29836
#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29854
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856
#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29874
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29875
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29876
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29877
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29878
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29879
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29880
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29894
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29895
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29896
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29898
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29899
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29900
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29901
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29902
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29964
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29965
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29966
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29980
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29981
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29982
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29992
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29993
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29880
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29966
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30249
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30505
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30507
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30761
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30762
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30763
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30764
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30764
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30766
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30780
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30796
+#define QM_REG_RLPFCRD_RT_OFFSET 30798
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30812
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30813
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30814
+#define QM_REG_RLPFENABLE_RT_OFFSET 30814
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30830
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30846
+#define QM_REG_WFQPFCRD_RT_OFFSET 30848
#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31006
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31007
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31008
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31008
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31009
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31520
+#define QM_REG_TXPQMAP_RT_OFFSET 31522
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32032
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32544
-#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 33056
+#define QM_REG_WFQVPCRD_RT_OFFSET 32546
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33568
+#define QM_REG_WFQVPMAP_RT_OFFSET 33058
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34080
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET 34240
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34241
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34242
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34243
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34244
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34245
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34246
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34247
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34251
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34255
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34259
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34260
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34292
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34308
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34324
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34340
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34356
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34357
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34358
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34359
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34360
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34361
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34362
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34363
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34364
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34365
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34366
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34367
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34368
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34369
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34370
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34371
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34372
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34373
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34374
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34375
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34376
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34377
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34378
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34379
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34380
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34381
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34382
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34383
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34384
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34385
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34386
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34387
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34388
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34389
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34390
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34391
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34392
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34393
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34394
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34395
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34396
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34397
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34398
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34399
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34400
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34401
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34402
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34403
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34404
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34405
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34406
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34407
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34408
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34409
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34410
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34411
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34412
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34413
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34414
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34415
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34416
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34417
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34418
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34419
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34420
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34421
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34422
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34423
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34424
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34425
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34426
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34427
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34428
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34429
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34430
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34431
-
-#define RUNTIME_ARRAY_SIZE 34432
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33860
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922
+
+#define RUNTIME_ARRAY_SIZE 33923
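
The *_RT_OFFSET values above index a host-side shadow of the chip's runtime
array, which init code fills and later programs into the hardware in one pass.
A minimal sketch of that bookkeeping follows, assuming one value array plus
one valid-slot array; store_rt_reg() and the array names are illustrative
(in the driver, the STORE_RT_REG macro plays this role).

static u32  rt_init_val[RUNTIME_ARRAY_SIZE];	/* values to program */
static bool rt_b_valid[RUNTIME_ARRAY_SIZE];	/* which slots were set */

static void store_rt_reg(u32 rt_offset, u32 val)
{
	rt_init_val[rt_offset] = val;
	rt_b_valid[rt_offset] = true;
}

static void example_timers_setup(void)
{
	/* e.g. enable PF connection timers (runtime offset 28709 above) */
	store_rt_reg(TM_REG_PF_ENABLE_CONN_RT_OFFSET, 0x1);
}
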
-/* The eth storm context for the Ystorm */
-struct ystorm_eth_conn_st_ctx {
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
__le32 reserved[4];
};
@@ -2562,14 +2528,226 @@ struct xstorm_eth_conn_ag_ctx {
__le16 word15 /* word15 */;
};
-/* The eth storm context for the Tstorm */
-struct tstorm_eth_conn_st_ctx {
- __le32 reserved[4];
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+ __le32 reserved[8];
};
-/* The eth storm context for the Mstorm */
-struct mstorm_eth_conn_st_ctx {
- __le32 reserved[8];
+struct ystorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 terminate_spqe /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 tx_bd_cons_upd /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
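The flags bytes of these aggregation contexts are packed with the mask/shift
pairs defined alongside them. Below is a standalone sketch of arming the
Ystorm TX BD consumer-update flow-control field together with its enable bit;
SET_FIELD restates the driver's usual helper, and the wrapper function is an
illustrative name.

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name##_MASK) << (name##_SHIFT));		\
		(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT);	\
	} while (0)

static void ystorm_arm_tx_bd_cons_upd(struct ystorm_eth_conn_ag_ctx *ctx)
{
	/* CF0 carries the TX BD consumer update; cf0en gates it */
	SET_FIELD(ctx->flags0, YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF, 1);
	SET_FIELD(ctx->flags1, YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN, 1);
}
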
+struct tstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 rx_bd_cons /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 rx_bd_prod /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 tx_bd_cons /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 tx_int_coallecing_timeset /* reg3 */;
+ __le16 tx_drv_bd_cons /* word2 */;
+ __le16 rx_drv_cqe_cons /* word3 */;
};
/* The eth storm context for the Ustorm */
@@ -2577,24 +2755,30 @@ struct ustorm_eth_conn_st_ctx {
__le32 reserved[40];
};
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
/* eth connection context */
struct eth_conn_context {
- struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2] /* padding */;
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
struct pstorm_eth_conn_st_ctx pstorm_st_context;
- struct regpair pstorm_st_padding[2] /* padding */;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
- struct tstorm_eth_conn_st_ctx tstorm_st_context;
- struct regpair tstorm_st_padding[2] /* padding */;
- struct mstorm_eth_conn_st_ctx mstorm_st_context;
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
enum eth_filter_action {
ETH_FILTER_ACTION_REMOVE,
ETH_FILTER_ACTION_ADD,
- ETH_FILTER_ACTION_REPLACE,
+ ETH_FILTER_ACTION_REMOVE_ALL,
MAX_ETH_FILTER_ACTION
};
@@ -2653,6 +2837,32 @@ enum eth_ramrod_cmd_id {
MAX_ETH_RAMROD_CMD_ID
};
+enum eth_tx_err {
+ ETH_TX_ERR_DROP /* Drop erroneous packet. */,
+ ETH_TX_ERR_ASSERT_MALICIOUS,
+ MAX_ETH_TX_ERR
+};
+
+struct eth_tx_err_vals {
+ __le16 values;
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+};
+
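Like the rest of this header, eth_tx_err_vals packs sub-fields into one word through paired _MASK/_SHIFT defines. A minimal sketch of driving them, assuming a generic helper (set_le16_field() is illustrative, not a qed API):

	/* Illustrative helper: program one sub-field of a packed __le16. */
	static void set_le16_field(__le16 *p, u16 mask, u16 shift, u16 val)
	{
		u16 v = le16_to_cpu(*p);

		v &= ~(mask << shift);
		v |= (val & mask) << shift;
		*p = cpu_to_le16(v);
	}

	struct eth_tx_err_vals tx_err = { 0 };

	/* Ask firmware to drop MTU violations rather than assert: */
	set_le16_field(&tx_err.values, ETH_TX_ERR_VALS_MTU_VIOLATION_MASK,
		       ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT, ETH_TX_ERR_DROP);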
struct eth_vport_rss_config {
__le16 capabilities;
#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
@@ -2669,12 +2879,8 @@ struct eth_vport_rss_config {
#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
u8 rss_id;
u8 rss_mode;
u8 update_rss_key;
@@ -2713,7 +2919,19 @@ struct eth_vport_rx_mode {
};
struct eth_vport_tpa_param {
- u64 reserved[2];
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+ u8 max_buff_num;
+ u8 reserved;
};
struct eth_vport_tx_mode {
@@ -2749,10 +2967,14 @@ struct rx_queue_start_ramrod_data {
u8 pxp_tph_valid_pkt;
u8 pxp_st_hint;
__le16 pxp_st_index;
- u8 reserved[4];
- struct regpair cqe_pbl_addr;
- struct regpair bd_base;
- struct regpair sge_base;
+ u8 pmd_mode;
+ u8 notify_en;
+ u8 toggle_val;
+ u8 reserved[7];
+ __le16 reserved1;
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair reserved2;
};
struct rx_queue_stop_ramrod_data {
@@ -2764,23 +2986,24 @@ struct rx_queue_stop_ramrod_data {
};
struct rx_queue_update_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 init_sge_ring_flg;
- u8 vport_id;
- u8 pxp_tph_valid_sge;
- u8 pxp_st_hint;
- __le16 pxp_st_index;
- u8 reserved[6];
- struct regpair sge_base;
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[4];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+ struct regpair reserved6;
};
struct tx_queue_start_ramrod_data {
__le16 sb_id;
u8 sb_index;
u8 vport_id;
- u8 tc;
+ u8 reserved0;
u8 stats_counter_id;
__le16 qm_pq_id;
u8 flags;
@@ -2790,18 +3013,25 @@ struct tx_queue_start_ramrod_data {
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3
- u8 pin_context;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- __le16 pxp_st_index;
- u8 pxp_st_hint;
- u8 reserved1[3];
- __le16 queue_zone_id;
- __le16 test_dup_count;
- __le16 pbl_size;
- struct regpair pbl_base_addr;
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+ u8 pxp_st_hint;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ __le16 comp_agg_size;
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ __le16 tx_queue_id;
+ struct regpair pbl_base_addr;
+ struct regpair bd_cons_address;
};
struct tx_queue_stop_ramrod_data {
@@ -2822,16 +3052,16 @@ struct vport_start_ramrod_data {
struct eth_vport_rx_mode rx_mode;
struct eth_vport_tx_mode tx_mode;
struct eth_vport_tpa_param tpa_param;
- __le16 sge_buff_size;
- u8 max_sges_num;
- u8 tx_switching_en;
- u8 anti_spoofing_en;
- u8 default_vlan_en;
- u8 handle_ptp_pkts;
- u8 silent_vlan_removal_en;
- __le16 default_vlan;
- u8 untagged;
- u8 reserved[7];
+ __le16 default_vlan;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+ u8 default_vlan_en;
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ u8 untagged;
+ struct eth_tx_err_vals tx_err_behav;
+ u8 zero_placement_offset;
+ u8 reserved[7];
};
struct vport_stop_ramrod_data {
@@ -2840,36 +3070,35 @@ struct vport_stop_ramrod_data {
};
struct vport_update_ramrod_data_cmn {
- u8 vport_id;
- u8 update_rx_active_flg;
- u8 rx_active_flg;
- u8 update_tx_active_flg;
- u8 tx_active_flg;
- u8 update_rx_mode_flg;
- u8 update_tx_mode_flg;
- u8 update_approx_mcast_flg;
- u8 update_rss_flg;
- u8 update_inner_vlan_removal_en_flg;
- u8 inner_vlan_removal_en;
- u8 update_tpa_param_flg;
- u8 update_tpa_en_flg;
- u8 update_sge_param_flg;
- __le16 sge_buff_size;
- u8 max_sges_num;
- u8 update_tx_switching_en_flg;
- u8 tx_switching_en;
- u8 update_anti_spoofing_en_flg;
- u8 anti_spoofing_en;
- u8 update_handle_ptp_pkts;
- u8 handle_ptp_pkts;
- u8 update_default_vlan_en_flg;
- u8 default_vlan_en;
- u8 update_default_vlan_flg;
- __le16 default_vlan;
- u8 update_accept_any_vlan_flg;
- u8 accept_any_vlan;
- u8 silent_vlan_removal_en;
- u8 reserved;
+ u8 vport_id;
+ u8 update_rx_active_flg;
+ u8 rx_active_flg;
+ u8 update_tx_active_flg;
+ u8 tx_active_flg;
+ u8 update_rx_mode_flg;
+ u8 update_tx_mode_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_rss_flg;
+ u8 update_inner_vlan_removal_en_flg;
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg;
+ u8 update_tx_switching_en_flg;
+ u8 tx_switching_en;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_handle_ptp_pkts;
+ u8 handle_ptp_pkts;
+ u8 update_default_vlan_en_flg;
+ u8 default_vlan_en;
+ u8 update_default_vlan_flg;
+ __le16 default_vlan;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ u8 silent_vlan_removal_en;
+ u8 update_mtu_flg;
+ __le16 mtu;
+ u8 reserved[2];
};
struct vport_update_ramrod_mcast {
@@ -2885,436 +3114,6 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct mstorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0 /* word0 */;
- __le16 word1 /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
-};
-
-struct tstorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
- u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
- u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
- u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
- u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* reg5 */;
- __le32 reg6 /* reg6 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 rx_bd_cons /* word0 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- __le16 rx_bd_prod /* word1 */;
- __le16 word2 /* conn_dpi */;
- __le16 word3 /* word3 */;
- __le32 reg9 /* reg9 */;
- __le32 reg10 /* reg10 */;
-};
-
-struct ustorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
- u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 tx_bd_cons /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le16 tx_drv_bd_cons /* word2 */;
- __le16 rx_drv_cqe_cons /* word3 */;
-};
-
-struct xstorm_eth_hw_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
- u8 flags3;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
- u8 flags5;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
- u8 flags6;
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
- u8 flags7;
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
- u8 flags8;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
- u8 flags9;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
- u8 flags10;
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
- u8 flags11;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
- u8 flags12;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
- u8 flags13;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
- u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 word1 /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 go_to_bd_cons /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
-};
-
#define VF_MAX_STATIC 192 /* In case of K2 */
#define MCP_GLOB_PATH_MAX 2
@@ -3818,6 +3617,13 @@ struct public_port {
struct dcbx_local_params local_admin_dcbx_mib;
struct dcbx_mib remote_dcbx_mib;
struct dcbx_mib operational_dcbx_mib;
+
+ u32 fc_npiv_nvram_tbl_addr;
+ u32 fc_npiv_nvram_tbl_size;
+ u32 transceiver_data;
+#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF
+#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001
};
/**************************************/
@@ -3830,7 +3636,11 @@ struct public_func {
u32 iscsi_boot_signature;
u32 iscsi_boot_block_offset;
- u32 reserved[8];
+ u32 mtu_size;
+ u32 c2s_pcp_map_lower;
+ u32 c2s_pcp_map_upper;
+ u32 c2s_pcp_map_default;
+ u32 reserved[4];
u32 config;
@@ -3894,10 +3704,10 @@ struct public_func {
#define DRV_ID_MCP_HSI_VER_SHIFT 16
#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT)
-#define DRV_ID_DRV_TYPE_MASK 0xff000000
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24
#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
@@ -3905,6 +3715,10 @@ struct public_func {
#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT)
};
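The DRV_TYPE field shrinks from eight bits to seven so that bit 31 can carry the new INIT_HW flag; a load-request param word is then the HSI version, the driver type, and optionally that flag. A hedged sketch of the composition (first_load is an illustrative variable):

	u32 drv_id = DRV_ID_MCP_HSI_VER_CURRENT | DRV_ID_DRV_TYPE_LINUX;

	if (first_load)			/* illustrative condition */
		drv_id |= DRV_ID_DRV_INIT_HW_FLAG;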
/**************************************/
@@ -3964,6 +3778,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MASK 0xffff0000
#define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_INIT_HW 0x12000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000
@@ -4100,6 +3915,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
+#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -4142,6 +3958,14 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_BW_UPDATE,
+ MFW_DRV_MSG_S_TAG_UPDATE,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
+ MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_MAX
};
@@ -4212,7 +4036,7 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF 0x1
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
@@ -4643,8 +4467,12 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
-
- u32 reserved[46]; /* 0x88 */
+ u32 device_capabilities; /* 0x88 */
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+ u32 power_dissipated; /* 0x8C */
+ u32 power_consumed; /* 0x90 */
+ u32 efi_version; /* 0x94 */
+ u32 reserved[42]; /* 0x98 */
};
struct nvm_cfg1_path {
@@ -4652,26 +4480,8 @@ struct nvm_cfg1_path {
};
struct nvm_cfg1_port {
- u32 power_dissipated; /* 0x0 */
-#define NVM_CFG1_PORT_POWER_DIS_D0_MASK 0x000000FF
-#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET 0
-#define NVM_CFG1_PORT_POWER_DIS_D1_MASK 0x0000FF00
-#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET 8
-#define NVM_CFG1_PORT_POWER_DIS_D2_MASK 0x00FF0000
-#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET 16
-#define NVM_CFG1_PORT_POWER_DIS_D3_MASK 0xFF000000
-#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET 24
-
- u32 power_consumed; /* 0x4 */
-#define NVM_CFG1_PORT_POWER_CONS_D0_MASK 0x000000FF
-#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET 0
-#define NVM_CFG1_PORT_POWER_CONS_D1_MASK 0x0000FF00
-#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET 8
-#define NVM_CFG1_PORT_POWER_CONS_D2_MASK 0x00FF0000
-#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET 16
-#define NVM_CFG1_PORT_POWER_CONS_D3_MASK 0xFF000000
-#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET 24
-
+ u32 reserved__m_relocated_to_option_123; /* 0x0 */
+ u32 reserved__m_relocated_to_option_124; /* 0x4 */
u32 generic_cont0; /* 0x8 */
#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
@@ -4699,7 +4509,9 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
u32 pcie_cfg; /* 0xC */
#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
@@ -4784,10 +4596,11 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0xD
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0xE
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0xF
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
@@ -4801,9 +4614,6 @@ struct nvm_cfg1_port {
u32 mgmt_traffic; /* 0x20 */
#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED61_DISABLED 0x0
-#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII 0x1
-#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS 0x2
u32 ext_phy; /* 0x24 */
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
@@ -4814,16 +4624,12 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
u32 mba_cfg1; /* 0x28 */
-#define NVM_CFG1_PORT_MBA_MASK 0x00000001
-#define NVM_CFG1_PORT_MBA_OFFSET 0
-#define NVM_CFG1_PORT_MBA_DISABLED 0x0
-#define NVM_CFG1_PORT_MBA_ENABLED 0x1
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK 0x00000006
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET 1
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO 0x0
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS 0x1
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H 0x2
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H 0x3
+#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
+#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
+#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
+#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
@@ -4836,61 +4642,30 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
-#define NVM_CFG1_PORT_RESERVED5_DISABLED 0x0
-#define NVM_CFG1_PORT_RESERVED5_2K 0x1
-#define NVM_CFG1_PORT_RESERVED5_4K 0x2
-#define NVM_CFG1_PORT_RESERVED5_8K 0x3
-#define NVM_CFG1_PORT_RESERVED5_16K 0x4
-#define NVM_CFG1_PORT_RESERVED5_32K 0x5
-#define NVM_CFG1_PORT_RESERVED5_64K 0x6
-#define NVM_CFG1_PORT_RESERVED5_128K 0x7
-#define NVM_CFG1_PORT_RESERVED5_256K 0x8
-#define NVM_CFG1_PORT_RESERVED5_512K 0x9
-#define NVM_CFG1_PORT_RESERVED5_1M 0xA
-#define NVM_CFG1_PORT_RESERVED5_2M 0xB
-#define NVM_CFG1_PORT_RESERVED5_4M 0xC
-#define NVM_CFG1_PORT_RESERVED5_8M 0xD
-#define NVM_CFG1_PORT_RESERVED5_16M 0xE
-#define NVM_CFG1_PORT_RESERVED5_32M 0xF
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK 0x001E0000
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET 17
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
-#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET 21
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
u32 mba_cfg2; /* 0x2C */
-#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET 0
-#define NVM_CFG1_PORT_MBA_VLAN_MASK 0x00010000
-#define NVM_CFG1_PORT_MBA_VLAN_OFFSET 16
+#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
+#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
u32 vf_cfg; /* 0x30 */
#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
-#define NVM_CFG1_PORT_RESERVED6_DISABLED 0x0
-#define NVM_CFG1_PORT_RESERVED6_4K 0x1
-#define NVM_CFG1_PORT_RESERVED6_8K 0x2
-#define NVM_CFG1_PORT_RESERVED6_16K 0x3
-#define NVM_CFG1_PORT_RESERVED6_32K 0x4
-#define NVM_CFG1_PORT_RESERVED6_64K 0x5
-#define NVM_CFG1_PORT_RESERVED6_128K 0x6
-#define NVM_CFG1_PORT_RESERVED6_256K 0x7
-#define NVM_CFG1_PORT_RESERVED6_512K 0x8
-#define NVM_CFG1_PORT_RESERVED6_1M 0x9
-#define NVM_CFG1_PORT_RESERVED6_2M 0xA
-#define NVM_CFG1_PORT_RESERVED6_4M 0xB
-#define NVM_CFG1_PORT_RESERVED6_8M 0xC
-#define NVM_CFG1_PORT_RESERVED6_16M 0xD
-#define NVM_CFG1_PORT_RESERVED6_32M 0xE
-#define NVM_CFG1_PORT_RESERVED6_64M 0xF
struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
@@ -4973,18 +4748,16 @@ struct nvm_cfg1_func {
u32 device_id; /* 0x10 */
#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
-#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET 16
+#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
u32 cmn_cfg; /* 0x14 */
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK 0x00000007
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET 0
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE 0x0
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL 0x1
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP 0x2
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT 0x3
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT 0x4
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE 0x7
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
@@ -5029,8 +4802,8 @@ struct nvm_cfg1_func {
struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
-
- u32 reserved[9]; /* 0x2C */
+ u32 preboot_generic_cfg; /* 0x2C */
+ u32 reserved[8]; /* 0x30 */
};
struct nvm_cfg1 {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index ffa99273b353..a95a3e4b3101 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -44,7 +44,7 @@ struct qed_ptt_pool {
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
- GFP_ATOMIC);
+ GFP_KERNEL);
int i;
if (!p_pool)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 0b21a553cc7d..f55ebdc3c832 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -513,17 +513,14 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
* Return -1 on error.
*/
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
- u8 start_vport,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
- u8 tc, i, vport_id;
u32 inc_val;
+ u8 tc, i;
/* go over all PF VPORTs */
- for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
- u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
+ for (i = 0; i < num_vports; i++) {
if (!vport_params[i].vport_wfq)
continue;
@@ -539,20 +536,16 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
* different TCs
*/
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = pq_ids[tc];
+ u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPWEIGHT_RT_OFFSET +
- vport_pq_id, inc_val);
- STORE_RT_REG(p_hwfn, temp + vport_pq_id,
- QM_WFQ_UPPER_BOUND |
- QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
QM_REG_WFQVPCRD_RT_OFFSET +
vport_pq_id,
- QM_WFQ_INIT_CRD(inc_val) |
QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
}
}
}
@@ -709,8 +702,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;
- if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
- p_params->num_vports, vport_params))
+ if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 796f1390e598..3269b3610e03 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -55,63 +55,98 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
int i;
for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
- p_hwfn->rt_data[i].b_valid = false;
+ p_hwfn->rt_data.b_valid[i] = false;
}
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 val)
{
- p_hwfn->rt_data[rt_offset].init_val = val;
- p_hwfn->rt_data[rt_offset].b_valid = true;
+ p_hwfn->rt_data.init_val[rt_offset] = val;
+ p_hwfn->rt_data.b_valid[rt_offset] = true;
}
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
- u32 rt_offset,
- u32 *val,
+ u32 rt_offset, u32 *p_val,
size_t size)
{
size_t i;
for (i = 0; i < size / sizeof(u32); i++) {
- p_hwfn->rt_data[rt_offset + i].init_val = val[i];
- p_hwfn->rt_data[rt_offset + i].b_valid = true;
+ p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
+ p_hwfn->rt_data.b_valid[rt_offset + i] = true;
}
}
-static void qed_init_rt(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 addr,
- u32 rt_offset,
- u32 size)
+static int qed_init_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u16 rt_offset,
+ u16 size,
+ bool b_must_dmae)
{
- struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
- u32 i;
+ u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+ bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+ u16 i, segment;
+ int rc = 0;
+ /* Not all RT entries are initialized, so walk the RT array and
+ * DMA each contiguous segment of initialized values.
+ */
for (i = 0; i < size; i++) {
- if (!rt_data[i].b_valid)
+ if (!p_valid[i])
continue;
- qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
+
+ /* In case there isn't any wide-bus configuration here,
+ * simply write the data instead of using dmae.
+ */
+ if (!b_must_dmae) {
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2),
+ p_init_val[i]);
+ continue;
+ }
+
+ /* Start of a new segment */
+ for (segment = 1; i + segment < size; segment++)
+ if (!p_valid[i + segment])
+ break;
+
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(p_init_val + i),
+ addr + (i << 2), segment, 0);
+ if (rc != 0)
+ return rc;
+
+ /* Jump over the entire segment, including the invalid entry */
+ i += segment;
}
+
+ return rc;
}
int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
- struct qed_rt_data *rt_data;
+ struct qed_rt_data *rt_data = &p_hwfn->rt_data;
- rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC);
- if (!rt_data)
+ rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
+ GFP_KERNEL);
+ if (!rt_data->b_valid)
return -ENOMEM;
- p_hwfn->rt_data = rt_data;
+ rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
+ GFP_KERNEL);
+ if (!rt_data->init_val) {
+ kfree(rt_data->b_valid);
+ return -ENOMEM;
+ }
return 0;
}
void qed_init_free(struct qed_hwfn *p_hwfn)
{
- kfree(p_hwfn->rt_data);
- p_hwfn->rt_data = NULL;
+ kfree(p_hwfn->rt_data.init_val);
+ kfree(p_hwfn->rt_data.b_valid);
}
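The allocation change above is the visible half of an array-of-structs to struct-of-arrays refactor: rather than RUNTIME_ARRAY_SIZE {b_valid, init_val} pairs, the hwfn now keeps two parallel arrays, which is what lets qed_init_rt() pass a contiguous block of init_val words straight to the DMA engine. The reworked qed_rt_data presumably ends up shaped like this (see qed.h for the authoritative definition):

	struct qed_rt_data {
		u32 *init_val;	/* RUNTIME_ARRAY_SIZE staged values */
		bool *b_valid;	/* parallel per-entry validity flags */
	};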
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
@@ -289,7 +324,8 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
case INIT_SRC_RUNTIME:
qed_init_rt(p_hwfn, p_ptt, addr,
le16_to_cpu(arg->runtime.offset),
- le16_to_cpu(arg->runtime.size));
+ le16_to_cpu(arg->runtime.size),
+ b_must_dmae);
break;
}
@@ -316,49 +352,50 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_read_op *cmd)
{
- u32 data = le32_to_cpu(cmd->op_data);
- u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+ bool (*comp_check)(u32 val, u32 expected_val);
+ u32 delay = QED_INIT_POLL_PERIOD_US, val;
+ u32 data, addr, poll;
+ int i;
+
+ data = le32_to_cpu(cmd->op_data);
+ addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+ poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
- bool (*comp_check)(u32 val,
- u32 expected_val);
- u32 delay = QED_INIT_POLL_PERIOD_US, val;
val = qed_rd(p_hwfn, p_ptt, addr);
- data = le32_to_cpu(cmd->op_data);
- if (GET_FIELD(data, INIT_READ_OP_POLL)) {
- int i;
+ if (poll == INIT_POLL_NONE)
+ return;
- switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
- case INIT_COMPARISON_EQ:
- comp_check = comp_eq;
- break;
- case INIT_COMPARISON_OR:
- comp_check = comp_or;
- break;
- case INIT_COMPARISON_AND:
- comp_check = comp_and;
- break;
- default:
- comp_check = NULL;
- DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
- data);
- return;
- }
+ switch (poll) {
+ case INIT_POLL_EQ:
+ comp_check = comp_eq;
+ break;
+ case INIT_POLL_OR:
+ comp_check = comp_or;
+ break;
+ case INIT_POLL_AND:
+ comp_check = comp_and;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+ data);
+ return;
+ }
- for (i = 0;
- i < QED_INIT_MAX_POLL_COUNT &&
- !comp_check(val, le32_to_cpu(cmd->expected_val));
- i++) {
- udelay(delay);
- val = qed_rd(p_hwfn, p_ptt, addr);
- }
+ data = le32_to_cpu(cmd->expected_val);
+ for (i = 0;
+ i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
+ i++) {
+ udelay(delay);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ }
- if (i == QED_INIT_MAX_POLL_COUNT)
- DP_ERR(p_hwfn,
- "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
- addr, le32_to_cpu(cmd->expected_val),
- val, data);
+ if (i == QED_INIT_MAX_POLL_COUNT) {
+ DP_ERR(p_hwfn,
+ "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+ addr, le32_to_cpu(cmd->expected_val),
+ val, le32_to_cpu(cmd->op_data));
}
}
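The polling path now resolves the INIT_POLL_* type into a comparator once, then spins up to QED_INIT_MAX_POLL_COUNT reads. The comp_eq/comp_or/comp_and predicates are defined earlier in qed_init_ops.c; their likely shape, sketched here for reference rather than quoted:

	static bool comp_eq(u32 val, u32 expected_val)
	{
		return val == expected_val;
	}

	static bool comp_and(u32 val, u32 expected_val)
	{
		return (val & expected_val) == expected_val;
	}

	static bool comp_or(u32 val, u32 expected_val)
	{
		return (val | expected_val) > 0;
	}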
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 9cc9d62c1fec..2017b0121f5f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -39,24 +39,1737 @@ struct qed_sb_sp_info {
struct qed_pi_info pi_info_arr[PIS_PER_SB];
};
+enum qed_attention_type {
+ QED_ATTN_TYPE_ATTN,
+ QED_ATTN_TYPE_PARITY,
+};
+
#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
-#define ATTN_STATE_BITS (0xfff)
+struct aeu_invert_reg_bit {
+ char bit_name[30];
+
+#define ATTENTION_PARITY (1 << 0)
+
+#define ATTENTION_LENGTH_MASK (0x00000ff0)
+#define ATTENTION_LENGTH_SHIFT (4)
+#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
+ ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
+#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
+ ATTENTION_PARITY)
+
+/* Multiple bits start with this offset */
+#define ATTENTION_OFFSET_MASK (0x000ff000)
+#define ATTENTION_OFFSET_SHIFT (12)
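+
+ /* 'flags' packs three sub-fields: bit 0 marks a parity source,
+  * bits [11:4] hold the number of consecutive AEU bits covered by
+  * this entry, and bits [19:12] hold the starting offset of a
+  * multi-bit entry.
+  */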
+ unsigned int flags;
+
+ /* Callback invoked if this attention is triggered */
+ int (*cb)(struct qed_hwfn *p_hwfn);
+
+ enum block_id block_index;
+};
+
+struct aeu_invert_reg {
+ struct aeu_invert_reg_bit bits[32];
+};
+
+#define MAX_ATTN_GRPS (8)
+#define NUM_ATTN_REGS (9)
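+
+/* Nine 32-bit AEU "after invert" registers (mirrored by aeu_descs below),
+ * whose bits can each be routed to one of eight attention groups.
+ */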
+
+/* HW Attention register */
+struct attn_hw_reg {
+ u16 reg_idx; /* Index of this register in its block */
+ u16 num_of_bits; /* number of valid attention bits */
+ u32 sts_addr; /* Address of the STS register */
+ u32 sts_clr_addr; /* Address of the STS_CLR register */
+ u32 sts_wr_addr; /* Address of the STS_WR register */
+ u32 mask_addr; /* Address of the MASK register */
+};
+
+/* HW block attention registers */
+struct attn_hw_regs {
+ u16 num_of_int_regs; /* Number of interrupt regs */
+ u16 num_of_prty_regs; /* Number of parity regs */
+ struct attn_hw_reg **int_regs; /* interrupt regs */
+ struct attn_hw_reg **prty_regs; /* parity regs */
+};
+
+/* HW block attention registers */
+struct attn_hw_block {
+ const char *name; /* Block name */
+ struct attn_hw_regs chip_regs[1];
+};
+
+static struct attn_hw_reg grc_int0_bb_b0 = {
+ 0, 4, 0x50180, 0x5018c, 0x50188, 0x50184};
+
+static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
+ &grc_int0_bb_b0};
+
+static struct attn_hw_reg grc_prty1_bb_b0 = {
+ 0, 2, 0x50200, 0x5020c, 0x50208, 0x50204};
+
+static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
+ &grc_prty1_bb_b0};
+
+static struct attn_hw_reg miscs_int0_bb_b0 = {
+ 0, 3, 0x9180, 0x918c, 0x9188, 0x9184};
+
+static struct attn_hw_reg miscs_int1_bb_b0 = {
+ 1, 11, 0x9190, 0x919c, 0x9198, 0x9194};
+
+static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
+ &miscs_int0_bb_b0, &miscs_int1_bb_b0};
+
+static struct attn_hw_reg miscs_prty0_bb_b0 = {
+ 0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4};
+
+static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
+ &miscs_prty0_bb_b0};
+
+static struct attn_hw_reg misc_int0_bb_b0 = {
+ 0, 1, 0x8180, 0x818c, 0x8188, 0x8184};
+
+static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
+ &misc_int0_bb_b0};
+
+static struct attn_hw_reg pglue_b_int0_bb_b0 = {
+ 0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184};
+
+static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
+ &pglue_b_int0_bb_b0};
+
+static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
+ 0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194};
+
+static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
+ 1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204};
+
+static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
+ &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0};
+
+static struct attn_hw_reg cnig_int0_bb_b0 = {
+ 0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec};
+
+static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
+ &cnig_int0_bb_b0};
+
+static struct attn_hw_reg cnig_prty0_bb_b0 = {
+ 0, 2, 0x218348, 0x218354, 0x218350, 0x21834c};
+
+static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
+ &cnig_prty0_bb_b0};
+
+static struct attn_hw_reg cpmu_int0_bb_b0 = {
+ 0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4};
+
+static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
+ &cpmu_int0_bb_b0};
+
+static struct attn_hw_reg ncsi_int0_bb_b0 = {
+ 0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0};
+
+static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
+ &ncsi_int0_bb_b0};
+
+static struct attn_hw_reg ncsi_prty1_bb_b0 = {
+ 0, 1, 0x40000, 0x4000c, 0x40008, 0x40004};
+
+static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
+ &ncsi_prty1_bb_b0};
+
+static struct attn_hw_reg opte_prty1_bb_b0 = {
+ 0, 11, 0x53000, 0x5300c, 0x53008, 0x53004};
+
+static struct attn_hw_reg opte_prty0_bb_b0 = {
+ 1, 1, 0x53208, 0x53214, 0x53210, 0x5320c};
+
+static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
+ &opte_prty1_bb_b0, &opte_prty0_bb_b0};
+
+static struct attn_hw_reg bmb_int0_bb_b0 = {
+ 0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4};
+
+static struct attn_hw_reg bmb_int1_bb_b0 = {
+ 1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc};
+
+static struct attn_hw_reg bmb_int2_bb_b0 = {
+ 2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4};
+
+static struct attn_hw_reg bmb_int3_bb_b0 = {
+ 3, 31, 0x540108, 0x540114, 0x540110, 0x54010c};
+
+static struct attn_hw_reg bmb_int4_bb_b0 = {
+ 4, 27, 0x540120, 0x54012c, 0x540128, 0x540124};
+
+static struct attn_hw_reg bmb_int5_bb_b0 = {
+ 5, 29, 0x540138, 0x540144, 0x540140, 0x54013c};
+
+static struct attn_hw_reg bmb_int6_bb_b0 = {
+ 6, 30, 0x540150, 0x54015c, 0x540158, 0x540154};
+
+static struct attn_hw_reg bmb_int7_bb_b0 = {
+ 7, 32, 0x540168, 0x540174, 0x540170, 0x54016c};
+
+static struct attn_hw_reg bmb_int8_bb_b0 = {
+ 8, 32, 0x540184, 0x540190, 0x54018c, 0x540188};
+
+static struct attn_hw_reg bmb_int9_bb_b0 = {
+ 9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0};
+
+static struct attn_hw_reg bmb_int10_bb_b0 = {
+ 10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8};
+
+static struct attn_hw_reg bmb_int11_bb_b0 = {
+ 11, 4, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0};
+
+static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
+ &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
+ &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
+ &bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0};
+
+static struct attn_hw_reg bmb_prty0_bb_b0 = {
+ 0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0};
+
+static struct attn_hw_reg bmb_prty1_bb_b0 = {
+ 1, 31, 0x540400, 0x54040c, 0x540408, 0x540404};
+
+static struct attn_hw_reg bmb_prty2_bb_b0 = {
+ 2, 15, 0x540410, 0x54041c, 0x540418, 0x540414};
+
+static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
+ &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0};
+
+static struct attn_hw_reg pcie_prty1_bb_b0 = {
+ 0, 17, 0x54000, 0x5400c, 0x54008, 0x54004};
+
+static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
+ &pcie_prty1_bb_b0};
+
+static struct attn_hw_reg mcp2_prty0_bb_b0 = {
+ 0, 1, 0x52040, 0x5204c, 0x52048, 0x52044};
+
+static struct attn_hw_reg mcp2_prty1_bb_b0 = {
+ 1, 12, 0x52204, 0x52210, 0x5220c, 0x52208};
+
+static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
+ &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0};
+
+static struct attn_hw_reg pswhst_int0_bb_b0 = {
+ 0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184};
+
+static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
+ &pswhst_int0_bb_b0};
+
+static struct attn_hw_reg pswhst_prty0_bb_b0 = {
+ 0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194};
+
+static struct attn_hw_reg pswhst_prty1_bb_b0 = {
+ 1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204};
+
+static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
+ &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0};
+
+static struct attn_hw_reg pswhst2_int0_bb_b0 = {
+ 0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184};
+
+static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
+ &pswhst2_int0_bb_b0};
+
+static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
+ 0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194};
+
+static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
+ &pswhst2_prty0_bb_b0};
+
+static struct attn_hw_reg pswrd_int0_bb_b0 = {
+ 0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184};
+
+static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
+ &pswrd_int0_bb_b0};
+
+static struct attn_hw_reg pswrd_prty0_bb_b0 = {
+ 0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194};
+
+static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
+ &pswrd_prty0_bb_b0};
+
+static struct attn_hw_reg pswrd2_int0_bb_b0 = {
+ 0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184};
+
+static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
+ &pswrd2_int0_bb_b0};
+
+static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
+ 0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194};
+
+static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
+ 1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204};
+
+static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
+ 2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214};
+
+static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
+ &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0};
+
+static struct attn_hw_reg pswwr_int0_bb_b0 = {
+ 0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184};
+
+static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
+ &pswwr_int0_bb_b0};
+
+static struct attn_hw_reg pswwr_prty0_bb_b0 = {
+ 0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194};
+
+static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
+ &pswwr_prty0_bb_b0};
+
+static struct attn_hw_reg pswwr2_int0_bb_b0 = {
+ 0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184};
+
+static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
+ &pswwr2_int0_bb_b0};
+
+static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
+ 0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194};
+
+static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
+ 1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204};
+
+static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
+ 2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214};
+
+static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
+ 3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224};
+
+static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
+ 4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234};
+
+static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
+ &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
+ &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0};
+
+static struct attn_hw_reg pswrq_int0_bb_b0 = {
+ 0, 21, 0x280180, 0x28018c, 0x280188, 0x280184};
+
+static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
+ &pswrq_int0_bb_b0};
+
+static struct attn_hw_reg pswrq_prty0_bb_b0 = {
+ 0, 1, 0x280190, 0x28019c, 0x280198, 0x280194};
+
+static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
+ &pswrq_prty0_bb_b0};
+
+static struct attn_hw_reg pswrq2_int0_bb_b0 = {
+ 0, 15, 0x240180, 0x24018c, 0x240188, 0x240184};
+
+static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
+ &pswrq2_int0_bb_b0};
+
+static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
+ 0, 9, 0x240200, 0x24020c, 0x240208, 0x240204};
+
+static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
+ &pswrq2_prty1_bb_b0};
+
+static struct attn_hw_reg pglcs_int0_bb_b0 = {
+ 0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04};
+
+static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
+ &pglcs_int0_bb_b0};
+
+static struct attn_hw_reg dmae_int0_bb_b0 = {
+ 0, 2, 0xc180, 0xc18c, 0xc188, 0xc184};
+
+static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
+ &dmae_int0_bb_b0};
+
+static struct attn_hw_reg dmae_prty1_bb_b0 = {
+ 0, 3, 0xc200, 0xc20c, 0xc208, 0xc204};
+
+static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
+ &dmae_prty1_bb_b0};
+
+static struct attn_hw_reg ptu_int0_bb_b0 = {
+ 0, 8, 0x560180, 0x56018c, 0x560188, 0x560184};
+
+static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
+ &ptu_int0_bb_b0};
+
+static struct attn_hw_reg ptu_prty1_bb_b0 = {
+ 0, 18, 0x560200, 0x56020c, 0x560208, 0x560204};
+
+static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
+ &ptu_prty1_bb_b0};
+
+static struct attn_hw_reg tcm_int0_bb_b0 = {
+ 0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184};
+
+static struct attn_hw_reg tcm_int1_bb_b0 = {
+ 1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194};
+
+static struct attn_hw_reg tcm_int2_bb_b0 = {
+ 2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4};
+
+static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
+ &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0};
+
+static struct attn_hw_reg tcm_prty1_bb_b0 = {
+ 0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204};
+
+static struct attn_hw_reg tcm_prty2_bb_b0 = {
+ 1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214};
+
+static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
+ &tcm_prty1_bb_b0, &tcm_prty2_bb_b0};
+
+static struct attn_hw_reg mcm_int0_bb_b0 = {
+ 0, 14, 0x1200180, 0x120018c, 0x1200188, 0x1200184};
+
+static struct attn_hw_reg mcm_int1_bb_b0 = {
+ 1, 26, 0x1200190, 0x120019c, 0x1200198, 0x1200194};
+
+static struct attn_hw_reg mcm_int2_bb_b0 = {
+ 2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4};
+
+static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
+ &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0};
+
+static struct attn_hw_reg mcm_prty1_bb_b0 = {
+ 0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204};
+
+static struct attn_hw_reg mcm_prty2_bb_b0 = {
+ 1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214};
+
+static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
+ &mcm_prty1_bb_b0, &mcm_prty2_bb_b0};
+
+static struct attn_hw_reg ucm_int0_bb_b0 = {
+ 0, 17, 0x1280180, 0x128018c, 0x1280188, 0x1280184};
+
+static struct attn_hw_reg ucm_int1_bb_b0 = {
+ 1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194};
+
+static struct attn_hw_reg ucm_int2_bb_b0 = {
+ 2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4};
+
+static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
+ &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0};
+
+static struct attn_hw_reg ucm_prty1_bb_b0 = {
+ 0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204};
+
+static struct attn_hw_reg ucm_prty2_bb_b0 = {
+ 1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214};
+
+static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
+ &ucm_prty1_bb_b0, &ucm_prty2_bb_b0};
+
+static struct attn_hw_reg xcm_int0_bb_b0 = {
+ 0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184};
+
+static struct attn_hw_reg xcm_int1_bb_b0 = {
+ 1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194};
+
+static struct attn_hw_reg xcm_int2_bb_b0 = {
+ 2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4};
+
+static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
+ &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0};
+
+static struct attn_hw_reg xcm_prty1_bb_b0 = {
+ 0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204};
+
+static struct attn_hw_reg xcm_prty2_bb_b0 = {
+ 1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214};
+
+static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
+ &xcm_prty1_bb_b0, &xcm_prty2_bb_b0};
+
+static struct attn_hw_reg ycm_int0_bb_b0 = {
+ 0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184};
+
+static struct attn_hw_reg ycm_int1_bb_b0 = {
+ 1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194};
+
+static struct attn_hw_reg ycm_int2_bb_b0 = {
+ 2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4};
+
+static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
+ &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0};
+
+static struct attn_hw_reg ycm_prty1_bb_b0 = {
+ 0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204};
+
+static struct attn_hw_reg ycm_prty2_bb_b0 = {
+ 1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214};
+
+static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
+ &ycm_prty1_bb_b0, &ycm_prty2_bb_b0};
+
+static struct attn_hw_reg pcm_int0_bb_b0 = {
+ 0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184};
+
+static struct attn_hw_reg pcm_int1_bb_b0 = {
+ 1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194};
+
+static struct attn_hw_reg pcm_int2_bb_b0 = {
+ 2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4};
+
+static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
+ &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0};
+
+static struct attn_hw_reg pcm_prty1_bb_b0 = {
+ 0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204};
+
+static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
+ &pcm_prty1_bb_b0};
+
+static struct attn_hw_reg qm_int0_bb_b0 = {
+ 0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184};
+
+static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
+ &qm_int0_bb_b0};
+
+static struct attn_hw_reg qm_prty0_bb_b0 = {
+ 0, 11, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194};
+
+static struct attn_hw_reg qm_prty1_bb_b0 = {
+ 1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204};
+
+static struct attn_hw_reg qm_prty2_bb_b0 = {
+ 2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214};
+
+static struct attn_hw_reg qm_prty3_bb_b0 = {
+ 3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224};
+
+static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
+ &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0};
+
+static struct attn_hw_reg tm_int0_bb_b0 = {
+ 0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184};
+
+static struct attn_hw_reg tm_int1_bb_b0 = {
+ 1, 11, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194};
+
+static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
+ &tm_int0_bb_b0, &tm_int1_bb_b0};
+
+static struct attn_hw_reg tm_prty1_bb_b0 = {
+ 0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204};
+
+static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
+ &tm_prty1_bb_b0};
+
+static struct attn_hw_reg dorq_int0_bb_b0 = {
+ 0, 9, 0x100180, 0x10018c, 0x100188, 0x100184};
+
+static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
+ &dorq_int0_bb_b0};
+
+static struct attn_hw_reg dorq_prty0_bb_b0 = {
+ 0, 1, 0x100190, 0x10019c, 0x100198, 0x100194};
+
+static struct attn_hw_reg dorq_prty1_bb_b0 = {
+ 1, 6, 0x100200, 0x10020c, 0x100208, 0x100204};
+
+static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
+ &dorq_prty0_bb_b0, &dorq_prty1_bb_b0};
+
+static struct attn_hw_reg brb_int0_bb_b0 = {
+ 0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4};
+
+static struct attn_hw_reg brb_int1_bb_b0 = {
+ 1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc};
+
+static struct attn_hw_reg brb_int2_bb_b0 = {
+ 2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4};
+
+static struct attn_hw_reg brb_int3_bb_b0 = {
+ 3, 31, 0x340108, 0x340114, 0x340110, 0x34010c};
+
+static struct attn_hw_reg brb_int4_bb_b0 = {
+ 4, 27, 0x340120, 0x34012c, 0x340128, 0x340124};
+
+static struct attn_hw_reg brb_int5_bb_b0 = {
+ 5, 1, 0x340138, 0x340144, 0x340140, 0x34013c};
+
+static struct attn_hw_reg brb_int6_bb_b0 = {
+ 6, 8, 0x340150, 0x34015c, 0x340158, 0x340154};
+
+static struct attn_hw_reg brb_int7_bb_b0 = {
+ 7, 32, 0x340168, 0x340174, 0x340170, 0x34016c};
+
+static struct attn_hw_reg brb_int8_bb_b0 = {
+ 8, 17, 0x340184, 0x340190, 0x34018c, 0x340188};
+
+static struct attn_hw_reg brb_int9_bb_b0 = {
+ 9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0};
+
+static struct attn_hw_reg brb_int10_bb_b0 = {
+ 10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8};
+
+static struct attn_hw_reg brb_int11_bb_b0 = {
+ 11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0};
+
+static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
+ &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
+ &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
+ &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0};
+
+static struct attn_hw_reg brb_prty0_bb_b0 = {
+ 0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0};
+
+static struct attn_hw_reg brb_prty1_bb_b0 = {
+ 1, 31, 0x340400, 0x34040c, 0x340408, 0x340404};
+
+static struct attn_hw_reg brb_prty2_bb_b0 = {
+ 2, 14, 0x340410, 0x34041c, 0x340418, 0x340414};
+
+static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
+ &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0};
+
+static struct attn_hw_reg src_int0_bb_b0 = {
+ 0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4};
+
+static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
+ &src_int0_bb_b0};
+
+static struct attn_hw_reg prs_int0_bb_b0 = {
+ 0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044};
+
+static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
+ &prs_int0_bb_b0};
+
+static struct attn_hw_reg prs_prty0_bb_b0 = {
+ 0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054};
+
+static struct attn_hw_reg prs_prty1_bb_b0 = {
+ 1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208};
+
+static struct attn_hw_reg prs_prty2_bb_b0 = {
+ 2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218};
+
+static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
+ &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0};
+
+static struct attn_hw_reg tsdm_int0_bb_b0 = {
+ 0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044};
+
+static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
+ &tsdm_int0_bb_b0};
+
+static struct attn_hw_reg tsdm_prty1_bb_b0 = {
+ 0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204};
+
+static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
+ &tsdm_prty1_bb_b0};
+
+static struct attn_hw_reg msdm_int0_bb_b0 = {
+ 0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044};
+
+static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
+ &msdm_int0_bb_b0};
+
+static struct attn_hw_reg msdm_prty1_bb_b0 = {
+ 0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204};
+
+static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
+ &msdm_prty1_bb_b0};
+
+static struct attn_hw_reg usdm_int0_bb_b0 = {
+ 0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044};
+
+static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
+ &usdm_int0_bb_b0};
+
+static struct attn_hw_reg usdm_prty1_bb_b0 = {
+ 0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204};
+
+static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
+ &usdm_prty1_bb_b0};
+
+static struct attn_hw_reg xsdm_int0_bb_b0 = {
+ 0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044};
+
+static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
+ &xsdm_int0_bb_b0};
+
+static struct attn_hw_reg xsdm_prty1_bb_b0 = {
+ 0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204};
+
+static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
+ &xsdm_prty1_bb_b0};
+
+static struct attn_hw_reg ysdm_int0_bb_b0 = {
+ 0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044};
+
+static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
+ &ysdm_int0_bb_b0};
+
+static struct attn_hw_reg ysdm_prty1_bb_b0 = {
+ 0, 9, 0xf90200, 0xf9020c, 0xf90208, 0xf90204};
+
+static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
+ &ysdm_prty1_bb_b0};
+
+static struct attn_hw_reg psdm_int0_bb_b0 = {
+ 0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044};
+
+static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
+ &psdm_int0_bb_b0};
+
+static struct attn_hw_reg psdm_prty1_bb_b0 = {
+ 0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204};
+
+static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
+ &psdm_prty1_bb_b0};
+
+static struct attn_hw_reg tsem_int0_bb_b0 = {
+ 0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044};
+
+static struct attn_hw_reg tsem_int1_bb_b0 = {
+ 1, 13, 0x1700050, 0x170005c, 0x1700058, 0x1700054};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044};
+
+static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
+ &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg tsem_prty0_bb_b0 = {
+ 0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc};
+
+static struct attn_hw_reg tsem_prty1_bb_b0 = {
+ 1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204};
+
+static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
+ &tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
+ &tsem_fast_memory_vfc_config_prty1_bb_b0};
+
+static struct attn_hw_reg msem_int0_bb_b0 = {
+ 0, 32, 0x1800040, 0x180004c, 0x1800048, 0x1800044};
+
+static struct attn_hw_reg msem_int1_bb_b0 = {
+ 1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044};
+
+static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
+ &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg msem_prty0_bb_b0 = {
+ 0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc};
+
+static struct attn_hw_reg msem_prty1_bb_b0 = {
+ 1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204};
+
+static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
+ &msem_prty0_bb_b0, &msem_prty1_bb_b0};
+
+static struct attn_hw_reg usem_int0_bb_b0 = {
+ 0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044};
+
+static struct attn_hw_reg usem_int1_bb_b0 = {
+ 1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044};
+
+static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
+ &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg usem_prty0_bb_b0 = {
+ 0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc};
+
+static struct attn_hw_reg usem_prty1_bb_b0 = {
+ 1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204};
+
+static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
+ &usem_prty0_bb_b0, &usem_prty1_bb_b0};
+
+static struct attn_hw_reg xsem_int0_bb_b0 = {
+ 0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044};
+
+static struct attn_hw_reg xsem_int1_bb_b0 = {
+ 1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044};
+
+static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
+ &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg xsem_prty0_bb_b0 = {
+ 0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc};
+
+static struct attn_hw_reg xsem_prty1_bb_b0 = {
+ 1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204};
+
+static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
+ &xsem_prty0_bb_b0, &xsem_prty1_bb_b0};
+
+static struct attn_hw_reg ysem_int0_bb_b0 = {
+ 0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044};
+
+static struct attn_hw_reg ysem_int1_bb_b0 = {
+ 1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044};
+
+static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
+ &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg ysem_prty0_bb_b0 = {
+ 0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc};
+
+static struct attn_hw_reg ysem_prty1_bb_b0 = {
+ 1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204};
+
+static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
+ &ysem_prty0_bb_b0, &ysem_prty1_bb_b0};
+
+static struct attn_hw_reg psem_int0_bb_b0 = {
+ 0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044};
+
+static struct attn_hw_reg psem_int1_bb_b0 = {
+ 1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044};
+
+static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
+ &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg psem_prty0_bb_b0 = {
+ 0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc};
+
+static struct attn_hw_reg psem_prty1_bb_b0 = {
+ 1, 6, 0x1600200, 0x160020c, 0x1600208, 0x1600204};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204};
+
+static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
+ &psem_prty0_bb_b0, &psem_prty1_bb_b0,
+ &psem_fast_memory_vfc_config_prty1_bb_b0};
+
+static struct attn_hw_reg rss_int0_bb_b0 = {
+ 0, 12, 0x238980, 0x23898c, 0x238988, 0x238984};
+
+static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
+ &rss_int0_bb_b0};
+
+static struct attn_hw_reg rss_prty1_bb_b0 = {
+ 0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04};
+
+static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
+ &rss_prty1_bb_b0};
+
+static struct attn_hw_reg tmld_int0_bb_b0 = {
+ 0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184};
+
+static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
+ &tmld_int0_bb_b0};
+
+static struct attn_hw_reg tmld_prty1_bb_b0 = {
+ 0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204};
+
+static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
+ &tmld_prty1_bb_b0};
+
+static struct attn_hw_reg muld_int0_bb_b0 = {
+ 0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184};
+
+static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
+ &muld_int0_bb_b0};
+
+static struct attn_hw_reg muld_prty1_bb_b0 = {
+ 0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204};
+
+static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
+ &muld_prty1_bb_b0};
+
+static struct attn_hw_reg yuld_int0_bb_b0 = {
+ 0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184};
+
+static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
+ &yuld_int0_bb_b0};
+
+static struct attn_hw_reg yuld_prty1_bb_b0 = {
+ 0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204};
+
+static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
+ &yuld_prty1_bb_b0};
+
+static struct attn_hw_reg xyld_int0_bb_b0 = {
+ 0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184};
+
+static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
+ &xyld_int0_bb_b0};
+
+static struct attn_hw_reg xyld_prty1_bb_b0 = {
+ 0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204};
+
+static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
+ &xyld_prty1_bb_b0};
+
+static struct attn_hw_reg prm_int0_bb_b0 = {
+ 0, 11, 0x230040, 0x23004c, 0x230048, 0x230044};
+
+static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
+ &prm_int0_bb_b0};
+
+static struct attn_hw_reg prm_prty0_bb_b0 = {
+ 0, 1, 0x230050, 0x23005c, 0x230058, 0x230054};
+
+static struct attn_hw_reg prm_prty1_bb_b0 = {
+ 1, 24, 0x230200, 0x23020c, 0x230208, 0x230204};
+
+static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
+ &prm_prty0_bb_b0, &prm_prty1_bb_b0};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
+ 0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
+ &pbf_pb1_int0_bb_b0};
+
+static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
+ 0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054};
+
+static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
+ &pbf_pb1_prty0_bb_b0};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
+ 0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
+ &pbf_pb2_int0_bb_b0};
+
+static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
+ 0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054};
+
+static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
+ &pbf_pb2_prty0_bb_b0};
+
+static struct attn_hw_reg rpb_int0_bb_b0 = {
+ 0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044};
+
+static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
+ &rpb_int0_bb_b0};
+
+static struct attn_hw_reg rpb_prty0_bb_b0 = {
+ 0, 1, 0x23c050, 0x23c05c, 0x23c058, 0x23c054};
+
+static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
+ &rpb_prty0_bb_b0};
+
+static struct attn_hw_reg btb_int0_bb_b0 = {
+ 0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4};
+
+static struct attn_hw_reg btb_int1_bb_b0 = {
+ 1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc};
+
+static struct attn_hw_reg btb_int2_bb_b0 = {
+ 2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4};
+
+static struct attn_hw_reg btb_int3_bb_b0 = {
+ 3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c};
+
+static struct attn_hw_reg btb_int4_bb_b0 = {
+ 4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124};
+
+static struct attn_hw_reg btb_int5_bb_b0 = {
+ 5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c};
+
+static struct attn_hw_reg btb_int6_bb_b0 = {
+ 6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154};
+
+static struct attn_hw_reg btb_int8_bb_b0 = {
+ 7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188};
+
+static struct attn_hw_reg btb_int9_bb_b0 = {
+ 8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0};
+
+static struct attn_hw_reg btb_int10_bb_b0 = {
+ 9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8};
+
+static struct attn_hw_reg btb_int11_bb_b0 = {
+ 10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0};
+
+static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
+ &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
+ &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
+ &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0};
+
+static struct attn_hw_reg btb_prty0_bb_b0 = {
+ 0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0};
+
+static struct attn_hw_reg btb_prty1_bb_b0 = {
+ 1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404};
+
+static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
+ &btb_prty0_bb_b0, &btb_prty1_bb_b0};
+
+static struct attn_hw_reg pbf_int0_bb_b0 = {
+ 0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184};
+
+static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
+ &pbf_int0_bb_b0};
+
+static struct attn_hw_reg pbf_prty0_bb_b0 = {
+ 0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194};
+
+static struct attn_hw_reg pbf_prty1_bb_b0 = {
+ 1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204};
+
+static struct attn_hw_reg pbf_prty2_bb_b0 = {
+ 2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214};
+
+static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
+ &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0};
+
+static struct attn_hw_reg rdif_int0_bb_b0 = {
+ 0, 8, 0x300180, 0x30018c, 0x300188, 0x300184};
+
+static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
+ &rdif_int0_bb_b0};
+
+static struct attn_hw_reg rdif_prty0_bb_b0 = {
+ 0, 1, 0x300190, 0x30019c, 0x300198, 0x300194};
+
+static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
+ &rdif_prty0_bb_b0};
+
+static struct attn_hw_reg tdif_int0_bb_b0 = {
+ 0, 8, 0x310180, 0x31018c, 0x310188, 0x310184};
+
+static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
+ &tdif_int0_bb_b0};
+
+static struct attn_hw_reg tdif_prty0_bb_b0 = {
+ 0, 1, 0x310190, 0x31019c, 0x310198, 0x310194};
+
+static struct attn_hw_reg tdif_prty1_bb_b0 = {
+ 1, 11, 0x310200, 0x31020c, 0x310208, 0x310204};
+
+static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
+ &tdif_prty0_bb_b0, &tdif_prty1_bb_b0};
+
+static struct attn_hw_reg cdu_int0_bb_b0 = {
+ 0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc};
+
+static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
+ &cdu_int0_bb_b0};
+
+static struct attn_hw_reg cdu_prty1_bb_b0 = {
+ 0, 5, 0x580200, 0x58020c, 0x580208, 0x580204};
+
+static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
+ &cdu_prty1_bb_b0};
+
+static struct attn_hw_reg ccfc_int0_bb_b0 = {
+ 0, 2, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184};
+
+static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
+ &ccfc_int0_bb_b0};
+
+static struct attn_hw_reg ccfc_prty1_bb_b0 = {
+ 0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204};
+
+static struct attn_hw_reg ccfc_prty0_bb_b0 = {
+ 1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8};
+
+static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
+ &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0};
+
+static struct attn_hw_reg tcfc_int0_bb_b0 = {
+ 0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184};
+
+static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
+ &tcfc_int0_bb_b0};
+
+static struct attn_hw_reg tcfc_prty1_bb_b0 = {
+ 0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204};
+
+static struct attn_hw_reg tcfc_prty0_bb_b0 = {
+ 1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8};
+
+static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
+ &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0};
+
+static struct attn_hw_reg igu_int0_bb_b0 = {
+ 0, 11, 0x180180, 0x18018c, 0x180188, 0x180184};
+
+static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
+ &igu_int0_bb_b0};
+
+static struct attn_hw_reg igu_prty0_bb_b0 = {
+ 0, 1, 0x180190, 0x18019c, 0x180198, 0x180194};
+
+static struct attn_hw_reg igu_prty1_bb_b0 = {
+ 1, 31, 0x180200, 0x18020c, 0x180208, 0x180204};
+
+static struct attn_hw_reg igu_prty2_bb_b0 = {
+ 2, 1, 0x180210, 0x18021c, 0x180218, 0x180214};
+
+static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
+ &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0};
+
+static struct attn_hw_reg cau_int0_bb_b0 = {
+ 0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0};
+
+static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
+ &cau_int0_bb_b0};
+
+static struct attn_hw_reg cau_prty1_bb_b0 = {
+ 0, 13, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204};
+
+static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
+ &cau_prty1_bb_b0};
+
+static struct attn_hw_reg dbg_int0_bb_b0 = {
+ 0, 1, 0x10180, 0x1018c, 0x10188, 0x10184};
+
+static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
+ &dbg_int0_bb_b0};
+
+static struct attn_hw_reg dbg_prty1_bb_b0 = {
+ 0, 1, 0x10200, 0x1020c, 0x10208, 0x10204};
+
+static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
+ &dbg_prty1_bb_b0};
+
+static struct attn_hw_reg nig_int0_bb_b0 = {
+ 0, 12, 0x500040, 0x50004c, 0x500048, 0x500044};
+
+static struct attn_hw_reg nig_int1_bb_b0 = {
+ 1, 32, 0x500050, 0x50005c, 0x500058, 0x500054};
+
+static struct attn_hw_reg nig_int2_bb_b0 = {
+ 2, 20, 0x500060, 0x50006c, 0x500068, 0x500064};
+
+static struct attn_hw_reg nig_int3_bb_b0 = {
+ 3, 18, 0x500070, 0x50007c, 0x500078, 0x500074};
+
+static struct attn_hw_reg nig_int4_bb_b0 = {
+ 4, 20, 0x500080, 0x50008c, 0x500088, 0x500084};
+
+static struct attn_hw_reg nig_int5_bb_b0 = {
+ 5, 18, 0x500090, 0x50009c, 0x500098, 0x500094};
+
+static struct attn_hw_reg *nig_int_bb_b0_regs[6] = {
+ &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
+ &nig_int4_bb_b0, &nig_int5_bb_b0};
+
+static struct attn_hw_reg nig_prty0_bb_b0 = {
+ 0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4};
+
+static struct attn_hw_reg nig_prty1_bb_b0 = {
+ 1, 31, 0x500200, 0x50020c, 0x500208, 0x500204};
+
+static struct attn_hw_reg nig_prty2_bb_b0 = {
+ 2, 31, 0x500210, 0x50021c, 0x500218, 0x500214};
+
+static struct attn_hw_reg nig_prty3_bb_b0 = {
+ 3, 31, 0x500220, 0x50022c, 0x500228, 0x500224};
+
+static struct attn_hw_reg nig_prty4_bb_b0 = {
+ 4, 17, 0x500230, 0x50023c, 0x500238, 0x500234};
+
+static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = {
+ &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0,
+ &nig_prty3_bb_b0, &nig_prty4_bb_b0};
+
+static struct attn_hw_reg ipc_int0_bb_b0 = {
+ 0, 13, 0x2050c, 0x20518, 0x20514, 0x20510};
+
+static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
+ &ipc_int0_bb_b0};
+
+static struct attn_hw_reg ipc_prty0_bb_b0 = {
+ 0, 1, 0x2051c, 0x20528, 0x20524, 0x20520};
+
+static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = {
+ &ipc_prty0_bb_b0};
+
+static struct attn_hw_block attn_blocks[] = {
+ {"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } },
+ {"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } },
+ {"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } },
+ {"dbu", {{0, 0, NULL, NULL} } },
+ {"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs,
+ pglue_b_prty_bb_b0_regs} } },
+ {"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } },
+ {"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } },
+ {"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } },
+ {"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } },
+ {"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } },
+ {"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } },
+ {"mcp", {{0, 0, NULL, NULL} } },
+ {"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } },
+ {"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } },
+ {"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs,
+ pswhst2_prty_bb_b0_regs} } },
+ {"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } },
+ {"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, pswrd2_prty_bb_b0_regs} } },
+ {"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } },
+ {"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } },
+ {"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } },
+ {"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } },
+ {"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } },
+ {"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } },
+ {"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } },
+ {"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } },
+ {"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } },
+ {"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } },
+ {"xcm", {{3, 2, xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } },
+ {"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } },
+ {"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } },
+ {"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } },
+ {"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } },
+ {"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } },
+ {"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } },
+ {"src", {{1, 0, src_int_bb_b0_regs, NULL} } },
+ {"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } },
+ {"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } },
+ {"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } },
+ {"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } },
+ {"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } },
+ {"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } },
+ {"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } },
+ {"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } },
+ {"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } },
+ {"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } },
+ {"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } },
+ {"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } },
+ {"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } },
+ {"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } },
+ {"tmld", {{1, 1, tmld_int_bb_b0_regs, tmld_prty_bb_b0_regs} } },
+ {"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } },
+ {"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } },
+ {"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } },
+ {"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } },
+ {"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs,
+ pbf_pb1_prty_bb_b0_regs} } },
+ {"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs,
+ pbf_pb2_prty_bb_b0_regs} } },
+ {"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } },
+ {"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } },
+ {"pbf", { {1, 3, pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } },
+ {"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } },
+ {"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } },
+ {"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } },
+ {"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } },
+ {"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } },
+ {"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } },
+ {"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } },
+ {"umac", { {0, 0, NULL, NULL} } },
+ {"xmac", { {0, 0, NULL, NULL} } },
+ {"dbg", { {1, 1, dbg_int_bb_b0_regs, dbg_prty_bb_b0_regs} } },
+ {"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } },
+ {"wol", { {0, 0, NULL, NULL} } },
+ {"bmbn", { {0, 0, NULL, NULL} } },
+ {"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } },
+ {"nwm", { {0, 0, NULL, NULL} } },
+ {"nws", { {0, 0, NULL, NULL} } },
+ {"ms", { {0, 0, NULL, NULL} } },
+ {"phy_pcie", { {0, 0, NULL, NULL} } },
+ {"misc_aeu", { {0, 0, NULL, NULL} } },
+ {"bar0_map", { {0, 0, NULL, NULL} } },};
+
+/* Specific HW attention callbacks */
+static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
+
+ /* This might occur in certain instances; log it once, then mask it */
+ DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
+ tmp);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
+ 0xffffffff);
+
+ return 0;
+}
+
+#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
+#define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
+#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
+#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf)
+#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
+#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1)
+#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
+#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff)
+#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
+#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf)
+#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
+#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff)
+#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
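+
+/* The masks/shifts above decode PSWHST_REG_INCORRECT_ACCESS_DATA, which is
+ * read below and unpacked via GET_FIELD().
+ */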
+static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_VALID);
+
+ if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
+ u32 addr, data, length;
+
+ addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
+ data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_DATA);
+ length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_LENGTH);
+
+ DP_INFO(p_hwfn->cdev,
+ "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
+ addr, length,
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_VF_VALID),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_CLIENT),
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_BYTE_EN),
+ data);
+ }
+
+ return 0;
+}
+
+#define QED_GRC_ATTENTION_VALID_BIT (1 << 0)
+#define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff)
+#define QED_GRC_ATTENTION_ADDRESS_SHIFT (0)
+#define QED_GRC_ATTENTION_RDWR_BIT (1 << 23)
+#define QED_GRC_ATTENTION_MASTER_MASK (0xf)
+#define QED_GRC_ATTENTION_MASTER_SHIFT (24)
+#define QED_GRC_ATTENTION_PF_MASK (0xf)
+#define QED_GRC_ATTENTION_PF_SHIFT (0)
+#define QED_GRC_ATTENTION_VF_MASK (0xff)
+#define QED_GRC_ATTENTION_VF_SHIFT (4)
+#define QED_GRC_ATTENTION_PRIV_MASK (0x3)
+#define QED_GRC_ATTENTION_PRIV_SHIFT (14)
+#define QED_GRC_ATTENTION_PRIV_VF (0)
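+
+/* ADDRESS, RDWR and MASTER decode GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0;
+ * PF, VF and PRIV decode GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1.
+ */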
+static const char *attn_master_to_str(u8 master)
+{
+ switch (master) {
+ case 1: return "PXP";
+ case 2: return "MCP";
+ case 3: return "MSDM";
+ case 4: return "PSDM";
+ case 5: return "YSDM";
+ case 6: return "USDM";
+ case 7: return "TSDM";
+ case 8: return "XSDM";
+ case 9: return "DBU";
+ case 10: return "DMAE";
+ default:
+ return "Unkown";
+ }
+}
+
+static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp, tmp2;
+
+ /* We've already cleared the timeout interrupt register, so we learn
+ * of interrupts via the validity register
+ */
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
+ if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
+ goto out;
+
+ /* Read the GRC timeout information */
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
+ tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
+
+ DP_INFO(p_hwfn->cdev,
+ "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
+ tmp2, tmp,
+ (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
+ GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
+ attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
+ GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
+ (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
+ QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)",
+ GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
+
+out:
+ /* Regardless of anything else, clear the validity bit */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
+ return 0;
+}
+
+#define PGLUE_ATTENTION_VALID (1 << 29)
+#define PGLUE_ATTENTION_RD_VALID (1 << 26)
+#define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf)
+#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
+#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19)
+#define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff)
+#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
+#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21)
+#define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22)
+#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23)
+#define PGLUE_ATTENTION_ICPL_VALID (1 << 23)
+#define PGLUE_ATTENTION_ZLR_VALID (1 << 25)
+#define PGLUE_ATTENTION_ILT_VALID (1 << 23)
+static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp;
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
+ details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "Illegal write by chip to [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
+ }
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_RD_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
+ details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "Illegal read by chip from [%08x:%08x] blocked.\n"
+ " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
+ : 0,
+ GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
+ : 0);
+ }
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ if (tmp & PGLUE_ATTENTION_ICPL_VALID)
+ DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp);
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
+ u32 addr_hi, addr_lo;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
+
+ DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
+ }
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_ILT_VALID) {
+ u32 addr_hi, addr_lo, details;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
+ details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
+ }
+
+ /* Clear the indications */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+
+ return 0;
+}
+
+#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define QED_DORQ_ATTENTION_SIZE_SHIFT (16)
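+
+/* The SIZE field appears to count 32-bit words; the handler below
+ * multiplies it by 4 to report the drop size in bytes.
+ */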
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 reason;
+
+ reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
+ QED_DORQ_ATTENTION_REASON_MASK;
+ if (reason) {
+ u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ DORQ_REG_DB_DROP_DETAILS);
+
+ DP_INFO(p_hwfn->cdev,
+ "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
+ qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS),
+ (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
+ GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
+ reason);
+ }
+
+ return -EINVAL;
+}
+
+/* Note: aeu_invert_reg entries must be defined in the same order as the HW bits */
+static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
+ {
+ { /* After Invert 1 */
+ {"GPIO0 function%d",
+ (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 2 */
+ {"PGLUE config_space", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PGLUE misc_flr", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PGLUE B RBC", ATTENTION_PAR_INT,
+ qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
+ {"PGLUE misc_mctp", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
+ (1 << ATTENTION_OFFSET_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ {"PCIE glue/PXP VPD %d",
+ (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
+ }
+ },
+
+ {
+ { /* After Invert 3 */
+ {"General Attention %d",
+ (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 4 */
+ {"General Attention 32", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"General Attention %d",
+ (2 << ATTENTION_LENGTH_SHIFT) |
+ (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT),
+ NULL, BLOCK_CNIG},
+ {"MCP CPU", ATTENTION_SINGLE,
+ qed_mcp_attn_cb, MAX_BLOCK_ID},
+ {"MCP Watchdog timer", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"AVS stop status ready", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
+ {"MSTAT per-path", ATTENTION_PAR_INT,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
+ {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
+ {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
+ {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
+ {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
+ }
+ },
+
+ {
+ { /* After Invert 5 */
+ {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
+ {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
+ {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
+ {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
+ {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
+ {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
+ {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
+ {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
+ {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
+ {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
+ {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
+ {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
+ {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
+ {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
+ {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
+ {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
+ }
+ },
+
+ {
+ { /* After Invert 6 */
+ {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
+ {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
+ {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
+ {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
+ {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
+ {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
+ {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
+ {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
+ {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
+ {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
+ {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
+ {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
+ {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
+ {"DORQ", ATTENTION_PAR_INT,
+ qed_dorq_attn_cb, BLOCK_DORQ},
+ {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
+ {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
+ }
+ },
+
+ {
+ { /* After Invert 7 */
+ {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
+ {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
+ {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
+ {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
+ {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
+ {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
+ {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
+ {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
+ {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
+ {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
+ {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
+ {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
+ {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
+ {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
+ {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
+ {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
+ {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
+ }
+ },
+
+ {
+ { /* After Invert 8 */
+ {"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWRQ2},
+ {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
+ {"PSWWR (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWWR2},
+ {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
+ {"PSWRD (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWRD2},
+ {"PSWHST", ATTENTION_PAR_INT,
+ qed_pswhst_attn_cb, BLOCK_PSWHST},
+ {"PSWHST (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWHST2},
+ {"GRC", ATTENTION_PAR_INT,
+ qed_grc_attn_cb, BLOCK_GRC},
+ {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
+ {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
+ {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
+ {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
+ NULL, BLOCK_PGLCS},
+ {"PERST_B assertion", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PERST_B deassertion", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 9 */
+ {"MCP Latched memory", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad cache", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched ump_tx", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ }
+ },
+};
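
(For orientation: each aeu_invert_reg_bit entry above packs both the attention
type and the number of consecutive AEU bits it spans into its flags word, so
the "(6 << ATTENTION_LENGTH_SHIFT)" entries simply describe six reserved bits
with no parity. A self-contained sketch of that encoding; the EX_* shift and
mask values are illustrative assumptions, not the driver's own definitions,
which live earlier in qed_int.c:)

    #include <stdint.h>

    #define EX_ATTENTION_PARITY        (1 << 0) /* span starts with a parity bit */
    #define EX_ATTENTION_LENGTH_SHIFT  4
    #define EX_ATTENTION_LENGTH_MASK   (0xff << EX_ATTENTION_LENGTH_SHIFT)
    #define EX_ATTENTION_LENGTH(flags) (((flags) & EX_ATTENTION_LENGTH_MASK) >> \
                                        EX_ATTENTION_LENGTH_SHIFT)

    /* One interrupt bit */
    #define EX_ATTENTION_SINGLE        (1 << EX_ATTENTION_LENGTH_SHIFT)
    /* One parity bit */
    #define EX_ATTENTION_PAR           (EX_ATTENTION_SINGLE | EX_ATTENTION_PARITY)
    /* A parity bit followed by an interrupt bit */
    #define EX_ATTENTION_PAR_INT       ((2 << EX_ATTENTION_LENGTH_SHIFT) | \
                                        EX_ATTENTION_PARITY)
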
+
+#define ATTN_STATE_BITS (0xfff)
#define ATTN_BITS_MASKABLE (0x3ff)
struct qed_sb_attn_info {
/* Virtual & Physical address of the SB */
struct atten_status_block *sb_attn;
- dma_addr_t sb_phys;
+ dma_addr_t sb_phys;
/* Last seen running index */
- u16 index;
+ u16 index;
+
+ /* A mask of the AEU bits resulting in a parity error */
+ u32 parity_mask[NUM_ATTN_REGS];
+
+ /* A pointer to the attention description structure */
+ struct aeu_invert_reg *p_aeu_desc;
/* Previously asserted attentions, which are still unasserted */
- u16 known_attn;
+ u16 known_attn;
/* Cleanup address for the link's general hw attention */
- u32 mfw_attn_addr;
+ u32 mfw_attn_addr;
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
@@ -127,6 +1840,162 @@ static int qed_int_assertion(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn,
+ struct attn_hw_reg *p_reg_desc,
+ struct attn_hw_block *p_block,
+ enum qed_attention_type type,
+ u32 val, u32 mask)
+{
+ int j;
+
+ for (j = 0; j < p_reg_desc->num_of_bits; j++) {
+ if (!(val & (1 << j)))
+ continue;
+
+ DP_NOTICE(p_hwfn,
+			  "%s (%s): reg %d [0x%08x], bit %d%s\n",
+			  p_block->name,
+			  type == QED_ATTN_TYPE_ATTN ? "Interrupt" :
+			  "Parity",
+			  p_reg_desc->reg_idx, p_reg_desc->sts_addr,
+			  j, (mask & (1 << j)) ? " [MASKED]" : "");
+ }
+}
+
+/**
+ * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
+ * cause of the attention
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the attention
+ * @param aeu_en_reg - offset of the AEU enable register which routed
+ *        this bit to the current group
+ * @param bitmask - 32-bit mask of the bit(s) within aeu_en_reg which
+ *        caused the attention
+ *
+ * @return int
+ */
+static int
+qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u32 aeu_en_reg,
+ u32 bitmask)
+{
+ int rc = -EINVAL;
+ u32 val;
+
+ DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
+ p_aeu->bit_name, bitmask);
+
+ /* Call callback before clearing the interrupt status */
+ if (p_aeu->cb) {
+ DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
+ p_aeu->bit_name);
+ rc = p_aeu->cb(p_hwfn);
+ }
+
+ /* Handle HW block interrupt registers */
+ if (p_aeu->block_index != MAX_BLOCK_ID) {
+ struct attn_hw_block *p_block;
+ u32 mask;
+ int i;
+
+ p_block = &attn_blocks[p_aeu->block_index];
+
+ /* Handle each interrupt register */
+ for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) {
+ struct attn_hw_reg *p_reg_desc;
+ u32 sts_addr;
+
+ p_reg_desc = p_block->chip_regs[0].int_regs[i];
+
+			/* In case of a fatal attention, don't clear the
+			 * status so that it still shows up in a subsequent
+			 * idle check.
+			 */
+ if (rc == 0)
+ sts_addr = p_reg_desc->sts_clr_addr;
+ else
+ sts_addr = p_reg_desc->sts_addr;
+
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
+ mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->mask_addr);
+ qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+ p_block,
+ QED_ATTN_TYPE_ATTN,
+ val, mask);
+ }
+ }
+
+ /* If the attention is benign, no need to prevent it */
+ if (!rc)
+ goto out;
+
+ /* Prevent this Attention from being asserted in the future */
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
+ DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+ p_aeu->bit_name);
+
+out:
+ return rc;
+}
+
+static void qed_int_parity_print(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ struct attn_hw_block *p_block,
+ u8 bit_index)
+{
+ int i;
+
+ for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) {
+ struct attn_hw_reg *p_reg_desc;
+ u32 val, mask;
+
+ p_reg_desc = p_block->chip_regs[0].prty_regs[i];
+
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->sts_clr_addr);
+ mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->mask_addr);
+ qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+ p_block,
+ QED_ATTN_TYPE_PARITY,
+ val, mask);
+ }
+}
+
+/**
+ * @brief qed_int_deassertion_parity - handle a single parity AEU source
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the parity
+ * @param bit_index
+ */
+static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u8 bit_index)
+{
+ u32 block_id = p_aeu->block_index;
+
+ DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n",
+ p_aeu->bit_name, bit_index);
+
+ if (block_id != MAX_BLOCK_ID) {
+ qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
+ bit_index);
+
+ /* In BB, there's a single parity bit for several blocks */
+ if (block_id == BLOCK_BTB) {
+ qed_int_parity_print(p_hwfn, p_aeu,
+ &attn_blocks[BLOCK_OPTE],
+ bit_index);
+ qed_int_parity_print(p_hwfn, p_aeu,
+ &attn_blocks[BLOCK_MCP],
+ bit_index);
+ }
+ }
+}
+
/**
* @brief - handles deassertion of previously asserted attentions.
*
@@ -139,17 +2008,108 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
u16 deasserted_bits)
{
struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
- u32 aeu_mask;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
+ u8 i, j, k, bit_idx;
+ int rc = 0;
+
+ /* Read the attention registers in the AEU */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_AFTER_INVERT_1_IGU +
+ i * 0x4);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Deasserted bits [%d]: %08x\n",
+ i, aeu_inv_arr[i]);
+ }
+
+ /* Find parity attentions first */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
+ u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32));
+ u32 parities;
- if (deasserted_bits != 0x100)
- DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");
+ /* Skip register in which no parity bit is currently set */
+ parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+ if (!parities)
+ continue;
+
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
+
+			if ((p_bit->flags & ATTENTION_PARITY) &&
+			    (parities & (1 << bit_idx)))
+ qed_int_deassertion_parity(p_hwfn, p_bit,
+ bit_idx);
+
+ bit_idx += ATTENTION_LENGTH(p_bit->flags);
+ }
+ }
+
+ /* Find non-parity cause for attention and act */
+ for (k = 0; k < MAX_ATTN_GRPS; k++) {
+ struct aeu_invert_reg_bit *p_aeu;
+
+ /* Handle only groups whose attention is currently deasserted */
+ if (!(deasserted_bits & (1 << k)))
+ continue;
+
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) +
+ k * sizeof(u32) * NUM_ATTN_REGS;
+ u32 en, bits;
+
+ en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ bits = aeu_inv_arr[i] & en;
+
+ /* Skip if no bit from this group is currently set */
+ if (!bits)
+ continue;
+
+ /* Find all set bits from current register which belong
+ * to current group, making them responsible for the
+ * previous assertion.
+ */
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ u8 bit, bit_len;
+ u32 bitmask;
+
+ p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
+
+ /* No need to handle parity-only bits */
+ if (p_aeu->flags == ATTENTION_PAR)
+ continue;
+
+ bit = bit_idx;
+ bit_len = ATTENTION_LENGTH(p_aeu->flags);
+ if (p_aeu->flags & ATTENTION_PAR_INT) {
+ /* Skip Parity */
+ bit++;
+ bit_len--;
+ }
+
+ bitmask = bits & (((1 << bit_len) - 1) << bit);
+ if (bitmask) {
+ /* Handle source of the attention */
+ qed_int_deassertion_aeu_bit(p_hwfn,
+ p_aeu,
+ aeu_en,
+ bitmask);
+ }
+
+ bit_idx += ATTENTION_LENGTH(p_aeu->flags);
+ }
+ }
+ }
/* Clear IGU indication for the deasserted bits */
DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_IGU_CMD +
- ((IGU_CMD_ATTN_BIT_CLR_UPPER -
- IGU_CMD_INT_ACK_BASE) << 3),
- ~((u32)deasserted_bits));
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ ~((u32)deasserted_bits));
/* Unmask deasserted attentions in IGU */
aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
@@ -160,7 +2120,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
/* Clear deassertion from inner state */
sb_attn_sw->known_attn &= ~deasserted_bits;
- return 0;
+ return rc;
}
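
(The loop above is the central pattern of the deassertion path: j walks the
descriptor array while bit_idx tracks the absolute AEU bit position, advancing
by each descriptor's length; for combined parity+interrupt spans the leading
parity bit is skipped, since parities were already handled in the first pass.
A standalone model of that walk, reusing the illustrative EX_* encoding from
the sketch after the aeu_descs table; hypothetical code, not the driver's:)

    #include <stdint.h>
    #include <stdio.h>

    struct ex_aeu_bit {
    	const char *name;
    	uint32_t flags;
    };

    /* 'bits' is the set of asserted bits in one 32-bit AEU register,
     * 'desc' the per-span descriptors covering that register.
     */
    static void ex_walk_aeu_reg(uint32_t bits, const struct ex_aeu_bit *desc,
                                int num_desc)
    {
    	int j, bit_idx = 0;

    	for (j = 0; j < num_desc && bit_idx < 32; j++) {
    		int bit = bit_idx;
    		int len = EX_ATTENTION_LENGTH(desc[j].flags);

    		if (desc[j].flags & EX_ATTENTION_PARITY) {
    			bit++;		/* parity bit was handled earlier */
    			len--;
    		}

    		if (len > 0 && (bits & (((1u << len) - 1) << bit)))
    			printf("attention source: %s\n", desc[j].name);

    		bit_idx += EX_ATTENTION_LENGTH(desc[j].flags);
    	}
    }
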
static int qed_int_attentions(struct qed_hwfn *p_hwfn)
@@ -343,17 +2303,17 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
- struct qed_dev *cdev = p_hwfn->cdev;
- struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
-
- if (p_sb) {
- if (p_sb->sb_attn)
- dma_free_coherent(&cdev->pdev->dev,
- SB_ATTN_ALIGNED_SIZE(p_hwfn),
- p_sb->sb_attn,
- p_sb->sb_phys);
- kfree(p_sb);
- }
+ struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_attn)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_attn,
+ p_sb->sb_phys);
+ kfree(p_sb);
}
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
@@ -379,10 +2339,31 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
dma_addr_t sb_phy_addr)
{
struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+ int i, j, k;
sb_info->sb_attn = sb_virt_addr;
sb_info->sb_phys = sb_phy_addr;
+ /* Set the pointer to the AEU descriptors */
+ sb_info->p_aeu_desc = aeu_descs;
+
+ /* Calculate Parity Masks */
+ memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ /* j is array index, k is bit index */
+ for (j = 0, k = 0; k < 32; j++) {
+ unsigned int flags = aeu_descs[i].bits[j].flags;
+
+ if (flags & ATTENTION_PARITY)
+ sb_info->parity_mask[i] |= 1 << k;
+
+ k += ATTENTION_LENGTH(flags);
+ }
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Attn Mask [Reg %d]: 0x%08x\n",
+ i, sb_info->parity_mask[i]);
+ }
+
/* Set the address of cleanup for the mcp attention */
sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
MISC_REG_AEU_GENERAL_ATTN_0;
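
(Worked example for the parity-mask loop above, with a hypothetical register
layout: if one AEU register were described by {PAR_INT, SINGLE, Reserved of
length 6, PAR_INT}, the spans would occupy bits 0-1, bit 2, bits 3-8 and bits
9-10 respectively. Only the two PAR_INT descriptors carry ATTENTION_PARITY,
and each contributes the first bit of its span, so parity_mask for that
register would be (1 << 0) | (1 << 9) = 0x00000201.)
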
@@ -399,7 +2380,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
dma_addr_t p_phys = 0;
/* SB struct */
- p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
if (!p_sb) {
DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
return -ENOMEM;
@@ -433,6 +2414,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
u16 vf_number,
u8 vf_valid)
{
+ struct qed_dev *cdev = p_hwfn->cdev;
u32 cau_state;
memset(p_sb_entry, 0, sizeof(*p_sb_entry));
@@ -451,14 +2433,12 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
cau_state = CAU_HC_DISABLE_STATE;
- if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
cau_state = CAU_HC_ENABLE_STATE;
- if (!p_hwfn->cdev->rx_coalesce_usecs)
- p_hwfn->cdev->rx_coalesce_usecs =
- QED_CAU_DEF_RX_USECS;
- if (!p_hwfn->cdev->tx_coalesce_usecs)
- p_hwfn->cdev->tx_coalesce_usecs =
- QED_CAU_DEF_TX_USECS;
+ if (!cdev->rx_coalesce_usecs)
+ cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
+ if (!cdev->tx_coalesce_usecs)
+ cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
}
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
@@ -473,20 +2453,20 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
u8 vf_valid)
{
struct cau_sb_entry sb_entry;
- u32 val;
qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
vf_number, vf_valid);
if (p_hwfn->hw_init_done) {
- val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
- qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
- qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
- upper_32_bits(sb_phys));
-
- val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
- qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
- qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
+ /* Wide-bus, initialize via DMAE */
+ u64 phys_addr = (u64)sb_phys;
+
+ qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
+ CAU_REG_SB_ADDR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, 0);
+ qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, 0);
} else {
/* Initialize Status Block Address */
STORE_RT_REG_AGG(p_hwfn,
@@ -638,8 +2618,10 @@ int qed_int_sb_release(struct qed_hwfn *p_hwfn,
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- p_hwfn->sbs_info[sb_id] = NULL;
- p_hwfn->num_sbs--;
+	if (p_hwfn->sbs_info[sb_id]) {
+ p_hwfn->sbs_info[sb_id] = NULL;
+ p_hwfn->num_sbs--;
+ }
return 0;
}
@@ -648,14 +2630,15 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
- if (p_sb) {
- if (p_sb->sb_info.sb_virt)
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- SB_ALIGNED_SIZE(p_hwfn),
- p_sb->sb_info.sb_virt,
- p_sb->sb_info.sb_phys);
- kfree(p_sb);
- }
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_info.sb_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys);
+ kfree(p_sb);
}
static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
@@ -666,7 +2649,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
void *p_virt;
/* SB struct */
- p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
if (!p_sb) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
return -ENOMEM;
@@ -692,25 +2675,6 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
-{
- if (!p_hwfn)
- return;
-
- if (p_hwfn->p_sp_sb)
- qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
- else
- DP_NOTICE(p_hwfn->cdev,
- "Failed to setup Slow path status block - NULL pointer\n");
-
- if (p_hwfn->p_sb_attn)
- qed_int_sb_attn_setup(p_hwfn, p_ptt);
- else
- DP_NOTICE(p_hwfn->cdev,
- "Failed to setup attentions status block - NULL pointer\n");
-}
-
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
void *cookie,
@@ -718,36 +2682,36 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn,
__le16 **p_fw_cons)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
- int qed_status = -ENOMEM;
+ int rc = -ENOMEM;
u8 pi;
/* Look for a free index */
for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
- if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
- p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
- p_sp_sb->pi_info_arr[pi].cookie = cookie;
- *sb_idx = pi;
- *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
- qed_status = 0;
- break;
- }
+ if (p_sp_sb->pi_info_arr[pi].comp_cb)
+ continue;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ rc = 0;
+ break;
}
- return qed_status;
+ return rc;
}
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
- int qed_status = -ENOMEM;
- if (p_sp_sb->pi_info_arr[pi].comp_cb) {
- p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
- p_sp_sb->pi_info_arr[pi].cookie = NULL;
- qed_status = 0;
- }
+	if (!p_sp_sb->pi_info_arr[pi].comp_cb)
+ return -ENOMEM;
- return qed_status;
+ p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = NULL;
+
+ return 0;
}
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
@@ -786,16 +2750,13 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_int_mode int_mode)
{
- int rc, i;
-
- /* Mask non-link attentions */
- for (i = 0; i < 9; i++)
- qed_wr(p_hwfn, p_ptt,
- MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
+ int rc = 0;
- /* Configure AEU signal change to produce attentions for link */
+ /* Configure AEU signal change to produce attentions */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
/* Flush the writes to IGU */
mmiowb();
@@ -937,6 +2898,39 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
}
}
+static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 sb_id)
+{
+ u32 val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * sb_id);
+ struct qed_igu_block *p_block;
+
+ p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+
+	/* Stop scanning when the first invalid PF entry is hit */
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ goto out;
+
+ /* Fill the block information */
+ p_block->status = QED_IGU_STATUS_VALID;
+ p_block->function_id = GET_FIELD(val,
+ IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ p_block->vector_number = GET_FIELD(val,
+ IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ sb_id, val, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+
+out:
+ return val;
+}
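
(The GET_FIELD() accessor used above is, on my reading of the qed headers,
the usual token-pasting shift-and-mask helper; a sketch of what the decode
expands to, stated as an assumption rather than quoted from the source:)

    #define EX_GET_FIELD(value, name) \
    	(((value) >> (name##_SHIFT)) & (name##_MASK))

    /* e.g. GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID) becomes:
     * (val >> IGU_MAPPING_LINE_PF_VALID_SHIFT) &
     *  IGU_MAPPING_LINE_PF_VALID_MASK
     */
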
+
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -946,7 +2940,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
u16 sb_id;
u16 prev_sb_id = 0xFF;
- p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC);
+ p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
if (!p_hwfn->hw_info.p_igu_info)
return -ENOMEM;
@@ -963,26 +2957,13 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id];
- val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+ val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
/* Stop scanning when the first invalid PF entry is hit */
if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
break;
- blk->status = QED_IGU_STATUS_VALID;
- blk->function_id = GET_FIELD(val,
- IGU_MAPPING_LINE_FUNCTION_NUMBER);
- blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
- blk->vector_number = GET_FIELD(val,
- IGU_MAPPING_LINE_VECTOR_NUMBER);
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n",
- val, blk->function_id, blk->is_pf,
- blk->vector_number);
-
if (blk->is_pf) {
if (blk->function_id == p_hwfn->rel_pf_id) {
blk->status |= QED_IGU_STATUS_PF;
@@ -1072,7 +3053,7 @@ static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
- p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC);
+ p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
if (!p_hwfn->sp_dpc)
return -ENOMEM;
@@ -1117,22 +3098,22 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
- qed_int_sp_sb_setup(p_hwfn, p_ptt);
+ qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
qed_int_sp_dpc_setup(p_hwfn);
}
-int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
- int *p_iov_blks)
+void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_sb_cnt_info *p_sb_cnt_info)
{
struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
- if (!info)
- return 0;
-
- if (p_iov_blks)
- *p_iov_blks = info->free_blks;
+ if (!info || !p_sb_cnt_info)
+ return;
- return info->igu_sb_cnt;
+ p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
+ p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
+ p_sb_cnt_info->sb_free_blk = info->free_blks;
}
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 51e0b09a7f47..c57f2e680770 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -161,12 +161,12 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie);
 * blocks configured for this function in the IGU.
*
* @param p_hwfn
- * @param p_iov_blks - configured free blks for vfs
+ * @param p_sb_cnt_info
*
- * @return int - number of status blocks configured
+ * @return void
*/
-int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
- int *p_iov_blks);
+void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_sb_cnt_info *p_sb_cnt_info);
/**
* @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index f72036a2ef5b..3f35c6ca9252 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -31,6 +31,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
@@ -124,52 +125,65 @@ struct qed_sp_vport_update_params {
u8 update_vport_active_tx_flg;
u8 vport_active_tx_flg;
u8 update_approx_mcast_flg;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
unsigned long bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
};
+enum qed_tpa_mode {
+ QED_TPA_MODE_NONE,
+ QED_TPA_MODE_UNUSED,
+ QED_TPA_MODE_GRO,
+ QED_TPA_MODE_MAX
+};
+
+struct qed_sp_vport_start_params {
+ enum qed_tpa_mode tpa_mode;
+ bool remove_inner_vlan;
+ bool drop_ttl0;
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id;
+ u16 mtu;
+};
+
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- u32 concrete_fid,
- u16 opaque_fid,
- u8 vport_id,
- u16 mtu,
- u8 drop_ttl0_flg,
- u8 inner_vlan_removal_en_flg)
+ struct qed_sp_vport_start_params *p_params)
{
- struct qed_sp_init_request_params params;
struct vport_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
int rc = -EINVAL;
u16 rx_mode = 0;
u8 abs_vport_id = 0;
- rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
return rc;
- memset(&params, 0, sizeof(params));
- params.ramrod_data_size = sizeof(*p_ramrod);
- params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- qed_spq_get_cid(p_hwfn),
- opaque_fid,
ETH_RAMROD_VPORT_START,
- PROTOCOLID_ETH,
- &params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.vport_start;
p_ramrod->vport_id = abs_vport_id;
- p_ramrod->mtu = cpu_to_le16(mtu);
- p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
- p_ramrod->drop_ttl0_en = drop_ttl0_flg;
+ p_ramrod->mtu = cpu_to_le16(p_params->mtu);
+ p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
+ p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -180,9 +194,26 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
memset(&p_ramrod->tpa_param, 0,
sizeof(struct eth_vport_tpa_param));
+ p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+
+ switch (p_params->tpa_mode) {
+ case QED_TPA_MODE_GRO:
+ p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ p_ramrod->tpa_param.tpa_max_size = (u16)-1;
+ p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
+ p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
+ p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
+ p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
+ p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
+ p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+ break;
+ default:
+ break;
+ }
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
- concrete_fid);
+ p_params->concrete_fid);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
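
(A hedged usage sketch for the new parameter structure; the field values here
are illustrative, and the real caller is the qed_start_vport() hunk further
down in this patch:)

    	struct qed_sp_vport_start_params start = { 0 };
    	int rc;

    	start.tpa_mode = QED_TPA_MODE_GRO;	/* HW aggregation for GRO */
    	start.mtu = 1500;			/* illustrative */
    	start.max_buffers_per_cqe = 16;
    	start.vport_id = 0;
    	start.opaque_fid = p_hwfn->hw_info.opaque_fid;
    	start.concrete_fid = p_hwfn->hw_info.concrete_fid;

    	rc = qed_sp_vport_start(p_hwfn, &start);
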
@@ -360,7 +391,7 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
{
struct qed_rss_params *p_rss_params = p_params->rss_params;
struct vport_update_ramrod_data_cmn *p_cmn;
- struct qed_sp_init_request_params sp_params;
+ struct qed_sp_init_data init_data;
struct vport_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
u8 abs_vport_id = 0;
@@ -370,17 +401,15 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
if (rc != 0)
return rc;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
- sp_params.comp_mode = comp_mode;
- sp_params.p_comp_data = p_comp_data;
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- qed_spq_get_cid(p_hwfn),
- p_params->opaque_fid,
ETH_RAMROD_VPORT_UPDATE,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -393,7 +422,9 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
-
+ p_cmn->accept_any_vlan = p_params->accept_any_vlan;
+ p_cmn->update_accept_any_vlan_flg =
+ p_params->update_accept_any_vlan_flg;
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
/* Return spq entry which is taken in qed_sp_init_request()*/
@@ -412,8 +443,8 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u8 vport_id)
{
- struct qed_sp_init_request_params sp_params;
struct vport_stop_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
struct qed_spq_entry *p_ent;
u8 abs_vport_id = 0;
int rc;
@@ -422,16 +453,14 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
if (rc != 0)
return rc;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
- sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- qed_spq_get_cid(p_hwfn),
- opaque_fid,
ETH_RAMROD_VPORT_STOP,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -444,8 +473,10 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
static int qed_filter_accept_cmd(struct qed_dev *cdev,
u8 vport,
struct qed_filter_accept_flags accept_flags,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+ u8 update_accept_any_vlan,
+ u8 accept_any_vlan,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct qed_sp_vport_update_params vport_update_params;
int i, rc;
@@ -454,6 +485,8 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
memset(&vport_update_params, 0, sizeof(vport_update_params));
vport_update_params.vport_id = vport;
vport_update_params.accept_flags = accept_flags;
+ vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
+ vport_update_params.accept_any_vlan = accept_any_vlan;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -471,6 +504,10 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
"Accept filter configured, flags = [Rx]%x [Tx]%x\n",
accept_flags.rx_accept_filter,
accept_flags.tx_accept_filter);
+ if (update_accept_any_vlan)
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "accept_any_vlan=%d configured\n",
+ accept_any_vlan);
}
return 0;
@@ -502,8 +539,8 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
- struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_rx_cid;
u16 abs_rx_q_id = 0;
u8 abs_vport_id = 0;
@@ -528,15 +565,15 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
opaque_fid, cid, params->queue_id, params->vport_id,
params->sb);
- memset(&sp_params, 0, sizeof(params));
- sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = cid;
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- cid, opaque_fid,
ETH_RAMROD_RX_QUEUE_START,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -551,12 +588,10 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->complete_event_flg = 1;
p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
- p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr);
- p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr);
+ DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
- p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr);
- p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
@@ -628,21 +663,20 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
{
struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
- struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
u16 abs_rx_q_id = 0;
int rc = -EINVAL;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
- sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_rx_cid->cid;
+ init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- p_rx_cid->cid,
- p_rx_cid->opaque_fid,
ETH_RAMROD_RX_QUEUE_STOP,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -680,8 +714,8 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
union qed_qm_pq_params *p_pq_params)
{
struct tx_queue_start_ramrod_data *p_ramrod = NULL;
- struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_tx_cid;
u8 abs_vport_id;
int rc = -EINVAL;
@@ -696,15 +730,15 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
- sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = cid;
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
- rc = qed_sp_init_request(p_hwfn, &p_ent, cid,
- opaque_fid,
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_TX_QUEUE_START,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -714,11 +748,9 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->sb_id = cpu_to_le16(p_params->sb);
p_ramrod->sb_index = p_params->sb_idx;
p_ramrod->stats_counter_id = stats_id;
- p_ramrod->tc = p_pq_params->eth.tc;
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
- p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
- p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
pq_id = qed_get_qm_pq(p_hwfn,
PROTOCOLID_ETH,
@@ -785,20 +817,19 @@ static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
u16 tx_queue_id)
{
struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
- struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
int rc = -EINVAL;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data);
- sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_tx_cid->cid;
+ init_data.opaque_fid = p_tx_cid->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- p_tx_cid->cid,
- p_tx_cid->opaque_fid,
ETH_RAMROD_TX_QUEUE_STOP,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -821,9 +852,8 @@ qed_filter_action(enum qed_filter_opcode opcode)
case QED_FILTER_REMOVE:
action = ETH_FILTER_ACTION_REMOVE;
break;
- case QED_FILTER_REPLACE:
case QED_FILTER_FLUSH:
- action = ETH_FILTER_ACTION_REPLACE;
+ action = ETH_FILTER_ACTION_REMOVE_ALL;
break;
default:
action = MAX_ETH_FILTER_ACTION;
@@ -856,9 +886,9 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
{
u8 vport_to_add_to = 0, vport_to_remove_from = 0;
struct vport_filter_update_ramrod_data *p_ramrod;
- struct qed_sp_init_request_params sp_params;
struct eth_filter_cmd *p_first_filter;
struct eth_filter_cmd *p_second_filter;
+ struct qed_sp_init_data init_data;
enum eth_filter_action action;
int rc;
@@ -872,17 +902,16 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(**pp_ramrod);
- sp_params.comp_mode = comp_mode;
- sp_params.p_comp_data = p_comp_data;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
rc = qed_sp_init_request(p_hwfn, pp_ent,
- qed_spq_get_cid(p_hwfn),
- opaque_fid,
ETH_RAMROD_FILTERS_UPDATE,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -892,8 +921,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
switch (p_filter_cmd->opcode) {
- case QED_FILTER_FLUSH:
- p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
+ case QED_FILTER_REPLACE:
case QED_FILTER_MOVE:
p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
default:
@@ -962,6 +990,12 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
p_second_filter->action = ETH_FILTER_ACTION_ADD;
p_second_filter->vport_id = vport_to_add_to;
+ } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
+ p_first_filter->vport_id = vport_to_add_to;
+ memcpy(p_second_filter, p_first_filter,
+ sizeof(*p_second_filter));
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
} else {
action = qed_filter_action(p_filter_cmd->opcode);
@@ -1101,8 +1135,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
{
unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = NULL;
- struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
int rc, i;
@@ -1118,18 +1152,16 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
return rc;
}
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
- sp_params.comp_mode = comp_mode;
- sp_params.p_comp_data = p_comp_data;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- qed_spq_get_cid(p_hwfn),
- p_hwfn->hw_info.opaque_fid,
ETH_RAMROD_VPORT_UPDATE,
- PROTOCOLID_ETH,
- &sp_params);
-
+ PROTOCOLID_ETH, &init_data);
if (rc) {
DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
return rc;
@@ -1230,6 +1262,328 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
return rc;
}
+/* Statistics-related code */
+static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len,
+ u16 statistics_bin)
+{
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+}
+
+static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_pstorm_per_queue_stat pstats;
+ u32 pstats_addr = 0, pstats_len = 0;
+
+ __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
+ statistics_bin);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats,
+ pstats_addr, pstats_len);
+
+ p_stats->tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
+}
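
(Each storm counter lives in SDM RAM as a {hi, lo} pair of 32-bit words, and
HILO_64_REGPAIR() folds such a pair into a u64 before it is accumulated. A
standalone model of the fold; the driver's macro presumably also performs
le32-to-CPU conversion, which is an assumption here:)

    #include <stdint.h>

    struct ex_regpair {
    	uint32_t hi;
    	uint32_t lo;
    };

    static inline uint64_t ex_hilo_64(struct ex_regpair r)
    {
    	return ((uint64_t)r.hi << 32) | r.lo; /* hi:lo -> one 64-bit counter */
    }
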
+
+static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len)
+{
+ *p_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ *p_len = sizeof(struct tstorm_per_port_stat);
+}
+
+static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ u32 tstats_addr = 0, tstats_len = 0;
+ struct tstorm_per_port_stat tstats;
+
+ __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
+
+ memset(&tstats, 0, sizeof(tstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats,
+ tstats_addr, tstats_len);
+
+ p_stats->mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+}
+
+static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len,
+ u16 statistics_bin)
+{
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+}
+
+static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_ustorm_per_queue_stat ustats;
+ u32 ustats_addr = 0, ustats_len = 0;
+
+ __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
+ statistics_bin);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats,
+ ustats_addr, ustats_len);
+
+ p_stats->rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len,
+ u16 statistics_bin)
+{
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+}
+
+static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_mstorm_per_queue_stat mstats;
+ u32 mstats_addr = 0, mstats_len = 0;
+
+ __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
+ statistics_bin);
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats,
+ mstats_addr, mstats_len);
+
+ p_stats->no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+}
+
+static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats)
+{
+ struct port_stats port_stats;
+ int j;
+
+ memset(&port_stats, 0, sizeof(port_stats));
+
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, stats),
+ sizeof(port_stats));
+
+ p_stats->rx_64_byte_packets += port_stats.pmm.r64;
+ p_stats->rx_127_byte_packets += port_stats.pmm.r127;
+ p_stats->rx_255_byte_packets += port_stats.pmm.r255;
+ p_stats->rx_511_byte_packets += port_stats.pmm.r511;
+ p_stats->rx_1023_byte_packets += port_stats.pmm.r1023;
+ p_stats->rx_1518_byte_packets += port_stats.pmm.r1518;
+ p_stats->rx_1522_byte_packets += port_stats.pmm.r1522;
+ p_stats->rx_2047_byte_packets += port_stats.pmm.r2047;
+ p_stats->rx_4095_byte_packets += port_stats.pmm.r4095;
+ p_stats->rx_9216_byte_packets += port_stats.pmm.r9216;
+ p_stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+ p_stats->rx_crc_errors += port_stats.pmm.rfcs;
+ p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+ p_stats->rx_pause_frames += port_stats.pmm.rxpf;
+ p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
+ p_stats->rx_align_errors += port_stats.pmm.raln;
+ p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
+ p_stats->rx_oversize_packets += port_stats.pmm.rovr;
+ p_stats->rx_jabbers += port_stats.pmm.rjbr;
+ p_stats->rx_undersize_packets += port_stats.pmm.rund;
+ p_stats->rx_fragments += port_stats.pmm.rfrg;
+ p_stats->tx_64_byte_packets += port_stats.pmm.t64;
+ p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+ p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+ p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+ p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+ p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+ p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+ p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+ p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+ p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+ p_stats->tx_pause_frames += port_stats.pmm.txpf;
+ p_stats->tx_pfc_frames += port_stats.pmm.txpp;
+ p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+ p_stats->tx_total_collisions += port_stats.pmm.tncl;
+ p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
+ p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+ p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+ p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+ p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+ p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
+ p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+ p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+ p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+ p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+ for (j = 0; j < 8; j++) {
+ p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ }
+}
+
+static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *stats,
+ u16 statistics_bin)
+{
+ __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
+
+ if (p_hwfn->mcp_info)
+ __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
+}
+
+static void _qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ u8 fw_vport = 0;
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
+
+		/* The main vport is at relative index 0 */
+ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ continue;
+ }
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+}
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ u32 i;
+
+ if (!cdev) {
+ memset(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ _qed_get_vport_stats(cdev, stats);
+
+ if (!cdev->reset_stats)
+ return;
+
+ /* Reduce the statistics baseline */
+ for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
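
(The subtraction loop above views qed_eth_stats as a flat u64 array; that is
only valid while the structure consists purely of u64 counters, which is the
implicit assumption here. An equivalent pointer-walk form of the same loop:)

    	u64 *cur = (u64 *)stats;
    	u64 *base = (u64 *)cdev->reset_stats;
    	u32 left = sizeof(struct qed_eth_stats) / sizeof(u64);

    	while (left--)
    		*cur++ -= *base++;	/* remove the reset-time baseline */
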
+
+/* Zeroes the V-PORT-specific portion of stats (port stats remain untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 addr = 0, len = 0;
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ */
+ if (!cdev->reset_stats)
+ DP_INFO(cdev, "Reset stats not allocated\n");
+ else
+ _qed_get_vport_stats(cdev, cdev->reset_stats);
+}
+
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
@@ -1268,24 +1622,25 @@ static void qed_register_eth_ops(struct qed_dev *cdev,
}
static int qed_start_vport(struct qed_dev *cdev,
- u8 vport_id,
- u16 mtu,
- u8 drop_ttl0_flg,
- u8 inner_vlan_removal_en_flg)
+ struct qed_start_vport_params *params)
{
int rc, i;
for_each_hwfn(cdev, i) {
+ struct qed_sp_vport_start_params start = { 0 };
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- rc = qed_sp_vport_start(p_hwfn,
- p_hwfn->hw_info.concrete_fid,
- p_hwfn->hw_info.opaque_fid,
- vport_id,
- mtu,
- drop_ttl0_flg,
- inner_vlan_removal_en_flg);
-
+ start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
+ QED_TPA_MODE_NONE;
+ start.remove_inner_vlan = params->remove_inner_vlan;
+ start.drop_ttl0 = params->drop_ttl0;
+ start.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ start.vport_id = params->vport_id;
+ start.max_buffers_per_cqe = 16;
+ start.mtu = params->mtu;
+
+ rc = qed_sp_vport_start(p_hwfn, &start);
if (rc) {
DP_ERR(cdev, "Failed to start VPORT\n");
return rc;
@@ -1295,7 +1650,7 @@ static int qed_start_vport(struct qed_dev *cdev,
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
"Started V-PORT %d with MTU %d\n",
- vport_id, mtu);
+ start.vport_id, start.mtu);
}
qed_reset_vport_stats(cdev);
@@ -1344,6 +1699,9 @@ static int qed_update_vport(struct qed_dev *cdev,
params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
+ sp_params.accept_any_vlan = params->accept_any_vlan;
+ sp_params.update_accept_any_vlan_flg =
+ params->update_accept_any_vlan_flg;
/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
* We need to re-fix the rss values per engine for CMT.
@@ -1563,7 +1921,7 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
- return qed_filter_accept_cmd(cdev, 0, accept_flags,
+ return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
QED_SPQ_MODE_CB, NULL);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 9d76ce249277..26d40db07ddd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -29,10 +29,10 @@
#include "qed_mcp.h"
#include "qed_hw.h"
-static const char version[] =
- "QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n";
+static char version[] =
+ "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
-MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module");
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -45,6 +45,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define QED_FW_FILE_NAME \
"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
+MODULE_FIRMWARE(QED_FW_FILE_NAME);
+
static int __init qed_init(void)
{
pr_notice("qed_init called\n");
@@ -97,12 +99,15 @@ static void qed_free_pci(struct qed_dev *cdev)
pci_disable_device(pdev);
}
+#define PCI_REVISION_ID_ERROR_VAL 0xff
+
/* Performs PCI initialization and fills in the PCI-related parameters
 * in the device structure. Returns 0 in case of success.
*/
static int qed_init_pci(struct qed_dev *cdev,
struct pci_dev *pdev)
{
+ u8 rev_id;
int rc;
cdev->pdev = pdev;
@@ -136,6 +141,14 @@ static int qed_init_pci(struct qed_dev *cdev,
pci_save_state(pdev);
}
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
+ if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
+ DP_NOTICE(cdev,
+ "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
+ rev_id);
+ rc = -ENODEV;
+ goto err2;
+ }
if (!pci_is_pcie(pdev)) {
DP_NOTICE(cdev, "The bus is not PCI Express\n");
rc = -EIO;
@@ -190,7 +203,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
- dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
+ dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
dev_info->fw_major = FW_MAJOR_VERSION;
@@ -621,15 +634,18 @@ static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
- int rc, i;
- u8 num_vectors = 0;
-
+ struct qed_sb_cnt_info sb_cnt_info;
+ int rc;
+ int i;
memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
cdev->int_params.in.int_mode = int_mode;
- for_each_hwfn(cdev, i)
- num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
- cdev->int_params.in.num_vectors = num_vectors;
+ for_each_hwfn(cdev, i) {
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
+ cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
+ cdev->int_params.in.num_vectors++; /* slowpath */
+ }
/* We want a minimum of one slowpath and one fastpath vector per hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
@@ -763,7 +779,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
true, data);
if (rc)
- goto err3;
+ goto err2;
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
@@ -782,12 +798,14 @@ static int qed_slowpath_start(struct qed_dev *cdev,
return rc;
}
+ qed_reset_vport_stats(cdev);
+
return 0;
-err3:
- qed_free_stream_mem(cdev);
- qed_slowpath_irq_free(cdev);
err2:
+ qed_hw_timers_stop_all(cdev);
+ qed_slowpath_irq_free(cdev);
+ qed_free_stream_mem(cdev);
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index ba1b1f1ef789..b89c9a8e1655 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -11,8 +11,8 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
@@ -147,7 +147,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
u32 size;
/* Allocate mcp_info structure */
- p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
+ p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
if (!p_hwfn->mcp_info)
goto err;
p_info = p_hwfn->mcp_info;
@@ -161,15 +161,15 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
}
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
- p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
+ p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
p_info->mfw_mb_shadow =
kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
- p_info->mfw_mb_length), GFP_ATOMIC);
+ p_info->mfw_mb_length), GFP_KERNEL);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
- /* Initialize the MFW mutex */
- mutex_init(&p_info->mutex);
+ /* Initialize the MFW spinlock */
+ spin_lock_init(&p_info->lock);
return 0;
@@ -179,6 +179,52 @@ err:
return -ENOMEM;
}
+/* Locks the MFW mailbox of a PF to ensure a single access.
+ * The lock is achieved in most cases by holding a spinlock, causing other
+ * threads to wait until a previous access is done.
+ * In some cases (currently when [UN]LOAD_REQ commands are sent), single
+ * access is instead achieved by setting a blocking flag, which causes the
+ * mailbox commands of competing contexts to fail.
+ */
+static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
+ u32 cmd)
+{
+ spin_lock_bh(&p_hwfn->mcp_info->lock);
+
+	/* The spinlock shouldn't be held while the mailbox command is
+	 * [UN]LOAD_REQ: the engine is locked by the MFW, so spinning on a
+	 * pending [UN]LOAD_REQ of another PF while holding the spinlock
+	 * (i.e., with interrupts disabled) could lead to a deadlock.
+	 * It is assumed that for a single PF no other mailbox command is
+	 * sent from another context while a LOAD_REQ is in flight, and that
+	 * any command sent in parallel to an UNLOAD_REQ can be cancelled.
+ */
+ if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ if (p_hwfn->mcp_info->block_mb_sending) {
+ DP_NOTICE(p_hwfn,
+ "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
+ cmd);
+ spin_unlock_bh(&p_hwfn->mcp_info->lock);
+ return -EBUSY;
+ }
+
+ if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+ p_hwfn->mcp_info->block_mb_sending = true;
+ spin_unlock_bh(&p_hwfn->mcp_info->lock);
+ }
+
+ return 0;
+}
+
+static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn,
+ u32 cmd)
+{
+ if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
+ spin_unlock_bh(&p_hwfn->mcp_info->lock);
+}
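
(Taken together, the two helpers give the pattern below; this is a sketch of
a hypothetical caller, the real one being qed_mcp_cmd_and_union() later in
this patch. Note that for [UN]LOAD_REQ the spinlock is already dropped inside
qed_mcp_mb_lock() and only the blocking flag remains set until the matching
*_DONE command:)

    	rc = qed_mcp_mb_lock(p_hwfn, cmd);
    	if (rc)
    		return rc;	/* e.g. raced against an [UN]LOAD_REQ */

    	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, &resp, &param_out);

    	qed_mcp_mb_unlock(p_hwfn, cmd);	/* no-op for [UN]LOAD_REQ itself */
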
+
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -187,6 +233,13 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
u32 org_mcp_reset_seq, cnt = 0;
int rc = 0;
+ /* Ensure that only a single thread is accessing the mailbox at a
+ * certain time.
+ */
+ rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ if (rc != 0)
+ return rc;
+
/* Set drv command along with the updated sequence */
org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
@@ -209,6 +262,8 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
rc = -EAGAIN;
}
+ qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+
return rc;
}
@@ -275,14 +330,12 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params)
{
- int rc = 0;
+ u32 union_data_addr;
+ int rc;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -290,28 +343,56 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return -EBUSY;
}
- /* Lock Mutex to ensure only single thread is
- * accessing the MCP at one time
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data);
+
+	/* Ensure that only a single thread is accessing the mailbox at any
+	 * given time.
*/
- mutex_lock(&p_hwfn->mcp_info->mutex);
- rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
- o_mcp_resp, o_mcp_param);
- /* Release Mutex */
- mutex_unlock(&p_hwfn->mcp_info->mutex);
+ rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
+ if (rc)
+ return rc;
+
+ if (p_mb_params->p_data_src != NULL)
+ qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
+ p_mb_params->p_data_src,
+ sizeof(*p_mb_params->p_data_src));
+
+ rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
+ p_mb_params->param, &p_mb_params->mcp_resp,
+ &p_mb_params->mcp_param);
+
+ if (p_mb_params->p_data_dst != NULL)
+ qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr,
+ sizeof(*p_mb_params->p_data_dst));
+
+ qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
return rc;
}
-static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
- struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
{
- u32 i;
+ struct qed_mcp_mb_params mb_params;
+ int rc;
- /* Copy version string to MCP */
- for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
- DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
- *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ return 0;
}
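
qed_mcp_cmd() above covers the command/param-only case; a caller that also
needs the union-data reply would set p_data_dst as well. A hedged sketch
(qed_mcp_get_foo() and DRV_MSG_CODE_GET_FOO are hypothetical, not driver
symbols):

/* Hypothetical caller: sends a command and copies the shmem union_data
 * reply back out through p_data_dst.
 */
static int qed_mcp_get_foo(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			   union drv_union_data *p_reply)
{
	struct qed_mcp_mb_params mb_params;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_FOO;	/* hypothetical opcode */
	mb_params.p_data_dst = p_reply;		/* request the reply copy */

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
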
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
@@ -319,26 +400,18 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
u32 *p_load_code)
{
struct qed_dev *cdev = p_hwfn->cdev;
- u32 param;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
int rc;
- if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
- return -EBUSY;
- }
-
- /* Save driver's version to shmem */
- qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
-
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
- p_hwfn->mcp_info->drv_mb_seq,
- p_hwfn->mcp_info->drv_pulse_seq);
-
+ memset(&mb_params, 0, sizeof(mb_params));
/* Load Request */
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
- (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
- cdev->drv_type),
- p_load_code, &param);
+ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+ mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+ cdev->drv_type;
+ memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
if (rc) {
@@ -346,6 +419,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return rc;
}
+ *p_load_code = mb_params.mcp_resp;
+
/* If MFW refused (e.g. other port is in diagnostic mode) we
* must abort. This can happen in the following cases:
* - Other port is in diagnostic mode
@@ -365,6 +440,33 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 transceiver_state;
+
+ transceiver_state = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_HW | QED_MSG_SP),
+ "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
+ transceiver_state,
+ (u32)(p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data)));
+
+ transceiver_state = GET_FIELD(transceiver_state,
+ PMM_TRANSCEIVER_STATE);
+
+ if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+ DP_NOTICE(p_hwfn, "Transceiver is present.\n");
+ else
+ DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
+}
+
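
GET_FIELD() above is the driver's usual shift-and-mask extractor; assuming
the qed convention of paired _MASK/_SHIFT constants, the call reduces to:

/* Open-coded equivalent of GET_FIELD(transceiver_state,
 * PMM_TRANSCEIVER_STATE), assuming the usual _MASK/_SHIFT pairing.
 */
static u32 get_transceiver_state(u32 transceiver_data)
{
	return (transceiver_data >> PMM_TRANSCEIVER_STATE_SHIFT) &
	       PMM_TRANSCEIVER_STATE_MASK;
}
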
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_reset)
@@ -390,7 +492,10 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
return;
}
- p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ if (p_hwfn->b_drv_link_init)
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ else
+ p_link->link_up = false;
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -492,53 +597,43 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
bool b_up)
{
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
- u32 param = 0, reply = 0, cmd;
- struct pmm_phy_cfg phy_cfg;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ struct pmm_phy_cfg *phy_cfg;
int rc = 0;
- u32 i;
-
- if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
- return -EBUSY;
- }
+ u32 cmd;
/* Set the shmem configuration according to params */
- memset(&phy_cfg, 0, sizeof(phy_cfg));
+ phy_cfg = &union_data.drv_phy_cfg;
+ memset(phy_cfg, 0, sizeof(*phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
- phy_cfg.speed = params->speed.forced_speed;
- phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
- phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
- phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
- phy_cfg.adv_speed = params->speed.advertised_speeds;
- phy_cfg.loopback_mode = params->loopback_mode;
-
- /* Write the requested configuration to shmem */
- for (i = 0; i < sizeof(phy_cfg); i += 4)
- qed_wr(p_hwfn, p_ptt,
- p_hwfn->mcp_info->drv_mb_addr +
- offsetof(struct public_drv_mb, union_data) + i,
- ((u32 *)&phy_cfg)[i >> 2]);
+ phy_cfg->speed = params->speed.forced_speed;
+ phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+ phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+ phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ phy_cfg->adv_speed = params->speed.advertised_speeds;
+ phy_cfg->loopback_mode = params->loopback_mode;
+
+ p_hwfn->b_drv_link_init = b_up;
if (b_up) {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
- phy_cfg.speed,
- phy_cfg.pause,
- phy_cfg.adv_speed,
- phy_cfg.loopback_mode,
- phy_cfg.feature_config_flags);
+ phy_cfg->speed,
+ phy_cfg->pause,
+ phy_cfg->adv_speed,
+ phy_cfg->loopback_mode,
+ phy_cfg->feature_config_flags);
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link\n");
}
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
- p_hwfn->mcp_info->drv_mb_seq,
- p_hwfn->mcp_info->drv_pulse_seq);
-
- /* Load Request */
- rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
if (rc) {
@@ -581,6 +676,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
break;
+ case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+ qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+ break;
default:
DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
@@ -720,26 +818,25 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- if (p_hwfn->cdev->mf_mode != SF) {
- info->bandwidth_min = (shmem_info.config &
- FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
- if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
- DP_INFO(p_hwfn,
- "bandwidth minimum out of bounds [%02x]. Set to 1\n",
- info->bandwidth_min);
- info->bandwidth_min = 1;
- }
- info->bandwidth_max = (shmem_info.config &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
- DP_INFO(p_hwfn,
- "bandwidth maximum out of bounds [%02x]. Set to 100\n",
- info->bandwidth_max);
- info->bandwidth_max = 100;
- }
+ info->bandwidth_min = (shmem_info.config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ info->bandwidth_min);
+ info->bandwidth_min = 1;
+ }
+
+ info->bandwidth_max = (shmem_info.config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ info->bandwidth_max);
+ info->bandwidth_max = 100;
}
if (shmem_info.mac_upper || shmem_info.mac_lower) {
@@ -802,11 +899,11 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NIG_DRAIN, 100,
+ DRV_MSG_CODE_NIG_DRAIN, 1000,
&resp, &param);
/* Wait for the drain to complete before returning */
- msleep(120);
+ msleep(1020);
return rc;
}
@@ -832,31 +929,28 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_drv_version *p_ver)
{
- int rc = 0;
- u32 param = 0, reply = 0, i;
-
- if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
- return -EBUSY;
- }
+ struct drv_version_stc *p_drv_version;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ __be32 val;
+ u32 i;
+ int rc;
- DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
- p_ver->version);
- /* Copy version string to shmem */
- for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
- DRV_MB_WR(p_hwfn, p_ptt,
- union_data.drv_version.name[i * sizeof(u32)],
- *(u32 *)&p_ver->name[i * sizeof(u32)]);
+ p_drv_version = &union_data.drv_version;
+ p_drv_version->version = p_ver->version;
+	for (i = 0; i < MCP_DRV_VER_STR_SIZE - 4; i += 4) {
+		val = cpu_to_be32(*(u32 *)&p_ver->name[i]);
+		*(__be32 *)&p_drv_version->name[i] = val;
}
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
- &param);
- if (rc) {
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
- return rc;
- }
- return 0;
+ return rc;
}
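
The per-dword copy above is easy to get wrong: it must walk the version
string four bytes at a time and byte-swap each dword for the big-endian
MFW. A standalone user-space sketch of the same transform (MCP_STR_SIZE is
an illustrative stand-in for MCP_DRV_VER_STR_SIZE):

#include <arpa/inet.h>	/* htonl() plays the role of cpu_to_be32() */
#include <stdint.h>
#include <string.h>

#define MCP_STR_SIZE 32

static void copy_ver_str(uint8_t dst[MCP_STR_SIZE],
			 const char src[MCP_STR_SIZE])
{
	uint32_t val;
	unsigned int i;

	for (i = 0; i < MCP_STR_SIZE - 4; i += 4) {
		memcpy(&val, &src[i], sizeof(val));	/* read one dword */
		val = htonl(val);			/* make it big-endian */
		memcpy(&dst[i], &val, sizeof(val));
	}
	dst[MCP_STR_SIZE - 1] = '\0';	/* keep the string terminated */
}
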
int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 506197d5c3dd..50917a2131a5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -11,8 +11,8 @@
#include <linux/types.h>
#include <linux/delay.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "qed_hsi.h"
struct qed_mcp_link_speed_params {
@@ -255,7 +255,8 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
((_p_hwfn)->cdev->num_ports_in_engines * 2))
struct qed_mcp_info {
- struct mutex mutex; /* MCP access lock */
+ spinlock_t lock;
+ bool block_mb_sending;
u32 public_base;
u32 drv_mb_addr;
u32 mfw_mb_addr;
@@ -272,6 +273,15 @@ struct qed_mcp_info {
u16 mcp_hist;
};
+struct qed_mcp_mb_params {
+ u32 cmd;
+ u32 param;
+ union drv_union_data *p_data_src;
+ union drv_union_data *p_data_dst;
+ u32 mcp_resp;
+ u32 mcp_param;
+};
+
/**
* @brief Initialize the interface with the MCP
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index e8df12335a97..c15b1622e636 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -127,8 +127,20 @@
0x00c000UL
#define DORQ_REG_IFEN \
0x100040UL
+#define DORQ_REG_DB_DROP_REASON \
+ 0x100a2cUL
+#define DORQ_REG_DB_DROP_DETAILS \
+ 0x100a24UL
+#define DORQ_REG_DB_DROP_DETAILS_ADDRESS \
+ 0x100a1cUL
#define GRC_REG_TIMEOUT_EN \
0x050404UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID \
+ 0x050054UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 \
+ 0x05004cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 \
+ 0x050050UL
#define IGU_REG_BLOCK_CONFIGURATION \
0x180040UL
#define MCM_REG_INIT \
@@ -155,6 +167,40 @@
0x1100000UL
#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
0x2a9000UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 \
+ 0x2aa150UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 \
+ 0x2aa144UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 \
+ 0x2aa148UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS \
+ 0x2aa14cUL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 \
+ 0x2aa154UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 \
+ 0x2aa158UL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS \
+ 0x2aa15cUL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 \
+ 0x2aa160UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL \
+ 0x2aa164UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS \
+ 0x2aa54cUL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 \
+ 0x2aa544UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 \
+ 0x2aa548UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 \
+ 0x2aae74UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 \
+ 0x2aae78UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS \
+ 0x2aae7cUL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 \
+ 0x2aae80UL
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR \
+ 0x2aa3bcUL
#define PRM_REG_DISABLE_PRM \
0x230000UL
#define PRS_REG_SOFT_RST \
@@ -171,6 +217,14 @@
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
0x29e050UL
+#define PSWHST_REG_INCORRECT_ACCESS_VALID \
+ 0x2a0070UL
+#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS \
+ 0x2a0074UL
+#define PSWHST_REG_INCORRECT_ACCESS_DATA \
+ 0x2a0068UL
+#define PSWHST_REG_INCORRECT_ACCESS_LENGTH \
+ 0x2a006cUL
#define PSWRD_REG_DBG_SELECT \
0x29c040UL
#define PSWRD2_REG_CONF11 \
@@ -333,6 +387,8 @@
0x180800UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL
+#define MISC_REG_AEU_AFTER_INVERT_1_IGU \
+ 0x0087b4UL
#define MISC_REG_AEU_MASK_ATTN_IGU \
0x008494UL
#define IGU_REG_CLEANUP_STATUS_0 \
@@ -363,6 +419,10 @@
0x7 << 0)
#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
0
+#define MCP_REG_CPU_STATE \
+ 0xe05004UL
+#define MCP_REG_CPU_EVENT_MASK \
+ 0xe05008UL
#define PGLUE_B_REG_PF_BAR0_SIZE \
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 287fadfab52d..d39f914b66ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -311,19 +311,20 @@ void qed_consq_free(struct qed_hwfn *p_hwfn,
#define QED_SP_EQ_COMPLETION 0x01
#define QED_SP_CQE_COMPLETION 0x02
-struct qed_sp_init_request_params {
- size_t ramrod_data_size;
+struct qed_sp_init_data {
+ u32 cid;
+ u16 opaque_fid;
+
+ /* Information regarding operation upon sending & completion */
enum spq_mode comp_mode;
struct qed_spq_comp_cb *p_comp_data;
};
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
- u32 cid,
- u16 opaque_fid,
u8 cmd,
u8 protocol,
- struct qed_sp_init_request_params *p_params);
+ struct qed_sp_init_data *p_data);
/**
* @brief qed_sp_pf_start - PF Function Start Ramrod
@@ -343,7 +344,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum mf_mode mode);
+ enum qed_mf_mode mode);
/**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 6f7879136633..1c06c37d4c3d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -23,15 +23,13 @@
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
- u32 cid,
- u16 opaque_fid,
u8 cmd,
u8 protocol,
- struct qed_sp_init_request_params *p_params)
+ struct qed_sp_init_data *p_data)
{
- int rc = -EINVAL;
+ u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct qed_spq_entry *p_ent = NULL;
- u32 opaque_cid = opaque_fid << 16 | cid;
+ int rc;
if (!pp_ent)
return -ENOMEM;
@@ -48,7 +46,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
p_ent->elem.hdr.protocol_id = protocol;
p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
- p_ent->comp_mode = p_params->comp_mode;
+ p_ent->comp_mode = p_data->comp_mode;
p_ent->comp_done.done = 0;
switch (p_ent->comp_mode) {
@@ -57,17 +55,17 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
break;
case QED_SPQ_MODE_BLOCK:
- if (!p_params->p_comp_data)
+ if (!p_data->p_comp_data)
return -EINVAL;
- p_ent->comp_cb.cookie = p_params->p_comp_data->cookie;
+ p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
break;
case QED_SPQ_MODE_CB:
- if (!p_params->p_comp_data)
+ if (!p_data->p_comp_data)
p_ent->comp_cb.function = NULL;
else
- p_ent->comp_cb = *p_params->p_comp_data;
+ p_ent->comp_cb = *p_data->p_comp_data;
break;
default:
@@ -83,37 +81,35 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
- if (p_params->ramrod_data_size)
- memset(&p_ent->ramrod, 0, p_params->ramrod_data_size);
+
+ memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
return 0;
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum mf_mode mode)
+ enum qed_mf_mode mode)
{
- struct qed_sp_init_request_params params;
struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn);
u8 sb_index = p_hwfn->p_eq->eq_sb_index;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
int rc = -EINVAL;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
- memset(&params, 0, sizeof(params));
- params.ramrod_data_size = sizeof(*p_ramrod);
- params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
- rc = qed_sp_init_request(p_hwfn,
- &p_ent,
- qed_spq_get_cid(p_hwfn),
- p_hwfn->hw_info.opaque_fid,
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_START,
PROTOCOLID_COMMON,
- &params);
+ &init_data);
if (rc)
return rc;
@@ -125,26 +121,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = cpu_to_le16(0xf);
-	p_ramrod->mf_mode = mode;
+ switch (mode) {
+ case QED_MF_DEFAULT:
+ case QED_MF_NPAR:
+ p_ramrod->mf_mode = MF_NPAR;
+ break;
+ case QED_MF_OVLAN:
+ p_ramrod->mf_mode = MF_OVLAN;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+ p_ramrod->mf_mode = MF_NPAR;
+ }
p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* Place EQ address in RAMROD */
- p_ramrod->event_ring_pbl_addr.hi =
- DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
- p_ramrod->event_ring_pbl_addr.lo =
- DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
+ p_hwfn->p_eq->chain.pbl.p_phys_table);
p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
- p_ramrod->consolid_q_pbl_addr.hi =
- DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
- p_ramrod->consolid_q_pbl_addr.lo =
- DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+ DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+ p_hwfn->p_consq->chain.pbl.p_phys_table);
p_hwfn->hw_info.personality = PERSONALITY_ETH;
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
+ "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index,
- (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
p_ramrod->outer_tag);
return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -152,17 +155,19 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
- struct qed_sp_init_request_params params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
int rc = -EINVAL;
- memset(&params, 0, sizeof(params));
- params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
- rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
- p_hwfn->hw_info.opaque_fid,
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
- &params);
+ &init_data);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 3dd548ab8df1..89469d5aae25 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -183,10 +183,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
p_cxt->xstorm_st_context.spq_base_hi =
DMA_HI_LE(p_spq->chain.p_phys_addr);
- p_cxt->xstorm_st_context.consolid_base_addr.lo =
- DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
- p_cxt->xstorm_st_context.consolid_base_addr.hi =
- DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+ DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
+ p_hwfn->p_consq->chain.p_phys_addr);
}
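
DMA_REGPAIR_LE collapses the repeated hi/lo stores seen on the removed
lines. In effect it performs the split below (a sketch; the actual macro
and regpair field names live in the qed headers):

/* Split a dma_addr_t into a little-endian hi/lo register pair. */
struct regpair_sketch {
	__le32 lo;
	__le32 hi;
};

static inline void dma_regpair_le(struct regpair_sketch *rp, dma_addr_t addr)
{
	rp->hi = cpu_to_le32(upper_32_bits(addr));
	rp->lo = cpu_to_le32(lower_32_bits(addr));
}
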
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
@@ -327,7 +325,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
struct qed_eq *p_eq;
/* Allocate EQ struct */
- p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
+ p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
if (!p_eq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
return NULL;
@@ -423,8 +421,7 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
p_virt = p_spq->p_virt;
for (i = 0; i < p_spq->chain.capacity; i++) {
- p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
- p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+ DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
list_add_tail(&p_virt->list, &p_spq->free_pool);
@@ -457,7 +454,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
/* SPQ struct */
p_spq =
- kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
+ kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
if (!p_spq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
return -ENOMEM;
@@ -853,7 +850,7 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
struct qed_consq *p_consq;
/* Allocate ConsQ struct */
- p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
+ p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
if (!p_consq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
return NULL;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 7c6caf7f6612..d023251544d9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -24,7 +24,7 @@
#include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 4
+#define QEDE_MINOR_VERSION 7
#define QEDE_REVISION_VERSION 0
#define QEDE_ENGINEERING_VERSION 0
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -100,6 +100,12 @@ struct qede_stats {
u64 tx_mac_ctrl_frames;
};
+struct qede_vlan {
+ struct list_head list;
+ u16 vid;
+ bool configured;
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -154,6 +160,11 @@ struct qede_dev {
u16 q_num_rx_buffers; /* Must be a power of two */
u16 q_num_tx_buffers; /* Must be a power of two */
+ bool gro_disable;
+ struct list_head vlan_list;
+ u16 configured_vlans;
+ u16 non_configured_vlans;
+ bool accept_any_vlan;
struct delayed_work sp_task;
unsigned long sp_flags;
};
@@ -173,9 +184,27 @@ enum QEDE_STATE {
* skb are built only after the frame was DMA-ed.
*/
struct sw_rx_data {
- u8 *data;
+ struct page *data;
+ dma_addr_t mapping;
+ unsigned int page_offset;
+};
- DEFINE_DMA_UNMAP_ADDR(mapping);
+enum qede_agg_state {
+ QEDE_AGG_STATE_NONE = 0,
+ QEDE_AGG_STATE_START = 1,
+ QEDE_AGG_STATE_ERROR = 2
+};
+
+struct qede_agg_info {
+ struct sw_rx_data replace_buf;
+ dma_addr_t replace_buf_mapping;
+ struct sw_rx_data start_buf;
+ dma_addr_t start_buf_mapping;
+ struct eth_fast_path_rx_tpa_start_cqe start_cqe;
+ enum qede_agg_state agg_state;
+ struct sk_buff *skb;
+ int frag_id;
+ u16 vlan_tag;
};
struct qede_rx_queue {
@@ -187,7 +216,11 @@ struct qede_rx_queue {
struct qed_chain rx_comp_ring;
void __iomem *hw_rxq_prod_addr;
+ /* GRO */
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+
int rx_buf_size;
+ unsigned int rx_buf_seg_size;
u16 num_rx_buffers;
u16 rxq_id;
@@ -281,6 +314,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev);
#define NUM_TX_BDS_MIN 128
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+#define QEDE_RX_HDR_SIZE 256
#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index e442b85c9a5e..c49dc10ce151 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -217,9 +217,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct qed_link_params params;
u32 speed;
- if (edev->dev_info.common.is_mf) {
+ if (!edev->dev_info.common.is_mf_default) {
DP_INFO(edev,
- "Link parameters can not be changed in MF mode\n");
+ "Link parameters can not be changed in non-default mode\n");
return -EOPNOTSUPP;
}
@@ -428,7 +428,7 @@ static int qede_set_pauseparam(struct net_device *dev,
struct qed_link_params params;
struct qed_link_output current_link;
- if (!edev->dev_info.common.is_mf) {
+ if (!edev->dev_info.common.is_mf_default) {
DP_INFO(edev,
"Pause parameters can not be updated in non-default mode\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 6237f10b5119..518af329502d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -39,10 +39,10 @@
#include "qede.h"
-static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
- DRV_MODULE_VERSION "\n";
+static char version[] =
+ "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
-MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -53,7 +53,7 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_40 0x1634
-#define CHIP_NUM_57980S_10 0x1635
+#define CHIP_NUM_57980S_10 0x1666
#define CHIP_NUM_57980S_MF 0x1636
#define CHIP_NUM_57980S_100 0x1644
#define CHIP_NUM_57980S_50 0x1654
@@ -330,15 +330,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
struct eth_tx_3rd_bd *third_bd)
{
u8 l4_proto;
- u16 bd2_bits = 0, bd2_bits2 = 0;
+ u16 bd2_bits1 = 0, bd2_bits2 = 0;
- bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+ bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
- bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
<< ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
- bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
@@ -347,16 +347,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
l4_proto = ip_hdr(skb)->protocol;
if (l4_proto == IPPROTO_UDP)
- bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+ bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
- if (third_bd) {
+ if (third_bd)
third_bd->data.bitfields |=
- ((tcp_hdrlen(skb) / 4) &
- ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
- ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
- }
+ cpu_to_le16(((tcp_hdrlen(skb) / 4) &
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
- second_bd->data.bitfields = cpu_to_le16(bd2_bits);
+ second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}
@@ -381,6 +380,28 @@ static int map_frag_to_bd(struct qede_dev *edev,
return 0;
}
+/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
+ u8 xmit_type)
+{
+ int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
+
+ if (xmit_type & XMIT_LSO) {
+ int hlen;
+
+ hlen = skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data;
+
+ /* linear payload would require its own BD */
+ if (skb_headlen(skb) > hlen)
+ allowed_frags--;
+ }
+
+ return (skb_shinfo(skb)->nr_frags > allowed_frags);
+}
+#endif
+
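
The "+2" budget above can be sanity-checked standalone. A sketch with an
illustrative constant (the real ETH_TX_MAX_BDS_PER_NON_LSO_PACKET value
comes from the HSI headers):

/* Sketch of the BD budget behind qede_pkt_req_lin(); MAX_BDS stands in
 * for ETH_TX_MAX_BDS_PER_NON_LSO_PACKET.
 */
enum { MAX_BDS = 18 };	/* illustrative value */

static int needs_linearize(int nr_frags, int headlen, int hdrs_len,
			   int is_lso)
{
	int allowed_frags = MAX_BDS - 1;	/* 1st BD carries headers */

	/* for LSO, a linear payload tail beyond the headers costs a BD */
	if (is_lso && headlen > hdrs_len)
		allowed_frags--;

	return nr_frags > allowed_frags;
}
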
/* Main transmit function */
static
netdev_tx_t qede_start_xmit(struct sk_buff *skb,
@@ -408,16 +429,22 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
txq = QEDE_TX_QUEUE(edev, txq_index);
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
- /* Current code doesn't support SKB linearization, since the max number
- * of skb frags can be passed in the FW HSI.
- */
- BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);
-
WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
(MAX_SKB_FRAGS + 1));
xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+ if (qede_pkt_req_lin(edev, skb, xmit_type)) {
+ if (skb_linearize(skb)) {
+ DP_NOTICE(edev,
+ "SKB linearization failed - silently dropping this SKB\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
+
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
txq->sw_tx_ring[idx].skb = skb;
@@ -464,12 +491,16 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Fill the parsing flags & params according to the requested offload */
if (xmit_type & XMIT_L4_CSUM) {
+ u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
+
/* We don't re-calculate IP checksum as it is already done by
* the upper stack
*/
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ first_bd->data.bitfields |= cpu_to_le16(temp);
+
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass few params, since the device cracker doesn't
* support parsing IPv6 with extension header/s.
@@ -491,7 +522,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* @@@TBD - if will not be removed need to check */
third_bd->data.bitfields |=
- (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+ cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
/* Make life easier for FW guys who can't deal with header and
* data on same BD. If we need to split, use the second bd...
@@ -719,26 +750,52 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
return false;
}
-/* This function copies the Rx buffer from the CONS position to the PROD
- * position, since we failed to allocate a new Rx buffer.
+/* This function reuses the buffer (from an offset) from the
+ * consumer index to the producer index in the BD ring.
*/
-static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
+static inline void qede_reuse_page(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
{
- struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
- struct sw_rx_data *sw_rx_data_cons =
- &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
- struct sw_rx_data *sw_rx_data_prod =
- &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ struct sw_rx_data *curr_prod;
+ dma_addr_t new_mapping;
- dma_unmap_addr_set(sw_rx_data_prod, mapping,
- dma_unmap_addr(sw_rx_data_cons, mapping));
+ curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ *curr_prod = *curr_cons;
- sw_rx_data_prod->data = sw_rx_data_cons->data;
- memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
+ new_mapping = curr_prod->mapping + curr_prod->page_offset;
+
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
- rxq->sw_rx_cons++;
rxq->sw_rx_prod++;
+ curr_cons->data = NULL;
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
+{
+ /* Move to the next segment in the page */
+ curr_cons->page_offset += rxq->rx_buf_seg_size;
+
+ if (curr_cons->page_offset == PAGE_SIZE) {
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+ return -ENOMEM;
+
+ dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ } else {
+ /* Increment refcount of the page as we don't want
+ * network stack to take the ownership of the page
+ * which can be recycled multiple times by the driver.
+ */
+ atomic_inc(&curr_cons->data->_count);
+ qede_reuse_page(edev, rxq, curr_cons);
+ }
+
+ return 0;
}
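
Together, qede_reuse_page() and qede_realloc_rx_buffer() implement a simple
page-segment walk: a page is carved into rx_buf_seg_size chunks, and only
when the offset wraps does a fresh page get allocated. A minimal sketch
with illustrative sizes (4K page, 2K segments):

static int next_segment(unsigned int *page_offset)
{
	enum { SKETCH_PAGE_SIZE = 4096, SKETCH_SEG_SIZE = 2048 };

	*page_offset += SKETCH_SEG_SIZE;

	if (*page_offset == SKETCH_PAGE_SIZE)
		return 1;	/* page exhausted: allocate and map a new one */

	/* otherwise bump the page refcount and re-post the same page at
	 * the new offset, as qede_realloc_rx_buffer() does above
	 */
	return 0;
}
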
static inline void qede_update_rx_prod(struct qede_dev *edev,
@@ -809,6 +866,281 @@ static inline void qede_skb_receive(struct qede_dev *edev,
napi_gro_receive(&fp->napi, skb);
}
+static void qede_set_gro_params(struct qede_dev *edev,
+ struct sk_buff *skb,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
+
+ if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
+ PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
+ cqe->header_len;
+}
+
+static int qede_fill_frag_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ u8 tpa_agg_index,
+ u16 len_on_bd)
+{
+ struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
+ NUM_RX_BDS_MAX];
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
+ struct sk_buff *skb = tpa_info->skb;
+
+ if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+ goto out;
+
+ /* Add one frag and update the appropriate fields in the skb */
+ skb_fill_page_desc(skb, tpa_info->frag_id++,
+ current_bd->data, current_bd->page_offset,
+ len_on_bd);
+
+ if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ goto out;
+ }
+
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+
+ skb->data_len += len_on_bd;
+ skb->truesize += rxq->rx_buf_seg_size;
+ skb->len += len_on_bd;
+
+ return 0;
+
+out:
+ return -ENOMEM;
+}
+
+static void qede_tpa_start(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+ dma_addr_t mapping = tpa_info->replace_buf_mapping;
+ struct sw_rx_data *sw_rx_data_cons;
+ struct sw_rx_data *sw_rx_data_prod;
+ enum pkt_hash_types rxhash_type;
+ u32 rxhash;
+
+ sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+	/* Use the pre-allocated replacement buffer - we can't release the
+	 * aggregation start buffer until the aggregation is over, and we
+	 * don't want to risk an allocation failing here, so re-allocate once
+	 * the aggregation is over.
+	 */
+ dma_unmap_addr_set(sw_rx_data_prod, mapping,
+ dma_unmap_addr(replace_buf, mapping));
+
+ sw_rx_data_prod->data = replace_buf->data;
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+ sw_rx_data_prod->page_offset = replace_buf->page_offset;
+
+ rxq->sw_rx_prod++;
+
+	/* Move the partial skb from cons to the pool (don't unmap it yet),
+	 * and save the mapping in case we drop the packet later on.
+	 */
+ tpa_info->start_buf = *sw_rx_data_cons;
+ mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
+ le32_to_cpu(rx_bd_cons->addr.lo));
+
+ tpa_info->start_buf_mapping = mapping;
+ rxq->sw_rx_cons++;
+
+ /* set tpa state to start only if we are able to allocate skb
+ * for this aggregation, otherwise mark as error and aggregation will
+ * be dropped
+ */
+ tpa_info->skb = netdev_alloc_skb(edev->ndev,
+ le16_to_cpu(cqe->len_on_first_bd));
+ if (unlikely(!tpa_info->skb)) {
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ return;
+ }
+
+ skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
+ memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
+
+ /* Start filling in the aggregation info */
+ tpa_info->frag_id = 0;
+ tpa_info->agg_state = QEDE_AGG_STATE_START;
+
+ rxhash = qede_get_rxhash(edev, cqe->bitfields,
+ cqe->rss_hash, &rxhash_type);
+ skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
+ if ((le16_to_cpu(cqe->pars_flags.flags) >>
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+ tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+ else
+ tpa_info->vlan_tag = 0;
+
+ /* This is needed in order to enable forwarding support */
+ qede_set_gro_params(edev, tpa_info->skb, cqe);
+
+ if (likely(cqe->ext_bd_len_list[0]))
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->ext_bd_len_list[0]));
+
+ if (unlikely(cqe->ext_bd_len_list[1])) {
+ DP_ERR(edev,
+ "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ }
+}
+
+#ifdef CONFIG_INET
+static void qede_gro_ip_csum(struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_network_header(skb, 0);
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+
+ tcp_gro_complete(skb);
+}
+
+static void qede_gro_ipv6_csum(struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_network_header(skb, 0);
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+ tcp_gro_complete(skb);
+}
+#endif
+
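
The two helpers above exist because tcp_gro_complete() expects th->check to
already hold the complemented TCP pseudo-header sum, i.e. what a
CHECKSUM_PARTIAL skb from the stack's own GRO path would carry. In equation
form (sketch for the IPv4 case):

/* th->check = ~csum(saddr, daddr, proto = IPPROTO_TCP,
 *                   len = skb->len - skb_transport_offset(skb))
 *
 * which is exactly the ~tcp_v4_check(...) seeding performed above before
 * handing the aggregated skb to tcp_gro_complete().
 */
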
+static void qede_gro_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct sk_buff *skb,
+ u16 vlan_tag)
+{
+#ifdef CONFIG_INET
+ if (skb_shinfo(skb)->gso_size) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ qede_gro_ip_csum(skb);
+ break;
+ case htons(ETH_P_IPV6):
+ qede_gro_ipv6_csum(skb);
+ break;
+ default:
+ DP_ERR(edev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ ntohs(skb->protocol));
+ }
+ }
+#endif
+ skb_record_rx_queue(skb, fp->rss_id);
+ qede_skb_receive(edev, fp, skb, vlan_tag);
+}
+
+static inline void qede_tpa_cont(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+ int i;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+ "Strange - TPA cont with more than a single len_list entry\n");
+}
+
+static void qede_tpa_end(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct qede_rx_queue *rxq = fp->rxq;
+ struct qede_agg_info *tpa_info;
+ struct sk_buff *skb;
+ int i;
+
+ tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ skb = tpa_info->skb;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+		       "Strange - TPA end with more than a single len_list entry\n");
+
+ if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+ goto err;
+
+ /* Sanity */
+ if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
+ DP_ERR(edev,
+ "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
+ cqe->num_of_bds, tpa_info->frag_id);
+ if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
+ DP_ERR(edev,
+		       "Strange - total packet len [cqe] is %04x but SKB has len %04x\n",
+ le16_to_cpu(cqe->total_packet_len), skb->len);
+
+ memcpy(skb->data,
+ page_address(tpa_info->start_buf.data) +
+ tpa_info->start_cqe.placement_offset +
+ tpa_info->start_buf.page_offset,
+ le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
+
+ /* Recycle [mapped] start buffer for the next replacement */
+ tpa_info->replace_buf = tpa_info->start_buf;
+ tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+
+ /* Finalize the SKB */
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
+
+ qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
+
+ tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+
+ return;
+err:
+ /* The BD starting the aggregation is still mapped; Re-use it for
+ * future aggregations [as replacement buffer]
+ */
+ memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
+ sizeof(struct sw_rx_data));
+ tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+ tpa_info->start_buf.data = NULL;
+ tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ dev_kfree_skb_any(tpa_info->skb);
+ tpa_info->skb = NULL;
+}
+
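
Taken together, the TPA handlers above implement a small per-aggregation
state machine over qede_agg_info::agg_state; a summary of the transitions
they encode:

/* TPA_START: NONE  -> START  (skb allocated, replacement buffer posted)
 *            NONE  -> ERROR  (skb allocation failed)
 * TPA_CONT:  START -> START  (frags appended via qede_fill_frag_skb())
 *            START -> ERROR  (rx buffer realloc failed)
 * TPA_END:   START -> NONE   (skb finalized and handed to GRO)
 *            ERROR -> NONE   (skb freed, start buffer recycled)
 */
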
static u8 qede_check_csum(u16 flag)
{
u16 csum_flag = 0;
@@ -857,9 +1189,10 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
struct sk_buff *skb;
+ struct page *data;
+ __le16 flags;
u16 len, pad;
u32 rx_hash;
- u8 *data;
/* Get the CQE from the completion ring */
cqe = (union eth_rx_cqe *)
@@ -873,62 +1206,135 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
goto next_cqe;
}
+ if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
+ switch (cqe_type) {
+ case ETH_RX_CQE_TYPE_TPA_START:
+ qede_tpa_start(edev, rxq,
+ &cqe->fast_path_tpa_start);
+ goto next_cqe;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_tpa_cont(edev, rxq,
+ &cqe->fast_path_tpa_cont);
+ goto next_cqe;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_tpa_end(edev, fp,
+ &cqe->fast_path_tpa_end);
+ goto next_rx_only;
+ default:
+ break;
+ }
+ }
+
/* Get the data from the SW ring */
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
data = sw_rx_data->data;
fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->pkt_len);
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
pad = fp_cqe->placement_offset;
+ flags = cqe->fast_path_regular.pars_flags.flags;
- /* For every Rx BD consumed, we allocate a new BD so the BD ring
- * is always with a fixed size. If allocation fails, we take the
- * consumed BD and return it to the ring in the PROD position.
- * The packet that was received on that BD will be dropped (and
- * not passed to the upper stack).
- */
- if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
- dma_unmap_single(&edev->pdev->dev,
- dma_unmap_addr(sw_rx_data, mapping),
- rxq->rx_buf_size, DMA_FROM_DEVICE);
-
- /* If this is an error packet then drop it */
- parse_flag =
- le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
- csum_flag = qede_check_csum(parse_flag);
- if (csum_flag == QEDE_CSUM_ERROR) {
- DP_NOTICE(edev,
- "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
- sw_comp_cons, parse_flag);
- rxq->rx_hw_errors++;
- kfree(data);
- goto next_rx;
- }
-
- skb = build_skb(data, 0);
+ /* If this is an error packet then drop it */
+ parse_flag = le16_to_cpu(flags);
- if (unlikely(!skb)) {
- DP_NOTICE(edev,
- "Build_skb failed, dropping incoming packet\n");
- kfree(data);
- rxq->rx_alloc_errors++;
- goto next_rx;
- }
-
- skb_reserve(skb, pad);
+ csum_flag = qede_check_csum(parse_flag);
+ if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ DP_NOTICE(edev,
+ "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+ sw_comp_cons, parse_flag);
+ rxq->rx_hw_errors++;
+ qede_reuse_page(edev, rxq, sw_rx_data);
+ goto next_rx;
+ }
- } else {
+ skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+ if (unlikely(!skb)) {
DP_NOTICE(edev,
- "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
- qede_reuse_rx_data(rxq);
+			  "SKB allocation failed, dropping incoming packet\n");
+ qede_reuse_page(edev, rxq, sw_rx_data);
rxq->rx_alloc_errors++;
- goto next_cqe;
+ goto next_rx;
}
- sw_rx_data->data = NULL;
+ /* Copy data into SKB */
+ if (len + pad <= QEDE_RX_HDR_SIZE) {
+ memcpy(skb_put(skb, len),
+ page_address(data) + pad +
+ sw_rx_data->page_offset, len);
+ qede_reuse_page(edev, rxq, sw_rx_data);
+ } else {
+ struct skb_frag_struct *frag;
+ unsigned int pull_len;
+ unsigned char *va;
+
+ frag = &skb_shinfo(skb)->frags[0];
- skb_put(skb, len);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
+ pad + sw_rx_data->page_offset,
+ len, rxq->rx_buf_seg_size);
+
+ va = skb_frag_address(frag);
+ pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+ /* Align the pull_len to optimize memcpy */
+ memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+
+ if (unlikely(qede_realloc_rx_buffer(edev, rxq,
+ sw_rx_data))) {
+ DP_ERR(edev, "Failed to allocate rx buffer\n");
+				rxq->rx_alloc_errors++;
+				dev_kfree_skb_any(skb);
+				goto next_cqe;
+ }
+ }
+
+ if (fp_cqe->bd_num != 1) {
+ u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
+ u8 num_frags;
+
+ pkt_len -= len;
+
+ for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
+ num_frags--) {
+ u16 cur_size = pkt_len > rxq->rx_buf_size ?
+ rxq->rx_buf_size : pkt_len;
+
+ WARN_ONCE(!cur_size,
+ "Still got %d BDs for mapping jumbo, but length became 0\n",
+ num_frags);
+
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+ goto next_cqe;
+
+ rxq->sw_rx_cons++;
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ qed_chain_consume(&rxq->rx_bd_ring);
+ dma_unmap_page(&edev->pdev->dev,
+ sw_rx_data->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb_fill_page_desc(skb,
+ skb_shinfo(skb)->nr_frags++,
+ sw_rx_data->data, 0,
+ cur_size);
+
+ skb->truesize += PAGE_SIZE;
+ skb->data_len += cur_size;
+ skb->len += cur_size;
+ pkt_len -= cur_size;
+ }
+
+ if (pkt_len)
+ DP_ERR(edev,
+ "Mapped all BDs of jumbo, but still have %d bytes\n",
+ pkt_len);
+ }
skb->protocol = eth_type_trans(skb, edev->ndev);
@@ -945,9 +1351,9 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
qed_chain_consume(&rxq->rx_bd_ring);
-
next_rx:
rxq->sw_rx_cons++;
+next_rx_only:
rx_pkt++;
next_cqe: /* don't consume bd rx buffer */
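
The rewritten fast path splits on frame size: frames that fit in the
QEDE_RX_HDR_SIZE linear area are copied so the page segment can be
re-posted at once, while larger frames attach the page as a frag and pull
only the headers. A worked example (sketch):

/* With QEDE_RX_HDR_SIZE = 256:
 *
 *   len = 120,  pad = 2  ->  122 <= 256: whole frame memcpy'd into the
 *       skb and the page segment is recycled via qede_reuse_page().
 *   len = 1500, pad = 2  ->  1502 > 256: the page becomes a frag;
 *       eth_get_headlen() finds the header length (54 for plain TCPv4)
 *       and only ALIGN(54, sizeof(long)) bytes are pulled into the
 *       linear area before the segment is replaced.
 */
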
@@ -1056,6 +1462,21 @@ static int qede_set_ucast_rx_mac(struct qede_dev *edev,
return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
+static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ u16 vid)
+{
+ struct qed_filter_params filter_cmd;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.vlan_valid = 1;
+ filter_cmd.filter.ucast.vlan = vid;
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
struct qed_eth_stats stats;
@@ -1168,6 +1589,247 @@ static struct rtnl_link_stats64 *qede_get_stats64(
return stats;
}
+static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
+{
+ struct qed_update_vport_params params;
+ int rc;
+
+ /* Proceed only if action actually needs to be performed */
+ if (edev->accept_any_vlan == action)
+ return;
+
+ memset(&params, 0, sizeof(params));
+
+ params.vport_id = 0;
+ params.accept_any_vlan = action;
+ params.update_accept_any_vlan_flg = 1;
+
+ rc = edev->ops->vport_update(edev->cdev, &params);
+ if (rc) {
+ DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+ action ? "enable" : "disable");
+ } else {
+ DP_INFO(edev, "%s accept-any-vlan\n",
+ action ? "enabled" : "disabled");
+ edev->accept_any_vlan = action;
+ }
+}
+
+static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_vlan *vlan, *tmp;
+ int rc;
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan) {
+ DP_INFO(edev, "Failed to allocate struct for vlan\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&vlan->list);
+ vlan->vid = vid;
+ vlan->configured = false;
+
+ /* Verify vlan isn't already configured */
+ list_for_each_entry(tmp, &edev->vlan_list, list) {
+ if (tmp->vid == vlan->vid) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "vlan already configured\n");
+ kfree(vlan);
+ return -EEXIST;
+ }
+ }
+
+ /* If interface is down, cache this VLAN ID and return */
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Interface is down, VLAN %d will be configured when interface is up\n",
+ vid);
+ if (vid != 0)
+ edev->non_configured_vlans++;
+ list_add(&vlan->list, &edev->vlan_list);
+
+ return 0;
+ }
+
+ /* Check for the filter limit.
+ * Note - vlan0 has a reserved filter and can be added without
+ * worrying about quota
+ */
+ if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
+ (vlan->vid == 0)) {
+ rc = qede_set_ucast_rx_vlan(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ vlan->vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to configure VLAN %d\n",
+ vlan->vid);
+ kfree(vlan);
+ return -EINVAL;
+ }
+ vlan->configured = true;
+
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0)
+ edev->configured_vlans++;
+ } else {
+ /* Out of quota; Activate accept-any-VLAN mode */
+ if (!edev->non_configured_vlans)
+ qede_config_accept_any_vlan(edev, true);
+
+ edev->non_configured_vlans++;
+ }
+
+ list_add(&vlan->list, &edev->vlan_list);
+
+ return 0;
+}
+
+static void qede_del_vlan_from_list(struct qede_dev *edev,
+ struct qede_vlan *vlan)
+{
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0) {
+ if (vlan->configured)
+ edev->configured_vlans--;
+ else
+ edev->non_configured_vlans--;
+ }
+
+ list_del(&vlan->list);
+ kfree(vlan);
+}
+
+static int qede_configure_vlan_filters(struct qede_dev *edev)
+{
+ int rc = 0, real_rc = 0, accept_any_vlan = 0;
+ struct qed_dev_eth_info *dev_info;
+ struct qede_vlan *vlan = NULL;
+
+ if (list_empty(&edev->vlan_list))
+ return 0;
+
+ dev_info = &edev->dev_info;
+
+ /* Configure non-configured vlans */
+ list_for_each_entry(vlan, &edev->vlan_list, list) {
+ if (vlan->configured)
+ continue;
+
+ /* We have used all our credits, now enable accept_any_vlan */
+ if ((vlan->vid != 0) &&
+ (edev->configured_vlans == dev_info->num_vlan_filters)) {
+ accept_any_vlan = 1;
+ continue;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
+
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
+ vlan->vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to configure VLAN %u\n",
+ vlan->vid);
+ real_rc = rc;
+ continue;
+ }
+
+ vlan->configured = true;
+ /* vlan0 filter doesn't consume our VLAN filter's quota */
+ if (vlan->vid != 0) {
+ edev->non_configured_vlans--;
+ edev->configured_vlans++;
+ }
+ }
+
+ /* enable accept_any_vlan mode if we have more VLANs than credits,
+ * or remove accept_any_vlan mode if we've actually removed
+ * a non-configured vlan, and all remaining vlans are truly configured.
+ */
+
+ if (accept_any_vlan)
+ qede_config_accept_any_vlan(edev, true);
+ else if (!edev->non_configured_vlans)
+ qede_config_accept_any_vlan(edev, false);
+
+ return real_rc;
+}
+
+static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_vlan *vlan = NULL;
+ int rc;
+
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
+
+ /* Find whether entry exists */
+ list_for_each_entry(vlan, &edev->vlan_list, list)
+ if (vlan->vid == vid)
+ break;
+
+ if (!vlan || (vlan->vid != vid)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Vlan isn't configured\n");
+ return 0;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+		/* As the interface is already down, we don't have a VPORT
+		 * instance to remove the vlan filter from, so just update
+		 * the vlan list.
+		 */
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Interface is down, removing VLAN from list only\n");
+ qede_del_vlan_from_list(edev, vlan);
+ return 0;
+ }
+
+ /* Remove vlan */
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+ return -EINVAL;
+ }
+
+ qede_del_vlan_from_list(edev, vlan);
+
+ /* We have removed a VLAN - try to see if we can
+	 * configure a non-configured VLAN from the list.
+ */
+ rc = qede_configure_vlan_filters(edev);
+
+ return rc;
+}
+
+static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
+{
+ struct qede_vlan *vlan = NULL;
+
+ if (list_empty(&edev->vlan_list))
+ return;
+
+ list_for_each_entry(vlan, &edev->vlan_list, list) {
+ if (!vlan->configured)
+ continue;
+
+ vlan->configured = false;
+
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0) {
+ edev->non_configured_vlans++;
+ edev->configured_vlans--;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "marked vlan %d as non-configured\n",
+ vlan->vid);
+ }
+
+ edev->accept_any_vlan = false;
+}
+
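
The VLAN code above is, at heart, quota accounting over the hardware filter
credits. A condensed user-space sketch of the add path (illustrative only;
the driver additionally caches vids while the interface is down):

struct vlan_quota {
	unsigned int credits;		/* dev_info.num_vlan_filters */
	unsigned int configured;	/* consuming credits (vid != 0) */
	unsigned int non_configured;	/* covered by accept-any-vlan */
};

/* Returns true when accept-any-vlan must be switched on. */
static bool vlan_add(struct vlan_quota *q, unsigned short vid)
{
	if (vid == 0 || q->configured < q->credits) {
		/* vid 0 rides a reserved filter; others consume a credit */
		if (vid != 0)
			q->configured++;
		return false;
	}

	/* out of credits: the first uncovered vid turns accept-any on */
	return q->non_configured++ == 0;
}
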
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -1176,6 +1838,8 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
.ndo_get_stats64 = qede_get_stats64,
};
@@ -1220,6 +1884,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->num_tc = edev->dev_info.num_tc;
+ INIT_LIST_HEAD(&edev->vlan_list);
+
return edev;
}
@@ -1251,7 +1917,7 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_HIGHDMA;
ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
- NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
ndev->hw_features = hw_features;
@@ -1566,23 +2232,45 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
struct sw_rx_data *rx_buf;
- u8 *data;
+ struct page *data;
rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
data = rx_buf->data;
- dma_unmap_single(&edev->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
- rxq->rx_buf_size, DMA_FROM_DEVICE);
+ dma_unmap_page(&edev->pdev->dev,
+ rx_buf->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rx_buf->data = NULL;
- kfree(data);
+ __free_page(data);
+ }
+}
+
+static void qede_free_sge_mem(struct qede_dev *edev,
+			      struct qede_rx_queue *rxq)
+{
+ int i;
+
+ if (edev->gro_disable)
+ return;
+
+ for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+ struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+
+		if (replace_buf->data) {
+ dma_unmap_page(&edev->pdev->dev,
+ dma_unmap_addr(replace_buf, mapping),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(replace_buf->data);
+ }
}
}
static void qede_free_mem_rxq(struct qede_dev *edev,
struct qede_rx_queue *rxq)
{
+ qede_free_sge_mem(edev, rxq);
+
/* Free rx buffers */
qede_free_rx_buffers(edev, rxq);
@@ -1600,29 +2288,32 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct sw_rx_data *sw_rx_data;
struct eth_rx_bd *rx_bd;
dma_addr_t mapping;
+ struct page *data;
u16 rx_buf_size;
- u8 *data;
rx_buf_size = rxq->rx_buf_size;
- data = kmalloc(rx_buf_size, GFP_ATOMIC);
+ data = alloc_pages(GFP_ATOMIC, 0);
if (unlikely(!data)) {
- DP_NOTICE(edev, "Failed to allocate Rx data\n");
+ DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
return -ENOMEM;
}
- mapping = dma_map_single(&edev->pdev->dev, data,
- rx_buf_size, DMA_FROM_DEVICE);
+	/* Map the entire page, as it will be used for multiple
+	 * RX buffer segments.
+	 */
+ mapping = dma_map_page(&edev->pdev->dev, data, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
- kfree(data);
+ __free_page(data);
DP_NOTICE(edev, "Failed to map Rx buffer\n");
return -ENOMEM;
}
sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->page_offset = 0;
sw_rx_data->data = data;
-
- dma_unmap_addr_set(sw_rx_data, mapping, mapping);
+ sw_rx_data->mapping = mapping;
/* Advance PROD and get BD pointer */
rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
@@ -1635,6 +2326,53 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
return 0;
}
+static int qede_alloc_sge_mem(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ dma_addr_t mapping;
+ int i;
+
+ if (edev->gro_disable)
+ return 0;
+
+ if (edev->ndev->mtu > PAGE_SIZE) {
+ edev->gro_disable = 1;
+ return 0;
+ }
+
+ for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+ struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+
+ replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
+ if (unlikely(!replace_buf->data)) {
+ DP_NOTICE(edev,
+ "Failed to allocate TPA skb pool [replacement buffer]\n");
+ goto err;
+ }
+
+ mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev,
+ "Failed to map TPA replacement buffer\n");
+ goto err;
+ }
+
+ dma_unmap_addr_set(replace_buf, mapping, mapping);
+ replace_buf->page_offset = 0;
+
+ tpa_info->replace_buf_mapping = mapping;
+ tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ }
+
+ return 0;
+err:
+ qede_free_sge_mem(edev, rxq);
+ edev->gro_disable = 1;
+ return -ENOMEM;
+}
+
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev,
struct qede_rx_queue *rxq)
@@ -1643,13 +2381,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rxq->num_rx_buffers = edev->q_num_rx_buffers;
- rxq->rx_buf_size = NET_IP_ALIGN +
- ETH_OVERHEAD +
- edev->ndev->mtu +
- QEDE_FW_RX_ALIGN_END;
+ rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
+ edev->ndev->mtu;
+ if (rxq->rx_buf_size > PAGE_SIZE)
+ rxq->rx_buf_size = PAGE_SIZE;
+
+ /* Segment size to split a page into multiple equal parts */
+ rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
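+ /* e.g., a 1500-byte MTU yields a buffer of roughly 1.5K, which
+ * rounds up to a 2048-byte segment, so a 4K page backs two buffers.
+ */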
/* Allocate the parallel driver ring for Rx buffers */
- size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
+ size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
if (!rxq->sw_rx_ring) {
DP_ERR(edev, "Rx buffers ring allocation failed\n");
@@ -1660,7 +2401,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_NEXT_PTR,
- NUM_RX_BDS_MAX,
+ RX_RING_SIZE,
sizeof(struct eth_rx_bd),
&rxq->rx_bd_ring);
@@ -1671,7 +2412,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME,
QED_CHAIN_MODE_PBL,
- NUM_RX_BDS_MAX,
+ RX_RING_SIZE,
sizeof(union eth_rx_cqe),
&rxq->rx_comp_ring);
if (rc)
@@ -1693,6 +2434,8 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
num_allocated);
}
+ qede_alloc_sge_mem(edev, rxq);
+
return 0;
err:
@@ -1855,6 +2598,8 @@ static void qede_init_fp(struct qede_dev *edev)
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
edev->ndev->name, rss_id);
}
+
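+ /* TPA aggregation is armed only when the stack has GRO enabled;
+ * cache the inverse for the allocation and fastpath code.
+ */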
+ edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
}
static int qede_set_real_num_queues(struct qede_dev *edev)
@@ -2088,11 +2833,12 @@ static int qede_stop_queues(struct qede_dev *edev)
static int qede_start_queues(struct qede_dev *edev)
{
int rc, tc, i;
- int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1;
+ int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
struct qed_update_vport_params vport_update_params;
struct qed_queue_start_common_params q_params;
+ struct qed_start_vport_params start = {0};
if (!edev->num_rss) {
DP_ERR(edev,
@@ -2100,10 +2846,13 @@ static int qede_start_queues(struct qede_dev *edev)
return -EINVAL;
}
- rc = edev->ops->vport_start(cdev, vport_id,
- edev->ndev->mtu,
- drop_ttl0_flg,
- vlan_removal_en);
+ start.gro_enable = !edev->gro_disable;
+ start.mtu = edev->ndev->mtu;
+ start.vport_id = 0;
+ start.drop_ttl0 = true;
+ start.remove_inner_vlan = vlan_removal_en;
+
+ rc = edev->ops->vport_start(cdev, &start);
if (rc) {
DP_ERR(edev, "Start V-PORT failed %d\n", rc);
@@ -2112,7 +2861,7 @@ static int qede_start_queues(struct qede_dev *edev)
DP_VERBOSE(edev, NETIF_MSG_IFUP,
"Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
- vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
+ start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
for_each_rss(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
@@ -2177,7 +2926,7 @@ static int qede_start_queues(struct qede_dev *edev)
/* Prepare and send the vport enable */
memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = vport_id;
+ vport_update_params.vport_id = start.vport_id;
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
@@ -2252,6 +3001,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
DP_INFO(edev, "Stopped Queues\n");
+ qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
/* Release the interrupts */
@@ -2320,6 +3070,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
edev->state = QEDE_STATE_OPEN;
mutex_unlock(&edev->qede_lock);
+ /* Program VLANs that were left non-configured during unload */
+ qede_configure_vlan_filters(edev);
+
/* Ask for link-up using current configuration */
memset(&link_params, 0, sizeof(link_params));
link_params.link_up = true;
@@ -2398,13 +3151,17 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
}
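+ /* Act only on real carrier transitions so repeated link
+ * notifications don't re-log or toggle the TX queues.
+ */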
if (link->link_up) {
- DP_NOTICE(edev, "Link is up\n");
- netif_tx_start_all_queues(edev->ndev);
- netif_carrier_on(edev->ndev);
+ if (!netif_carrier_ok(edev->ndev)) {
+ DP_NOTICE(edev, "Link is up\n");
+ netif_tx_start_all_queues(edev->ndev);
+ netif_carrier_on(edev->ndev);
+ }
} else {
- DP_NOTICE(edev, "Link is down\n");
- netif_tx_disable(edev->ndev);
- netif_carrier_off(edev->ndev);
+ if (netif_carrier_ok(edev->ndev)) {
+ DP_NOTICE(edev, "Link is down\n");
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+ }
}
}
@@ -2580,6 +3337,17 @@ static void qede_config_rx_mode(struct net_device *ndev)
goto out;
}
+ /* take care of VLAN mode */
+ if (ndev->flags & IFF_PROMISC) {
+ qede_config_accept_any_vlan(edev, true);
+ } else if (!edev->non_configured_vlans) {
+ /* It's possible that accept_any_vlan mode is set due to a
+ * previous setting of IFF_PROMISC. If vlan credits are
+ * sufficient, disable accept_any_vlan.
+ */
+ qede_config_accept_any_vlan(edev, false);
+ }
+
rx_mode.filter.accept_flags = accept_flags;
edev->ops->filter_config(edev->cdev, &rx_mode);
out:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 46bbea8e023c..55007f1e6bbc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -566,6 +566,7 @@ struct qlcnic_adapter_stats {
u64 tx_dma_map_error;
u64 spurious_intr;
u64 mac_filter_limit_overrun;
+ u64 mbx_spurious_intr;
};
/*
@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
unsigned long status;
spinlock_t queue_lock; /* Mailbox queue lock */
spinlock_t aen_lock; /* Mailbox response/AEN lock */
- atomic_t rsp_status;
+ u32 rsp_status;
u32 num_cmds;
};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 37a731be7d39..f9640d5ce6ba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
{
- atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
complete(&mbx->completion);
}
@@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
} else {
- if (atomic_read(&mbx->rsp_status) != rsp_status)
+ if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
}
out:
@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
} else {
- if (atomic_read(&mbx->rsp_status) != rsp_status)
+ if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
}
}
@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
+ u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
struct qlcnic_adapter *adapter = data;
struct qlcnic_mailbox *mbx;
- u32 mask, resp, event;
unsigned long flags;
mbx = adapter->ahw->mailbox;
@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
goto out;
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
- else
- qlcnic_83xx_notify_mbx_response(mbx);
+ } else {
+ if (mbx->rsp_status != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ else
+ adapter->stats.mbx_spurious_intr++;
+ }
out:
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
@@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
struct qlcnic_adapter *adapter = mbx->adapter;
const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
struct device *dev = &adapter->pdev->dev;
- atomic_t *rsp_status = &mbx->rsp_status;
struct list_head *head = &mbx->cmd_q;
struct qlcnic_hardware_context *ahw;
struct qlcnic_cmd_args *cmd = NULL;
+ unsigned long flags;
ahw = adapter->ahw;
@@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
return;
}
- atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
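+ /* rsp_status is a plain u32 now; aen_lock orders this reset
+ * against the IRQ/AEN paths that test it before notifying.
+ */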
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
spin_lock(&mbx->queue_lock);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 494e8105adee..0a2318cad34d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_OFF(stats.mac_filter_limit_overrun)},
{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
QLC_OFF(stats.spurious_intr)},
-
+ {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
+ QLC_OFF(stats.mbx_spurious_intr)},
};
static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index ef332708e5f2..6d31f92ef2b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "1.00.00.34"
+#define DRV_VERSION "1.00.00.35"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 997976426799..b28e73ea2c25 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
return;
}
skb_reserve(new_skb, NET_IP_ALIGN);
+
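+ /* The small-buffer page remains mapped for reuse, so pass
+ * ownership to the CPU around the copy and back afterwards.
+ */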
+ pci_dma_sync_single_for_cpu(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+
memcpy(skb_put(new_skb, length), skb->data, length);
+
+ pci_dma_sync_single_for_device(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
skb = new_skb;
/* Frame error, so drop the packet. */