author     Linus Torvalds <torvalds@linux-foundation.org>  2016-10-05 20:11:24 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-10-05 20:11:24 +0300
commit     687ee0ad4e897e29f4b41f7a20c866d74c5e0660 (patch)
tree       b31a2af35c24a54823674cdd126993b80daeac67 /drivers/net/ethernet/qlogic/qed
parent     3ddf40e8c31964b744ff10abb48c8e36a83ec6e7 (diff)
parent     03a1eabc3f54469abd4f1784182851b2e29630cc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) BBR TCP congestion control, from Neal Cardwell, Yuchung Cheng and
    co. at Google. https://lwn.net/Articles/701165/

 2) Do TCP Small Queues for retransmits, from Eric Dumazet.

 3) Support collect_md mode for all IPV4 and IPV6 tunnels, from Alexei
    Starovoitov.

 4) Allow cls_flower to classify packets in ip tunnels, from Amir Vadai.

 5) Support DSA tagging in older mv88e6xxx switches, from Andrew Lunn.

 6) Support GMAC protocol in iwlwifi mvm, from Ayala Beker.

 7) Support ndo_poll_controller in mlx5, from Calvin Owens.

 8) Move VRF processing to an output hook and allow l3mdev to be
    loopback, from David Ahern.

 9) Support SOCK_DESTROY for UDP sockets. Also from David Ahern.

10) Congestion control in RXRPC, from David Howells.

11) Support geneve RX offload in ixgbe, from Emil Tantilov.

12) When hitting pressure for new incoming TCP data SKBs, perform a
    partial rather than a full purge of the OFO queue (which could be
    huge). From Eric Dumazet.

13) Convert XFRM state and policy lookups to RCU, from Florian Westphal.

14) Support RX network flow classification to igb, from Gangfeng Huang.

15) Hardware offloading of eBPF in nfp driver, from Jakub Kicinski.

16) New skbmod packet action, from Jamal Hadi Salim.

17) Remove some inefficiencies in snmp proc output, from Jia He.

18) Add FIB notifications to properly propagate route changes to
    hardware which is doing forwarding offloading. From Jiri Pirko.

19) New dsa driver for qca8xxx chips, from John Crispin.

20) Implement RFC7559 ipv6 router solicitation backoff, from Maciej
    Żenczykowski.

21) Add L3 mode to ipvlan, from Mahesh Bandewar.

22) Support 802.1ad in mlx4, from Moshe Shemesh.

23) Support hardware LRO in mediatek driver, from Nelson Chang.

24) Add TC offloading to mlx5, from Or Gerlitz.

25) Convert various drivers to ethtool ksettings interfaces, from
    Philippe Reynes.

26) TX max rate limiting for cxgb4, from Rahul Lakkireddy.

27) NAPI support for ath10k, from Rajkumar Manoharan.

28) Support XDP in mlx5, from Rana Shahout and Saeed Mahameed.

29) UDP replicast support in TIPC, from Richard Alpe.

30) Per-queue statistics for qed driver, from Sudarsana Reddy Kalluru.

31) Support BQL in thunderx driver, from Sunil Goutham.

32) TSO support in alx driver, from Tobias Regnery.

33) Add stream parser engine and use it in kcm.

34) Support async DHCP replies in ipconfig module, from Uwe
    Kleine-König.

35) DSA port fast aging for mv88e6xxx driver, from Vivien Didelot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1715 commits)
  mlxsw: switchx2: Fix misuse of hard_header_len
  mlxsw: spectrum: Fix misuse of hard_header_len
  net/faraday: Stop NCSI device on shutdown
  net/ncsi: Introduce ncsi_stop_dev()
  net/ncsi: Rework the channel monitoring
  net/ncsi: Allow to extend NCSI request properties
  net/ncsi: Rework request index allocation
  net/ncsi: Don't probe on the reserved channel ID (0x1f)
  net/ncsi: Introduce NCSI_RESERVED_CHANNEL
  net/ncsi: Avoid unused-value build warning from ia64-linux-gcc
  net: Add netdev all_adj_list refcnt propagation to fix panic
  net: phy: Add Edge-rate driver for Microsemi PHYs.
  vmxnet3: Wake queue from reset work
  i40e: avoid NULL pointer dereference and recursive errors on early PCI error
  qed: Add RoCE ll2 & GSI support
  qed: Add support for memory registeration verbs
  qed: Add support for QP verbs
  qed: PD,PKEY and CQ verb support
  qed: Add support for RoCE hw init
  qede: Add qedr framework
  ...
Diffstat (limited to 'drivers/net/ethernet/qlogic/qed')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile          |    4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h             |   71
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c         |   75
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h         |    7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c        |   24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c       | 6898
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.h       |   54
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c         |  489
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h     |   20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h         | 2500
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c          |  149
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c    |   99
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c         |  153
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c          |  259
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h          |    7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c         | 1792
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.h         |  316
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c        |  239
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c         |  234
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h         |   96
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h    |  934
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c        | 2954
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.h        |  216
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_selftest.c    |    1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h          |    5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c |   15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c         |  131
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c       |  237
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c          |  249
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h          |    7
30 files changed, 16701 insertions, 1534 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index d1f157e439cf..cda0af7fbc20 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -2,5 +2,7 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
- qed_selftest.o qed_dcbx.o
+ qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
+qed-$(CONFIG_QED_LL2) += qed_ll2.o
+qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 45ab74676573..653bb5735f0c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -23,10 +23,11 @@
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
+#include "qed_debug.h"
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.1.20"
+#define DRV_MODULE_VERSION "8.10.9.20"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -34,6 +35,9 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
+#define QED_WID_SIZE (1024)
+#define QED_PF_DEMS_SIZE (4)
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -42,11 +46,21 @@ enum qed_coalescing_mode {
struct qed_eth_cb_ops;
struct qed_dev_info;
+union qed_mcp_protocol_stats;
+enum qed_mcp_protocol_type;
/* helpers */
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ (cid * QED_PF_DEMS_SIZE);
+
+ return db_addr;
+}
+
+static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
return db_addr;
@@ -69,6 +83,7 @@ struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
+struct qed_ll2_info;
struct qed_mcp_info;
struct qed_rt_data {
@@ -148,13 +163,17 @@ enum QED_RESOURCES {
QED_RL,
QED_MAC,
QED_VLAN,
+ QED_RDMA_CNQ_RAM,
QED_ILT,
+ QED_LL2_QUEUE,
+ QED_RDMA_STATS_QUEUE,
QED_MAX_RESC,
};
enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
+ QED_RDMA_CNQ,
QED_MAX_FEATURES,
};
@@ -357,6 +376,9 @@ struct qed_hwfn {
struct qed_sb_attn_info *p_sb_attn;
/* Protocol related */
+ bool using_ll2;
+ struct qed_ll2_info *p_ll2_info;
+ struct qed_rdma_info *p_rdma_info;
struct qed_pf_params pf_params;
bool b_rdma_enabled_in_prs;
@@ -393,6 +415,19 @@ struct qed_hwfn {
/* Buffer for unzipping firmware data */
void *unzip_buf;
+ struct dbg_tools_data dbg_info;
+
+ /* PWM region specific data */
+ u32 dpi_size;
+ u32 dpi_count;
+
+ /* This is used to calculate the doorbell address */
+ u32 dpi_start_offset;
+
+ /* If one of the following is set then EDPM shouldn't be used */
+ u8 dcbx_no_edpm;
+ u8 db_bar_no_edpm;
+
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
@@ -402,6 +437,7 @@ struct qed_hwfn {
#endif
struct z_stream_s *stream;
+ struct qed_roce_ll2_info *ll2;
};
struct pci_params {
@@ -426,6 +462,21 @@ struct qed_int_params {
bool fp_initialized;
u8 fp_msix_base;
u8 fp_msix_cnt;
+ u8 rdma_msix_base;
+ u8 rdma_msix_cnt;
+};
+
+struct qed_dbg_feature {
+ struct dentry *dentry;
+ u8 *dump_buf;
+ u32 buf_size;
+ u32 dumped_dwords;
+};
+
+struct qed_dbg_params {
+ struct qed_dbg_feature features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
+ bool print_data;
};
struct qed_dev {
@@ -442,6 +493,8 @@ struct qed_dev {
CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
CHIP_REV_IS_B0(dev))
+#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
+#define QED_IS_K2(dev) QED_IS_AH(dev)
#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
@@ -517,7 +570,6 @@ struct qed_dev {
bool b_is_vf;
u32 drv_type;
-
struct qed_eth_stats *reset_stats;
struct qed_fw_data *fw_data;
@@ -542,7 +594,18 @@ struct qed_dev {
} protocol_ops;
void *ops_cookie;
+ struct qed_dbg_params dbg_params;
+
+#ifdef CONFIG_QED_LL2
+ struct qed_cb_ll2_info *ll2;
+ u8 ll2_mac_address[ETH_ALEN];
+#endif
+
const struct firmware *firmware;
+
+ u32 rdma_max_sge;
+ u32 rdma_max_inline;
+ u32 rdma_max_srq_sge;
};
#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
@@ -606,7 +669,9 @@ void qed_link_update(struct qed_hwfn *hwfn);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
-
+void qed_get_protocol_stats(struct qed_dev *cdev,
+ enum qed_mcp_protocol_type type,
+ union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
#endif /* _QED_H */
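
A standalone sketch of the PF/VF doorbell split introduced above -- not part of
the patch. The PF helper lays doorbells out by byte offset (cid *
QED_PF_DEMS_SIZE), while the VF helper encodes the cid in the legacy ICID
bit-field. FIELD_VALUE is rewritten here to take a raw shift, and the
DB_LEGACY_ADDR_* shift values are illustrative assumptions (the real
definitions live in the qed headers):

    #include <stdint.h>
    #include <stdio.h>

    #define QED_PF_DEMS_SIZE          4   /* from the patch */
    #define DB_LEGACY_ADDR_DEMS_SHIFT 0   /* assumed shift value */
    #define DB_LEGACY_ADDR_ICID_SHIFT 5   /* assumed shift value */
    #define FIELD_VALUE(shift, val)   ((uint32_t)(val) << (shift))

    /* PF doorbells: the connection id scales a fixed per-PF element size. */
    static uint32_t db_addr_pf(uint32_t cid, uint32_t dems)
    {
            return FIELD_VALUE(DB_LEGACY_ADDR_DEMS_SHIFT, dems) |
                   (cid * QED_PF_DEMS_SIZE);
    }

    /* VF doorbells: the connection id is encoded in the legacy ICID field. */
    static uint32_t db_addr_vf(uint32_t cid, uint32_t dems)
    {
            return FIELD_VALUE(DB_LEGACY_ADDR_DEMS_SHIFT, dems) |
                   FIELD_VALUE(DB_LEGACY_ADDR_ICID_SHIFT, cid);
    }

    int main(void)
    {
            printf("pf: 0x%x, vf: 0x%x\n",
                   (unsigned)db_addr_pf(10, 0), (unsigned)db_addr_vf(10, 0));
            return 0;
    }
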
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 1c35f376143e..82370a1a59ad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -48,7 +48,13 @@
#define TM_ELEM_SIZE 4
/* ILT constants */
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+/* For RoCE, configure the ILT page size to 64K to cover the RoCE maximum of 256K tasks. */
+#define ILT_DEFAULT_HW_P_SIZE 4
+#else
#define ILT_DEFAULT_HW_P_SIZE 3
+#endif
+
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
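
(Arithmetic check: ILT_PAGE_IN_BYTES(4) = 1 << (4 + 12) = 65536, i.e. the 64K
page the RoCE comment above refers to, while the default ILT_PAGE_IN_BYTES(3)
yields 32K pages.)
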
@@ -377,9 +383,8 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
}
}
-u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *vf_cid)
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid)
{
if (vf_cid)
*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
@@ -405,10 +410,10 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
return cnt;
}
-static void
-qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
- enum protocol_type proto,
- u8 seg, u8 seg_type, u32 count, bool has_fl)
+static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg,
+ u8 seg_type, u32 count, bool has_fl)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
@@ -420,8 +425,7 @@ qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
struct qed_ilt_cli_blk *p_blk,
- u32 start_line, u32 total_size,
- u32 elem_size)
+ u32 start_line, u32 total_size, u32 elem_size)
{
u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
@@ -448,8 +452,7 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
p_cli->first.val = *p_line;
p_cli->active = true;
- *p_line += DIV_ROUND_UP(p_blk->total_size,
- p_blk->real_size_in_page);
+ *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
p_cli->last.val = *p_line - 1;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -795,10 +798,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
/* allocate t2 */
- p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem),
+ p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
GFP_KERNEL);
if (!p_mngr->t2) {
- DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
rc = -ENOMEM;
goto t2_fail;
}
@@ -926,12 +928,9 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
void *p_virt;
u32 size;
- size = min_t(u32, sz_left,
- p_blk->real_size_in_page);
+ size = min_t(u32, sz_left, p_blk->real_size_in_page);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- size,
- &p_phys,
- GFP_KERNEL);
+ size, &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
memset(p_virt, 0, size);
@@ -963,7 +962,6 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
GFP_KERNEL);
if (!p_mngr->ilt_shadow) {
- DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
rc = -ENOMEM;
goto ilt_shadow_fail;
}
@@ -976,7 +974,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
p_blk = &clients[i].pf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
- if (rc != 0)
+ if (rc)
goto ilt_shadow_fail;
}
for (k = 0; k < p_mngr->vf_count; k++) {
@@ -985,7 +983,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
p_blk = &clients[i].vf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
- if (rc != 0)
+ if (rc)
goto ilt_shadow_fail;
}
}
@@ -1056,10 +1054,8 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
u32 i;
p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
- if (!p_mngr) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+ if (!p_mngr)
return -ENOMEM;
- }
/* Initialize ILT client registers */
clients = p_mngr->clients;
@@ -1111,24 +1107,18 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
/* Allocate the ILT shadow table */
rc = qed_ilt_shadow_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+ if (rc)
goto tables_alloc_fail;
- }
/* Allocate the T2 table */
rc = qed_cxt_src_t2_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
+ if (rc)
goto tables_alloc_fail;
- }
/* Allocate and initialize the acquired cids bitmaps */
rc = qed_cid_map_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+ if (rc)
goto tables_alloc_fail;
- }
return 0;
@@ -1672,7 +1662,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
- active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
tm_offset += tm_iids.pf_tids[i];
}
@@ -1702,8 +1692,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *p_cid)
+ enum protocol_type type, u32 *p_cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 rel_cid;
@@ -1717,8 +1706,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
p_mngr->acquired[type].max_count);
if (rel_cid >= p_mngr->acquired[type].max_count) {
- DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
- type);
+ DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
return -EINVAL;
}
@@ -1730,8 +1718,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
}
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
- u32 cid,
- enum protocol_type *p_type)
+ u32 cid, enum protocol_type *p_type)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map;
@@ -1763,8 +1750,7 @@ static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
return true;
}
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
- u32 cid)
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
enum protocol_type type;
@@ -1781,8 +1767,7 @@ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
}
-int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
- struct qed_cxt_info *p_info)
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
@@ -1860,6 +1845,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
+ if (p_hwfn->using_ll2)
+ core_cids += 4;
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
switch (p_hwfn->hw_info.personality) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c6f6f2e8192d..2b8bdaa77800 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -170,6 +170,13 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid);
+int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type, u32 iid);
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type);
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+ enum protocol_type type);
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0
#define QED_CTX_FL_MEM 1
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 3656d2fd673d..130da1c0490b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -875,11 +875,8 @@ int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
int rc = 0;
p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
- if (!p_hwfn->p_dcbx_info) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate 'struct qed_dcbx_info'\n");
+ if (!p_hwfn->p_dcbx_info)
rc = -ENOMEM;
- }
return rc;
}
@@ -1190,10 +1187,8 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
}
dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
- if (!dcbx_info) {
- DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
+ if (!dcbx_info)
return -ENOMEM;
- }
rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
if (rc) {
@@ -1227,10 +1222,8 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
struct qed_dcbx_get *dcbx_info;
dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
- if (!dcbx_info) {
- DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
+ if (!dcbx_info)
return NULL;
- }
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
@@ -1982,6 +1975,7 @@ static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
if (!dcbx_info->operational.ieee) {
DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
return -EINVAL;
}
@@ -2150,17 +2144,19 @@ static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
return rc;
}
-int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+static int
+qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
{
return qed_dcbnl_get_ieee_ets(cdev, ets, true);
}
-int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+static int
+qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
{
return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
}
-int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
@@ -2204,7 +2200,7 @@ int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
return 0;
}
-int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
new file mode 100644
index 000000000000..88e7d5bef909
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -0,0 +1,6898 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+/* Chip IDs enum */
+enum chip_ids {
+ CHIP_RESERVED,
+ CHIP_BB_B0,
+ CHIP_K2,
+ MAX_CHIP_IDS
+};
+
+/* Memory groups enum */
+enum mem_groups {
+ MEM_GROUP_PXP_MEM,
+ MEM_GROUP_DMAE_MEM,
+ MEM_GROUP_CM_MEM,
+ MEM_GROUP_QM_MEM,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_BRB_RAM,
+ MEM_GROUP_BRB_MEM,
+ MEM_GROUP_PRS_MEM,
+ MEM_GROUP_SDM_MEM,
+ MEM_GROUP_PBUF,
+ MEM_GROUP_IOR,
+ MEM_GROUP_RAM,
+ MEM_GROUP_BTB_RAM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CONN_CFC_MEM,
+ MEM_GROUP_TASK_CFC_MEM,
+ MEM_GROUP_CAU_PI,
+ MEM_GROUP_CAU_MEM,
+ MEM_GROUP_PXP_ILT,
+ MEM_GROUP_MULD_MEM,
+ MEM_GROUP_BTB_MEM,
+ MEM_GROUP_IGU_MEM,
+ MEM_GROUP_IGU_MSIX,
+ MEM_GROUP_CAU_SB,
+ MEM_GROUP_BMB_RAM,
+ MEM_GROUP_BMB_MEM,
+ MEM_GROUPS_NUM
+};
+
+/* Memory groups names */
+static const char * const s_mem_group_names[] = {
+ "PXP_MEM",
+ "DMAE_MEM",
+ "CM_MEM",
+ "QM_MEM",
+ "TM_MEM",
+ "BRB_RAM",
+ "BRB_MEM",
+ "PRS_MEM",
+ "SDM_MEM",
+ "PBUF",
+ "IOR",
+ "RAM",
+ "BTB_RAM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CONN_CFC_MEM",
+ "TASK_CFC_MEM",
+ "CAU_PI",
+ "CAU_MEM",
+ "PXP_ILT",
+ "MULD_MEM",
+ "BTB_MEM",
+ "IGU_MEM",
+ "IGU_MSIX",
+ "CAU_SB",
+ "BMB_RAM",
+ "BMB_MEM",
+};
+
+/* Idle check conditions */
+static u32 cond4(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
+}
+
+static u32 cond6(const u32 *r, const u32 *imm)
+{
+ return ((r[0] >> imm[0]) & imm[1]) != imm[2];
+}
+
+static u32 cond5(const u32 *r, const u32 *imm)
+{
+ return (r[0] & imm[0]) != imm[1];
+}
+
+static u32 cond8(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) >> imm[1]) !=
+ (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
+}
+
+static u32 cond9(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
+}
+
+static u32 cond1(const u32 *r, const u32 *imm)
+{
+ return (r[0] & ~imm[0]) != imm[1];
+}
+
+static u32 cond0(const u32 *r, const u32 *imm)
+{
+ return r[0] != imm[0];
+}
+
+static u32 cond10(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1] && r[2] == imm[0];
+}
+
+static u32 cond11(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1] && r[2] > imm[0];
+}
+
+static u32 cond3(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1];
+}
+
+static u32 cond12(const u32 *r, const u32 *imm)
+{
+ return r[0] & imm[0];
+}
+
+static u32 cond7(const u32 *r, const u32 *imm)
+{
+ return r[0] < (r[1] - imm[0]);
+}
+
+static u32 cond2(const u32 *r, const u32 *imm)
+{
+ return r[0] > imm[0];
+}
+
+/* Array of Idle Check conditions */
+static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
+ cond0,
+ cond1,
+ cond2,
+ cond3,
+ cond4,
+ cond5,
+ cond6,
+ cond7,
+ cond8,
+ cond9,
+ cond10,
+ cond11,
+ cond12,
+};
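
A minimal standalone sketch of how a condition table like cond_arr is
dispatched -- a rule's condition id indexes the array, and the function is
called with the rule's register reads and immediates. The names and values
below are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;

    /* Two of the conditions above, reproduced to keep the demo self-contained. */
    static u32 demo_cond0(const u32 *r, const u32 *imm) { return r[0] != imm[0]; }
    static u32 demo_cond2(const u32 *r, const u32 *imm) { return r[0] > imm[0]; }

    static u32 (*demo_cond_arr[])(const u32 *r, const u32 *imm) = {
            demo_cond0,
            demo_cond2,
    };

    int main(void)
    {
            u32 regs[] = { 7 };     /* values read from hardware registers */
            u32 imms[] = { 5 };     /* immediates baked into the rule */

            /* An idle-check rule "fires" when its condition returns non-zero. */
            printf("cond0: %u, cond2: %u\n",
                   (unsigned)demo_cond_arr[0](regs, imms),
                   (unsigned)demo_cond_arr[1](regs, imms));
            return 0;
    }
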
+
+/******************************* Data Types **********************************/
+
+enum platform_ids {
+ PLATFORM_ASIC,
+ PLATFORM_RESERVED,
+ PLATFORM_RESERVED2,
+ PLATFORM_RESERVED3,
+ MAX_PLATFORM_IDS
+};
+
+struct dbg_array {
+ const u32 *ptr;
+ u32 size_in_dwords;
+};
+
+/* Chip constant definitions */
+struct chip_defs {
+ const char *name;
+ struct {
+ u8 num_ports;
+ u8 num_pfs;
+ } per_platform[MAX_PLATFORM_IDS];
+};
+
+/* Platform constant definitions */
+struct platform_defs {
+ const char *name;
+ u32 delay_factor;
+};
+
+/* Storm constant definitions */
+struct storm_defs {
+ char letter;
+ enum block_id block_id;
+ enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+ bool has_vfc;
+ u32 sem_fast_mem_addr;
+ u32 sem_frame_mode_addr;
+ u32 sem_slow_enable_addr;
+ u32 sem_slow_mode_addr;
+ u32 sem_slow_mode1_conf_addr;
+ u32 sem_sync_dbg_empty_addr;
+ u32 sem_slow_dbg_empty_addr;
+ u32 cm_ctx_wr_addr;
+ u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_ag_ctx_rd_addr;
+ u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_st_ctx_rd_addr;
+ u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_ag_ctx_rd_addr;
+ u32 cm_task_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_st_ctx_rd_addr;
+};
+
+/* Block constant definitions */
+struct block_defs {
+ const char *name;
+ bool has_dbg_bus[MAX_CHIP_IDS];
+ bool associated_to_storm;
+ u32 storm_id; /* Valid only if associated_to_storm is true */
+ enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+ u32 dbg_select_addr;
+ u32 dbg_cycle_enable_addr;
+ u32 dbg_shift_addr;
+ u32 dbg_force_valid_addr;
+ u32 dbg_force_frame_addr;
+ bool has_reset_bit;
+ bool unreset; /* If true, the block is taken out of reset before dump */
+ enum dbg_reset_regs reset_reg;
+ u8 reset_bit_offset; /* Bit offset in reset register */
+};
+
+/* Reset register definitions */
+struct reset_reg_defs {
+ u32 addr;
+ u32 unreset_val;
+ bool exists[MAX_CHIP_IDS];
+};
+
+struct grc_param_defs {
+ u32 default_val[MAX_CHIP_IDS];
+ u32 min;
+ u32 max;
+ bool is_preset;
+ u32 exclude_all_preset_val;
+ u32 crash_preset_val;
+};
+
+struct rss_mem_defs {
+ const char *mem_name;
+ const char *type_name;
+ u32 addr; /* In 128b units */
+ u32 num_entries[MAX_CHIP_IDS];
+ u32 entry_width[MAX_CHIP_IDS]; /* In bits */
+};
+
+struct vfc_ram_defs {
+ const char *mem_name;
+ const char *type_name;
+ u32 base_row;
+ u32 num_rows;
+};
+
+struct big_ram_defs {
+ const char *instance_name;
+ enum mem_groups mem_group_id;
+ enum mem_groups ram_mem_group_id;
+ enum dbg_grc_params grc_param;
+ u32 addr_reg_addr;
+ u32 data_reg_addr;
+ u32 num_of_blocks[MAX_CHIP_IDS];
+};
+
+struct phy_defs {
+ const char *phy_name;
+ u32 base_addr;
+ u32 tbus_addr_lo_addr;
+ u32 tbus_addr_hi_addr;
+ u32 tbus_data_lo_addr;
+ u32 tbus_data_hi_addr;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_LCIDS 320
+#define MAX_LTIDS 320
+#define NUM_IOR_SETS 2
+#define IORS_PER_SET 176
+#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
+#define BYTES_IN_DWORD sizeof(u32)
+
+/* In the macros below, size and offset are specified in bits */
+#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
+#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
+#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
+#define FIELD_DWORD_OFFSET(type, field) \
+ (int)(FIELD_BIT_OFFSET(type, field) / 32)
+#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
+#define FIELD_BIT_MASK(type, field) \
+ (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
+ FIELD_DWORD_SHIFT(type, field))
+#define SET_VAR_FIELD(var, type, field, val) \
+ do { \
+ var[FIELD_DWORD_OFFSET(type, field)] &= \
+ (~FIELD_BIT_MASK(type, field)); \
+ var[FIELD_DWORD_OFFSET(type, field)] |= \
+ (val) << FIELD_DWORD_SHIFT(type, field); \
+ } while (0)
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, addr, (arr)[i]); \
+ } while (0)
+#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
+ do { \
+ for (i = 0; i < (arr_size); i++) \
+ (arr)[i] = qed_rd(dev, ptt, addr); \
+ } while (0)
+
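
A self-contained walk-through (not part of the patch) of the bit-field update
that SET_VAR_FIELD performs; DEMO_FIELD_OFFSET/SIZE are made-up stand-ins for
the generated <type>_<field>_OFFSET/SIZE constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up stand-ins for the generated <type>_<field>_OFFSET/SIZE values. */
    #define DEMO_FIELD_OFFSET 36    /* bit 36 -> dword 1, shift 4 */
    #define DEMO_FIELD_SIZE   6

    int main(void)
    {
            uint32_t var[2] = { 0, 0xffffffff };
            int dword = DEMO_FIELD_OFFSET / 32;     /* FIELD_DWORD_OFFSET */
            int shift = DEMO_FIELD_OFFSET % 32;     /* FIELD_DWORD_SHIFT */
            uint32_t mask =
                ((1u << DEMO_FIELD_SIZE) - 1) << shift; /* FIELD_BIT_MASK */

            /* SET_VAR_FIELD(var, DEMO, FIELD, 0x2a): clear the field, set it. */
            var[dword] &= ~mask;
            var[dword] |= (uint32_t)0x2a << shift;

            printf("var[1] = 0x%08x\n", (unsigned)var[1]);  /* 0xfffffeaf */
            return 0;
    }
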
+#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
+#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
+#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
+#define RAM_LINES_TO_BYTES(lines) \
+ DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
+#define REG_DUMP_LEN_SHIFT 24
+#define MEM_DUMP_ENTRY_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
+#define IDLE_CHK_RULE_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
+#define IDLE_CHK_RESULT_HDR_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
+#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+#define IDLE_CHK_MAX_ENTRIES_SIZE 32
+
+/* The sizes and offsets below are specified in bits */
+#define VFC_CAM_CMD_STRUCT_SIZE 64
+#define VFC_CAM_CMD_ROW_OFFSET 48
+#define VFC_CAM_CMD_ROW_SIZE 9
+#define VFC_CAM_ADDR_STRUCT_SIZE 16
+#define VFC_CAM_ADDR_OP_OFFSET 0
+#define VFC_CAM_ADDR_OP_SIZE 4
+#define VFC_CAM_RESP_STRUCT_SIZE 256
+#define VFC_RAM_ADDR_STRUCT_SIZE 16
+#define VFC_RAM_ADDR_OP_OFFSET 0
+#define VFC_RAM_ADDR_OP_SIZE 2
+#define VFC_RAM_ADDR_ROW_OFFSET 2
+#define VFC_RAM_ADDR_ROW_SIZE 10
+#define VFC_RAM_RESP_STRUCT_SIZE 256
+#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
+#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
+#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
+#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
+#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
+#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
+#define NUM_VFC_RAM_TYPES 4
+#define VFC_CAM_NUM_ROWS 512
+#define VFC_OPCODE_CAM_RD 14
+#define VFC_OPCODE_RAM_RD 0
+#define NUM_RSS_MEM_TYPES 5
+#define NUM_BIG_RAM_TYPES 3
+#define BIG_RAM_BLOCK_SIZE_BYTES 128
+#define BIG_RAM_BLOCK_SIZE_DWORDS \
+ BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
+#define NUM_PHY_TBUS_ADDRESSES 2048
+#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
+#define RESET_REG_UNRESET_OFFSET 4
+#define STALL_DELAY_MS 500
+#define STATIC_DEBUG_LINE_DWORDS 9
+#define NUM_DBG_BUS_LINES 256
+#define NUM_COMMON_GLOBAL_PARAMS 8
+#define FW_IMG_MAIN 1
+#define REG_FIFO_DEPTH_ELEMENTS 32
+#define REG_FIFO_ELEMENT_DWORDS 2
+#define REG_FIFO_DEPTH_DWORDS \
+ (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
+#define IGU_FIFO_DEPTH_ELEMENTS 64
+#define IGU_FIFO_ELEMENT_DWORDS 4
+#define IGU_FIFO_DEPTH_DWORDS \
+ (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
+#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
+#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
+#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
+ (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
+ (MCP_REG_SCRATCH + \
+ offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+#define EMPTY_FW_VERSION_STR "???_???_???_???"
+#define EMPTY_FW_IMAGE_STR "???????????????"
+
+/***************************** Constant Arrays *******************************/
+
+/* Debug arrays */
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+
+/* Chip constant definitions array */
+static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
+ { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
+ { "bb_b0",
+ { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
+ { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+};
+
+/* Storm constant definitions array */
+static struct storm_defs s_storm_defs[] = {
+ /* Tstorm */
+ {'T', BLOCK_TSEM,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCT}, true,
+ TSEM_REG_FAST_MEMORY,
+ TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
+ TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ TCM_REG_CTX_RBC_ACCS,
+ 4, TCM_REG_AGG_CON_CTX,
+ 16, TCM_REG_SM_CON_CTX,
+ 2, TCM_REG_AGG_TASK_CTX,
+ 4, TCM_REG_SM_TASK_CTX},
+ /* Mstorm */
+ {'M', BLOCK_MSEM,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCM}, false,
+ MSEM_REG_FAST_MEMORY,
+ MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
+ MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
+ MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
+ MCM_REG_CTX_RBC_ACCS,
+ 1, MCM_REG_AGG_CON_CTX,
+ 10, MCM_REG_SM_CON_CTX,
+ 2, MCM_REG_AGG_TASK_CTX,
+ 7, MCM_REG_SM_TASK_CTX},
+ /* Ustorm */
+ {'U', BLOCK_USEM,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCU}, false,
+ USEM_REG_FAST_MEMORY,
+ USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
+ USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
+ USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
+ UCM_REG_CTX_RBC_ACCS,
+ 2, UCM_REG_AGG_CON_CTX,
+ 13, UCM_REG_SM_CON_CTX,
+ 3, UCM_REG_AGG_TASK_CTX,
+ 3, UCM_REG_SM_TASK_CTX},
+ /* Xstorm */
+ {'X', BLOCK_XSEM,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCX}, false,
+ XSEM_REG_FAST_MEMORY,
+ XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
+ XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
+ XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
+ XCM_REG_CTX_RBC_ACCS,
+ 9, XCM_REG_AGG_CON_CTX,
+ 15, XCM_REG_SM_CON_CTX,
+ 0, 0,
+ 0, 0},
+ /* Ystorm */
+ {'Y', BLOCK_YSEM,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCY}, false,
+ YSEM_REG_FAST_MEMORY,
+ YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
+ YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
+ YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ YCM_REG_CTX_RBC_ACCS,
+ 2, YCM_REG_AGG_CON_CTX,
+ 3, YCM_REG_SM_CON_CTX,
+ 2, YCM_REG_AGG_TASK_CTX,
+ 12, YCM_REG_SM_TASK_CTX},
+ /* Pstorm */
+ {'P', BLOCK_PSEM,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCS}, true,
+ PSEM_REG_FAST_MEMORY,
+ PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
+ PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
+ PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
+ PCM_REG_CTX_RBC_ACCS,
+ 0, 0,
+ 10, PCM_REG_SM_CON_CTX,
+ 0, 0,
+ 0, 0}
+};
+
+/* Block definitions array */
+static struct block_defs block_grc_defs = {
+ "grc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
+ GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
+ GRC_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_UA, 1
+};
+
+static struct block_defs block_miscs_defs = {
+ "miscs", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_misc_defs = {
+ "misc", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_dbu_defs = {
+ "dbu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_pglue_b_defs = {
+ "pglue_b", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
+ PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
+ PGLUE_B_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 1
+};
+
+static struct block_defs block_cnig_defs = {
+ "cnig", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
+ CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
+ CNIG_REG_DBG_FORCE_FRAME_K2,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 0
+};
+
+static struct block_defs block_cpmu_defs = {
+ "cpmu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 8
+};
+
+static struct block_defs block_ncsi_defs = {
+ "ncsi", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
+ NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
+ NCSI_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 5
+};
+
+static struct block_defs block_opte_defs = {
+ "opte", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 4
+};
+
+static struct block_defs block_bmb_defs = {
+ "bmb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
+ BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
+ BMB_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 7
+};
+
+static struct block_defs block_pcie_defs = {
+ "pcie", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
+ PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_mcp_defs = {
+ "mcp", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_mcp2_defs = {
+ "mcp2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
+ MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
+ MCP2_REG_DBG_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_pswhst_defs = {
+ "pswhst", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
+ PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
+ PSWHST_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 0
+};
+
+static struct block_defs block_pswhst2_defs = {
+ "pswhst2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
+ PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
+ PSWHST2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 0
+};
+
+static struct block_defs block_pswrd_defs = {
+ "pswrd", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
+ PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
+ PSWRD_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 2
+};
+
+static struct block_defs block_pswrd2_defs = {
+ "pswrd2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
+ PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
+ PSWRD2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 2
+};
+
+static struct block_defs block_pswwr_defs = {
+ "pswwr", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
+ PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
+ PSWWR_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 3
+};
+
+static struct block_defs block_pswwr2_defs = {
+ "pswwr2", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 3
+};
+
+static struct block_defs block_pswrq_defs = {
+ "pswrq", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
+ PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
+ PSWRQ_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 1
+};
+
+static struct block_defs block_pswrq2_defs = {
+ "pswrq2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
+ PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
+ PSWRQ2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 1
+};
+
+static struct block_defs block_pglcs_defs = {
+ "pglcs", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
+ PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
+ PGLCS_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 2
+};
+
+static struct block_defs block_ptu_defs = {
+ "ptu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
+ PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
+ PTU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
+};
+
+static struct block_defs block_dmae_defs = {
+ "dmae", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
+ DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
+ DMAE_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
+};
+
+static struct block_defs block_tcm_defs = {
+ "tcm", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
+ TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
+ TCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
+};
+
+static struct block_defs block_mcm_defs = {
+ "mcm", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
+ MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
+ MCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
+};
+
+static struct block_defs block_ucm_defs = {
+ "ucm", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
+ UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
+ UCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
+};
+
+static struct block_defs block_xcm_defs = {
+ "xcm", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
+ XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
+ XCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
+};
+
+static struct block_defs block_ycm_defs = {
+ "ycm", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
+ YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
+ YCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
+};
+
+static struct block_defs block_pcm_defs = {
+ "pcm", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
+ PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
+ PCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
+};
+
+static struct block_defs block_qm_defs = {
+ "qm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
+ QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
+ QM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
+};
+
+static struct block_defs block_tm_defs = {
+ "tm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
+ TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
+ TM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
+};
+
+static struct block_defs block_dorq_defs = {
+ "dorq", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
+ DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
+ DORQ_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
+};
+
+static struct block_defs block_brb_defs = {
+ "brb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
+ BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
+ BRB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
+};
+
+static struct block_defs block_src_defs = {
+ "src", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
+ SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
+ SRC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
+};
+
+static struct block_defs block_prs_defs = {
+ "prs", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
+ PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
+ PRS_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
+};
+
+static struct block_defs block_tsdm_defs = {
+ "tsdm", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
+ TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
+ TSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
+};
+
+static struct block_defs block_msdm_defs = {
+ "msdm", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
+ MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
+ MSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
+};
+
+static struct block_defs block_usdm_defs = {
+ "usdm", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
+ USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
+ USDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
+};
+
+static struct block_defs block_xsdm_defs = {
+ "xsdm", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
+ XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
+ XSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
+};
+
+static struct block_defs block_ysdm_defs = {
+ "ysdm", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
+ YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
+ YSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
+};
+
+static struct block_defs block_psdm_defs = {
+ "psdm", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
+ PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
+ PSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
+};
+
+static struct block_defs block_tsem_defs = {
+ "tsem", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
+ TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
+ TSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
+};
+
+static struct block_defs block_msem_defs = {
+ "msem", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
+ MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
+ MSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
+};
+
+static struct block_defs block_usem_defs = {
+ "usem", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
+ USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
+ USEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
+};
+
+static struct block_defs block_xsem_defs = {
+ "xsem", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
+ XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
+ XSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
+};
+
+static struct block_defs block_ysem_defs = {
+ "ysem", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
+ YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
+ YSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
+};
+
+static struct block_defs block_psem_defs = {
+ "psem", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
+ PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
+ PSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
+};
+
+static struct block_defs block_rss_defs = {
+ "rss", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
+ RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
+ RSS_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
+};
+
+static struct block_defs block_tmld_defs = {
+ "tmld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
+ TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
+ TMLD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
+};
+
+static struct block_defs block_muld_defs = {
+ "muld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
+ MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
+ MULD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
+};
+
+static struct block_defs block_yuld_defs = {
+ "yuld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
+ YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
+ YULD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
+};
+
+static struct block_defs block_xyld_defs = {
+ "xyld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
+ XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
+ XYLD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
+};
+
+static struct block_defs block_prm_defs = {
+ "prm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
+ PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
+ PRM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
+};
+
+static struct block_defs block_pbf_pb1_defs = {
+ "pbf_pb1", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
+ PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
+ PBF_PB1_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 11
+};
+
+static struct block_defs block_pbf_pb2_defs = {
+ "pbf_pb2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
+ PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
+ PBF_PB2_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 12
+};
+
+static struct block_defs block_rpb_defs = {
+ "rpb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
+ RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
+ RPB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
+};
+
+static struct block_defs block_btb_defs = {
+ "btb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
+ BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
+ BTB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
+};
+
+static struct block_defs block_pbf_defs = {
+ "pbf", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
+ PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
+ PBF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
+};
+
+static struct block_defs block_rdif_defs = {
+ "rdif", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
+ RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
+ RDIF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
+};
+
+static struct block_defs block_tdif_defs = {
+ "tdif", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
+ TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
+ TDIF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
+};
+
+static struct block_defs block_cdu_defs = {
+ "cdu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
+ CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
+ CDU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
+};
+
+static struct block_defs block_ccfc_defs = {
+ "ccfc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
+ CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
+ CCFC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
+};
+
+static struct block_defs block_tcfc_defs = {
+ "tcfc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
+ TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
+ TCFC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
+};
+
+static struct block_defs block_igu_defs = {
+ "igu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
+ IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
+ IGU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
+};
+
+static struct block_defs block_cau_defs = {
+ "cau", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
+ CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
+ CAU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
+};
+
+static struct block_defs block_umac_defs = {
+ "umac", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
+ UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
+ UMAC_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 6
+};
+
+static struct block_defs block_xmac_defs = {
+ "xmac", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_dbg_defs = {
+ "dbg", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
+};
+
+static struct block_defs block_nig_defs = {
+ "nig", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
+ NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
+ NIG_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
+};
+
+static struct block_defs block_wol_defs = {
+ "wol", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
+ WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
+ WOL_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
+};
+
+static struct block_defs block_bmbn_defs = {
+ "bmbn", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+ BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
+ BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
+ BMBN_REG_DBG_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ipc_defs = {
+ "ipc", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 8
+};
+
+static struct block_defs block_nwm_defs = {
+ "nwm", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
+ NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
+ NWM_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
+};
+
+static struct block_defs block_nws_defs = {
+ "nws", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 12
+};
+
+static struct block_defs block_ms_defs = {
+ "ms", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 13
+};
+
+static struct block_defs block_phy_pcie_defs = {
+ "phy_pcie", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
+ PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_led_defs = {
+ "led", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_misc_aeu_defs = {
+ "misc_aeu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_bar0_map_defs = {
+ "bar0_map", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
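+/* s_block_defs maps enum block_id to the per-block descriptors above. Each
+ * descriptor holds: name, per-chip debug bus support flags, Storm association
+ * (and Storm ID), per-chip debug bus client IDs, the five debug bus control
+ * registers (select, dword-enable, shift, force-valid, force-frame), and
+ * reset info (has_reset_bit, unreset, reset register ID and bit offset).
+ */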
+static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
+ &block_grc_defs,
+ &block_miscs_defs,
+ &block_misc_defs,
+ &block_dbu_defs,
+ &block_pglue_b_defs,
+ &block_cnig_defs,
+ &block_cpmu_defs,
+ &block_ncsi_defs,
+ &block_opte_defs,
+ &block_bmb_defs,
+ &block_pcie_defs,
+ &block_mcp_defs,
+ &block_mcp2_defs,
+ &block_pswhst_defs,
+ &block_pswhst2_defs,
+ &block_pswrd_defs,
+ &block_pswrd2_defs,
+ &block_pswwr_defs,
+ &block_pswwr2_defs,
+ &block_pswrq_defs,
+ &block_pswrq2_defs,
+ &block_pglcs_defs,
+ &block_dmae_defs,
+ &block_ptu_defs,
+ &block_tcm_defs,
+ &block_mcm_defs,
+ &block_ucm_defs,
+ &block_xcm_defs,
+ &block_ycm_defs,
+ &block_pcm_defs,
+ &block_qm_defs,
+ &block_tm_defs,
+ &block_dorq_defs,
+ &block_brb_defs,
+ &block_src_defs,
+ &block_prs_defs,
+ &block_tsdm_defs,
+ &block_msdm_defs,
+ &block_usdm_defs,
+ &block_xsdm_defs,
+ &block_ysdm_defs,
+ &block_psdm_defs,
+ &block_tsem_defs,
+ &block_msem_defs,
+ &block_usem_defs,
+ &block_xsem_defs,
+ &block_ysem_defs,
+ &block_psem_defs,
+ &block_rss_defs,
+ &block_tmld_defs,
+ &block_muld_defs,
+ &block_yuld_defs,
+ &block_xyld_defs,
+ &block_prm_defs,
+ &block_pbf_pb1_defs,
+ &block_pbf_pb2_defs,
+ &block_rpb_defs,
+ &block_btb_defs,
+ &block_pbf_defs,
+ &block_rdif_defs,
+ &block_tdif_defs,
+ &block_cdu_defs,
+ &block_ccfc_defs,
+ &block_tcfc_defs,
+ &block_igu_defs,
+ &block_cau_defs,
+ &block_umac_defs,
+ &block_xmac_defs,
+ &block_dbg_defs,
+ &block_nig_defs,
+ &block_wol_defs,
+ &block_bmbn_defs,
+ &block_ipc_defs,
+ &block_nwm_defs,
+ &block_nws_defs,
+ &block_ms_defs,
+ &block_phy_pcie_defs,
+ &block_led_defs,
+ &block_misc_aeu_defs,
+ &block_bar0_map_defs,
+};
+
+static struct platform_defs s_platform_defs[] = {
+ {"asic", 1},
+ {"reserved", 0},
+ {"reserved2", 0},
+ {"reserved3", 0}
+};
+
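+/* Entry format (per struct grc_param_defs): {per-chip default value}, min,
+ * max, is_preset flag, and the values applied by the EXCLUDE_ALL and CRASH
+ * preset params, respectively.
+ */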
+static struct grc_param_defs s_grc_param_defs[] = {
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
+ {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
+ {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
+ {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */
+};
+
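+/* Each entry: memory name, type name, base RAM address, per-chip number of
+ * entries, and per-chip entry width in bits.
+ */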
+static struct rss_mem_defs s_rss_mem_defs[] = {
+ { "rss_mem_cid", "rss_cid", 0,
+ {256, 256, 320},
+ {32, 32, 32} },
+ { "rss_mem_key_msb", "rss_key", 1024,
+ {128, 128, 208},
+ {256, 256, 256} },
+ { "rss_mem_key_lsb", "rss_key", 2048,
+ {128, 128, 208},
+ {64, 64, 64} },
+ { "rss_mem_info", "rss_info", 3072,
+ {128, 128, 208},
+ {16, 16, 16} },
+ { "rss_mem_ind", "rss_ind", 4096,
+ {(128 * 128), (128 * 128), (128 * 208)},
+ {16, 16, 16} }
+};
+
+static struct vfc_ram_defs s_vfc_ram_defs[] = {
+ {"vfc_ram_tt1", "vfc_ram", 0, 512},
+ {"vfc_ram_mtt2", "vfc_ram", 512, 128},
+ {"vfc_ram_stt2", "vfc_ram", 640, 32},
+ {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
+};
+
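+/* Each entry: instance name, memory and RAM mem-group IDs, the GRC param
+ * controlling its inclusion, the address/data register pair used to read it,
+ * and the per-chip number of blocks.
+ */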
+static struct big_ram_defs s_big_ram_defs[] = {
+ { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
+ BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
+ {4800, 4800, 5632} },
+ { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
+ BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
+ {2880, 2880, 3680} },
+ { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
+ BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
+ {1152, 1152, 1152} }
+};
+
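+/* Each entry: reset register address, value to OR in when taking blocks out
+ * of reset, and per-chip existence flags.
+ */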
+static struct reset_reg_defs s_reset_regs_defs[] = {
+ { MISCS_REG_RESET_PL_UA, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ { MISCS_REG_RESET_PL_HV, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
+ { MISCS_REG_RESET_PL_HV_2, 0x0,
+ {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ { MISC_REG_RESET_PL_UA, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ { MISC_REG_RESET_PL_HV, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+};
+
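+/* Each entry: PHY name, base address, and the tbus address low/high and
+ * data low/high register offsets (relative to the base address).
+ */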
+static struct phy_defs s_phy_defs[] = {
+ {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
+ {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+};
+
+/**************************** Private Functions ******************************/
+
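+/* Note: most dump functions below accept a 'dump' flag. When it is false,
+ * they only return the size (in dwords) that an actual dump would occupy,
+ * without writing to the buffer or accessing the HW. This lets callers size
+ * their buffers in a first pass before performing the real dump.
+ */
+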
+/* Reads and returns a single dword from the specified unaligned buffer */
+static u32 qed_read_unaligned_dword(u8 *buf)
+{
+ u32 dword;
+
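+ /* memcpy copes with buffers that are not dword-aligned, which a
+ * direct u32 load might not on some architectures
+ */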
+ memcpy((u8 *)&dword, buf, sizeof(dword));
+ return dword;
+}
+
+/* Initializes debug data for the specified device */
+static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ if (dev_data->initialized)
+ return DBG_STATUS_OK;
+
+ if (QED_IS_K2(p_hwfn->cdev)) {
+ dev_data->chip_id = CHIP_K2;
+ dev_data->mode_enable[MODE_K2] = 1;
+ } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
+ dev_data->chip_id = CHIP_BB_B0;
+ dev_data->mode_enable[MODE_BB_B0] = 1;
+ } else {
+ return DBG_STATUS_UNKNOWN_CHIP;
+ }
+
+ dev_data->platform_id = PLATFORM_ASIC;
+ dev_data->mode_enable[MODE_ASIC] = 1;
+ dev_data->initialized = true;
+ return DBG_STATUS_OK;
+}
+
+/* Reads the FW info structure for the specified Storm from the chip,
+ * and writes it to the specified fw_info pointer.
+ */
+static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 storm_id, struct fw_info *fw_info)
+{
+ /* First read the address that points to the fw_info location.
+ * The address is located in the last line of the Storm RAM.
+ */
+ u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_INT_RAM +
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(struct fw_info_location);
+ struct fw_info_location fw_info_location;
+ u32 *dest = (u32 *)&fw_info_location;
+ u32 i;
+
+ memset(&fw_info_location, 0, sizeof(fw_info_location));
+ memset(fw_info, 0, sizeof(*fw_info));
+ for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
+ i++, addr += BYTES_IN_DWORD)
+ dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+ if (fw_info_location.size > 0 && fw_info_location.size <=
+ sizeof(*fw_info)) {
+ /* Read FW version info from Storm RAM */
+ addr = fw_info_location.grc_addr;
+ dest = (u32 *)fw_info;
+ for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
+ i++, addr += BYTES_IN_DWORD)
+ dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+ }
+}
+
+/* Dumps the specified string to the specified buffer. Returns the dumped size
+ * in bytes (string length + 1 for the terminating null character).
+ */
+static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
+{
+ if (dump)
+ strcpy(dump_buf, str);
+ return (u32)strlen(str) + 1;
+}
+
+/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
+ * in bytes.
+ */
+static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
+{
+ u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
+
+ align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
+
+ if (dump && align_size)
+ memset(dump_buf, 0, align_size);
+ return align_size;
+}
+
+/* Writes the specified string param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_str_param(u32 *dump_buf,
+ bool dump,
+ const char *param_name, const char *param_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0;
+
+ /* Dump param name */
+ offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+ /* Indicate a string param value */
+ if (dump)
+ *(char_buf + offset) = 1;
+ offset++;
+
+ /* Dump param value */
+ offset += qed_dump_str(char_buf + offset, dump, param_val);
+
+ /* Align buffer to next dword */
+ offset += qed_dump_align(char_buf + offset, dump, offset);
+ return BYTES_TO_DWORDS(offset);
+}
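+
+/* For example, dumping the string param ("chip", "bb_b0") emits
+ * "chip\0" <0x01> "bb_b0\0" plus zero padding: 12 bytes = 3 dwords.
+ * The byte following the name marks the value type (1 = string, 0 = numeric).
+ */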
+
+/* Writes the specified numeric param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_num_param(u32 *dump_buf,
+ bool dump, const char *param_name, u32 param_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0;
+
+ /* Dump param name */
+ offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+ /* Indicate a numeric param value */
+ if (dump)
+ *(char_buf + offset) = 0;
+ offset++;
+
+ /* Align buffer to next dword */
+ offset += qed_dump_align(char_buf + offset, dump, offset);
+
+ /* Dump param value (and change offset from bytes to dwords) */
+ offset = BYTES_TO_DWORDS(offset);
+ if (dump)
+ *(dump_buf + offset) = param_val;
+ offset++;
+ return offset;
+}
+
+/* Reads the FW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
+ char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
+ struct fw_info fw_info = { {0}, {0} };
+ int printed_chars;
+ u32 offset = 0;
+
+ if (dump) {
+ /* Read FW image/version from PRAM in a non-reset SEMI */
+ bool found = false;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
+ storm_id++) {
+ /* Read FW version/image */
+ if (!dev_data->block_in_reset
+ [s_storm_defs[storm_id].block_id]) {
+ /* read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn,
+ p_ptt, storm_id, &fw_info);
+
+ /* Create FW version/image strings */
+ printed_chars =
+ snprintf(fw_ver_str,
+ sizeof(fw_ver_str),
+ "%d_%d_%d_%d",
+ fw_info.ver.num.major,
+ fw_info.ver.num.minor,
+ fw_info.ver.num.rev,
+ fw_info.ver.num.eng);
+ if (printed_chars < 0 || printed_chars >=
+ sizeof(fw_ver_str))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid FW version string\n");
+ switch (fw_info.ver.image_id) {
+ case FW_IMG_MAIN:
+ strcpy(fw_img_str, "main");
+ break;
+ default:
+ strcpy(fw_img_str, "unknown");
+ break;
+ }
+
+ found = true;
+ }
+ }
+ }
+
+ /* Dump FW version, image and timestamp */
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "fw-version", fw_ver_str);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "fw-image", fw_img_str);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "fw-timestamp", fw_info.ver.timestamp);
+ return offset;
+}
+
+/* Reads the MFW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
+
+ if (dump) {
+ u32 global_section_offsize, global_section_addr, mfw_ver;
+ u32 public_data_addr, global_section_offsize_addr;
+ int printed_chars;
+
+ /* Find MCP public data GRC address.
+ * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
+ */
+ public_data_addr = qed_rd(p_hwfn, p_ptt,
+ MISC_REG_SHARED_MEM_ADDR) |
+ MCP_REG_SCRATCH;
+
+ /* Find MCP public global section offset */
+ global_section_offsize_addr = public_data_addr +
+ offsetof(struct mcp_public_data,
+ sections) +
+ sizeof(offsize_t) * PUBLIC_GLOBAL;
+ global_section_offsize = qed_rd(p_hwfn, p_ptt,
+ global_section_offsize_addr);
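+ /* The offset field of the offsize is in dwords, relative to the
+ * MCP scratchpad - hence the mask and the multiplication by 4 below.
+ */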
+ global_section_addr = MCP_REG_SCRATCH +
+ (global_section_offsize &
+ OFFSIZE_OFFSET_MASK) * 4;
+
+ /* Read MFW version from MCP public global section */
+ mfw_ver = qed_rd(p_hwfn, p_ptt,
+ global_section_addr +
+ offsetof(struct public_global, mfw_ver));
+
+ /* Dump MFW version param */
+ printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
+ "%d_%d_%d_%d",
+ (u8) (mfw_ver >> 24),
+ (u8) (mfw_ver >> 16),
+ (u8) (mfw_ver >> 8),
+ (u8) mfw_ver);
+ if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid MFW version string\n");
+ }
+
+ return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
+}
+
+/* Writes a section header to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_section_hdr(u32 *dump_buf,
+ bool dump, const char *name, u32 num_params)
+{
+ return qed_dump_num_param(dump_buf, dump, name, num_params);
+}
+
+/* Writes the common global params to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u8 num_specific_global_params)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0;
+
+ /* Find platform string and dump global params section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump,
+ "global_params",
+ NUM_COMMON_GLOBAL_PARAMS +
+ num_specific_global_params);
+
+ /* Store params */
+ offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_mfw_ver_param(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "tools-version", TOOLS_VERSION);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "chip",
+ s_chip_defs[dev_data->chip_id].name);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "platform",
+ s_platform_defs[dev_data->platform_id].
+ name);
+ offset +=
+ qed_dump_num_param(dump_buf + offset, dump, "pci-func",
+ p_hwfn->abs_pf_id);
+ return offset;
+}
+
+/* Writes the last section to the specified buffer at the given offset.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
+{
+ u32 start_offset = offset, crc = ~0;
+
+ /* Dump CRC section header */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+
+ /* Calculate CRC32 and add it to the dword following the "last" section.
+ */
+ if (dump)
+ *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
+ DWORDS_TO_BYTES(offset));
+ offset++;
+ return offset - start_offset;
+}
+
+/* Updates the blocks' reset state based on the chip's reset registers */
+static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
+ u32 i;
+
+ /* Read reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++)
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id])
+ reg_val[i] = qed_rd(p_hwfn,
+ p_ptt, s_reset_regs_defs[i].addr);
+
+ /* Check if blocks are in reset */
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ dev_data->block_in_reset[i] =
+ s_block_defs[i]->has_reset_bit &&
+ !(reg_val[s_block_defs[i]->reset_reg] &
+ BIT(s_block_defs[i]->reset_bit_offset));
+}
+
+/* Enable / disable the Debug block */
+static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool enable)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
+}
+
+/* Resets the Debug block */
+static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+
+ dbg_reset_reg_addr =
+ s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
+ old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
+ new_reset_reg_val = old_reset_reg_val &
+ ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
+
+ qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
+ qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
+}
+
+static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum dbg_bus_frame_modes mode)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
+}
+
+/* Enable / disable Debug Bus clients according to the specified mask.
+ * (1 = enable, 0 = disable)
+ */
+static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 client_mask)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
+}
+
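+/* Recursively evaluates the mode tree starting at *modes_buf_offset. The
+ * tree is stored in prefix notation: each byte is either an operator
+ * (NOT / OR / AND) or a leaf holding a mode index. *modes_buf_offset is
+ * advanced past the evaluated sub-tree. Returns the evaluation result.
+ */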
+static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+{
+ const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
+ bool arg1, arg2;
+
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return !qed_is_mode_match(p_hwfn, modes_buf_offset);
+ case INIT_MODE_OP_OR:
+ case INIT_MODE_OP_AND:
+ arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
+ arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
+ return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
+ arg2) : (arg1 && arg2);
+ default:
+ return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
+ }
+}
+
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return dev_data->grc.param_val[grc_param];
+}
+
+/* Clear all GRC params */
+static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_set_by_user[i] = 0;
+}
+
+/* Assign default GRC param values */
+static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ if (!dev_data->grc.param_set_by_user[i])
+ dev_data->grc.param_val[i] =
+ s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
+/* Returns true if the specified entity (indicated by GRC param) should be
+ * included in the dump, false otherwise.
+ */
+static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ return qed_grc_get_param(p_hwfn, grc_param) > 0;
+}
+
+/* Returns true if the specified Storm should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
+ enum dbg_storms storm)
+{
+ return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
+}
+
+/* Returns true if the specified memory should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, u8 mem_group_id)
+{
+ u8 i;
+
+ /* Check Storm match */
+ if (s_block_defs[block_id]->associated_to_storm &&
+ !qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)s_block_defs[block_id]->storm_id))
+ return false;
+
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+ if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
+ mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
+ return qed_grc_is_included(p_hwfn,
+ s_big_ram_defs[i].grc_param);
+ if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
+ MEM_GROUP_PXP_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
+ if (mem_group_id == MEM_GROUP_RAM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
+ if (mem_group_id == MEM_GROUP_PBUF)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
+ if (mem_group_id == MEM_GROUP_CAU_MEM ||
+ mem_group_id == MEM_GROUP_CAU_SB ||
+ mem_group_id == MEM_GROUP_CAU_PI)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
+ if (mem_group_id == MEM_GROUP_QM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
+ mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+ if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
+ MEM_GROUP_IGU_MSIX)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
+ if (mem_group_id == MEM_GROUP_MULD_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
+ if (mem_group_id == MEM_GROUP_PRS_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
+ if (mem_group_id == MEM_GROUP_DMAE_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
+ if (mem_group_id == MEM_GROUP_TM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
+ if (mem_group_id == MEM_GROUP_SDM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
+ if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
+ MEM_GROUP_RDIF_CTX)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
+ if (mem_group_id == MEM_GROUP_CM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
+ if (mem_group_id == MEM_GROUP_IOR)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
+
+ return true;
+}
+
+/* Stalls / unstalls all Storms that are included in the dump */
+static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool stall)
+{
+ u8 reg_val = stall ? 1 : 0;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id)) {
+ u32 reg_addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALL_0;
+
+ qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
+ }
+ }
+
+ msleep(STALL_DELAY_MS);
+}
+
+/* Takes all blocks out of reset */
+static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
+ u32 i;
+
+ /* Fill reset regs values */
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
+ reg_val[s_block_defs[i]->reset_reg] |=
+ BIT(s_block_defs[i]->reset_bit_offset);
+
+ /* Write reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ reg_val[i] |= s_reset_regs_defs[i].unreset_val;
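+ /* Writing the bits to the companion register at
+ * addr + RESET_REG_UNRESET_OFFSET takes the
+ * corresponding blocks out of reset.
+ */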
+ if (reg_val[i])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_reset_regs_defs[i].addr +
+ RESET_REG_UNRESET_OFFSET, reg_val[i]);
+ }
+ }
+}
+
+/* Returns the attention name offsets of the specified block */
+static const struct dbg_attn_block_type_data *
+qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
+{
+ const struct dbg_attn_block *base_attn_block_arr =
+ (const struct dbg_attn_block *)
+ s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
+
+ return &base_attn_block_arr[block_id].per_type_data[attn_type];
+}
+
+/* Returns the attention registers of the specified block */
+static const struct dbg_attn_reg *
+qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
+ u8 *num_attn_regs)
+{
+ const struct dbg_attn_block_type_data *block_type_data =
+ qed_get_block_attn_data(block_id, attn_type);
+
+ *num_attn_regs = block_type_data->num_regs;
+ return &((const struct dbg_attn_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
+ regs_offset];
+}
+
+/* For each block, clear the status of all parities */
+static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 reg_idx, num_attn_regs;
+ u32 block_id;
+
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_attn_reg *attn_reg_arr;
+
+ if (dev_data->block_in_reset[block_id])
+ continue;
+
+ attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ ATTN_TYPE_PARITY,
+ &num_attn_regs);
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data =
+ &attn_reg_arr[reg_idx];
+
+ /* Check mode */
+ bool eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ u16 modes_buf_offset =
+ GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ /* Mode match - read parity status read-clear
+ * register.
+ */
+ qed_rd(p_hwfn, p_ptt,
+ DWORDS_TO_BYTES(reg_data->
+ sts_clr_address));
+ }
+ }
+}
+
+/* Dumps GRC registers section header. Returns the dumped size in dwords.
+ * The following parameters are dumped:
+ * - 'count' = num_dumped_entries
+ * - 'split' = split_type
+ * - 'id' = split_id (dumped only if split_id >= 0)
+ * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
+ * param_val != NULL)
+ */
+static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
+ bool dump,
+ u32 num_reg_entries,
+ const char *split_type,
+ int split_id,
+ const char *param_name, const char *param_val)
+{
+ u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
+ u32 offset = 0;
+
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "grc_regs", num_params);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "count", num_reg_entries);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "split", split_type);
+ if (split_id >= 0)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "id", split_id);
+ if (param_name && param_val)
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, param_name, param_val);
+ return offset;
+}
+
+/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
+{
+ u32 offset = 0, i;
+
+ if (dump) {
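+ /* Entry header dword: GRC address (in dwords) in the low
+ * bits, length in dwords starting at REG_DUMP_LEN_SHIFT.
+ */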
+ *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
+ for (i = 0; i < len; i++, addr++, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(addr));
+ } else {
+ offset += len + 1;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC register entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_regs_arr,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ u32 *num_dumped_reg_entries)
+{
+ u32 i, offset = 0, input_offset = 0;
+ bool mode_match = true;
+
+ *num_dumped_reg_entries = 0;
+ while (input_offset < input_regs_arr.size_in_dwords) {
+ const struct dbg_dump_cond_hdr *cond_hdr =
+ (const struct dbg_dump_cond_hdr *)
+ &input_regs_arr.ptr[input_offset++];
+ bool eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ /* Check mode/block */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match && block_enable[cond_hdr->block_id]) {
+ for (i = 0; i < cond_hdr->data_size;
+ i++, input_offset++) {
+ const struct dbg_dump_reg *reg =
+ (const struct dbg_dump_reg *)
+ &input_regs_arr.ptr[input_offset];
+
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+ dump_buf + offset, dump,
+ GET_FIELD(reg->data,
+ DBG_DUMP_REG_ADDRESS),
+ GET_FIELD(reg->data,
+ DBG_DUMP_REG_LENGTH));
+ (*num_dumped_reg_entries)++;
+ }
+ } else {
+ input_offset += cond_hdr->data_size;
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps one split of GRC register entries, preceded by a section header.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_regs_arr,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ const char *split_type_name,
+ u32 split_id,
+ const char *param_name,
+ const char *param_val)
+{
+ u32 num_dumped_reg_entries, offset;
+
+ /* Calculate register dump header size (and skip it for now) */
+ offset = qed_grc_dump_regs_hdr(dump_buf,
+ false,
+ 0,
+ split_type_name,
+ split_id, param_name, param_val);
+
+ /* Dump registers */
+ offset += qed_grc_dump_regs_entries(p_hwfn,
+ p_ptt,
+ input_regs_arr,
+ dump_buf + offset,
+ dump,
+ block_enable,
+ &num_dumped_reg_entries);
+
+ /* Write register dump header */
+ if (dump && num_dumped_reg_entries > 0)
+ qed_grc_dump_regs_hdr(dump_buf,
+ dump,
+ num_dumped_reg_entries,
+ split_type_name,
+ split_id, param_name, param_val);
+
+ return num_dumped_reg_entries > 0 ? offset : 0;
+}
+
+/* Dumps registers according to the input registers array.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ const char *param_name, const char *param_val)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, input_offset = 0;
+ u8 port_id, pf_id;
+
+ if (dump)
+ DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
+ const struct dbg_dump_split_hdr *split_hdr =
+ (const struct dbg_dump_split_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
+ u8 split_type_id = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ u32 split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ struct dbg_array curr_input_regs_arr = {
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
+ split_data_size};
+
+ switch (split_type_id) {
+ case SPLIT_TYPE_NONE:
+ case SPLIT_TYPE_VF:
+ offset += qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump,
+ block_enable,
+ "eng",
+ (u32)(-1),
+ param_name,
+ param_val);
+ break;
+ case SPLIT_TYPE_PORT:
+ for (port_id = 0;
+ port_id <
+ s_chip_defs[dev_data->chip_id].
+ per_platform[dev_data->platform_id].num_ports;
+ port_id++) {
+ if (dump)
+ qed_port_pretend(p_hwfn, p_ptt,
+ port_id);
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "port", port_id,
+ param_name,
+ param_val);
+ }
+ break;
+ case SPLIT_TYPE_PF:
+ case SPLIT_TYPE_PORT_PF:
+ for (pf_id = 0;
+ pf_id <
+ s_chip_defs[dev_data->chip_id].
+ per_platform[dev_data->platform_id].num_pfs;
+ pf_id++) {
+ if (dump)
+ qed_fid_pretend(p_hwfn, p_ptt, pf_id);
+ offset += qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "pf", pf_id, param_name,
+ param_val);
+ }
+ break;
+ default:
+ break;
+ }
+
+ input_offset += split_data_size;
+ }
+
+ /* Pretend to original PF */
+ if (dump)
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ return offset;
+}
+
+/* Dump reset registers. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i, offset = 0, num_regs = 0;
+
+ /* Calculate header size */
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, "eng", -1, NULL, NULL);
+
+ /* Write reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+ dump_buf + offset, dump,
+ BYTES_TO_DWORDS(s_reset_regs_defs[i].addr),
+ 1);
+ num_regs++;
+ }
+ }
+
+ /* Write header */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true, num_regs, "eng", -1, NULL, NULL);
+ return offset;
+}
+
+/* Dump registers that are modified during GRC Dump and therefore must be dumped
+ * first. Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, num_reg_entries = 0, block_id;
+ u8 storm_id, reg_idx, num_attn_regs;
+
+ /* Calculate header size */
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, "eng", -1, NULL, NULL);
+
+ /* Write parity registers */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_attn_reg *attn_reg_arr;
+
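+ /* When only sizing (!dump), blocks in reset are included as
+ * well, so that the worst-case buffer size is returned.
+ */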
+ if (dev_data->block_in_reset[block_id] && dump)
+ continue;
+
+ attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ ATTN_TYPE_PARITY,
+ &num_attn_regs);
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data =
+ &attn_reg_arr[reg_idx];
+ u16 modes_buf_offset;
+ bool eval_mode;
+
+ /* Check mode */
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset =
+ GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
+ /* Mode match - read and dump registers */
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ reg_data->mask_address,
+ 1);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS),
+ 1);
+ num_reg_entries += 2;
+ }
+ }
+ }
+
+ /* Write storm stall status registers */
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
+ dump)
+ continue;
+
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS(s_storm_defs[storm_id].
+ sem_fast_mem_addr +
+ SEM_FAST_REG_STALLED),
+ 1);
+ num_reg_entries++;
+ }
+
+ /* Write header */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true,
+ num_reg_entries, "eng", -1, NULL, NULL);
+ return offset;
+}
+
+/* Dumps a GRC memory header (section and params).
+ * The following parameters are dumped:
+ * name - name is dumped only if it's not NULL.
+ * addr - byte_addr is dumped only if name is NULL.
+ * len - dword_len is always dumped.
+ * width - bit_width is dumped if it's not zero.
+ * packed - packed=1 is dumped only if packed is true.
+ * mem_group - mem_group is always dumped.
+ * is_storm - true only if the memory is related to a Storm.
+ * storm_letter - storm letter (valid only if is_storm is true).
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 byte_addr,
+ u32 dword_len,
+ u32 bit_width,
+ bool packed,
+ const char *mem_group,
+ bool is_storm, char storm_letter)
+{
+ u8 num_params = 3;
+ u32 offset = 0;
+ char buf[64];
+
+ if (!dword_len)
+ DP_NOTICE(p_hwfn,
+ "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
+ if (bit_width)
+ num_params++;
+ if (packed)
+ num_params++;
+
+ /* Dump section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "grc_mem", num_params);
+ if (name) {
+ /* Dump name */
+ if (is_storm) {
+ strcpy(buf, "?STORM_");
+ buf[0] = storm_letter;
+ strcpy(buf + strlen(buf), name);
+ } else {
+ strcpy(buf, name);
+ }
+
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "name", buf);
+ if (dump)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers from %s...\n",
+ dword_len, buf);
+ } else {
+ /* Dump address */
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "addr", byte_addr);
+ if (dump && dword_len > 64)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers from address 0x%x...\n",
+ dword_len, byte_addr);
+ }
+
+ /* Dump len */
+ offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+
+ /* Dump bit width */
+ if (bit_width)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "width", bit_width);
+
+ /* Dump packed */
+ if (packed)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "packed", 1);
+
+ /* Dump reg type */
+ if (is_storm) {
+ strcpy(buf, "?STORM_");
+ buf[0] = storm_letter;
+ strcpy(buf + strlen(buf), mem_group);
+ } else {
+ strcpy(buf, mem_group);
+ }
+
+ offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
+ return offset;
+}
+
+/* Dumps a single GRC memory. If name is NULL, the memory is identified by
+ * its address rather than a name. Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 byte_addr,
+ u32 dword_len,
+ u32 bit_width,
+ bool packed,
+ const char *mem_group,
+ bool is_storm, char storm_letter)
+{
+ u32 offset = 0;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ name,
+ byte_addr,
+ dword_len,
+ bit_width,
+ packed,
+ mem_group, is_storm, storm_letter);
+ if (dump) {
+ u32 i;
+
+ for (i = 0; i < dword_len;
+ i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ } else {
+ offset += dword_len;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC memory entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_mems_arr,
+ u32 *dump_buf, bool dump)
+{
+ u32 i, offset = 0, input_offset = 0;
+ bool mode_match = true;
+
+ while (input_offset < input_mems_arr.size_in_dwords) {
+ const struct dbg_dump_cond_hdr *cond_hdr;
+ u32 num_entries;
+ bool eval_mode;
+
+ cond_hdr = (const struct dbg_dump_cond_hdr *)
+ &input_mems_arr.ptr[input_offset++];
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ /* Check required mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (!mode_match) {
+ input_offset += cond_hdr->data_size;
+ continue;
+ }
+
+ num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
+ for (i = 0; i < num_entries;
+ i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
+ const struct dbg_dump_mem *mem =
+ (const struct dbg_dump_mem *)
+ &input_mems_arr.ptr[input_offset];
+ u8 mem_group_id;
+
+ mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
+ if (mem_group_id >= MEM_GROUPS_NUM) {
+ DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
+ return 0;
+ }
+
+ if (qed_grc_is_mem_included(p_hwfn,
+ (enum block_id)cond_hdr->block_id,
+ mem_group_id)) {
+ u32 mem_byte_addr =
+ DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_ADDRESS));
+ u32 mem_len = GET_FIELD(mem->dword1,
+ DBG_DUMP_MEM_LENGTH);
+ char storm_letter = 'a';
+ bool is_storm = false;
+
+ /* Update memory length for CCFC/TCFC memories
+ * according to number of LCIDs/LTIDs.
+ */
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+ mem_len = qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS)
+ * (mem_len / MAX_LCIDS);
+ else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ mem_len = qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS)
+ * (mem_len / MAX_LTIDS);
+
+ /* If memory is associated with Storm, update
+ * Storm details.
+ */
+ if (s_block_defs[cond_hdr->block_id]->
+ associated_to_storm) {
+ is_storm = true;
+ storm_letter =
+ s_storm_defs[s_block_defs[
+ cond_hdr->block_id]->
+ storm_id].letter;
+ }
+
+ /* Dump memory */
+ offset += qed_grc_dump_mem(p_hwfn, p_ptt,
+ dump_buf + offset, dump, NULL,
+ mem_byte_addr, mem_len, 0,
+ false,
+ s_mem_group_names[mem_group_id],
+ is_storm, storm_letter);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC memories according to the BIN_BUF_DBG_DUMP_MEM input array.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, input_offset = 0;
+
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
+ const struct dbg_dump_split_hdr *split_hdr =
+ (const struct dbg_dump_split_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
+ u8 split_type_id = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ u32 split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ struct dbg_array curr_input_mems_arr = {
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
+ split_data_size};
+
+ switch (split_type_id) {
+ case SPLIT_TYPE_NONE:
+ offset += qed_grc_dump_mem_entries(p_hwfn,
+ p_ptt,
+ curr_input_mems_arr,
+ dump_buf + offset,
+ dump);
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Dumping split memories is currently not supported\n");
+ break;
+ }
+
+ input_offset += split_data_size;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC context data for the specified Storm.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 num_lids,
+ u32 lid_size,
+ u32 rd_reg_addr,
+ u8 storm_id)
+{
+ u32 i, lid, total_size;
+ u32 offset = 0;
+
+ if (!lid_size)
+ return 0;
+ lid_size *= BYTES_IN_DWORD;
+ total_size = num_lids * lid_size;
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ name,
+ 0,
+ total_size,
+ lid_size * 32,
+ false,
+ name,
+ true, s_storm_defs[storm_id].letter);
+
+ /* Dump context data */
+ if (dump) {
+ for (lid = 0; lid < num_lids; lid++) {
+ for (i = 0; i < lid_size; i++, offset++) {
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].cm_ctx_wr_addr,
+ BIT(9) | lid);
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt,
+ rd_reg_addr);
+ }
+ }
+ } else {
+ offset += total_size;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC contexts. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
+
+ /* Dump Conn AG context size */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_AG_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS),
+ s_storm_defs[storm_id].
+ cm_conn_ag_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_conn_ag_ctx_rd_addr,
+ storm_id);
+
+ /* Dump Conn ST context size */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_ST_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS),
+ s_storm_defs[storm_id].
+ cm_conn_st_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_conn_st_ctx_rd_addr,
+ storm_id);
+
+ /* Dump Task AG context size */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_AG_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS),
+ s_storm_defs[storm_id].
+ cm_task_ag_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_task_ag_ctx_rd_addr,
+ storm_id);
+
+ /* Dump Task ST context size */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_ST_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS),
+ s_storm_defs[storm_id].
+ cm_task_st_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_task_st_ctx_rd_addr,
+ storm_id);
+ }
+
+ return offset;
+}
+
+/* Dumps GRC IOR data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ char buf[10] = "IOR_SET_?";
+ u8 storm_id, set_id;
+ u32 offset = 0;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id)) {
+ for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+ u32 addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE +
+ DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+
+ buf[strlen(buf) - 1] = '0' + set_id;
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ buf,
+ addr,
+ IORS_PER_SET,
+ 32,
+ false,
+ "ior",
+ true,
+ s_storm_defs
+ [storm_id].letter);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dump VFC CAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump, u8 storm_id)
+{
+ u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
+ u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
+ u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
+ u32 offset = 0;
+ u32 row, i;
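+ /* 'i' is referenced internally by the ARR_REG_WR/ARR_REG_RD macros */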
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ "vfc_cam",
+ 0,
+ total_size,
+ 256,
+ false,
+ "vfc_cam",
+ true, s_storm_defs[storm_id].letter);
+ if (dump) {
+ /* Prepare CAM address */
+ SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
+ for (row = 0; row < VFC_CAM_NUM_ROWS;
+ row++, offset += VFC_CAM_RESP_DWORDS) {
+ /* Write VFC CAM command */
+ SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_WR,
+ cam_cmd, VFC_CAM_CMD_DWORDS);
+
+ /* Write VFC CAM address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_ADDR,
+ cam_addr, VFC_CAM_ADDR_DWORDS);
+
+ /* Read VFC CAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_CAM_RESP_DWORDS);
+ }
+ } else {
+ offset += total_size;
+ }
+
+ return offset;
+}
+
+/* Dump VFC RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u8 storm_id, struct vfc_ram_defs *ram_defs)
+{
+ u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
+ u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
+ u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
+ u32 offset = 0;
+ u32 row, i;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ ram_defs->mem_name,
+ 0,
+ total_size,
+ 256,
+ false,
+ ram_defs->type_name,
+ true, s_storm_defs[storm_id].letter);
+
+ /* Prepare RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+
+ if (!dump)
+ return offset + total_size;
+
+ for (row = ram_defs->base_row;
+ row < ram_defs->base_row + ram_defs->num_rows;
+ row++, offset += VFC_RAM_RESP_DWORDS) {
+ /* Write VFC RAM command */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_WR,
+ ram_cmd, VFC_RAM_CMD_DWORDS);
+
+ /* Write VFC RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_ADDR,
+ ram_addr, VFC_RAM_ADDR_DWORDS);
+
+ /* Read VFC RAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_RAM_RESP_DWORDS);
+ }
+
+ return offset;
+}
+
+/* Dumps GRC VFC data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 storm_id, i;
+ u32 offset = 0;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id) &&
+ s_storm_defs[storm_id].has_vfc &&
+ (storm_id != DBG_PSTORM_ID ||
+ dev_data->platform_id == PLATFORM_ASIC)) {
+ /* Read CAM */
+ offset += qed_grc_dump_vfc_cam(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, storm_id);
+
+ /* Read RAM */
+ for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
+ offset += qed_grc_dump_vfc_ram(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump, storm_id,
+ &s_vfc_ram_defs[i]);
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC RSS data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0;
+ u8 rss_mem_id;
+
+ for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
+ struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
+ u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
+ u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
+ u32 total_size = (num_entries * entry_width) / 32;
+ bool packed = (entry_width == 16);
+ u32 addr = rss_defs->addr;
+ u32 i, j;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ rss_defs->mem_name,
+ addr,
+ total_size,
+ entry_width,
+ packed,
+ rss_defs->type_name, false, 0);
+
+ if (!dump) {
+ offset += total_size;
+ continue;
+ }
+
+ /* Dump RSS data */
+ for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
+ qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
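+ /* Each address exposes one wide entry as BYTES_IN_DWORD
+ * consecutive dwords through the RAM data window.
+ */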
+ for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
+ *(dump_buf + offset) =
+ qed_rd(p_hwfn, p_ptt,
+ RSS_REG_RSS_RAM_DATA +
+ DWORDS_TO_BYTES(j));
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump, u8 big_ram_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char mem_name[12] = "???_BIG_RAM";
+ char type_name[8] = "???_RAM";
+ u32 ram_size, total_blocks;
+ u32 offset = 0, i, j;
+
+ total_blocks =
+ s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+ ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+
+ strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
+ strlen(s_big_ram_defs[big_ram_id].instance_name));
+ strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
+ strlen(s_big_ram_defs[big_ram_id].instance_name));
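+ /* Note: the strlen() count deliberately drops the terminator - only the
+ * "???" prefix of the pre-initialized templates is overwritten, keeping
+ * the "_BIG_RAM" / "_RAM" suffixes and their terminators intact.
+ */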
+
+ /* Dump memory header */
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ mem_name,
+ 0,
+ ram_size,
+ BIG_RAM_BLOCK_SIZE_BYTES * 8,
+ false, type_name, false, 0);
+
+ if (!dump)
+ return offset + ram_size;
+
+ /* Read and dump Big RAM data */
+ for (i = 0; i < total_blocks / 2; i++) {
+ qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
+ i);
+ for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
+ s_big_ram_defs[big_ram_id].
+ data_reg_addr +
+ DWORDS_TO_BYTES(j));
+ }
+
+ return offset;
+}
+
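+/* Dumps the MCP scratchpad, CPU register file and MCP registers, halting the
+ * MCP while they are read. Returns the dumped size in dwords.
+ */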
+static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ bool block_enable[MAX_BLOCK_ID] = { 0 };
+ bool halted = false;
+ u32 offset = 0;
+
+ /* Halt MCP */
+ if (dump) {
+ halted = !qed_mcp_halt(p_hwfn, p_ptt);
+ if (!halted)
+ DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+ }
+
+ /* Dump MCP scratchpad */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ MCP_REG_SCRATCH,
+ MCP_REG_SCRATCH_SIZE,
+ 0, false, "MCP", false, 0);
+
+ /* Dump MCP cpu_reg_file */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ MCP_REG_CPU_REG_FILE,
+ MCP_REG_CPU_REG_FILE_SIZE,
+ 0, false, "MCP", false, 0);
+
+ /* Dump MCP registers */
+ block_enable[BLOCK_MCP] = true;
+ offset += qed_grc_dump_registers(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, block_enable, "block", "MCP");
+
+ /* Dump required non-MCP registers */
+ offset += qed_grc_dump_regs_hdr(dump_buf + offset,
+ dump, 1, "eng", -1, "block", "MCP");
+ offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+ dump_buf + offset, dump,
+ BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR),
+ 1);
+
+ /* Release MCP */
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt))
+ DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+ return offset;
+}
+
+/* Dumps the tbus indirect memory for all PHYs.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
+ char mem_name[32];
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
+ struct phy_defs *phy_defs = &s_phy_defs[phy_id];
+ int printed_chars;
+
+ printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
+ phy_defs->phy_name);
+ if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid PHY memory name\n");
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ mem_name,
+ 0,
+ PHY_DUMP_SIZE_DWORDS,
+ 16, true, mem_name, false, 0);
+ if (dump) {
+ u32 addr_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_lo_addr;
+ u32 addr_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_hi_addr;
+ u32 data_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_lo_addr;
+ u32 data_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_hi_addr;
+ u8 *bytes_buf = (u8 *)(dump_buf + offset);
+
+ for (tbus_hi_offset = 0;
+ tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
+ tbus_hi_offset++) {
+ qed_wr(p_hwfn,
+ p_ptt, addr_hi_addr, tbus_hi_offset);
+ for (tbus_lo_offset = 0; tbus_lo_offset < 256;
+ tbus_lo_offset++) {
+ qed_wr(p_hwfn,
+ p_ptt,
+ addr_lo_addr, tbus_lo_offset);
+ *(bytes_buf++) =
+ (u8)qed_rd(p_hwfn, p_ptt,
+ data_lo_addr);
+ *(bytes_buf++) =
+ (u8)qed_rd(p_hwfn, p_ptt,
+ data_hi_addr);
+ }
+ }
+ }
+
+ offset += PHY_DUMP_SIZE_DWORDS;
+ }
+
+ return offset;
+}
+
+static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ u8 line_id,
+ u8 cycle_en,
+ u8 right_shift, u8 force_valid, u8 force_frame)
+{
+ struct block_defs *p_block_defs = s_block_defs[block_id];
+
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
+}
+
+/* Dumps Static Debug data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, block_id, line_id, addr, i;
+ struct block_defs *p_block_defs;
+
+ if (dump) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG, "Dumping static debug data...\n");
+
+ /* Disable all blocks debug output */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ p_block_defs = s_block_defs[block_id];
+
+ if (p_block_defs->has_dbg_bus[dev_data->chip_id])
+ qed_wr(p_hwfn, p_ptt,
+ p_block_defs->dbg_cycle_enable_addr, 0);
+ }
+
+ qed_bus_reset_dbg_block(p_hwfn, p_ptt);
+ qed_bus_set_framing_mode(p_hwfn,
+ p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
+ qed_wr(p_hwfn,
+ p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
+ qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
+ qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
+ }
+
+ /* Dump all static debug lines for each relevant block */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ p_block_defs = s_block_defs[block_id];
+
+ if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
+ continue;
+
+ /* Dump static section params */
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ p_block_defs->name, 0,
+ block_dwords, 32, false,
+ "STATIC", false, 0);
+
+ if (dump && !dev_data->block_in_reset[block_id]) {
+ u8 dbg_client_id =
+ p_block_defs->dbg_client_id[dev_data->chip_id];
+
+ /* Enable block's client */
+ qed_bus_enable_clients(p_hwfn, p_ptt,
+ BIT(dbg_client_id));
+
+ for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
+ line_id++) {
+ /* Configure debug line ID */
+ qed_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id,
+ 0xf, 0, 0, 0);
+
+ /* Read debug line info */
+ for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
+ i < STATIC_DEBUG_LINE_DWORDS;
+ i++, offset++, addr += BYTES_IN_DWORD)
+ dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
+ addr);
+ }
+
+ /* Disable block's client and debug output */
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ qed_wr(p_hwfn, p_ptt,
+ p_block_defs->dbg_cycle_enable_addr, 0);
+ } else {
+ /* All lines are invalid - dump zeros */
+ if (dump)
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(block_dwords));
+ offset += block_dwords;
+ }
+ }
+
+ if (dump) {
+ qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ }
+
+ return offset;
+}
+
+/* Performs GRC Dump to the specified buffer.
+ * Returns a status; the dumped size in dwords is returned via
+ * num_dumped_dwords.
+ */
+static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ bool parities_masked = false;
+ u8 i, port_mode = 0;
+ u32 offset = 0;
+
+ *num_dumped_dwords = 0;
+
+ /* Fill GRC parameters that were not set by the user with their default
+ * value.
+ */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ /* Find port mode */
+ if (dump) {
+ switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
+ case 0:
+ port_mode = 1;
+ break;
+ case 1:
+ port_mode = 2;
+ break;
+ case 2:
+ port_mode = 4;
+ break;
+ }
+ }
+
+ /* Update reset state */
+ if (dump)
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 4);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "grc-dump");
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-lcids",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-ltids",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "num-ports", port_mode);
+
+ /* Dump reset registers (dumped before taking blocks out of reset) */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+ offset += qed_grc_dump_reset_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Take all blocks out of reset (using reset registers) */
+ if (dump) {
+ qed_grc_unreset_blocks(p_hwfn, p_ptt);
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ }
+
+ /* Disable all parities using MFW command */
+ if (dump) {
+ parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
+ if (!parities_masked) {
+ if (qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_PARITY_SAFE))
+ return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
+ else
+ DP_NOTICE(p_hwfn,
+ "Failed to mask parities using MFW\n");
+ }
+ }
+
+ /* Dump modified registers (dumped before modifying them) */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+ offset += qed_grc_dump_modified_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Stall storms */
+ if (dump &&
+ (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_IOR) ||
+ qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
+ qed_grc_stall_storms(p_hwfn, p_ptt, true);
+
+ /* Dump all regs */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
+ /* Dump all blocks except MCP */
+ bool block_enable[MAX_BLOCK_ID];
+
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ block_enable[i] = true;
+ block_enable[BLOCK_MCP] = false;
+ offset += qed_grc_dump_registers(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ block_enable, NULL, NULL);
+ }
+
+ /* Dump memories */
+ offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
+
+ /* Dump MCP */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
+ offset += qed_grc_dump_mcp(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump context */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
+ offset += qed_grc_dump_ctx(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump RSS memories */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
+ offset += qed_grc_dump_rss(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump Big RAM */
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+ if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
+ offset += qed_grc_dump_big_ram(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, i);
+
+ /* Dump IORs */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
+ offset += qed_grc_dump_iors(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump VFC */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
+ offset += qed_grc_dump_vfc(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump PHY tbus */
+ if (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
+ CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
+ offset += qed_grc_dump_phy(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump static debug data */
+ if (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_STATIC) &&
+ dev_data->bus.state == DBG_BUS_STATE_IDLE)
+ offset += qed_grc_dump_static_debug(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+ if (dump) {
+ /* Unstall storms */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
+ qed_grc_stall_storms(p_hwfn, p_ptt, false);
+
+ /* Clear parity status */
+ qed_grc_clear_all_prty(p_hwfn, p_ptt);
+
+ /* Enable all parities using MFW command */
+ if (parities_masked)
+ qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
+ }
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
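+
+/* Note: like the other dump flows in this file, qed_grc_dump() is meant to
+ * be called twice - once with dump=false to compute the required buffer size
+ * (offset accounting only), and once with dump=true to actually fill the
+ * buffer. An illustrative (not authoritative) call sequence:
+ *
+ *	qed_grc_dump(p_hwfn, p_ptt, NULL, false, &size_dw);
+ *	qed_grc_dump(p_hwfn, p_ptt, buf, true, &dumped_dw);
+ */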
+
+/* Writes the specified failing Idle Check rule to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u16 rule_id,
+ const struct dbg_idle_chk_rule *rule,
+ u16 fail_entry_id, u32 *cond_reg_values)
+{
+ const union dbg_idle_chk_reg *regs =
+ &((const union dbg_idle_chk_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
+ const struct dbg_idle_chk_cond_reg *cond_regs = &regs[0].cond_reg;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct dbg_idle_chk_result_hdr *hdr =
+ (struct dbg_idle_chk_result_hdr *)dump_buf;
+ const struct dbg_idle_chk_info_reg *info_regs =
+ &regs[rule->num_cond_regs].info_reg;
+ u32 next_reg_offset = 0, i, offset = 0;
+ u8 reg_id;
+
+ /* Dump rule data */
+ if (dump) {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->rule_id = rule_id;
+ hdr->mem_entry_id = fail_entry_id;
+ hdr->severity = rule->severity;
+ hdr->num_dumped_cond_regs = rule->num_cond_regs;
+ }
+
+ offset += IDLE_CHK_RESULT_HDR_DWORDS;
+
+ /* Dump condition register values */
+ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
+ const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
+
+ /* Write register header */
+ if (dump) {
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)(dump_buf
+ + offset);
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ memset(reg_hdr, 0,
+ sizeof(struct dbg_idle_chk_result_reg_hdr));
+ reg_hdr->start_entry = reg->start_entry;
+ reg_hdr->size = reg->entry_size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
+ reg->num_entries > 1 || reg->start_entry > 0
+ ? 1 : 0);
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg_hdr->size;
+ i++, next_reg_offset++, offset++)
+ dump_buf[offset] =
+ cond_reg_values[next_reg_offset];
+ } else {
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
+ reg->entry_size;
+ }
+ }
+
+ /* Dump info register values */
+ for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
+ const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
+ u32 block_id;
+
+ if (!dump) {
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
+ continue;
+ }
+
+ /* Check if register's block is in reset */
+ block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
+ if (block_id >= MAX_BLOCK_ID) {
+ DP_NOTICE(p_hwfn, "Invalid block_id\n");
+ return 0;
+ }
+
+ if (!dev_data->block_in_reset[block_id]) {
+ bool eval_mode = GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ bool mode_match = true;
+
+ /* Check mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match =
+ qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match) {
+ u32 grc_addr =
+ DWORDS_TO_BYTES(GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS));
+
+ /* Write register header */
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
+
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ hdr->num_dumped_info_regs++;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->size = reg->size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg->size;
+ i++, offset++, grc_addr += 4)
+ dump_buf[offset] =
+ qed_rd(p_hwfn, p_ptt, grc_addr);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps idle check rule entries. Returns the dumped size in dwords. */
+static u32
+qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump,
+ const struct dbg_idle_chk_rule *input_rules,
+ u32 num_input_rules, u32 *num_failing_rules)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
+ u32 i, j, offset = 0;
+ u16 entry_id;
+ u8 reg_id;
+
+ *num_failing_rules = 0;
+ for (i = 0; i < num_input_rules; i++) {
+ const struct dbg_idle_chk_cond_reg *cond_regs;
+ const struct dbg_idle_chk_rule *rule;
+ const union dbg_idle_chk_reg *regs;
+ u16 num_reg_entries = 1;
+ bool check_rule = true;
+ const u32 *imm_values;
+
+ rule = &input_rules[i];
+ regs = &((const union dbg_idle_chk_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
+ [rule->reg_offset];
+ cond_regs = &regs[0].cond_reg;
+ imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
+ [rule->imm_offset];
+
+ /* Check if all condition register blocks are out of reset, and
+ * find maximal number of entries (all condition registers that
+ * are memories must have the same size, which is > 1).
+ */
+ for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
+ reg_id++) {
+ u32 block_id = GET_FIELD(cond_regs[reg_id].data,
+ DBG_IDLE_CHK_COND_REG_BLOCK_ID);
+
+ if (block_id >= MAX_BLOCK_ID) {
+ DP_NOTICE(p_hwfn, "Invalid block_id\n");
+ return 0;
+ }
+
+ check_rule = !dev_data->block_in_reset[block_id];
+ if (cond_regs[reg_id].num_entries > num_reg_entries)
+ num_reg_entries = cond_regs[reg_id].num_entries;
+ }
+
+ if (!check_rule && dump)
+ continue;
+
+ /* Go over all register entries (number of entries is the same
+ * for all condition registers).
+ */
+ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
+ /* Read current entry of all condition registers */
+ if (dump) {
+ u32 next_reg_offset = 0;
+
+ for (reg_id = 0;
+ reg_id < rule->num_cond_regs;
+ reg_id++) {
+ const struct dbg_idle_chk_cond_reg
+ *reg = &cond_regs[reg_id];
+
+ /* Find GRC address (if it's a memory,
+ * the address of the specific entry is
+ * calculated).
+ */
+ u32 grc_addr =
+ DWORDS_TO_BYTES(
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS));
+
+ if (reg->num_entries > 1 ||
+ reg->start_entry > 0) {
+ u32 padded_entry_size =
+ reg->entry_size > 1 ?
+ roundup_pow_of_two
+ (reg->entry_size) : 1;
+
+ grc_addr +=
+ DWORDS_TO_BYTES(
+ (reg->start_entry +
+ entry_id)
+ * padded_entry_size);
+ }
+
+ /* Read registers */
+ if (next_reg_offset + reg->entry_size >=
+ IDLE_CHK_MAX_ENTRIES_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "idle check registers entry is too large\n");
+ return 0;
+ }
+
+ for (j = 0; j < reg->entry_size;
+ j++, next_reg_offset++,
+ grc_addr += 4)
+ cond_reg_values[next_reg_offset] =
+ qed_rd(p_hwfn, p_ptt, grc_addr);
+ }
+ }
+
+ /* Call rule's condition function - a return value of
+ * true indicates failure.
+ */
+ if ((*cond_arr[rule->cond_id])(cond_reg_values,
+ imm_values) || !dump) {
+ offset +=
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
+ (*num_failing_rules)++;
+ break;
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Performs Idle Check Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, input_offset = 0, num_failing_rules = 0;
+ u32 num_failing_rules_offset;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "idle-chk");
+
+ /* Dump idle check section header with a single parameter */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
+ num_failing_rules_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
+ const struct dbg_idle_chk_cond_hdr *cond_hdr =
+ (const struct dbg_idle_chk_cond_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
+ [input_offset++];
+ bool eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ bool mode_match = true;
+
+ /* Check mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match) {
+ u32 curr_failing_rules;
+
+ offset +=
+ qed_idle_chk_dump_rule_entries(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ (const struct dbg_idle_chk_rule *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
+ ptr[input_offset],
+ cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
+ &curr_failing_rules);
+ num_failing_rules += curr_failing_rules;
+ }
+
+ input_offset += cond_hdr->data_size;
+ }
+
+ /* Overwrite num_rules parameter */
+ if (dump)
+ qed_dump_num_param(dump_buf + num_failing_rules_offset,
+ dump, "num_rules", num_failing_rules);
+
+ return offset;
+}
+
+/* Finds the meta data image in NVRAM. */
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+ struct mcp_file_att file_att;
+
+ /* Call NVRAM get file command */
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type, &ret_mcp_resp, &ret_mcp_param,
+ &ret_txn_size, (u32 *)&file_att) != 0)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Update return values */
+ *nvram_offset_bytes = file_att.nvm_start_addr;
+ *nvram_size_bytes = file_att.len;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
+ image_type, *nvram_offset_bytes, *nvram_size_bytes);
+
+ /* Check alignment */
+ if (*nvram_size_bytes & 0x3)
+ return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
+ u32 bytes_to_copy, read_offset = 0;
+ s32 bytes_left = nvram_size_bytes;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "nvram_read: reading image of size %d bytes from NVRAM\n",
+ nvram_size_bytes);
+ do {
+ bytes_to_copy =
+ (bytes_left >
+ MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
+
+ /* Call NVRAM read command */
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ (nvram_offset_bytes +
+ read_offset) |
+ (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_SHIFT),
+ &ret_mcp_resp, &ret_mcp_param,
+ &ret_read_size,
+ (u32 *)((u8 *)ret_buf +
+ read_offset)) != 0)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Update read offset */
+ read_offset += ret_read_size;
+ bytes_left -= ret_read_size;
+ } while (bytes_left > 0);
+
+ return DBG_STATUS_OK;
+}
+
+/* Get info on the MCP Trace data in the scratchpad:
+ * - trace_data_grc_addr - the GRC address of the trace data
+ * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
+ * the header)
+ */
+static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *trace_data_grc_addr,
+ u32 *trace_data_size_bytes)
+{
+ /* Read MCP trace section offsize structure from MCP scratchpad */
+ u32 spad_trace_offsize = qed_rd(p_hwfn,
+ p_ptt,
+ MCP_SPAD_TRACE_OFFSIZE_ADDR);
+ u32 signature;
+
+ /* Extract MCP trace section GRC address from offsize structure (within
+ * scratchpad).
+ */
+ *trace_data_grc_addr =
+ MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
+
+ /* Read signature from MCP trace section */
+ signature = qed_rd(p_hwfn, p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, signature));
+ if (signature != MFW_TRACE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read trace size from MCP trace section */
+ *trace_data_size_bytes = qed_rd(p_hwfn,
+ p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, size));
+ return DBG_STATUS_OK;
+}
+
+/* Reads MCP trace meta data image from NVRAM.
+ * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
+ * file)
+ * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
+ * Trace meta data starts (invalid when loaded from file)
+ * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
+ */
+static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 trace_data_size_bytes,
+ u32 *running_bundle_id,
+ u32 *trace_meta_offset_bytes,
+ u32 *trace_meta_size_bytes)
+{
+ /* Read MCP trace section offsize structure from MCP scratchpad */
+ u32 spad_trace_offsize = qed_rd(p_hwfn,
+ p_ptt,
+ MCP_SPAD_TRACE_OFFSIZE_ADDR);
+
+ /* Find running bundle ID */
+ u32 running_mfw_addr =
+ MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
+ QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
+ enum dbg_status status;
+ u32 nvram_image_type;
+
+ *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
+ if (*running_bundle_id > 1)
+ return DBG_STATUS_INVALID_NVRAM_BUNDLE;
+
+ /* Find image in NVRAM */
+ nvram_image_type =
+ (*running_bundle_id ==
+ DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
+ status = qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ nvram_image_type,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes);
+
+ return status;
+}
+
+/* Reads the MCP Trace data from the specified GRC address into the specified
+ * buffer.
+ */
+static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 grc_addr, u32 size_in_dwords, u32 *buf)
+{
+ u32 i;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
+ size_in_dwords, grc_addr);
+ for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
+ buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+}
+
+/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
+ * buffer.
+ */
+static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_in_bytes,
+ u32 size_in_bytes, u32 *buf)
+{
+ u8 *byte_buf = (u8 *)buf;
+ u8 modules_num, i;
+ u32 signature;
+
+ /* Read meta data from NVRAM */
+ enum dbg_status status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ nvram_offset_in_bytes,
+ size_in_bytes,
+ buf);
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Extract and check first signature */
+ signature = qed_read_unaligned_dword(byte_buf);
+ byte_buf += sizeof(u32);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Extract number of modules */
+ modules_num = *(byte_buf++);
+
+ /* Skip all modules */
+ for (i = 0; i < modules_num; i++) {
+ u8 module_len = *(byte_buf++);
+
+ byte_buf += module_len;
+ }
+
+ /* Extract and check second signature */
+ signature = qed_read_unaligned_dword(byte_buf);
+ byte_buf += sizeof(u32);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+ return DBG_STATUS_OK;
+}
+
+/* Dump MCP Trace */
+enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
+ u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
+ u32 trace_meta_offset_bytes, trace_meta_size_bytes;
+ enum dbg_status status;
+ int halted = 0;
+
+ *num_dumped_dwords = 0;
+
+ /* Get trace data info */
+ status = qed_mcp_trace_get_data_info(p_hwfn,
+ p_ptt,
+ &trace_data_grc_addr,
+ &trace_data_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "mcp-trace");
+
+ /* Halt MCP while reading from scratchpad so the read data will be
+ * consistent. If the halt fails, the MCP trace is taken anyway, with a
+ * small risk that it may be corrupt.
+ */
+ if (dump) {
+ halted = !qed_mcp_halt(p_hwfn, p_ptt);
+ if (!halted)
+ DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+ }
+
+ /* Find trace data size */
+ trace_data_size_dwords =
+ DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
+ BYTES_IN_DWORD);
+
+ /* Dump trace data section header and param */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_trace_data", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", trace_data_size_dwords);
+
+ /* Read trace data from scratchpad into dump buffer */
+ if (dump)
+ qed_mcp_trace_read_data(p_hwfn,
+ p_ptt,
+ trace_data_grc_addr,
+ trace_data_size_dwords,
+ dump_buf + offset);
+ offset += trace_data_size_dwords;
+
+ /* Resume MCP (only if halt succeeded) */
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
+ DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+
+ /* Dump trace meta section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_trace_meta", 1);
+
+ /* Read trace meta info */
+ status = qed_mcp_trace_get_meta_info(p_hwfn,
+ p_ptt,
+ trace_data_size_bytes,
+ &running_bundle_id,
+ &trace_meta_offset_bytes,
+ &trace_meta_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Dump trace meta size param (trace_meta_size_bytes is always
+ * dword-aligned).
+ */
+ trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ trace_meta_size_dwords);
+
+ /* Read trace meta image into dump buffer */
+ if (dump) {
+ status = qed_mcp_trace_read_meta(p_hwfn,
+ p_ptt,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes,
+ dump_buf + offset);
+ if (status != DBG_STATUS_OK)
+ return status;
+ }
+
+ offset += trace_meta_size_dwords;
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
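+
+/* For reference, the resulting mcp-trace dump buffer is laid out as: the
+ * common global params, a "dump-type: mcp-trace" param, an "mcp_trace_data"
+ * section (size param followed by the raw trace data dwords), and an
+ * "mcp_trace_meta" section (size param followed by the meta image dwords).
+ */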
+
+/* Dump GRC FIFO */
+enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, dwords_read, size_param_offset;
+ bool fifo_has_data;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "reg-fifo");
+
+ /* Dump fifo data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the FIFO.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "reg_fifo_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
+ * test how much data is available, except for reading it.
+ */
+ offset += REG_FIFO_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+
+ /* Pull available data from FIFO. Use DMAE since this is widebus memory
+ * and must be accessed atomically. Test that dwords_read does not pass
+ * the buffer size, since more entries could be added to the FIFO while
+ * we are emptying it.
+ */
+ for (dwords_read = 0;
+ fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
+ dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
+ REG_FIFO_ELEMENT_DWORDS) {
+ if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
+ (u64)(uintptr_t)(&dump_buf[offset]),
+ REG_FIFO_ELEMENT_DWORDS, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+ }
+
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ dwords_read);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Dump IGU FIFO */
+enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, dwords_read, size_param_offset;
+ bool fifo_has_data;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "igu-fifo");
+
+ /* Dump fifo data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the FIFO.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "igu_fifo_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
+ * test how much data is available, except for reading it.
+ */
+ offset += IGU_FIFO_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+
+ /* Pull available data from FIFO. Use DMAE since this is widebus memory
+ * and must be accessed atomically. Test that dwords_read does not pass
+ * the buffer size, since more entries could be added to the FIFO while
+ * we are emptying it.
+ */
+ for (dwords_read = 0;
+ fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
+ dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
+ IGU_FIFO_ELEMENT_DWORDS) {
+ if (qed_dmae_grc2host(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_MEMORY,
+ (u64)(uintptr_t)(&dump_buf[offset]),
+ IGU_FIFO_ELEMENT_DWORDS, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+ }
+
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ dwords_read);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Protection Override dump */
+enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, size_param_offset, override_window_dwords;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "protection-override");
+
+ /* Dump data section header and param. The size param is 0 for now, and
+ * is overwritten after reading the data.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "protection_override_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ /* Add override window info to buffer */
+ override_window_dwords =
+ qed_rd(p_hwfn, p_ptt,
+ GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ if (qed_dmae_grc2host(p_hwfn, p_ptt,
+ GRC_REG_PROTECTION_OVERRIDE_WINDOW,
+ (u64)(uintptr_t)(dump_buf + offset),
+ override_window_dwords, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ offset += override_window_dwords;
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ override_window_dwords);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Performs FW Asserts Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char storm_letter_str[2] = "?";
+ struct fw_info fw_info;
+ u32 offset = 0, i;
+ u8 storm_id;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "fw-asserts");
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
+ last_list_idx, element_addr;
+
+ if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
+ continue;
+
+ /* Read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+
+ /* Dump FW Asserts section header and params */
+ storm_letter_str[0] = s_storm_defs[storm_id].letter;
+ offset += qed_dump_section_hdr(dump_buf + offset, dump,
+ "fw_asserts", 2);
+ offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
+ storm_letter_str);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ fw_info.fw_asserts_section.
+ list_element_dword_size);
+
+ if (!dump) {
+ offset += fw_info.fw_asserts_section.
+ list_element_dword_size;
+ continue;
+ }
+
+ /* Read and dump FW Asserts data */
+ fw_asserts_section_addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_INT_RAM +
+ RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
+ section_ram_line_offset);
+ next_list_idx_addr =
+ fw_asserts_section_addr +
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_next_index_dword_offset);
+ next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
+ last_list_idx =
+ (next_list_idx > 0 ? next_list_idx :
+ fw_info.fw_asserts_section.list_num_elements) - 1;
+ element_addr =
+ fw_asserts_section_addr +
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_dword_offset) +
+ last_list_idx *
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_element_dword_size);
+ for (i = 0;
+ i < fw_info.fw_asserts_section.list_element_dword_size;
+ i++, offset++, element_addr += BYTES_IN_DWORD)
+ dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+ }
+
+ /* Dump last section */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+ return offset;
+}
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
+{
+ /* Convert binary data to debug arrays */
+ u32 num_of_buffers = *(u32 *)bin_ptr;
+ struct bin_buffer_hdr *buf_array;
+ u8 buf_id;
+
+ buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
+
+ for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ s_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
+ return DBG_STATUS_OK;
+}
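+
+/* The binary image handed to qed_dbg_set_bin_ptr() is expected to be laid
+ * out as parsed above (a sketch, offsets relative to bin_ptr):
+ *
+ *	u32 num_of_buffers;
+ *	struct bin_buffer_hdr buf_array[num_of_buffers];
+ *	u8 data[];	(each header's offset/length locates one debug array)
+ */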
+
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+ return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* GRC Dump */
+ status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Clear all GRC params */
+ qed_dbg_grc_clear_params(p_hwfn);
+ return status;
+}
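+
+/* Illustrative caller flow for the public GRC dump API (buffer allocation is
+ * the caller's responsibility; vzalloc is just one possible allocator):
+ *
+ *	u32 size_dw, dumped_dw;
+ *	u32 *buf;
+ *
+ *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dw) !=
+ *	    DBG_STATUS_OK)
+ *		return;
+ *	buf = vzalloc(size_dw * sizeof(u32));
+ *	if (buf) {
+ *		qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dw, &dumped_dw);
+ *		...
+ *		vfree(buf);
+ *	}
+ */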
+
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+ if (!dev_data->idle_chk.buf_size_set) {
+ dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
+ p_ptt,
+ NULL, false);
+ dev_data->idle_chk.buf_size_set = true;
+ }
+
+ *buf_size = dev_data->idle_chk.buf_size;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Idle Check Dump */
+ *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Perform dump */
+ return qed_mcp_trace_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_reg_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_igu_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_protection_override_dump(p_hwfn,
+ p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_protection_override_dump(p_hwfn,
+ p_ptt,
+ dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
+ return DBG_STATUS_OK;
+}
+
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+ u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
+#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
+#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
+#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+ char *format_str;
+};
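+
+/* A sketch of decoding a format descriptor with the masks above (the masks
+ * here already include the shifted bit positions, so extraction is
+ * "(data & MASK) >> SHIFT"):
+ *
+ *	u32 module_idx = (fmt->data & MCP_TRACE_FORMAT_MODULE_MASK) >>
+ *			 MCP_TRACE_FORMAT_MODULE_SHIFT;
+ *	u32 level = (fmt->data & MCP_TRACE_FORMAT_LEVEL_MASK) >>
+ *		    MCP_TRACE_FORMAT_LEVEL_SHIFT;
+ */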
+
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+};
+
+/* Reg fifo element */
+struct reg_fifo_element {
+ u64 data;
+#define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
+#define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
+#define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
+#define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
+#define REG_FIFO_ELEMENT_PF_SHIFT 24
+#define REG_FIFO_ELEMENT_PF_MASK 0xf
+#define REG_FIFO_ELEMENT_VF_SHIFT 28
+#define REG_FIFO_ELEMENT_VF_MASK 0xff
+#define REG_FIFO_ELEMENT_PORT_SHIFT 36
+#define REG_FIFO_ELEMENT_PORT_MASK 0x3
+#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
+#define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
+#define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
+#define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
+#define REG_FIFO_ELEMENT_MASTER_SHIFT 43
+#define REG_FIFO_ELEMENT_MASTER_MASK 0xf
+#define REG_FIFO_ELEMENT_ERROR_SHIFT 47
+#define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
+};
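+
+/* A sketch of decoding one element, assuming the driver's GET_FIELD helper
+ * ("(data >> SHIFT) & MASK", as used elsewhere in this file). The 23-bit
+ * address is in units of REG_FIFO_ELEMENT_ADDR_FACTOR bytes, and the access
+ * field indexes s_access_strs (0 = read, 1 = write):
+ *
+ *	u32 addr = GET_FIELD(elem->data, REG_FIFO_ELEMENT_ADDRESS) *
+ *		   REG_FIFO_ELEMENT_ADDR_FACTOR;
+ *	u32 access = GET_FIELD(elem->data, REG_FIFO_ELEMENT_ACCESS);
+ */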
+
+/* IGU fifo element */
+struct igu_fifo_element {
+ u32 dword0;
+#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
+#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
+ u32 dword1;
+ u32 dword2;
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
+ u32 reserved;
+};
+
+struct igu_fifo_wr_data {
+ u32 data;
+#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
+#define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
+#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
+#define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
+#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
+#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
+#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
+#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
+};
+
+struct igu_fifo_cleanup_wr_data {
+ u32 data;
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
+};
+
+/* Protection override element */
+struct protection_override_element {
+ u64 data;
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
+#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
+#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
+};
+
+enum igu_fifo_sources {
+ IGU_SRC_PXP0,
+ IGU_SRC_PXP1,
+ IGU_SRC_PXP2,
+ IGU_SRC_PXP3,
+ IGU_SRC_PXP4,
+ IGU_SRC_PXP5,
+ IGU_SRC_PXP6,
+ IGU_SRC_PXP7,
+ IGU_SRC_CAU,
+ IGU_SRC_ATTN,
+ IGU_SRC_GRC
+};
+
+enum igu_fifo_addr_types {
+ IGU_ADDR_TYPE_MSIX_MEM,
+ IGU_ADDR_TYPE_WRITE_PBA,
+ IGU_ADDR_TYPE_WRITE_INT_ACK,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS,
+ IGU_ADDR_TYPE_READ_INT,
+ IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
+ IGU_ADDR_TYPE_RESERVED
+};
+
+struct igu_fifo_addr_data {
+ u16 start_addr;
+ u16 end_addr;
+ char *desc;
+ char *vf_desc;
+ enum igu_fifo_addr_types type;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_MSG_LEN 1024
+#define MCP_TRACE_MAX_MODULE_LEN 8
+#define MCP_TRACE_FORMAT_MAX_PARAMS 3
+#define MCP_TRACE_FORMAT_PARAM_WIDTH \
+ (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
+#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
+#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
+
+/********************************* Macros ************************************/
+
+#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
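+/* Note that the division truncates, e.g. BYTES_TO_DWORDS(6) == 1 with
+ * BYTES_IN_DWORD == 4; the dump format keeps sizes dword-aligned, so no data
+ * is lost in practice.
+ */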
+
+/***************************** Constant Arrays *******************************/
+
+/* Status string array */
+static const char * const s_status_str[] = {
+ "Operation completed successfully",
+ "Debug application version wasn't set",
+ "Unsupported debug application version",
+ "The debug block wasn't reset since the last recording",
+ "Invalid arguments",
+ "The debug output was already set",
+ "Invalid PCI buffer size",
+ "PCI buffer allocation failed",
+ "A PCI buffer wasn't allocated",
+ "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
+ "GRC/Timestamp input overlap in cycle dword 0",
+ "Cannot record Storm data since the entire recording cycle is used by HW",
+ "The Storm was already enabled",
+ "The specified Storm wasn't enabled",
+ "The block was already enabled",
+ "The specified block wasn't enabled",
+ "No input was enabled for recording",
+ "Filters and triggers are not allowed when recording in 64b units",
+ "The filter was already enabled",
+ "The trigger was already enabled",
+ "The trigger wasn't enabled",
+ "A constraint can be added only after a filter was enabled or a trigger state was added",
+ "Cannot add more than 3 trigger states",
+ "Cannot add more than 4 constraints per filter or trigger state",
+ "The recording wasn't started",
+ "A trigger was configured, but it didn't trigger",
+ "No data was recorded",
+ "Dump buffer is too small",
+ "Dumped data is not aligned to chunks",
+ "Unknown chip",
+ "Failed allocating virtual memory",
+ "The input block is in reset",
+ "Invalid MCP trace signature found in NVRAM",
+ "Invalid bundle ID found in NVRAM",
+ "Failed getting NVRAM image",
+ "NVRAM image is not dword-aligned",
+ "Failed reading from NVRAM",
+ "Idle check parsing failed",
+ "MCP Trace data is corrupt",
+ "Dump doesn't contain meta data - it must be provided in an image file",
+ "Failed to halt MCP",
+ "Failed to resume MCP after halt",
+ "DMAE transaction failed",
+ "Failed to empty SEMI sync FIFO",
+ "IGU FIFO data is corrupt",
+ "MCP failed to mask parities",
+ "FW Asserts parsing failed",
+ "GRC FIFO data is corrupt",
+ "Protection Override data is corrupt",
+ "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
+ "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
+};
+
+/* Idle check severity names array */
+static const char * const s_idle_chk_severity_str[] = {
+ "Error",
+ "Error if no traffic",
+ "Warning"
+};
+
+/* MCP Trace level names array */
+static const char * const s_mcp_trace_level_str[] = {
+ "ERROR",
+ "TRACE",
+ "DEBUG"
+};
+
+/* Parsing strings */
+static const char * const s_access_strs[] = {
+ "read",
+ "write"
+};
+
+static const char * const s_privilege_strs[] = {
+ "VF",
+ "PDA",
+ "HV",
+ "UA"
+};
+
+static const char * const s_protection_strs[] = {
+ "(default)",
+ "(default)",
+ "(default)",
+ "(default)",
+ "override VF",
+ "override PDA",
+ "override HV",
+ "override UA"
+};
+
+static const char * const s_master_strs[] = {
+ "???",
+ "pxp",
+ "mcp",
+ "msdm",
+ "psdm",
+ "ysdm",
+ "usdm",
+ "tsdm",
+ "xsdm",
+ "dbu",
+ "dmae",
+ "???",
+ "???",
+ "???",
+ "???",
+ "???"
+};
+
+static const char * const s_reg_fifo_error_strs[] = {
+ "grc timeout",
+ "address doesn't belong to any block",
+ "reserved address in block or write to read-only address",
+ "privilege/protection mismatch",
+ "path isolation error"
+};
+
+static const char * const s_igu_fifo_source_strs[] = {
+ "TSTORM",
+ "MSTORM",
+ "USTORM",
+ "XSTORM",
+ "YSTORM",
+ "PSTORM",
+ "PCIE",
+ "NIG_QM_PBF",
+ "CAU",
+ "ATTN",
+ "GRC",
+};
+
+static const char * const s_igu_fifo_error_strs[] = {
+ "no error",
+ "length error",
+ "function disabled",
+ "VF sent command to attnetion address",
+ "host sent prod update command",
+ "read of during interrupt register while in MIMD mode",
+ "access to PXP BAR reserved address",
+ "producer update command to attention index",
+ "unknown error",
+ "SB index not valid",
+ "SB relative index and FID not found",
+ "FID not match",
+ "command with error flag asserted (PCI error or CAU discard)",
+ "VF sent cleanup and RF cleanup is disabled",
+ "cleanup command on type bigger than 4"
+};
+
+/* IGU FIFO address data */
+static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
+ {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
+ {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
+ {0x201, 0x201, "Write PBA[64:127]", "reserved",
+ IGU_ADDR_TYPE_WRITE_PBA},
+ {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
+ {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
+ IGU_ADDR_TYPE_WRITE_INT_ACK},
+ {0x5f0, 0x5f0, "Attention bits update", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f1, 0x5f1, "Attention bits set", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f2, 0x5f2, "Attention bits clear", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
+};
+
+/******************************** Variables **********************************/
+
+/* MCP Trace meta data - used in case the dump doesn't contain the meta data
+ * (e.g. due to no NVRAM access).
+ */
+static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
+
+/* Temporary buffer, used for print size calculations */
+static char s_temp_buf[MAX_MSG_LEN];
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+{
+ /* Convert binary data to debug arrays */
+ u32 num_of_buffers = *(u32 *)bin_ptr;
+ struct bin_buffer_hdr *buf_array;
+ u8 buf_id;
+
+ buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
+
+ for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ s_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
+ return DBG_STATUS_OK;
+}
+
+static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
+{
+ return (a + b) % size;
+}
+
+static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
+{
+ return (size + a - b) % size;
+}
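+
+/* Both helpers wrap around the cyclic buffer size, e.g. with size 16:
+ * qed_cyclic_add(14, 4, 16) == 2 and qed_cyclic_sub(2, 5, 16) == 13.
+ */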
+
+/* Reads the specified number of bytes (up to 4) from the specified cyclic
+ * buffer and returns them as a dword value. The specified buffer offset is
+ * updated.
+ */
+static u32 qed_read_from_cyclic_buf(void *buf,
+ u32 *offset,
+ u32 buf_size, u8 num_bytes_to_read)
+{
+ u8 *bytes_buf = (u8 *)buf;
+ u8 *val_ptr;
+ u32 val = 0;
+ u8 i;
+
+ val_ptr = (u8 *)&val;
+
+ for (i = 0; i < num_bytes_to_read; i++) {
+ val_ptr[i] = bytes_buf[*offset];
+ *offset = qed_cyclic_add(*offset, 1, buf_size);
+ }
+
+ return val;
+}
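+
+/* The bytes are assembled in host memory order, so on a little-endian host
+ * the first byte read becomes the least significant byte of the returned
+ * dword.
+ */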
+
+/* Reads and returns the next byte from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
+{
+ return ((u8 *)buf)[(*offset)++];
+}
+
+/* Reads and returns the next dword from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
+{
+ u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
+
+ *offset += 4;
+ return dword_val;
+}
+
+/* Reads the next string from the specified buffer, and copies it to the
+ * specified pointer. The specified buffer offset is updated.
+ */
+static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
+{
+ const char *source_str = &((const char *)buf)[*offset];
+
+ strncpy(dest, source_str, size);
+ dest[size - 1] = '\0';
+ *offset += size;
+}
+
+/* Returns a pointer to the specified offset (in bytes) of the specified
+ * buffer. If the specified buffer is NULL, a pointer to a temporary buffer is
+ * returned.
+ */
+static char *qed_get_buf_ptr(void *buf, u32 offset)
+{
+ return buf ? (char *)buf + offset : s_temp_buf;
+}
+
+/* Reads a param from the specified buffer. Returns the number of dwords read.
+ * If the returned param_str_val is NULL, the param is numeric and its value
+ * is returned in param_num_val.
+ * Otherwise, the param is a string and its pointer is returned in
+ * param_str_val.
+ */
+static u32 qed_read_param(u32 *dump_buf,
+ const char **param_name,
+ const char **param_str_val, u32 *param_num_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0; /* In bytes */
+
+ /* Extract param name */
+ *param_name = char_buf;
+ offset += strlen(*param_name) + 1;
+
+ /* Check param type */
+ if (*(char_buf + offset++)) {
+ /* String param */
+ *param_str_val = char_buf + offset;
+ offset += strlen(*param_str_val) + 1;
+ if (offset & 0x3)
+ offset += (4 - (offset & 0x3));
+ } else {
+ /* Numeric param */
+ *param_str_val = NULL;
+ if (offset & 0x3)
+ offset += (4 - (offset & 0x3));
+ *param_num_val = *(u32 *)(char_buf + offset);
+ offset += 4;
+ }
+
+ return offset / 4;
+}
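+
+/* In-buffer param encoding, as consumed by qed_read_param() (illustrative
+ * sketch, inferred from the code above):
+ *
+ *	name:		NULL-terminated string
+ *	type byte:	non-zero for a string param, zero for a numeric one
+ *	string param:	NULL-terminated value, padded up to a dword boundary
+ *	numeric param:	padding up to a dword boundary, then one value dword
+ */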
+
+/* Reads a section header from the specified buffer.
+ * Returns the number of dwords read.
+ */
+static u32 qed_read_section_hdr(u32 *dump_buf,
+ const char **section_name,
+ u32 *num_section_params)
+{
+ const char *param_str_val;
+
+ return qed_read_param(dump_buf,
+ section_name, &param_str_val, num_section_params);
+}
+
+/* Reads section params from the specified buffer and prints them to the
+ * results buffer. Returns the number of dwords read.
+ */
+static u32 qed_print_section_params(u32 *dump_buf,
+ u32 num_section_params,
+ char *results_buf, u32 *num_chars_printed)
+{
+ u32 i, dump_offset = 0, results_offset = 0;
+
+ for (i = 0; i < num_section_params; i++) {
+ const char *param_name;
+ const char *param_str_val;
+ u32 param_num_val = 0;
+
+ dump_offset += qed_read_param(dump_buf + dump_offset,
+ &param_name,
+ &param_str_val, &param_num_val);
+ if (param_str_val)
+ /* String param */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%s: %s\n", param_name, param_str_val);
+ else if (strcmp(param_name, "fw-timestamp"))
+ /* Numeric param */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%s: %d\n", param_name, param_num_val);
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ *num_chars_printed = results_offset;
+ return dump_offset;
+}
+
+const char *qed_dbg_get_status_str(enum dbg_status status)
+{
+ return (status <
+ MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
+}
+
+/* Parses the idle check rules and returns the number of characters printed.
+ * In case of parsing error, returns 0.
+ */
+static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 *dump_buf_end,
+ u32 num_rules,
+ bool print_fw_idle_chk,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
+ u16 i, j;
+
+ *num_errors = 0;
+ *num_warnings = 0;
+
+ /* Go over dumped results */
+ for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
+ rule_idx++) {
+ const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
+ struct dbg_idle_chk_result_hdr *hdr;
+ const char *parsing_str;
+ u32 parsing_str_offset;
+ const char *lsi_msg;
+ u8 curr_reg_id = 0;
+ bool has_fw_msg;
+
+ hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
+ rule_parsing_data =
+ (const struct dbg_idle_chk_rule_parsing_data *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
+ ptr[hdr->rule_id];
+ parsing_str_offset =
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
+ has_fw_msg =
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
+ parsing_str = &((const char *)
+ s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ [parsing_str_offset];
+ lsi_msg = parsing_str;
+
+ if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
+ return 0;
+
+ /* Skip rule header */
+ dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
+
+ /* Update errors/warnings count */
+ if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
+ hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
+ (*num_errors)++;
+ else
+ (*num_warnings)++;
+
+ /* Print rule severity */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s: ",
+ s_idle_chk_severity_str[hdr->severity]);
+
+ /* Print rule message */
+ if (has_fw_msg)
+ parsing_str += strlen(parsing_str) + 1;
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s.",
+ has_fw_msg &&
+ print_fw_idle_chk ? parsing_str : lsi_msg);
+ parsing_str += strlen(parsing_str) + 1;
+
+ /* Print register values */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), " Registers:");
+ for (i = 0;
+ i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
+ i++) {
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr
+ = (struct dbg_idle_chk_result_reg_hdr *)
+ dump_buf;
+ bool is_mem =
+ GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
+ u8 reg_id =
+ GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
+
+ /* Skip reg header */
+ dump_buf +=
+ (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
+
+ /* Skip register names until the required reg_id is
+ * reached.
+ */
+ for (; reg_id > curr_reg_id;
+ curr_reg_id++,
+ parsing_str += strlen(parsing_str) + 1);
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), " %s",
+ parsing_str);
+ if (i < hdr->num_dumped_cond_regs && is_mem)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "[%d]", hdr->mem_entry_id +
+ reg_hdr->start_entry);
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "=");
+ for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "0x%x", *dump_buf);
+ if (j < reg_hdr->size - 1)
+ results_offset +=
+ sprintf(qed_get_buf_ptr
+ (results_buf,
+ results_offset), ",");
+ }
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ }
+
+ /* Check if end of dump buffer was exceeded */
+ if (dump_buf > dump_buf_end)
+ return 0;
+ return results_offset;
+}
+
+/* Parses an idle check dump buffer.
+ * If results_buf is not NULL, the idle check results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes,
+ u32 *num_errors,
+ u32 *num_warnings)
+{
+ const char *section_name, *param_name, *param_str_val;
+ u32 *dump_buf_end = dump_buf + num_dumped_dwords;
+ u32 num_section_params = 0, num_rules;
+ u32 results_offset = 0; /* Offset in results_buf in bytes */
+
+ *parsed_results_bytes = 0;
+ *num_errors = 0;
+ *num_warnings = 0;
+ if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read idle_chk section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "idle_chk") || num_section_params != 1)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &num_rules);
+ if (strcmp(param_name, "num_rules") != 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ if (num_rules) {
+ u32 rules_print_size;
+
+ /* Print FW output */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "FW_IDLE_CHECK:\n");
+ rules_print_size =
+ qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
+ dump_buf_end, num_rules,
+ true,
+ results_buf ?
+ results_buf +
+ results_offset : NULL,
+ num_errors, num_warnings);
+ results_offset += rules_print_size;
+ if (rules_print_size == 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ /* Print LSI output */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nLSI_IDLE_CHECK:\n");
+ rules_print_size =
+ qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
+ dump_buf_end, num_rules,
+ false,
+ results_buf ?
+ results_buf +
+ results_offset : NULL,
+ num_errors, num_warnings);
+ results_offset += rules_print_size;
+ if (rules_print_size == 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ }
+
+ /* Print errors/warnings count */
+ if (*num_errors) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
+ *num_errors, *num_warnings);
+ } else if (*num_warnings) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+				"\nIdle Check completed successfully (with %d warnings)\n",
+ *num_warnings);
+ } else {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+				"\nIdle Check completed successfully\n");
+ }
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL,
+ results_buf_size,
+ &num_errors, &num_warnings);
+}
+
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size,
+ num_errors, num_warnings);
+}
+
+/* Frees the specified MCP Trace meta data */
+static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
+ struct mcp_trace_meta *meta)
+{
+ u32 i;
+
+ /* Release modules */
+ if (meta->modules) {
+ for (i = 0; i < meta->modules_num; i++)
+ kfree(meta->modules[i]);
+ kfree(meta->modules);
+ }
+
+ /* Release formats */
+ if (meta->formats) {
+ for (i = 0; i < meta->formats_num; i++)
+ kfree(meta->formats[i].format_str);
+ kfree(meta->formats);
+ }
+}
+
+/* Allocates and fills MCP Trace meta data based on the specified meta data
+ * dump buffer.
+ * Returns debug status code.
+ */
+static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf,
+ struct mcp_trace_meta *meta)
+{
+ u8 *meta_buf_bytes = (u8 *)meta_buf;
+ u32 offset = 0, signature, i;
+
+ memset(meta, 0, sizeof(*meta));
+
+ /* Read first signature */
+ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+	/* Read number of modules and allocate memory for all the module
+	 * pointers.
+	 */
+ meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+ meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
+ if (!meta->modules)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Allocate and read all module strings */
+ for (i = 0; i < meta->modules_num; i++) {
+ u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+
+ *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
+ if (!(*(meta->modules + i))) {
+ /* Update number of modules to be released */
+			meta->modules_num = i;
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
+ *(meta->modules + i));
+ if (module_len > MCP_TRACE_MAX_MODULE_LEN)
+ (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
+ }
+
+ /* Read second signature */
+ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read number of formats and allocate memory for all formats */
+ meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ meta->formats = kzalloc(meta->formats_num *
+ sizeof(struct mcp_trace_format),
+ GFP_KERNEL);
+ if (!meta->formats)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Allocate and read all strings */
+ for (i = 0; i < meta->formats_num; i++) {
+ struct mcp_trace_format *format_ptr = &meta->formats[i];
+ u8 format_len;
+
+ format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
+ &offset);
+ format_len =
+ (format_ptr->data &
+ MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
+ format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
+ if (!format_ptr->format_str) {
+			/* Update number of formats to be released */
+			meta->formats_num = i;
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ qed_read_str_from_buf(meta_buf_bytes,
+ &offset,
+ format_len, format_ptr->format_str);
+ }
+
+ return DBG_STATUS_OK;
+}
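+
+/* Meta image layout, as consumed by qed_mcp_trace_alloc_meta() (illustrative
+ * sketch, inferred from the code above):
+ *
+ *	dword:	MCP_TRACE_META_IMAGE_SIGNATURE
+ *	byte:	number of modules, then per module: length byte + string
+ *	dword:	MCP_TRACE_META_IMAGE_SIGNATURE (again)
+ *	dword:	number of formats, then per format: data dword + format
+ *		string (whose length is encoded in the data dword)
+ */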
+
+/* Parses an MCP Trace dump buffer.
+ * If results_buf is not NULL, the MCP Trace results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_mask, param_shift, param_num_val;
+ u32 num_section_params, offset, end_offset, bytes_left;
+ const char *section_name, *param_name, *param_str_val;
+ u32 trace_data_dwords, trace_meta_dwords;
+ struct mcp_trace_meta meta;
+ struct mcp_trace *trace;
+ enum dbg_status status;
+ const u32 *meta_buf;
+ u8 *trace_buf;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read trace_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ trace_data_dwords = param_num_val;
+
+ /* Prepare trace info */
+ trace = (struct mcp_trace *)dump_buf;
+ trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
+ offset = trace->trace_oldest;
+ end_offset = trace->trace_prod;
+ bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
+ dump_buf += trace_data_dwords;
+
+ /* Read meta_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "mcp_trace_meta"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size") != 0)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ trace_meta_dwords = param_num_val;
+
+ /* Choose meta data buffer */
+ if (!trace_meta_dwords) {
+ /* Dump doesn't include meta data */
+ if (!s_mcp_trace_meta.ptr)
+ return DBG_STATUS_MCP_TRACE_NO_META;
+ meta_buf = s_mcp_trace_meta.ptr;
+ } else {
+ /* Dump includes meta data */
+ meta_buf = dump_buf;
+ }
+
+ /* Allocate meta data memory */
+ status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
+ if (status != DBG_STATUS_OK)
+ goto free_mem;
+
+ /* Ignore the level and modules masks - just print everything that is
+ * already in the buffer.
+ */
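+	/* Trace entry layout consumed below (a sketch, per the masks used):
+	 * an MFW_TRACE_ENTRY_SIZE header whose MFW_TRACE_EVENTID_MASK bits
+	 * select the format string, followed by 0-3 params whose sizes are
+	 * encoded in the format's data dword (2 bits each, where the value 3
+	 * encodes a 4-byte param).
+	 */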
+ while (bytes_left) {
+ struct mcp_trace_format *format_ptr;
+ u8 format_level, format_module;
+ u32 params[3] = { 0, 0, 0 };
+ u32 header, format_idx, i;
+
+ if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ header = qed_read_from_cyclic_buf(trace_buf,
+ &offset,
+ trace->size,
+ MFW_TRACE_ENTRY_SIZE);
+ bytes_left -= MFW_TRACE_ENTRY_SIZE;
+ format_idx = header & MFW_TRACE_EVENTID_MASK;
+
+ /* Skip message if its index doesn't exist in the meta data */
+		if (format_idx >= meta.formats_num) {
+ u8 format_size =
+ (u8)((header &
+ MFW_TRACE_PRM_SIZE_MASK) >>
+ MFW_TRACE_PRM_SIZE_SHIFT);
+
+ if (bytes_left < format_size) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ offset = qed_cyclic_add(offset,
+ format_size, trace->size);
+ bytes_left -= format_size;
+ continue;
+ }
+
+ format_ptr = &meta.formats[format_idx];
+ for (i = 0,
+ param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
+ MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
+ i < MCP_TRACE_FORMAT_MAX_PARAMS;
+ i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
+ param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
+ /* Extract param size (0..3) */
+ u8 param_size =
+ (u8)((format_ptr->data &
+ param_mask) >> param_shift);
+
+ /* If the param size is zero, there are no other
+ * parameters.
+ */
+ if (!param_size)
+ break;
+
+ /* Size is encoded using 2 bits, where 3 is used to
+ * encode 4.
+ */
+ if (param_size == 3)
+ param_size = 4;
+ if (bytes_left < param_size) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ params[i] = qed_read_from_cyclic_buf(trace_buf,
+ &offset,
+ trace->size,
+ param_size);
+ bytes_left -= param_size;
+ }
+
+ format_level =
+ (u8)((format_ptr->data &
+ MCP_TRACE_FORMAT_LEVEL_MASK) >>
+ MCP_TRACE_FORMAT_LEVEL_SHIFT);
+ format_module =
+ (u8)((format_ptr->data &
+ MCP_TRACE_FORMAT_MODULE_MASK) >>
+ MCP_TRACE_FORMAT_MODULE_SHIFT);
+ if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ /* Print current message to results buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s %-8s: ",
+ s_mcp_trace_level_str[format_level],
+ meta.modules[format_module]);
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ format_ptr->format_str, params[0], params[1],
+ params[2]);
+ }
+
+free_mem:
+ *parsed_results_bytes = results_offset + 1;
+ qed_mcp_trace_free_meta(p_hwfn, &meta);
+ return status;
+}
+
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Parses a Reg FIFO dump buffer.
+ * If results_buf is not NULL, the Reg FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct reg_fifo_element *elements;
+	u8 j, err_val, vf_val;
+	u32 i;
+ char vf_str[4];
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read reg_fifo_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "reg_fifo_data"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
+ elements = (struct reg_fifo_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ bool err_printed = false;
+
+ /* Discover if element belongs to a VF or a PF */
+ vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
+ if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
+ sprintf(vf_str, "%s", "N/A");
+ else
+ sprintf(vf_str, "%d", vf_val);
+
+ /* Add parsed element to parsed buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ elements[i].data,
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ADDRESS) *
+ REG_FIFO_ELEMENT_ADDR_FACTOR,
+ s_access_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ACCESS)],
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PF), vf_str,
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PORT),
+ s_privilege_strs[GET_FIELD(elements[i].
+ data,
+ REG_FIFO_ELEMENT_PRIVILEGE)],
+ s_protection_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PROTECTION)],
+ s_master_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_MASTER)]);
+
+ /* Print errors */
+ for (j = 0,
+ err_val = GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ERROR);
+ j < ARRAY_SIZE(s_reg_fifo_error_strs);
+ j++, err_val >>= 1) {
+ if (!(err_val & 0x1))
+ continue;
+ if (err_printed)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ ", ");
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s",
+ s_reg_fifo_error_strs[j]);
+ err_printed = true;
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "fifo contained %d elements", num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Parses an IGU FIFO dump buffer.
+ * If results_buf is not NULL, the IGU FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct igu_fifo_element *elements;
+ char parsed_addr_data[32];
+ char parsed_wr_data[256];
+	u32 i;
+	u8 j;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read igu_fifo_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "igu_fifo_data"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
+ elements = (struct igu_fifo_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ /* dword12 (dword index 1 and 2) contains bits 32..95 of the
+ * FIFO element.
+ */
+ u64 dword12 =
+ ((u64)elements[i].dword2 << 32) | elements[i].dword1;
+ bool is_wr_cmd = GET_FIELD(dword12,
+ IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
+ bool is_pf = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_IS_PF);
+ u16 cmd_addr = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
+ u8 source = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_SOURCE);
+ u8 err_type = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
+ const struct igu_fifo_addr_data *addr_data = NULL;
+
+ if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Find address data */
+ for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
+ j++)
+ if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
+ cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
+ addr_data = &s_igu_fifo_addr_data[j];
+ if (!addr_data)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Prepare parsed address data */
+ switch (addr_data->type) {
+ case IGU_ADDR_TYPE_MSIX_MEM:
+ sprintf(parsed_addr_data,
+ " vector_num=0x%x", cmd_addr / 2);
+ break;
+ case IGU_ADDR_TYPE_WRITE_INT_ACK:
+ case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
+ sprintf(parsed_addr_data,
+ " SB=0x%x", cmd_addr - addr_data->start_addr);
+ break;
+ default:
+ parsed_addr_data[0] = '\0';
+ }
+
+ /* Prepare parsed write data */
+ if (is_wr_cmd) {
+ u32 wr_data = GET_FIELD(dword12,
+ IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
+ u32 prod_cons = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_PROD_CONS);
+ u8 is_cleanup = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_CMD_TYPE);
+
+ if (source == IGU_SRC_ATTN) {
+ sprintf(parsed_wr_data,
+ "prod: 0x%x, ", prod_cons);
+ } else {
+ if (is_cleanup) {
+ u8 cleanup_val = GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
+ u8 cleanup_type = GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
+ cleanup_val ? "set" : "clear",
+ cleanup_type);
+ } else {
+ u8 update_flag = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_UPDATE_FLAG);
+ u8 en_dis_int_for_sb =
+ GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
+ u8 segment = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_SEGMENT);
+ u8 timer_mask = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_TIMER_MASK);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
+ prod_cons,
+ update_flag ? "update" : "nop",
+ en_dis_int_for_sb
+ ? (en_dis_int_for_sb ==
+ 1 ? "disable" : "nop") :
+ "enable",
+ segment ? "attn" : "regular",
+ timer_mask);
+ }
+ }
+ } else {
+ parsed_wr_data[0] = '\0';
+ }
+
+ /* Add parsed element to parsed buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
+ elements[i].dword2, elements[i].dword1,
+ elements[i].dword0,
+ is_pf ? "pf" : "vf",
+ GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_FID),
+ s_igu_fifo_source_strs[source],
+ is_wr_cmd ? "wr" : "rd", cmd_addr,
+ (!is_pf && addr_data->vf_desc)
+ ? addr_data->vf_desc : addr_data->desc,
+ parsed_addr_data, parsed_wr_data,
+ s_igu_fifo_error_strs[err_type]);
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "fifo contained %d elements", num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+static enum dbg_status
+qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct protection_override_element *elements;
+	u32 i;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read protection_override_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "protection_override_data"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ elements = (struct protection_override_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ u32 address = GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
+ PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+ i, address,
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_READ),
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WRITE),
+ s_protection_strs[GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
+ s_protection_strs[GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "protection override contained %d elements",
+ num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size);
+}
+
+/* Parses a FW Asserts dump buffer.
+ * If results_buf is not NULL, the FW Asserts results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, num_section_params, param_num_val, i;
+ const char *param_name, *param_str_val, *section_name;
+ bool last_section_found = false;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+ while (!last_section_found) {
+ const char *storm_letter = NULL;
+ u32 storm_dump_size = 0;
+
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name,
+ &num_section_params);
+ if (!strcmp(section_name, "last")) {
+ last_section_found = true;
+ continue;
+ } else if (strcmp(section_name, "fw_asserts")) {
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+
+ /* Extract params */
+ for (i = 0; i < num_section_params; i++) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &param_num_val);
+ if (!strcmp(param_name, "storm"))
+ storm_letter = param_str_val;
+ else if (!strcmp(param_name, "size"))
+ storm_dump_size = param_num_val;
+ else
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+
+ if (!storm_letter || !storm_dump_size)
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print data */
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\n%sSTORM_ASSERT: size=%d\n",
+ storm_letter, storm_dump_size);
+ for (i = 0; i < storm_dump_size; i++, dump_buf++)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%08x\n", *dump_buf);
+ }
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_fw_asserts_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_fw_asserts_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Wrapper for unifying the idle_chk and mcp_trace APIs */
+enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+	u32 num_errors, num_warnings;
+
+ return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
+ results_buf, &num_errors,
+					  &num_warnings);
+}
+
+/* Feature meta data lookup table */
+static struct {
+ char *name;
+ enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *size);
+ enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ u32 buf_size, u32 *dumped_dwords);
+ enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf, u32 num_dumped_dwords,
+ char *results_buf);
+ enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+} qed_features_lookup[] = {
+ {
+ "grc", qed_dbg_grc_get_dump_buf_size,
+ qed_dbg_grc_dump, NULL, NULL}, {
+ "idle_chk",
+ qed_dbg_idle_chk_get_dump_buf_size,
+ qed_dbg_idle_chk_dump,
+ qed_print_idle_chk_results_wrapper,
+ qed_get_idle_chk_results_buf_size}, {
+ "mcp_trace",
+ qed_dbg_mcp_trace_get_dump_buf_size,
+ qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
+ qed_get_mcp_trace_results_buf_size}, {
+ "reg_fifo",
+ qed_dbg_reg_fifo_get_dump_buf_size,
+ qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
+ qed_get_reg_fifo_results_buf_size}, {
+ "igu_fifo",
+ qed_dbg_igu_fifo_get_dump_buf_size,
+ qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
+ qed_get_igu_fifo_results_buf_size}, {
+ "protection_override",
+ qed_dbg_protection_override_get_dump_buf_size,
+ qed_dbg_protection_override_dump,
+ qed_print_protection_override_results,
+ qed_get_protection_override_results_buf_size}, {
+ "fw_asserts",
+ qed_dbg_fw_asserts_get_dump_buf_size,
+ qed_dbg_fw_asserts_dump,
+ qed_print_fw_asserts_results,
+ qed_get_fw_asserts_results_buf_size},};
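+
+/* Note: this table is indexed by enum qed_dbg_features, so the entry order
+ * must match the enum: grc, idle_chk, mcp_trace, reg_fifo, igu_fifo,
+ * protection_override, fw_asserts.
+ */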
+
+static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
+{
+ u32 i, precision = 80;
+
+ if (!p_text_buf)
+ return;
+
+ pr_notice("\n%.*s", precision, p_text_buf);
+ for (i = precision; i < text_size; i += precision)
+ pr_cont("%.*s", precision, p_text_buf + i);
+ pr_cont("\n");
+}
+
+#define QED_RESULTS_BUF_MIN_SIZE 16
+/* Generic function for decoding debug feature info */
+enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+ enum qed_dbg_features feature_idx)
+{
+ struct qed_dbg_feature *feature =
+ &p_hwfn->cdev->dbg_params.features[feature_idx];
+ u32 text_size_bytes, null_char_pos, i;
+ enum dbg_status rc;
+ char *text_buf;
+
+ /* Check if feature supports formatting capability */
+ if (!qed_features_lookup[feature_idx].results_buf_size)
+ return DBG_STATUS_OK;
+
+ /* Obtain size of formatted output */
+ rc = qed_features_lookup[feature_idx].
+ results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
+ feature->dumped_dwords, &text_size_bytes);
+ if (rc != DBG_STATUS_OK)
+ return rc;
+
+ /* Make sure that the allocated size is a multiple of dword (4 bytes) */
+ null_char_pos = text_size_bytes - 1;
+ text_size_bytes = (text_size_bytes + 3) & ~0x3;
+
+ if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
+ DP_NOTICE(p_hwfn->cdev,
+ "formatted size of feature was too small %d. Aborting\n",
+ text_size_bytes);
+ return DBG_STATUS_INVALID_ARGS;
+ }
+
+ /* Allocate temp text buf */
+ text_buf = vzalloc(text_size_bytes);
+ if (!text_buf)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Decode feature opcodes to string on temp buf */
+ rc = qed_features_lookup[feature_idx].
+ print_results(p_hwfn, (u32 *)feature->dump_buf,
+ feature->dumped_dwords, text_buf);
+ if (rc != DBG_STATUS_OK) {
+ vfree(text_buf);
+ return rc;
+ }
+
+ /* Replace the original null character with a '\n' character.
+ * The bytes that were added as a result of the dword alignment are also
+ * padded with '\n' characters.
+ */
+ for (i = null_char_pos; i < text_size_bytes; i++)
+ text_buf[i] = '\n';
+
+ /* Dump printable feature to log */
+ if (p_hwfn->cdev->dbg_params.print_data)
+ qed_dbg_print_feature(text_buf, text_size_bytes);
+
+	/* Free the old dump_buf and point the dump_buf to the newly allocated
+ * and formatted text buffer.
+ */
+ vfree(feature->dump_buf);
+ feature->dump_buf = text_buf;
+ feature->buf_size = text_size_bytes;
+ feature->dumped_dwords = text_size_bytes / 4;
+ return rc;
+}
+
+/* Generic function for performing the dump of a debug feature. */
+enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_dbg_features feature_idx)
+{
+ struct qed_dbg_feature *feature =
+ &p_hwfn->cdev->dbg_params.features[feature_idx];
+ u32 buf_size_dwords;
+ enum dbg_status rc;
+
+ DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
+ qed_features_lookup[feature_idx].name);
+
+	/* Free dump_buf if it was already allocated (this can happen if dump
+	 * was called but the file was never read).
+	 * We can't reuse the old buffer as is, since its size may have
+	 * changed.
+	 */
+ if (feature->dump_buf) {
+ vfree(feature->dump_buf);
+ feature->dump_buf = NULL;
+ }
+
+ /* Get buffer size from hsi, allocate accordingly, and perform the
+ * dump.
+ */
+ rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
+ &buf_size_dwords);
+ if (rc != DBG_STATUS_OK)
+ return rc;
+ feature->buf_size = buf_size_dwords * sizeof(u32);
+ feature->dump_buf = vmalloc(feature->buf_size);
+ if (!feature->dump_buf)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ rc = qed_features_lookup[feature_idx].
+ perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
+ feature->buf_size / sizeof(u32),
+ &feature->dumped_dwords);
+
+	/* If the MCP is stuck we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED
+	 * error. In this case the buffer holds valid binary data, but we
+	 * won't be able to parse it (since parsing relies on data in NVRAM
+	 * which is only accessible when the MFW is responsive). Skip the
+	 * formatting but return success so that the binary data is provided.
+	 */
+ if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+ return DBG_STATUS_OK;
+
+ if (rc != DBG_STATUS_OK)
+ return rc;
+
+ /* Format output */
+ rc = format_feature(p_hwfn, feature_idx);
+ return rc;
+}
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
+}
+
+int qed_dbg_grc_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
+}
+
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
+ num_dumped_bytes);
+}
+
+int qed_dbg_idle_chk_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
+}
+
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
+ num_dumped_bytes);
+}
+
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
+}
+
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
+ num_dumped_bytes);
+}
+
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
+}
+
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
+ num_dumped_bytes);
+}
+
+int qed_dbg_protection_override_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
+}
+
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
+ num_dumped_bytes);
+}
+
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
+}
+
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
+ num_dumped_bytes);
+}
+
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
+}
+
+/* Defines the number of bytes allocated for recording the length of a
+ * debugfs feature buffer.
+ */
+#define REGDUMP_HEADER_SIZE sizeof(u32)
+#define REGDUMP_HEADER_FEATURE_SHIFT 24
+#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
+enum debug_print_features {
+ OLD_MODE = 0,
+ IDLE_CHK = 1,
+ GRC_DUMP = 2,
+ MCP_TRACE = 3,
+ REG_FIFO = 4,
+ PROTECTION_OVERRIDE = 5,
+ IGU_FIFO = 6,
+ PHY = 7,
+ FW_ASSERTS = 8,
+};
+
+static u32 qed_calc_regdump_header(enum debug_print_features feature,
+ int engine, u32 feature_size, u8 omit_engine)
+{
+ /* Insert the engine, feature and mode inside the header and combine it
+ * with feature size.
+ */
+ return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
+ (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
+ (engine << REGDUMP_HEADER_ENGINE_SHIFT);
+}
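+
+/* Resulting regdump layout (illustrative): qed_dbg_all_data() below emits a
+ * sequence of [header dword][feature data] pairs, where the header packs:
+ *
+ *	bit  31:	engine
+ *	bit  30:	omit_engine
+ *	bits 24-29:	feature (enum debug_print_features)
+ *	bits  0-23:	feature size in bytes
+ */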
+
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
+{
+ u8 cur_engine, omit_engine = 0, org_engine;
+ u32 offset = 0, feature_size;
+ int rc;
+
+ if (cdev->num_hwfns == 1)
+ omit_engine = 1;
+
+ org_engine = qed_get_debug_engine(cdev);
+ for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+		/* Collect idle_chks and grc dumps for each hw function */
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "obtaining idle_chk and grcdump for current engine\n");
+ qed_set_debug_engine(cdev, cur_engine);
+
+ /* First idle_chk */
+ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+ }
+
+ /* Second idle_chk */
+ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+ }
+
+ /* reg_fifo dump */
+ rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(REG_FIFO, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
+ }
+
+ /* igu_fifo dump */
+ rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IGU_FIFO, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
+ }
+
+ /* protection_override dump */
+ rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE,
+ &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(PROTECTION_OVERRIDE,
+ cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev,
+ "qed_dbg_protection_override failed. rc = %d\n",
+ rc);
+ }
+
+ /* fw_asserts dump */
+ rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(FW_ASSERTS, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
+ rc);
+ }
+
+		/* GRC dump - must be last because when the MCP is stuck it
+		 * will clutter the idle_chk, reg_fifo, ... results.
+		 */
+ rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(GRC_DUMP, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
+ }
+ }
+
+ /* mcp_trace */
+ rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(MCP_TRACE, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
+ }
+
+ qed_set_debug_engine(cdev, org_engine);
+
+ return 0;
+}
+
+int qed_dbg_all_data_size(struct qed_dev *cdev)
+{
+ u8 cur_engine, org_engine;
+ u32 regs_len = 0;
+
+ org_engine = qed_get_debug_engine(cdev);
+ for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+ /* Engine specific */
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "calculating idle_chk and grcdump register length for current engine\n");
+ qed_set_debug_engine(cdev, cur_engine);
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE +
+ qed_dbg_protection_override_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
+ }
+
+ /* Engine common */
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+ qed_set_debug_engine(cdev, org_engine);
+
+ return regs_len;
+}
+
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+ enum qed_dbg_features feature, u32 *num_dumped_bytes)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_dbg_feature *qed_feature =
+ &cdev->dbg_params.features[feature];
+ enum dbg_status dbg_rc;
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ /* Acquire ptt */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EINVAL;
+
+ /* Get dump */
+ dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
+ if (dbg_rc != DBG_STATUS_OK) {
+ DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
+ qed_dbg_get_status_str(dbg_rc));
+ *num_dumped_bytes = 0;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "copying debugfs feature to external buffer\n");
+ memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
+ *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
+ 4;
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ struct qed_dbg_feature *qed_feature =
+ &cdev->dbg_params.features[feature];
+ u32 buf_size_dwords;
+ enum dbg_status rc;
+
+ if (!p_ptt)
+ return -EINVAL;
+
+ rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
+ &buf_size_dwords);
+ if (rc != DBG_STATUS_OK)
+ buf_size_dwords = 0;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ qed_feature->buf_size = buf_size_dwords * sizeof(u32);
+ return qed_feature->buf_size;
+}
+
+u8 qed_get_debug_engine(struct qed_dev *cdev)
+{
+ return cdev->dbg_params.engine_for_debug;
+}
+
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
+{
+ DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
+ engine_number);
+ cdev->dbg_params.engine_for_debug = engine_number;
+}
+
+void qed_dbg_pf_init(struct qed_dev *cdev)
+{
+ const u8 *dbg_values;
+
+ /* Debug values are after init values.
+ * The offset is the first dword of the file.
+ */
+ dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
+ qed_dbg_set_bin_ptr((u8 *)dbg_values);
+ qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
+}
+
+void qed_dbg_pf_exit(struct qed_dev *cdev)
+{
+ struct qed_dbg_feature *feature = NULL;
+ enum qed_dbg_features feature_idx;
+
+	/* Debug features' buffers may still be allocated if a debug feature
+	 * was used but dump wasn't called.
+	 */
+ for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
+ feature = &cdev->dbg_params.features[feature_idx];
+ if (feature->dump_buf) {
+ vfree(feature->dump_buf);
+ feature->dump_buf = NULL;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
new file mode 100644
index 000000000000..f872d7324814
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -0,0 +1,54 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEBUGFS_H
+#define _QED_DEBUGFS_H
+
+enum qed_dbg_features {
+ DBG_FEATURE_GRC,
+ DBG_FEATURE_IDLE_CHK,
+ DBG_FEATURE_MCP_TRACE,
+ DBG_FEATURE_REG_FIFO,
+ DBG_FEATURE_IGU_FIFO,
+ DBG_FEATURE_PROTECTION_OVERRIDE,
+ DBG_FEATURE_FW_ASSERTS,
+ DBG_FEATURE_NUM
+};
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_grc_size(struct qed_dev *cdev);
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_idle_chk_size(struct qed_dev *cdev);
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev);
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev);
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_protection_override_size(struct qed_dev *cdev);
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer);
+int qed_dbg_all_data_size(struct qed_dev *cdev);
+u8 qed_get_debug_engine(struct qed_dev *cdev);
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number);
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+ enum qed_dbg_features feature, u32 *num_dumped_bytes);
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature);
+
+void qed_dbg_pf_init(struct qed_dev *cdev);
+void qed_dbg_pf_exit(struct qed_dev *cdev);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0e4f4a9306b5..754f6a908858 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -29,14 +29,18 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
+#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
+#include "qed_roce.h"
-static spinlock_t qm_lock;
-static bool qm_lock_init = false;
+static DEFINE_SPINLOCK(qm_lock);
+
+#define QED_MIN_DPIS (4)
+#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
/* API common to all protocols */
enum BAR_ID {
@@ -44,8 +48,7 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
- enum BAR_ID bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -70,8 +73,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
}
}
-void qed_init_dp(struct qed_dev *cdev,
- u32 dp_module, u8 dp_level)
+void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
{
u32 i;
@@ -150,6 +152,9 @@ void qed_resc_free(struct qed_dev *cdev)
qed_eq_free(p_hwfn, p_hwfn->p_eq);
qed_consq_free(p_hwfn, p_hwfn->p_consq);
qed_int_free(p_hwfn);
+#ifdef CONFIG_QED_LL2
+ qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+#endif
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -343,7 +348,6 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
return 0;
alloc_err:
- DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
qed_qm_info_free(p_hwfn);
return -ENOMEM;
}
@@ -407,6 +411,9 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
int qed_resc_alloc(struct qed_dev *cdev)
{
+#ifdef CONFIG_QED_LL2
+ struct qed_ll2_info *p_ll2_info;
+#endif
struct qed_consq *p_consq;
struct qed_eq *p_eq;
int i, rc = 0;
@@ -427,18 +434,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
RESC_NUM(p_hwfn, QED_L2_QUEUE);
p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
- if (!p_hwfn->p_tx_cids) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for Tx Cids\n");
+ if (!p_hwfn->p_tx_cids)
goto alloc_no_mem;
- }
p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
- if (!p_hwfn->p_rx_cids) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for Rx Cids\n");
+ if (!p_hwfn->p_rx_cids)
goto alloc_no_mem;
- }
}
for_each_hwfn(cdev, i) {
@@ -523,29 +524,29 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_no_mem;
p_hwfn->p_consq = p_consq;
+#ifdef CONFIG_QED_LL2
+ if (p_hwfn->using_ll2) {
+ p_ll2_info = qed_ll2_alloc(p_hwfn);
+ if (!p_ll2_info)
+ goto alloc_no_mem;
+ p_hwfn->p_ll2_info = p_ll2_info;
+ }
+#endif
+
/* DMA info initialization */
rc = qed_dmae_info_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for dmae_info structure\n");
+ if (rc)
goto alloc_err;
- }
/* DCBX initialization */
rc = qed_dcbx_info_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for dcbx structure\n");
+ if (rc)
goto alloc_err;
- }
}
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
- if (!cdev->reset_stats) {
- DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
- rc = -ENOMEM;
- goto alloc_err;
- }
+ if (!cdev->reset_stats)
+ goto alloc_no_mem;
return 0;
@@ -580,6 +581,10 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+#ifdef CONFIG_QED_LL2
+ if (p_hwfn->using_ll2)
+ qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+#endif
}
}
@@ -605,9 +610,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
/* Make sure notification is not set before initiating final cleanup */
if (REG_RD(p_hwfn, addr)) {
- DP_NOTICE(
- p_hwfn,
- "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+ DP_NOTICE(p_hwfn,
+ "Unexpected; Found final cleanup notification before initiating final cleanup\n");
REG_WR(p_hwfn, addr, 0);
}
@@ -701,17 +705,14 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
continue;
qed_init_cau_sb_entry(p_hwfn, &sb_entry,
- p_block->function_id,
- 0, 0);
- STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
- sb_entry);
+ p_block->function_id, 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
}
}
}
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int hw_mode)
+ struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
@@ -759,7 +760,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_port_unpretend(p_hwfn, p_ptt);
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
- if (rc != 0)
+ if (rc)
return rc;
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
@@ -780,6 +781,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
+ qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
+ qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
}
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
@@ -787,38 +791,141 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int hw_mode)
+static int
+qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
- int rc = 0;
+ u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
+ u32 dpi_bit_shift, dpi_count;
+ u32 min_dpis;
- rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
- if (rc != 0)
- return rc;
+ /* Calculate DPI size */
+ dpi_page_size_1 = QED_WID_SIZE * n_cpus;
+ dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
+ dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
+ dpi_page_size = roundup_pow_of_two(dpi_page_size);
+ dpi_bit_shift = ilog2(dpi_page_size / 4096);
- if (hw_mode & (1 << MODE_MF_SI)) {
- u8 pf_id = 0;
+ dpi_count = pwm_region_size / dpi_page_size;
- if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
- "PF[%08x] is first eth on engine\n", pf_id);
+ min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
+ min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
- /* We should have configured BIT for ppfid, i.e., the
- * relative function number in the port. But there's a
- * bug in LLH in BB where the ppfid is actually engine
- * based, so we need to take this into account.
- */
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
- }
+ p_hwfn->dpi_size = dpi_page_size;
+ p_hwfn->dpi_count = dpi_count;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+ if (dpi_count < min_dpis)
+ return -EINVAL;
+
+ return 0;
+}
+
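The DPI sizing in qed_hw_init_dpi_size() above is a short chain of arithmetic: one WID per CPU, clamped to at least a page, rounded up to a power of two, and then expressed to hardware as a shift relative to 4KB. A minimal userspace sketch of the same math, assuming QED_WID_SIZE is 1024 and a 4KB page (illustrative stand-ins for the constants in the qed headers):

#include <stdio.h>
#include <stdint.h>

#define QED_WID_SIZE 1024	/* assumed: bytes per doorbell window */
#define PAGE_SIZE    4096	/* assumed host page size */

static uint32_t roundup_pow_of_two_u32(uint32_t v)
{
	uint32_t r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static uint32_t ilog2_u32(uint32_t v)
{
	uint32_t l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	uint32_t n_cpus = 8, pwm_region_size = 0x100000; /* example inputs */
	uint32_t dpi_page_size, dpi_bit_shift, dpi_count;

	/* One WID per CPU, never less than a page, rounded to a power of 2 */
	dpi_page_size = QED_WID_SIZE * n_cpus;
	if (dpi_page_size < PAGE_SIZE)
		dpi_page_size = PAGE_SIZE;
	dpi_page_size = roundup_pow_of_two_u32(dpi_page_size);

	/* Hardware takes the DPI size as a shift relative to 4KB */
	dpi_bit_shift = ilog2_u32(dpi_page_size / 4096);

	/* How many DPIs fit in the PWM region of the doorbell BAR */
	dpi_count = pwm_region_size / dpi_page_size;

	printf("dpi_size=%u shift=%u count=%u\n",
	       dpi_page_size, dpi_bit_shift, dpi_count);
	return 0;
}

With these example inputs the result is an 8KB DPI (shift 1) and 128 DPIs in a 1MB PWM region, which the caller then checks against min_dpis.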
+enum QED_ROCE_EDPM_MODE {
+ QED_ROCE_EDPM_MODE_ENABLE = 0,
+ QED_ROCE_EDPM_MODE_FORCE_ON = 1,
+ QED_ROCE_EDPM_MODE_DISABLE = 2,
+};
- /* Take the protocol-based hit vector if there is a hit,
- * otherwise take the other vector.
+static int
+qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 pwm_regsize, norm_regsize;
+ u32 non_pwm_conn, min_addr_reg1;
+ u32 db_bar_size, n_cpus;
+ u32 roce_edpm_mode;
+ u32 pf_dems_shift;
+ int rc = 0;
+ u8 cond;
+
+ db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
+ if (p_hwfn->cdev->num_hwfns > 1)
+ db_bar_size /= 2;
+
+ /* Calculate doorbell regions */
+ non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+ NULL) +
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ NULL);
+ norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ min_addr_reg1 = norm_regsize / 4096;
+ pwm_regsize = db_bar_size - norm_regsize;
+
+ /* Check that the normal and PWM sizes are valid */
+ if (db_bar_size < norm_regsize) {
+ DP_ERR(p_hwfn->cdev,
+ "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
+ db_bar_size, norm_regsize);
+ return -EINVAL;
+ }
+
+ if (pwm_regsize < QED_MIN_PWM_REGION) {
+ DP_ERR(p_hwfn->cdev,
+ "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
+ pwm_regsize,
+ QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
+ return -EINVAL;
+ }
+
+ /* Calculate number of DPIs */
+ roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+ if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
+ ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
+ /* Either EDPM is mandatory, or we are attempting to allocate a
+ * WID per CPU.
*/
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
+ n_cpus = num_active_cpus();
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
- return rc;
+
+ cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
+ if (cond || p_hwfn->dcbx_no_edpm) {
+ /* Either EDPM is disabled from user configuration, or it is
+ * disabled via DCBx, or it is not mandatory and we failed to
+ * allocate a WID per CPU.
+ */
+ n_cpus = 1;
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+
+ if (cond)
+ qed_rdma_dpm_bar(p_hwfn, p_ptt);
+ }
+
+ DP_INFO(p_hwfn,
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+ norm_regsize,
+ pwm_regsize,
+ p_hwfn->dpi_size,
+ p_hwfn->dpi_count,
+ ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ "disabled" : "enabled");
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
+ p_hwfn->dpi_count,
+ p_hwfn->pf_params.rdma_pf_params.min_dpis);
+ return -EINVAL;
+ }
+
+ p_hwfn->dpi_start_offset = norm_regsize;
+
+ /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
+ pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+ return 0;
+}
+
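qed_hw_init_pf_doorbell_bar() above carves the legacy (non-PWM) doorbells out of the BAR first and gives whatever remains to the PWM/DPI logic. A sketch of that split, with QED_PF_DEMS_SIZE and the connection count as illustrative assumptions rather than values read from hardware:

#include <stdio.h>
#include <stdint.h>

#define QED_PF_DEMS_SIZE 4	/* assumed: doorbell bytes per legacy CID */

static uint32_t roundup_4k(uint32_t v)
{
	return (v + 4095) & ~4095u;
}

int main(void)
{
	uint32_t db_bar_size = 0x200000; /* example: 2MB doorbell BAR */
	uint32_t non_pwm_conn = 1024;	 /* example: CORE + ETH CID count */
	uint32_t norm_regsize, pwm_regsize, min_addr_reg1;

	/* Legacy doorbells come first, padded to a 4KB boundary */
	norm_regsize = roundup_4k(QED_PF_DEMS_SIZE * non_pwm_conn);
	min_addr_reg1 = norm_regsize / 4096; /* programmed in 4KB units */

	/* The rest of the BAR becomes the PWM (RDMA doorbell) region */
	pwm_regsize = db_bar_size - norm_regsize;

	printf("normal=0x%x pwm=0x%x min_addr_reg1=%u\n",
	       norm_regsize, pwm_regsize, min_addr_reg1);
	return 0;
}

The driver then rejects the configuration if the normal region alone exceeds the BAR, or if the PWM remainder falls below QED_MIN_PWM_REGION.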
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int hw_mode)
+{
+ return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
+ p_hwfn->port_id, hw_mode);
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
@@ -848,7 +955,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_init_rt(p_hwfn);
/* Set VLAN in NIG if needed */
- if (hw_mode & (1 << MODE_MF_SD)) {
+ if (hw_mode & BIT(MODE_MF_SD)) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
@@ -856,7 +963,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
}
/* Enable classification by MAC if needed */
- if (hw_mode & (1 << MODE_MF_SI)) {
+ if (hw_mode & BIT(MODE_MF_SI)) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"Configuring TAGMAC_CLS_TYPE\n");
STORE_RT_REG(p_hwfn,
@@ -871,7 +978,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Cleanup chip from previous driver if such remains exist */
rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
- if (rc != 0)
+ if (rc)
return rc;
/* PF Init sequence */
@@ -887,20 +994,9 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
- if (hw_mode & (1 << MODE_MF_SI)) {
- u8 pf_id = 0;
- u32 val = 0;
-
- if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
- if (p_hwfn->rel_pf_id == pf_id) {
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
- "PF[%d] is first ETH on engine\n",
- pf_id);
- val = 1;
- }
- qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
- }
- }
+ rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
if (b_hw_start) {
/* enable interrupts */
@@ -950,8 +1046,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
/* Read shadow of current MFW mailbox */
qed_mcp_read_mb(p_hwfn, p_main_ptt);
memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
- p_hwfn->mcp_info->mfw_mb_cur,
- p_hwfn->mcp_info->mfw_mb_length);
+ p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}
int qed_hw_init(struct qed_dev *cdev,
@@ -971,7 +1066,7 @@ int qed_hw_init(struct qed_dev *cdev,
if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data);
- if (rc != 0)
+ if (rc)
return rc;
}
@@ -988,8 +1083,7 @@ int qed_hw_init(struct qed_dev *cdev,
qed_calc_hw_mode(p_hwfn);
- rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
- &load_code);
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
if (rc) {
DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
return rc;
@@ -1004,11 +1098,6 @@ int qed_hw_init(struct qed_dev *cdev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
- if (!qm_lock_init) {
- spin_lock_init(&qm_lock);
- qm_lock_init = true;
- }
-
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -1071,9 +1160,8 @@ int qed_hw_init(struct qed_dev *cdev,
}
#define QED_HW_STOP_RETRY_LIMIT (10)
-static inline void qed_hw_timers_stop(struct qed_dev *cdev,
- struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static void qed_hw_timers_stop(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int i;
@@ -1084,8 +1172,7 @@ static inline void qed_hw_timers_stop(struct qed_dev *cdev,
for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
if ((!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK)))
+ (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
break;
/* Dependent on number of connection/tasks, possibly
@@ -1190,8 +1277,7 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
}
DP_VERBOSE(p_hwfn,
- NETIF_MSG_IFDOWN,
- "Shutting down the fastpath\n");
+ NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1219,14 +1305,13 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
-static int qed_reg_assert(struct qed_hwfn *hwfn,
- struct qed_ptt *ptt, u32 reg,
- bool expected)
+static int qed_reg_assert(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 reg, bool expected)
{
- u32 assert_val = qed_rd(hwfn, ptt, reg);
+ u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
if (assert_val != expected) {
- DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+ DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
reg, expected);
return -EINVAL;
}
@@ -1306,8 +1391,7 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
/* Clean Previous errors if such exist */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
- 1 << p_hwfn->abs_pf_id);
+ PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
/* enable internal target-read */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1317,7 +1401,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
static void get_function_id(struct qed_hwfn *p_hwfn)
{
/* ME Register */
- p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+ p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
@@ -1326,6 +1411,10 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
PXP_CONCRETE_FID_PFID);
p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
PXP_CONCRETE_FID_PORT);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+ p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
}
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
@@ -1333,6 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ /* Each RoCE CNQ requires one status block and one CNQ. Divide the
+ * status blocks equally between L2 and RoCE, taking into account
+ * how many L2 queues / CNQs we actually have.
+ */
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ num_features++;
+
+ feat_num[QED_RDMA_CNQ] =
+ min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
+ RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
+ }
+#endif
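The split described in the comment above is plain integer arithmetic: with RoCE active, num_features becomes 2, so L2 and RoCE each get half of the status blocks, and the CNQ count is further capped by the CNQ RAM. A toy calculation under assumed resource counts:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Assumed counts, for illustration only */
	unsigned int num_sbs = 64, cnq_ram = 16, l2_queues = 48;
	unsigned int num_features = 2; /* L2 + RoCE */

	unsigned int cnqs = min_u(num_sbs / num_features, cnq_ram);
	unsigned int l2 = min_u(num_sbs / num_features, l2_queues);

	printf("CNQs=%u, L2 queues=%u\n", cnqs, l2); /* CNQs=16, L2 queues=32 */
	return 0;
}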
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
@@ -1373,6 +1475,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
num_funcs;
resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+ resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs;
+ resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB /
+ num_funcs;
for (i = 0; i < QED_MAX_RESC; i++)
resc_start[i] = resc_num[i] * enabled_func_idx;
@@ -1396,7 +1502,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
"RL = %d start = %d\n"
"MAC = %d start = %d\n"
"VLAN = %d start = %d\n"
- "ILT = %d start = %d\n",
+ "ILT = %d start = %d\n"
+ "LL2_QUEUE = %d start = %d\n",
p_hwfn->hw_info.resc_num[QED_SB],
p_hwfn->hw_info.resc_start[QED_SB],
p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
@@ -1412,13 +1519,14 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
p_hwfn->hw_info.resc_num[QED_VLAN],
p_hwfn->hw_info.resc_start[QED_VLAN],
p_hwfn->hw_info.resc_num[QED_ILT],
- p_hwfn->hw_info.resc_start[QED_ILT]);
+ p_hwfn->hw_info.resc_start[QED_ILT],
+ RESC_NUM(p_hwfn, QED_LL2_QUEUE),
+ RESC_START(p_hwfn, QED_LL2_QUEUE));
return 0;
}
-static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
@@ -1472,8 +1580,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
default:
- DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
- core_cfg);
+ DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
break;
}
@@ -1484,11 +1591,11 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
offsetof(struct nvm_cfg1_port, speed_cap_mask));
- link->speed.advertised_speeds =
- link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link->speed.advertised_speeds = link_temp;
- p_hwfn->mcp_info->link_capabilities.speed_capabilities =
- link->speed.advertised_speeds;
+ link_temp = link->speed.advertised_speeds;
+ p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
@@ -1517,8 +1624,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
link->speed.forced_speed = 100000;
break;
default:
- DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
- link_temp);
+ DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
}
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
@@ -1628,10 +1734,10 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+ "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
p_hwfn->rel_pf_id,
p_hwfn->abs_pf_id,
- p_hwfn->num_funcs_on_engine);
+ p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
static int
@@ -1703,10 +1809,9 @@ static int qed_get_dev_info(struct qed_dev *cdev)
u32 tmp;
/* Read Vendor Id / Device Id */
- pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
- &cdev->vendor_id);
- pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
- &cdev->device_id);
+ pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
+ pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
+
cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM);
cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
@@ -1771,10 +1876,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Allocate PTT pool */
rc = qed_ptt_pool_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+ if (rc)
goto err0;
- }
/* Allocate the main PTT */
p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
@@ -1782,7 +1885,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
rc = qed_get_dev_info(p_hwfn->cdev);
- if (rc != 0)
+ if (rc)
goto err1;
}
@@ -1804,10 +1907,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Allocate the init RT array and initialize the init-ops engine */
rc = qed_init_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+ if (rc)
goto err2;
- }
return rc;
err2:
@@ -2015,10 +2116,8 @@ qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE,
&p_phys, GFP_KERNEL);
- if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ if (!p_virt)
return -ENOMEM;
- }
if (i == 0) {
qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2048,10 +2147,8 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
- if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ if (!p_virt)
return -ENOMEM;
- }
qed_chain_init_mem(p_chain, p_virt, p_phys);
qed_chain_reset(p_chain);
@@ -2068,13 +2165,9 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
void *p_virt = NULL;
size = page_cnt * sizeof(*pp_virt_addr_tbl);
- pp_virt_addr_tbl = vmalloc(size);
- if (!pp_virt_addr_tbl) {
- DP_NOTICE(cdev,
- "Failed to allocate memory for the chain virtual addresses table\n");
+ pp_virt_addr_tbl = vzalloc(size);
+ if (!pp_virt_addr_tbl)
return -ENOMEM;
- }
- memset(pp_virt_addr_tbl, 0, size);
/* The allocation of the PBL table is done with its full size, since it
* is expected to be successive.
@@ -2087,19 +2180,15 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
size, &p_pbl_phys, GFP_KERNEL);
qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
- if (!p_pbl_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
+ if (!p_pbl_virt)
return -ENOMEM;
- }
for (i = 0; i < page_cnt; i++) {
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE,
&p_phys, GFP_KERNEL);
- if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ if (!p_virt)
return -ENOMEM;
- }
if (i == 0) {
qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2134,7 +2223,8 @@ int qed_chain_alloc(struct qed_dev *cdev,
rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
if (rc) {
DP_NOTICE(cdev,
- "Cannot allocate a chain with the given arguments:\n"
+ "Cannot allocate a chain with the given arguments:\n");
+ DP_NOTICE(cdev,
"[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
intended_use, mode, cnt_type, num_elems, elem_size);
return rc;
@@ -2183,8 +2273,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
return 0;
}
-int qed_fw_vport(struct qed_hwfn *p_hwfn,
- u8 src_id, u8 *dst_id)
+int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
u8 min, max;
@@ -2203,8 +2292,7 @@ int qed_fw_vport(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
- u8 src_id, u8 *dst_id)
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
u8 min, max;
@@ -2223,6 +2311,98 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
+ u8 *p_filter)
+{
+ *p_high = p_filter[1] | (p_filter[0] << 8);
+ *p_low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+}
+
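qed_llh_mac_to_filter() above packs the six MAC bytes into the two words the LLH filter registers expect: bytes 0-1 become a 16-bit high word, bytes 2-5 a 32-bit low word. A standalone sketch of the packing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	uint32_t high, low;

	high = mac[1] | (mac[0] << 8);
	low = mac[5] | (mac[4] << 8) | (mac[3] << 16) |
	      ((uint32_t)mac[2] << 24);

	printf("high=0x%04x low=0x%08x\n", high, low);
	/* -> high=0xaabb low=0xccddeeff */
	return 0;
}

qed_llh_add_mac_filter() below then scans the NIG_REG_LLH_FUNC_FILTER_EN array for a free slot, writes low/high into the paired VALUE registers, and sets the enable bit last, so a filter is never active while half-written.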
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high = 0, low = 0, en;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return 0;
+
+ qed_llh_mac_to_filter(&high, &low, p_filter);
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ if (en)
+ continue;
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), low);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), high);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "Failed to find an empty LLH filter to utilize\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "mac: %pM is added at %d\n",
+ p_filter, i);
+
+ return 0;
+}
+
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high = 0, low = 0;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ qed_llh_mac_to_filter(&high, &low, p_filter);
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "mac: %pM is removed from %d\n",
+ p_filter, i);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
+}
+
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 hw_addr, void *p_eth_qzone,
size_t eth_qzone_size, u8 timeset)
@@ -2386,8 +2566,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
* 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
*/
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
- u16 vport_id, u32 req_rate,
- u32 min_pf_rate)
+ u16 vport_id, u32 req_rate, u32 min_pf_rate)
{
u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
int non_requested_count = 0, req_count = 0, i, num_vports;
@@ -2471,7 +2650,7 @@ static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
- if (rc == 0)
+ if (!rc)
qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
p_link->min_pf_rate);
else
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 343bb0344f62..b6711c106597 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -310,6 +310,26 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
u8 *dst_id);
/**
+ * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
+ * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
 * @brief Cleanup of previous driver remains prior to load
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 6f9d3b831a2a..72eee29c677f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -536,6 +536,247 @@ struct core_conn_context {
struct regpair ustorm_st_padding[2];
};
+enum core_error_handle {
+ LL2_DROP_PACKET,
+ LL2_DO_NOTHING,
+ LL2_ASSERT,
+ MAX_CORE_ERROR_HANDLE
+};
+
+enum core_event_opcode {
+ CORE_EVENT_TX_QUEUE_START,
+ CORE_EVENT_TX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_START,
+ CORE_EVENT_RX_QUEUE_STOP,
+ MAX_CORE_EVENT_OPCODE
+};
+
+enum core_l4_pseudo_checksum_mode {
+ CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+ CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct core_ll2_port_stats {
+ struct regpair gsi_invalid_hdr;
+ struct regpair gsi_invalid_pkt_length;
+ struct regpair gsi_unsupported_pkt_typ;
+ struct regpair gsi_crcchksm_error;
+};
+
+struct core_ll2_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+};
+
+struct core_ll2_rx_prod {
+ __le16 bd_prod;
+ __le16 cqe_prod;
+ __le32 reserved;
+};
+
+struct core_ll2_tstorm_per_queue_stat {
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+};
+
+struct core_ll2_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+enum core_ramrod_cmd_id {
+ CORE_RAMROD_UNUSED,
+ CORE_RAMROD_RX_QUEUE_START,
+ CORE_RAMROD_TX_QUEUE_START,
+ CORE_RAMROD_RX_QUEUE_STOP,
+ CORE_RAMROD_TX_QUEUE_STOP,
+ MAX_CORE_RAMROD_CMD_ID
+};
+
+enum core_roce_flavor_type {
+ CORE_ROCE,
+ CORE_RROCE,
+ MAX_CORE_ROCE_FLAVOR_TYPE
+};
+
+struct core_rx_action_on_error {
+ u8 error_type;
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+};
+
+struct core_rx_bd {
+ struct regpair addr;
+ __le16 reserved[4];
+};
+
+struct core_rx_bd_with_buff_len {
+ struct regpair addr;
+ __le16 buff_length;
+ __le16 reserved[3];
+};
+
+union core_rx_bd_union {
+ struct core_rx_bd rx_bd;
+ struct core_rx_bd_with_buff_len rx_bd_with_len;
+};
+
+struct core_rx_cqe_opaque_data {
+ __le32 data[2];
+};
+
+enum core_rx_cqe_type {
+ CORE_RX_CQE_ILLIGAL_TYPE,
+ CORE_RX_CQE_TYPE_REGULAR,
+ CORE_RX_CQE_TYPE_GSI_OFFLOAD,
+ CORE_RX_CQE_TYPE_SLOW_PATH,
+ MAX_CORE_RX_CQE_TYPE
+};
+
+struct core_rx_fast_path_cqe {
+ u8 type;
+ u8 placement_offset;
+ struct parsing_and_err_flags parse_flags;
+ __le16 packet_length;
+ __le16 vlan;
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved[4];
+};
+
+struct core_rx_gsi_offload_cqe {
+ u8 type;
+ u8 data_length_error;
+ struct parsing_and_err_flags parse_flags;
+ __le16 data_length;
+ __le16 vlan;
+ __le32 src_mac_addrhi;
+ __le16 src_mac_addrlo;
+ u8 reserved1[2];
+ __le32 gid_dst[4];
+};
+
+struct core_rx_slow_path_cqe {
+ u8 type;
+ u8 ramrod_cmd_id;
+ __le16 echo;
+ __le32 reserved1[7];
+};
+
+union core_rx_cqe_union {
+ struct core_rx_fast_path_cqe rx_cqe_fp;
+ struct core_rx_gsi_offload_cqe rx_cqe_gsi;
+ struct core_rx_slow_path_cqe rx_cqe_sp;
+};
+
+struct core_rx_start_ramrod_data {
+ struct regpair bd_base;
+ struct regpair cqe_pbl_addr;
+ __le16 mtu;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 drop_ttl0_flg;
+ __le16 num_of_pbl_pages;
+ u8 inner_vlan_removal_en;
+ u8 queue_id;
+ u8 main_func_queue;
+ u8 mf_si_bcast_accept_all;
+ u8 mf_si_mcast_accept_all;
+ struct core_rx_action_on_error action_on_error;
+ u8 gsi_offload_flag;
+ u8 reserved[7];
+};
+
+struct core_rx_stop_ramrod_data {
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 queue_id;
+ u8 reserved1;
+ __le16 reserved2[2];
+};
+
+struct core_tx_bd_flags {
+ u8 as_bitfield;
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
+#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
+#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12
+
+};
+
+struct core_tx_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ __le16 nw_vlan_or_lb_echo;
+ u8 bitfield0;
+#define CORE_TX_BD_NBDS_MASK 0xF
+#define CORE_TX_BD_NBDS_SHIFT 0
+#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
+#define CORE_TX_BD_RESERVED0_MASK 0x7
+#define CORE_TX_BD_RESERVED0_SHIFT 5
+ struct core_tx_bd_flags bd_flags;
+ __le16 bitfield1;
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+#define CORE_TX_BD_TX_DST_MASK 0x1
+#define CORE_TX_BD_TX_DST_SHIFT 14
+#define CORE_TX_BD_RESERVED1_MASK 0x1
+#define CORE_TX_BD_RESERVED1_SHIFT 15
+};
+
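The MASK/SHIFT pairs in core_tx_bd above are consumed through the GET_FIELD/SET_FIELD pattern used throughout the qed code; the sketch below carries local copies of those helpers so it stands alone (the in-tree macros live in the common HSI headers):

#include <stdio.h>
#include <stdint.h>

#define CORE_TX_BD_NBDS_MASK	   0xF
#define CORE_TX_BD_NBDS_SHIFT	   0
#define CORE_TX_BD_ROCE_FLAV_MASK  0x1
#define CORE_TX_BD_ROCE_FLAV_SHIFT 4

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, flag)				   \
	((value) = ((value) & ~(name##_MASK << name##_SHIFT)) |   \
		   (((flag) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint8_t bitfield0 = 0;

	SET_FIELD(bitfield0, CORE_TX_BD_NBDS, 3);	/* 3 BDs in packet */
	SET_FIELD(bitfield0, CORE_TX_BD_ROCE_FLAV, 1);	/* RRoCE flavor */

	printf("bitfield0=0x%02x nbds=%u\n", bitfield0,
	       (unsigned int)GET_FIELD(bitfield0, CORE_TX_BD_NBDS));
	/* -> bitfield0=0x13 nbds=3 */
	return 0;
}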
+enum core_tx_dest {
+ CORE_TX_DEST_NW,
+ CORE_TX_DEST_LB,
+ MAX_CORE_TX_DEST
+};
+
+struct core_tx_start_ramrod_data {
+ struct regpair pbl_base_addr;
+ __le16 mtu;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 stats_en;
+ u8 stats_id;
+ u8 conn_type;
+ __le16 pbl_size;
+ __le16 qm_pq_id;
+ u8 gsi_offload_flag;
+ u8 resrved[3];
+};
+
+struct core_tx_stop_ramrod_data {
+ __le32 reserved0[2];
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -636,9 +877,33 @@ struct hsi_fp_ver_struct {
};
/* Mstorm non-triggering VF zone */
+enum malicious_vf_error_id {
+ MALICIOUS_VF_NO_ERROR,
+ VF_PF_CHANNEL_NOT_READY,
+ VF_ZONE_MSG_NOT_VALID,
+ VF_ZONE_FUNC_NOT_ENABLED,
+ ETH_PACKET_TOO_SMALL,
+ ETH_ILLEGAL_VLAN_MODE,
+ ETH_MTU_VIOLATION,
+ ETH_ILLEGAL_INBAND_TAGS,
+ ETH_VLAN_INSERT_AND_INBAND_VLAN,
+ ETH_ILLEGAL_NBDS,
+ ETH_FIRST_BD_WO_SOP,
+ ETH_INSUFFICIENT_BDS,
+ ETH_ILLEGAL_LSO_HDR_NBDS,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_ZERO_SIZE_BD,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_INSUFFICIENT_PAYLOAD,
+ ETH_EDPM_OUT_OF_SYNC,
+ ETH_TUNN_IPV6_EXT_NBD_ERR,
+ ETH_CONTROL_PACKET_VIOLATION,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
+
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
- struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF];
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
};
/* Mstorm VF zone */
@@ -705,13 +970,17 @@ struct pf_start_ramrod_data {
struct protocol_dcb_data {
u8 dcb_enable_flag;
+ u8 reserved_a;
u8 dcb_priority;
u8 dcb_tc;
- u8 reserved;
+ u8 reserved_b;
+ u8 reserved0;
};
struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
+ u8 update_rx_def_ucast_clss;
+ u8 update_rx_def_non_ucast_clss;
u8 update_tx_pf_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
@@ -727,7 +996,7 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgre;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
- __le16 reserved[3];
+ __le16 reserved[2];
};
struct pf_update_ramrod_data {
@@ -736,16 +1005,17 @@ struct pf_update_ramrod_data {
u8 update_fcoe_dcb_data_flag;
u8 update_iscsi_dcb_data_flag;
u8 update_roce_dcb_data_flag;
+ u8 update_rroce_dcb_data_flag;
u8 update_iwarp_dcb_data_flag;
u8 update_mf_vlan_flag;
- u8 reserved;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
struct protocol_dcb_data iscsi_dcb_data;
struct protocol_dcb_data roce_dcb_data;
+ struct protocol_dcb_data rroce_dcb_data;
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan;
- __le16 reserved2;
+ __le16 reserved;
struct pf_update_tunnel_config tunnel_config;
};
@@ -766,10 +1036,14 @@ enum protocol_version_array_key {
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
-/* Pstorm non-triggering VF zone */
+struct rdma_sent_stats {
+ struct regpair sent_bytes;
+ struct regpair sent_pkts;
+};
+
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
- struct regpair reserved[2];
+ struct rdma_sent_stats rdma_stats;
};
/* Pstorm VF zone */
@@ -786,7 +1060,11 @@ struct ramrod_header {
__le16 echo;
};
-/* Slowpath Element (SPQE) */
+struct rdma_rcv_stats {
+ struct regpair rcv_bytes;
+ struct regpair rcv_pkts;
+};
+
struct slow_path_element {
struct ramrod_header hdr;
struct regpair data_ptr;
@@ -794,7 +1072,7 @@ struct slow_path_element {
/* Tstorm non-triggering VF zone */
struct tstorm_non_trigger_vf_zone {
- struct regpair reserved[2];
+ struct rdma_rcv_stats rdma_stats;
};
struct tstorm_per_port_stat {
@@ -802,9 +1080,14 @@ struct tstorm_per_port_stat {
struct regpair mac_error_discard;
struct regpair mftag_filter_discard;
struct regpair eth_mac_filter_discard;
- struct regpair reserved[5];
+ struct regpair ll2_mac_filter_discard;
+ struct regpair ll2_conn_disabled_discard;
+ struct regpair iscsi_irregular_pkt;
+ struct regpair reserved;
+ struct regpair roce_irregular_pkt;
struct regpair eth_irregular_pkt;
- struct regpair reserved1[2];
+ struct regpair reserved1;
+ struct regpair preroce_irregular_pkt;
struct regpair eth_gre_tunn_filter_discard;
struct regpair eth_vxlan_tunn_filter_discard;
struct regpair eth_geneve_tunn_filter_discard;
@@ -870,7 +1153,13 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
-/* Attentions status block */
+enum vf_zone_size_mode {
+ VF_ZONE_SIZE_MODE_DEFAULT,
+ VF_ZONE_SIZE_MODE_DOUBLE,
+ VF_ZONE_SIZE_MODE_QUAD,
+ MAX_VF_ZONE_SIZE_MODE
+};
+
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -1442,13 +1731,6 @@ enum bin_dbg_buffer_type {
MAX_BIN_DBG_BUFFER_TYPE
};
-/* Chip IDs */
-enum chip_ids {
- CHIP_RESERVED,
- CHIP_BB_B0,
- CHIP_RESERVED2,
- MAX_CHIP_IDS
-};
/* Attention bit mapping */
struct dbg_attn_bit_mapping {
@@ -1527,6 +1809,371 @@ enum dbg_attn_type {
MAX_DBG_ATTN_TYPE
};
+/* condition header for registers dump */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u8 block_id; /* block ID */
+ u8 data_size; /* size in dwords of the data following this header */
+};
+
+/* memory data for registers dump */
+struct dbg_dump_mem {
+ __le32 dword0;
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ __le32 dword1;
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
+#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+};
+
+/* register data for registers dump */
+struct dbg_dump_reg {
+ __le32 data;
+#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+/* split header for registers dump */
+struct dbg_dump_split_hdr {
+ __le32 hdr;
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+/* condition header for idle check */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ __le16 data_size; /* size in dwords of the data following this header */
+};
+
+/* Idle Check condition register */
+struct dbg_idle_chk_cond_reg {
+ __le32 data;
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ __le16 num_entries; /* number of registers entries to check */
+ u8 entry_size; /* size of registers entry (in dwords) */
+ u8 start_entry; /* index of the first entry to check */
+};
+
+/* Idle Check info register */
+struct dbg_idle_chk_info_reg {
+ __le32 data;
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ __le16 size; /* register size in dwords */
+ struct dbg_mode_hdr mode; /* Mode header */
+};
+
+/* Idle Check register */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
+ struct dbg_idle_chk_info_reg info_reg; /* info register */
+};
+
+/* Idle Check result header */
+struct dbg_idle_chk_result_hdr {
+ __le16 rule_id; /* Failing rule index */
+ __le16 mem_entry_id; /* Failing memory entry index */
+ u8 num_dumped_cond_regs; /* number of dumped condition registers */
+ u8 num_dumped_info_regs; /* number of dumped info registers */
+ u8 severity; /* from dbg_idle_chk_severity_types enum */
+ u8 reserved;
+};
+
+/* Idle Check result register header */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry; /* index of the first checked entry */
+ __le16 size; /* register size in dwords */
+};
+
+/* Idle Check rule */
+struct dbg_idle_chk_rule {
+ __le16 rule_id; /* Idle Check rule ID */
+ u8 severity; /* value from dbg_idle_chk_severity_types enum */
+ u8 cond_id; /* Condition ID */
+ u8 num_cond_regs; /* number of condition registers */
+ u8 num_info_regs; /* number of info registers */
+ u8 num_imms; /* number of immediates in the condition */
+ u8 reserved1;
+ __le16 reg_offset; /* offset of this rule's registers in the idle check
+ * register array (in dbg_idle_chk_reg units).
+ */
+ __le16 imm_offset; /* offset of this rule's immediate values in the
+ * immediate values array (in dwords).
+ */
+};
+
+/* Idle Check rule parsing data */
+struct dbg_idle_chk_rule_parsing_data {
+ __le32 data;
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+/* idle check severity types */
+enum dbg_idle_chk_severity_types {
+ /* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+ /* idle check failure should cause an error only if there's no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ /* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+/* Debug Bus block data */
+struct dbg_bus_block_data {
+ u8 enabled; /* Indicates if the block is enabled for recording (0/1) */
+ u8 hw_id; /* HW ID associated with the block */
+ u8 line_num; /* Debug line number to select */
+ u8 right_shift; /* Number of units to right-shift the debug data (0-3) */
+ u8 cycle_en; /* 4-bit value: bit i set -> unit i is enabled. */
+ u8 force_valid; /* 4-bit value: bit i set -> unit i is forced valid. */
+ u8 force_frame; /* 4-bit value: bit i set -> unit i frame bit is forced.
+ */
+ u8 reserved;
+};
+
+/* Debug Bus Clients */
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+/* Debug Bus memory address */
+struct dbg_bus_mem_addr {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Debug Bus PCI buffer data */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
+ struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
+ __le32 size; /* PCI buffer size in bytes */
+};
+
+/* Debug Bus Storm EID range filter params */
+struct dbg_bus_storm_eid_range_params {
+ u8 min; /* Minimal event ID to filter on */
+ u8 max; /* Maximal event ID to filter on */
+};
+
+/* Debug Bus Storm EID mask filter params */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val; /* Event ID value */
+ u8 mask; /* Event ID mask. 1s in the mask = don't care bits. */
+};
+
+/* Debug Bus Storm EID filter params */
+union dbg_bus_storm_eid_params {
+ struct dbg_bus_storm_eid_range_params range;
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/* Debug Bus Storm data */
+struct dbg_bus_storm_data {
+ u8 fast_enabled;
+ u8 fast_mode;
+ u8 slow_enabled;
+ u8 slow_mode;
+ u8 hw_id;
+ u8 eid_filter_en;
+ u8 eid_range_not_mask;
+ u8 cid_filter_en;
+ union dbg_bus_storm_eid_params eid_filter_params;
+ __le16 reserved;
+ __le32 cid;
+};
+
+/* Debug Bus data */
+struct dbg_bus_data {
+ __le32 app_version; /* The tools version number of the application */
+ u8 state; /* The current debug bus state */
+ u8 hw_dwords; /* HW dwords per cycle */
+ u8 next_hw_id; /* Next HW ID to be associated with an input */
+ u8 num_enabled_blocks; /* Number of blocks enabled for recording */
+ u8 num_enabled_storms; /* Number of Storms enabled for recording */
+ u8 target; /* Output target */
+ u8 next_trigger_state; /* ID of next trigger state to be added */
+ u8 next_constraint_id; /* ID of next filter/trigger constraint to be
+ * added.
+ */
+ u8 one_shot_en; /* Indicates if one-shot mode is enabled (0/1) */
+ u8 grc_input_en; /* Indicates if GRC recording is enabled (0/1) */
+ u8 timestamp_input_en; /* Indicates if timestamp recording is enabled
+ * (0/1).
+ */
+ u8 filter_en; /* Indicates if the recording filter is enabled (0/1) */
+ u8 trigger_en; /* Indicates if the recording trigger is enabled (0/1) */
+ u8 adding_filter; /* If true, the next added constraint belongs to the
+ * filter. Otherwise, it belongs to the last added
+ * trigger state. Valid only if either filter or
+ * triggers are enabled.
+ */
+ u8 filter_pre_trigger; /* Indicates if the recording filter should be
+ * applied before the trigger. Valid only if both
+ * filter and trigger are enabled (0/1).
+ */
+ u8 filter_post_trigger; /* Indicates if the recording filter should be
+ * applied after the trigger. Valid only if both
+ * filter and trigger are enabled (0/1).
+ */
+ u8 unify_inputs; /* If true, all inputs are associated with HW ID 0.
+ * Otherwise, each input is assigned a different HW ID
+ * (0/1).
+ */
+ u8 rcv_from_other_engine; /* Indicates if the other engine sends its NW
+ * recording to this engine (0/1).
+ */
+ struct dbg_bus_pci_buf_data pci_buf; /* Debug Bus PCI buffer data. Valid
+ * only when the target is
+ * DBG_BUS_TARGET_ID_PCI.
+ */
+ __le16 reserved;
+ struct dbg_bus_block_data blocks[80]; /* Debug Bus data for each block */
+ struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each Storm */
+};
+
+/* Debug bus frame modes */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
+ DBG_BUS_FRAME_MODE_4HW_0ST = 3, /* 4 HW dwords, 0 Storm dwords */
+ DBG_BUS_FRAME_MODE_8HW_0ST = 4, /* 8 HW dwords, 0 Storm dwords */
+ MAX_DBG_BUS_FRAME_MODES
+};
+
+/* Debug bus states */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
+ DBG_BUS_STATE_READY, /* debug bus is ready for configuration and
+ * recording.
+ */
+ DBG_BUS_STATE_RECORDING, /* debug bus is currently recording */
+ DBG_BUS_STATE_STOPPED, /* debug bus recording has stopped */
+ MAX_DBG_BUS_STATES
+};
+
+/* Debug bus target IDs */
+enum dbg_bus_targets {
+ /* records debug bus to DBG block internal buffer */
+ DBG_BUS_TARGET_ID_INT_BUF,
+ /* records debug bus to the NW */
+ DBG_BUS_TARGET_ID_NIG,
+ /* records debug bus to a PCI buffer */
+ DBG_BUS_TARGET_ID_PCI,
+ MAX_DBG_BUS_TARGETS
+};
+
+/* GRC Dump data */
+struct dbg_grc_data {
+ __le32 param_val[40]; /* Value of each GRC parameter. Array size must
+ * match the enum dbg_grc_params.
+ */
+ u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
+ * set by the user (0/1). Array size must
+ * match the enum dbg_grc_params.
+ */
+};
+
+/* Debug GRC params */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM, /* dump Tstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MSTORM, /* dump Mstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_USTORM, /* dump Ustorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_XSTORM, /* dump Xstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_YSTORM, /* dump Ystorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PSTORM, /* dump Pstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_REGS, /* dump non-memory registers (0/1) */
+ DBG_GRC_PARAM_DUMP_RAM, /* dump Storm internal RAMs (0/1) */
+ DBG_GRC_PARAM_DUMP_PBUF, /* dump Storm passive buffer (0/1) */
+ DBG_GRC_PARAM_DUMP_IOR, /* dump Storm IORs (0/1) */
+ DBG_GRC_PARAM_DUMP_VFC, /* dump VFC memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CM_CTX, /* dump CM contexts (0/1) */
+ DBG_GRC_PARAM_DUMP_PXP, /* dump PXP memories (0/1) */
+ DBG_GRC_PARAM_DUMP_RSS, /* dump RSS memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CAU, /* dump CAU memories (0/1) */
+ DBG_GRC_PARAM_DUMP_QM, /* dump QM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MCP, /* dump MCP memories (0/1) */
+ DBG_GRC_PARAM_RESERVED, /* reserved */
+ DBG_GRC_PARAM_DUMP_CFC, /* dump CFC memories (0/1) */
+ DBG_GRC_PARAM_DUMP_IGU, /* dump IGU memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BRB, /* dump BRB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BTB, /* dump BTB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BMB, /* dump BMB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_NIG, /* dump NIG memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MULD, /* dump MULD memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PRS, /* dump PRS memories (0/1) */
+ DBG_GRC_PARAM_DUMP_DMAE, /* dump DMAE memories (0/1) */
+ DBG_GRC_PARAM_DUMP_TM, /* dump TM (timers) memories (0/1) */
+ DBG_GRC_PARAM_DUMP_SDM, /* dump SDM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_DIF, /* dump DIF memories (0/1) */
+ DBG_GRC_PARAM_DUMP_STATIC, /* dump static debug data (0/1) */
+ DBG_GRC_PARAM_UNSTALL, /* un-stall Storms after dump (0/1) */
+ DBG_GRC_PARAM_NUM_LCIDS, /* number of LCIDs (0..320) */
+ DBG_GRC_PARAM_NUM_LTIDS, /* number of LTIDs (0..320) */
+ /* preset: exclude all memories from dump (1 only) */
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+ /* preset: include memories for crash dump (1 only) */
+ DBG_GRC_PARAM_CRASH,
+ /* perform dump only if MFW is responding (0/1) */
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ MAX_DBG_GRC_PARAMS
+};
+
+/* Debug reset registers */
+enum dbg_reset_regs {
+ DBG_RESET_REG_MISCS_PL_UA,
+ DBG_RESET_REG_MISCS_PL_HV,
+ DBG_RESET_REG_MISCS_PL_HV_2,
+ DBG_RESET_REG_MISC_PL_UA,
+ DBG_RESET_REG_MISC_PL_HV,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ DBG_RESET_REG_MISC_PL_PDA_VAUX,
+ MAX_DBG_RESET_REGS
+};
+
/* Debug status codes */
enum dbg_status {
DBG_STATUS_OK,
@@ -1579,9 +2226,45 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
MAX_DBG_STATUS
};
+/* Debug Storms IDs */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+/* Idle Check data */
+struct idle_chk_data {
+ __le32 buf_size; /* Idle check buffer size in dwords */
+ u8 buf_size_set; /* Indicates if the idle check buffer size was set
+ * (0/1).
+ */
+ u8 reserved1;
+ __le16 reserved2;
+};
+
+/* Debug Tools data (per HW function) */
+struct dbg_tools_data {
+ struct dbg_grc_data grc; /* GRC Dump data */
+ struct dbg_bus_data bus; /* Debug Bus data */
+ struct idle_chk_data idle_chk; /* Idle Check data */
+ u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
+ u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+ */
+ u8 chip_id; /* Chip ID (from enum chip_ids) */
+ u8 platform_id; /* Platform ID (from enum platform_ids) */
+ u8 initialized; /* Indicates if the data was initialized */
+ u8 reserved;
+};
+
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -1589,7 +2272,41 @@ enum dbg_status {
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
-/* QM per-port init parameters */
+struct init_brb_ram_req {
+ __le32 guranteed_per_tc;
+ __le32 headroom_per_tc;
+ __le32 min_pkt_size;
+ __le32 max_ports_per_engine;
+ u8 num_active_tcs[MAX_NUM_PORTS];
+};
+
+struct init_ets_tc_req {
+ u8 use_sp;
+ u8 use_wfq;
+ __le16 weight;
+};
+
+struct init_ets_req {
+ __le32 mtu;
+ struct init_ets_tc_req tc_req[NUM_OF_TCS];
+};
+
+struct init_nig_lb_rl_req {
+ __le16 lb_mac_rate;
+ __le16 lb_rate;
+ __le32 mtu;
+ __le16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+struct init_nig_pri_tc_map_entry {
+ u8 tc_id;
+ u8 valid;
+};
+
+struct init_nig_pri_tc_map_req {
+ struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
struct init_qm_port_params {
u8 active;
u8 active_phys_tcs;
@@ -1619,7 +2336,7 @@ struct init_qm_vport_params {
/* Width of GRC address in bits (addresses are specified in dwords) */
#define GRC_ADDR_BITS 23
-#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+#define MAX_GRC_ADDR (BIT(GRC_ADDR_BITS) - 1)
/* indicates an init that should be applied to any phase ID */
#define ANY_PHASE_ID 0xffff
@@ -1627,15 +2344,50 @@ struct init_qm_vport_params {
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
+struct fw_asserts_ram_section {
+ __le16 section_ram_line_offset;
+ __le16 section_ram_line_size;
+ u8 list_dword_offset;
+ u8 list_element_dword_size;
+ u8 list_num_elements;
+ u8 list_next_index_dword_offset;
+};
+
+struct fw_ver_num {
+ u8 major; /* Firmware major version number */
+ u8 minor; /* Firmware minor version number */
+ u8 rev; /* Firmware revision version number */
+ u8 eng; /* Firmware engineering version number (for bootleg versions) */
+};
+
+struct fw_ver_info {
+ __le16 tools_ver; /* Tools version number */
+ u8 image_id; /* FW image ID (e.g. main) */
+ u8 reserved1;
+ struct fw_ver_num num; /* FW version number */
+ __le32 timestamp; /* FW Timestamp in unix time (sec. since 1970) */
+ __le32 reserved2;
+};
+
+struct fw_info {
+ struct fw_ver_info ver;
+ struct fw_asserts_ram_section fw_asserts_section;
+};
+
+struct fw_info_location {
+ __le32 grc_addr;
+ __le32 size;
+};
+
enum init_modes {
MODE_RESERVED,
MODE_BB_B0,
- MODE_RESERVED2,
+ MODE_K2,
MODE_ASIC,
+ MODE_RESERVED2,
MODE_RESERVED3,
MODE_RESERVED4,
MODE_RESERVED5,
- MODE_RESERVED6,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
@@ -1644,7 +2396,7 @@ enum init_modes {
MODE_PORTS_PER_ENG_4,
MODE_100G,
MODE_40G,
- MODE_RESERVED7,
+ MODE_RESERVED6,
MAX_INIT_MODES
};
@@ -1674,11 +2426,11 @@ struct bin_buffer_hdr {
/* binary init buffer types */
enum bin_init_buffer_type {
- BIN_BUF_FW_VER_INFO,
+ BIN_BUF_INIT_FW_VER_INFO,
BIN_BUF_INIT_CMD,
BIN_BUF_INIT_VAL,
BIN_BUF_INIT_MODE_TREE,
- BIN_BUF_IRO,
+ BIN_BUF_INIT_IRO,
MAX_BIN_INIT_BUFFER_TYPE
};
@@ -1902,8 +2654,276 @@ struct iro {
__le16 size;
};
+/***************************** Public Functions *******************************/
/**
- * @brief qed_dbg_print_attn - Prints attention registers values in the specified results struct.
+ * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
+ * arrays.
+ *
+ * @param bin_ptr - a pointer to the binary data with debug arrays.
+ */
+enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
+ * GRC Dump.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the collected GRC data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified dump buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
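These dump helpers follow a two-step pattern: query the required size, allocate, then dump. A hedged fragment of the intended calling sequence, assuming a live p_hwfn/p_ptt in scope inside a void function; the vzalloc()/vfree() choice here is an assumption, not something the interface mandates:

	u32 size_dw, dumped_dw;
	u32 *buf;

	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dw) !=
	    DBG_STATUS_OK)
		return;

	buf = vzalloc(size_dw * sizeof(u32));
	if (!buf)
		return;

	if (qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dw,
			     &dumped_dw) == DBG_STATUS_OK) {
		/* hand the first dumped_dw dwords of buf to the consumer */
	}

	vfree(buf);

The same size-then-dump sequence applies to the idle check, MCP trace, reg/IGU FIFO, protection override and FW asserts helpers declared below.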
+/**
+ * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
+ * for idle check results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the idle check
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the idle check data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
+ * for mcp trace results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the trace data in MCP scratchpad contains an invalid signature
+ * - the bundle ID in NVRAM is invalid
+ * - the trace meta data cannot be found (in NVRAM or image file)
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the mcp trace data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - the trace data in MCP scratchpad contains an invalid signature
+ * - the bundle ID in NVRAM is invalid
+ * - the trace meta data cannot be found (in NVRAM or image file)
+ * - the trace meta data cannot be read (from NVRAM or image file)
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
+ * for grc trace fifo results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
+ * the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the reg fifo data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
+ * for the IGU fifo results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
+ * the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the IGU fifo data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
+ * buffer size for protection override window results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for protection
+ * override data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_protection_override_dump - Reads protection override window
+ * entries and writes the results into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the protection override data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
+ * size for FW Asserts results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the FW Asserts data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_print_attn - Prints the attention register values from the
+ * specified results struct.
*
* @param p_hwfn
* @param results - Pointer to the attention read results
@@ -1915,47 +2935,241 @@ struct iro {
enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results);
+/******************************** Constants **********************************/
+
#define MAX_NAME_LEN 16
+/***************************** Public Functions *******************************/
+/**
+ * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
+ * debug arrays.
+ *
+ * @param bin_ptr - a pointer to the binary data with debug arrays.
+ */
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_get_status_str - Returns a string for the specified status.
+ *
+ * @param status - a debug status code.
+ *
+ * @return a string for the specified status
+ */
+const char *qed_dbg_get_status_str(enum dbg_status status);
+/**
+ * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
+ * for idle check results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - idle check dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_idle_chk_results - Prints idle check results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - idle check dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the idle check results.
+ * @param num_errors - OUT: number of errors found in idle check.
+ * @param num_warnings - OUT: number of warnings found in idle check.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors,
+ u32 *num_warnings);
+/**
+ * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
+ * for MCP Trace results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - MCP Trace dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_mcp_trace_results - Prints MCP Trace results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - mcp trace dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the mcp trace results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
+ * for reg_fifo results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - reg fifo dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_reg_fifo_results - Prints reg fifo results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - reg fifo dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the reg fifo results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
+ * for igu_fifo results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - IGU fifo dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_igu_fifo_results - Prints IGU fifo results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - IGU fifo dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the IGU fifo results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_protection_override_results_buf_size - Returns the required
+ * buffer size for protection override results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - protection override dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_protection_override_results - Prints protection override
+ * results.
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - protection override dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the protection override results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
+ * for FW Asserts results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - FW Asserts dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_fw_asserts_results - Prints FW Asserts results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - FW Asserts dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the FW Asserts results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
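
The parsing half mirrors the dump half: qed_dbg_user_set_bin_ptr() presumably must be pointed at the firmware debug arrays first, after which each feature offers a size query plus a printer. A hedged sketch for the idle check results, under the same assumptions as the dump sketch above (vzalloc() and pr_info() are illustrative, not mandated by this API):

	/* Hypothetical helper: render a raw idle check dump as text. */
	static enum dbg_status example_print_idle_chk(struct qed_hwfn *p_hwfn,
						      u32 *dump_buf,
						      u32 dumped_dwords)
	{
		u32 text_size, num_errors, num_warnings;
		enum dbg_status rc;
		char *text;

		rc = qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
						       dumped_dwords,
						       &text_size);
		if (rc != DBG_STATUS_OK)
			return rc;

		text = vzalloc(text_size);
		if (!text)
			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

		rc = qed_print_idle_chk_results(p_hwfn, dump_buf,
						dumped_dwords, text,
						&num_errors, &num_warnings);
		if (rc == DBG_STATUS_OK)
			pr_info("idle check: %u errors, %u warnings\n",
				num_errors, num_warnings);
		vfree(text);
		return rc;
	}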
/* Win 2 */
-#define GTT_BAR0_MAP_REG_IGU_CMD \
- 0x00f000UL
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
/* Win 3 */
-#define GTT_BAR0_MAP_REG_TSDM_RAM \
- 0x010000UL
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
/* Win 4 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM \
- 0x011000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
/* Win 5 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
- 0x012000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-#define GTT_BAR0_MAP_REG_USDM_RAM \
- 0x013000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
/* Win 7 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
- 0x014000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
/* Win 8 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
- 0x015000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
/* Win 9 */
-#define GTT_BAR0_MAP_REG_XSDM_RAM \
- 0x016000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
/* Win 10 */
-#define GTT_BAR0_MAP_REG_YSDM_RAM \
- 0x017000UL
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
/* Win 11 */
-#define GTT_BAR0_MAP_REG_PSDM_RAM \
- 0x018000UL
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
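
Each GTT window above maps a slice of storm RAM into BAR0, with consecutive windows 0x1000 bytes apart, so a host access is just the window base plus a byte offset. A minimal sketch, assuming a mapped BAR0 'regview' pointer; the helper name is hypothetical and not part of this header:

	/* Read a dword from TSDM RAM through GTT window 3. */
	static u32 example_read_tsdm_ram(void __iomem *regview,
					 u32 byte_offset)
	{
		return readl(regview + GTT_BAR0_MAP_REG_TSDM_RAM +
			     byte_offset);
	}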
/**
* @brief qed_qm_pf_mem_size - prepare QM ILT sizes
@@ -2003,7 +3217,7 @@ struct qed_qm_pf_rt_init_params {
u16 num_vf_pqs;
u8 start_vport;
u8 num_vports;
- u8 pf_wfq;
+ u16 pf_wfq;
u32 pf_rl;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
@@ -2138,6 +3352,9 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define TSTORM_PORT_STAT_OFFSET(port_id) \
(IRO[1].base + ((port_id) * IRO[1].m1))
#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[2].base + ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
(IRO[3].base + ((vf_id) * IRO[3].m1))
#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
@@ -2153,42 +3370,90 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
+ (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
(IRO[19].base + ((queue_id) * IRO[19].m1))
#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size)
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+ (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[21].base + ((pf_id) * IRO[21].m1))
+ (IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[22].size)
+ (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[23].base + ((pf_id) * IRO[23].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size)
+ (IRO[24].base + ((pf_id) * IRO[24].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size)
+ (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[25].base + ((pf_id) * IRO[25].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size)
+ (IRO[26].base + ((pf_id) * IRO[26].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
- (IRO[26].base + ((ethtype) * IRO[26].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size)
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size)
+ (IRO[27].base + ((ethtype) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[28].base + ((pf_id) * IRO[28].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size)
+ (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[29].base + ((queue_id) * IRO[29].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size)
-
-static const struct iro iro_arr[46] = {
+ (IRO[30].base + ((queue_id) * IRO[30].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+ (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
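
All of the *_OFFSET macros above follow one shape: an iro_arr base plus per-index strides (m1, and m2 for two-dimensional tables), with the matching *_SIZE macro giving one entry's size. An illustrative expansion only; the field names are exactly those used by the macros, and the array contents are firmware-generated:

	/*
	 * MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) expands to:
	 *   IRO[20].base + (vf_id) * IRO[20].m1 + (vf_queue_id) * IRO[20].m2
	 * i.e. a two-dimensional table in storm RAM, strided per VF and
	 * per VF queue.
	 */
	u32 prod_off = MSTORM_ETH_VF_PRODS_OFFSET(2, 3);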
+
+static const struct iro iro_arr[47] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb0, 0x78, 0x0, 0x0, 0x78},
{0x6318, 0x20, 0x0, 0x0, 0x20},
@@ -2201,20 +3466,21 @@ static const struct iro iro_arr[46] = {
{0x3df0, 0x0, 0x0, 0x0, 0x78},
{0x29b0, 0x0, 0x0, 0x0, 0x78},
{0x4c38, 0x0, 0x0, 0x0, 0x78},
- {0x4a48, 0x0, 0x0, 0x0, 0x78},
+ {0x4990, 0x0, 0x0, 0x0, 0x78},
{0x7e48, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
{0x60f8, 0x10, 0x0, 0x0, 0x10},
{0xb820, 0x30, 0x0, 0x0, 0x30},
{0x95b8, 0x30, 0x0, 0x0, 0x30},
- {0x4c18, 0x80, 0x0, 0x0, 0x40},
+ {0x4b60, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xc9a8, 0x0, 0x0, 0x0, 0x4},
- {0x4c58, 0x80, 0x0, 0x0, 0x20},
+ {0x53a0, 0x80, 0x4, 0x0, 0x4},
+ {0xc8f0, 0x0, 0x0, 0x0, 0x4},
+ {0x4ba0, 0x80, 0x0, 0x0, 0x20},
{0x8050, 0x40, 0x0, 0x0, 0x30},
{0xe770, 0x60, 0x0, 0x0, 0x60},
{0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xdf88, 0x78, 0x0, 0x0, 0x78},
+ {0xf188, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
{0xacf0, 0x0, 0x0, 0x0, 0xf0},
{0xade0, 0x8, 0x0, 0x0, 0x8},
@@ -2226,455 +3492,457 @@ static const struct iro iro_arr[46] = {
{0x200, 0x10, 0x8, 0x0, 0x8},
{0xb78, 0x10, 0x8, 0x0, 0x2},
{0xd888, 0x38, 0x0, 0x0, 0x24},
- {0x12120, 0x10, 0x0, 0x0, 0x8},
- {0x11b20, 0x38, 0x0, 0x0, 0x18},
+ {0x12c38, 0x10, 0x0, 0x0, 0x8},
+ {0x11aa0, 0x38, 0x0, 0x0, 0x18},
{0xa8c0, 0x30, 0x0, 0x0, 0x10},
{0x86f8, 0x28, 0x0, 0x0, 0x18},
- {0xeff8, 0x10, 0x0, 0x0, 0x10},
+ {0x101f8, 0x10, 0x0, 0x0, 0x10},
{0xdd08, 0x48, 0x0, 0x0, 0x38},
- {0xf460, 0x20, 0x0, 0x0, 0x20},
+ {0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
{0x5000, 0x10, 0x0, 0x0, 0x10},
};
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30799
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30815
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30849
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31523
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32547
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33059
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
-
-#define RUNTIME_ARRAY_SIZE 33925
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30817
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30851
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
+
+#define RUNTIME_ARRAY_SIZE 33927
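/* Editor's note: the *_RT_OFFSET values above index the host-side runtime
 * array (RUNTIME_ARRAY_SIZE entries) that the init code later flushes to
 * the chip. A minimal sketch of how a value is staged, mirroring
 * qed_init_store_rt_reg() further down in this patch:
 */
static void stage_rt_value(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	p_hwfn->rt_data.init_val[rt_offset] = val;	/* value to program */
	p_hwfn->rt_data.b_valid[rt_offset] = true;	/* entry now valid */
}
/* e.g. stage_rt_value(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, 1); the
 * STORE_RT_REG() wrapper used by qed_int_igu_init_rt() below is assumed
 * to boil down to exactly this.
 */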
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -3201,7 +4469,31 @@ struct eth_conn_context {
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
-/* opcodes for the event ring */
+enum eth_error_code {
+ ETH_OK = 0x00,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2,
+ ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC,
+ ETH_FILTERS_VLAN_ADD_FAIL_FULL,
+ ETH_FILTERS_VLAN_ADD_FAIL_DUP,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1,
+ ETH_FILTERS_PAIR_ADD_FAIL_DUP,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1,
+ ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
+ ETH_FILTERS_VNI_ADD_FAIL_FULL,
+ ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ MAX_ETH_ERROR_CODE
+};
+
enum eth_event_opcode {
ETH_EVENT_UNUSED,
ETH_EVENT_VPORT_START,
@@ -3269,7 +4561,13 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
-/* Ethernet Ramrod Command IDs */
+enum eth_ipv4_frag_type {
+ ETH_IPV4_NOT_FRAG,
+ ETH_IPV4_FIRST_FRAG,
+ ETH_IPV4_NON_FIRST_FRAG,
+ MAX_ETH_IPV4_FRAG_TYPE
+};
+
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -3451,8 +4749,8 @@ struct rx_queue_start_ramrod_data {
u8 toggle_val;
u8 vf_rx_prod_index;
-
- u8 reserved[6];
+ u8 vf_rx_prod_use_zone_a;
+ u8 reserved[5];
__le16 reserved1;
struct regpair cqe_pbl_addr;
struct regpair bd_base;
@@ -3526,10 +4824,11 @@ struct tx_queue_start_ramrod_data {
__le16 pxp_st_index;
__le16 comp_agg_size;
__le16 queue_zone_id;
- __le16 test_dup_count;
+ __le16 reserved2;
__le16 pbl_size;
__le16 tx_queue_id;
-
+ __le16 same_as_last_id;
+ __le16 reserved[3];
struct regpair pbl_base_addr;
struct regpair bd_cons_address;
};
@@ -4926,8 +6225,8 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
@@ -4988,6 +6287,10 @@ enum roce_event_opcode {
MAX_ROCE_EVENT_OPCODE
};
+struct roce_init_func_ramrod_data {
+ struct rdma_init_func_ramrod_data rdma;
+};
+
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
@@ -6639,6 +7942,35 @@ struct ystorm_iscsi_conn_ag_ctx {
__le32 reg2;
__le32 reg3;
};
+
+#define MFW_TRACE_SIGNATURE 0x25071946
+
+/* Format of a trace entry in the buffer */
+#define MFW_TRACE_EVENTID_MASK 0x00ffff
+#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
+#define MFW_TRACE_PRM_SIZE_SHIFT 16
+#define MFW_TRACE_ENTRY_SIZE 3
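/* Editor's sketch: decoding the leading dword of a trace entry with the
 * masks above. Whether PRM_SIZE counts bytes or dwords is not visible in
 * this hunk, so it is left as an opaque field here.
 */
static void mfw_trace_decode_hdr(u32 hdr, u32 *event_id, u32 *prm_size)
{
	*event_id = hdr & MFW_TRACE_EVENTID_MASK;
	*prm_size = (hdr & MFW_TRACE_PRM_SIZE_MASK) >> MFW_TRACE_PRM_SIZE_SHIFT;
}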
+
+struct mcp_trace {
+ u32 signature; /* Helps identify that the trace is valid */
+ u32 size; /* The size of the trace buffer in bytes */
+ u32 curr_level; /* 2 - everything is written to the buffer
+ * 1 - debug traces are not written
+ * 0 - only errors are written to the buffer
+ */
+ u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
+ * mask it.
+ */
+
+ /* Warning: the following pointers are assumed to be 32 bits wide, as
+ * they are used only in the MFW.
+ */
+ u32 trace_prod; /* The next trace will be written to this offset */
+ u32 trace_oldest; /* The oldest valid trace starts at this offset
+ * (usually very close after the current producer).
+ */
+};
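/* Editor's sketch, assuming trace_prod/trace_oldest are byte offsets into
 * the trace buffer that wrap at 'size': the amount of valid trace data
 * currently held in the circular buffer.
 */
static u32 mcp_trace_data_len(const struct mcp_trace *t)
{
	if (t->signature != MFW_TRACE_SIGNATURE)
		return 0;	/* trace section is not valid */
	if (t->trace_prod >= t->trace_oldest)
		return t->trace_prod - t->trace_oldest;
	return t->size - (t->trace_oldest - t->trace_prod);
}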
+
#define VF_MAX_STATIC 192
#define MCP_GLOB_PATH_MAX 2
@@ -6646,6 +7978,7 @@ struct ystorm_iscsi_conn_ag_ctx {
#define MCP_GLOB_PORT_MAX 4
#define MCP_GLOB_FUNC_MAX 16
+typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
#define OFFSIZE_OFFSET_SHIFT 0
#define OFFSIZE_OFFSET_MASK 0x0000ffff
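/* Editor's sketch: recovering a byte offset from an offsize_t. Only the
 * offset field is visible in this hunk; the shift by 2 converts the dword
 * count flagged in the typedef comment above into bytes. A size field in
 * the upper bits is assumed but not shown here.
 */
static u32 offsize_offset_bytes(offsize_t offsize)
{
	return ((offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2;
}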
@@ -7236,8 +8569,19 @@ struct public_drv_mb {
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
#define DRV_MSG_CODE_MCP_RESET 0x00090000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+#define DRV_MSG_CODE_MCP_HALT 0x00100000
+
+#define DRV_MSG_CODE_GET_STATS 0x00130000
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+
+#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
@@ -7248,6 +8592,9 @@ struct public_drv_mb {
#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+
+#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
@@ -7285,6 +8632,8 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -7315,10 +8664,10 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_RESERVED4,
MFW_DRV_MSG_BW_UPDATE,
MFW_DRV_MSG_BW_UPDATE5,
- MFW_DRV_MSG_BW_UPDATE6,
- MFW_DRV_MSG_BW_UPDATE7,
- MFW_DRV_MSG_BW_UPDATE8,
- MFW_DRV_MSG_BW_UPDATE9,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11,
@@ -7521,4 +8870,101 @@ struct nvm_cfg1 {
struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
};
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+#define MCP_TRACE_SIZE 2048 /* 2kb */
+
+/* This section sits at a fixed location at the beginning of the scratchpad,
+ * to ensure that the MCP trace is not overwritten during an MFW upgrade.
+ * All the rest of the data has a floating location which differs from
+ * version to version, and is pointed to by the mcp_meta_data below.
+ * Moreover, the spad_layout section is part of the MFW firmware, and is
+ * loaded with it from nvram in order to clear this portion.
+ */
+struct static_init {
+ u32 num_sections;
+ offsize_t sections[SPAD_SECTION_MAX];
+#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
+
+ struct mcp_trace trace;
+#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace)))
+ u8 trace_buffer[MCP_TRACE_SIZE];
+#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
+ /* running_mfw has the same definition as in nvm_map.h.
+ * This bit indicates both the running dir and the running bundle.
+ * It is set once when the LIM is loaded.
+ */
+ u32 running_mfw;
+#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw))))
+ u32 build_time;
+#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time))))
+ u32 reset_type;
+#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type))))
+ u32 mfw_secure_mode;
+#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode))))
+ u16 pme_status_pf_bitmap;
+#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap))))
+ u16 pme_enable_pf_bitmap;
+#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap))))
+ u32 mim_nvm_addr;
+ u32 mim_start_addr;
+ u32 ah_pcie_link_params;
+#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff)
+#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0)
+#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00)
+#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8)
+#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000)
+#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16)
+#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
+#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24)
+#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params))))
+
+ u32 rsrv_persist[5]; /* Reserved - persists across MFW upgrades */
+};
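/* Editor's sketch of why the fixed placement above matters: because
 * struct static_init sits at the very start of the scratchpad, the trace
 * header and buffer can be located without knowing the running MFW
 * version. offsetof() stands in for the STRUCT_OFFSET() helper used by
 * the macros above, which is defined elsewhere in the driver.
 */
static u32 mcp_trace_buffer_addr(u32 scratchpad_base)
{
	return scratchpad_base + offsetof(struct static_init, trace_buffer);
}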
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+ NVM_TYPE_INIT_HW = 0x19,
+ NVM_TYPE_DEFAULT_CFG = 0x1a,
+ NVM_TYPE_MDUMP = 0x1b,
+ NVM_TYPE_META = 0x1c,
+ NVM_TYPE_ISCSI_CFG = 0x1d,
+ NVM_TYPE_FCOE_CFG = 0x1f,
+ NVM_TYPE_ETH_PHY_FW1 = 0x20,
+ NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_MAX,
+};
+
+#define DIR_ID_1 (0)
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index e17885321faf..6e4fae9b1430 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -44,8 +44,7 @@ struct qed_ptt_pool {
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
- struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
- GFP_KERNEL);
+ struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
int i;
if (!p_pool)
@@ -113,16 +112,14 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
return NULL;
}
-void qed_ptt_release(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}
-u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
return le32_to_cpu(p_ptt->pxp.offset) << 2;
@@ -141,8 +138,7 @@ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
}
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 new_hw_addr)
+ struct qed_ptt *p_ptt, u32 new_hw_addr)
{
u32 prev_hw_addr;
@@ -166,8 +162,7 @@ void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
}
static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 hw_addr)
+ struct qed_ptt *p_ptt, u32 hw_addr)
{
u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
u32 offset;
@@ -224,10 +219,7 @@ u32 qed_rd(struct qed_hwfn *p_hwfn,
static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- void *addr,
- u32 hw_addr,
- size_t n,
- bool to_device)
+ void *addr, u32 hw_addr, size_t n, bool to_device)
{
u32 dw_count, *host_addr, hw_offset;
size_t quota, done = 0;
@@ -259,8 +251,7 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
}
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- void *dest, u32 hw_addr, size_t n)
+ struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
@@ -270,8 +261,7 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
}
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 hw_addr, void *src, size_t n)
+ struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
@@ -280,9 +270,7 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
-void qed_fid_pretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 fid)
+void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
u16 control = 0;
@@ -309,8 +297,7 @@ void qed_fid_pretend(struct qed_hwfn *p_hwfn,
}
void qed_port_pretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 port_id)
+ struct qed_ptt *p_ptt, u8 port_id)
{
u16 control = 0;
@@ -326,8 +313,7 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
*(u32 *)&p_ptt->pxp.pretend);
}
-void qed_port_unpretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u16 control = 0;
@@ -429,28 +415,27 @@ u32 qed_dmae_idx_to_go_cmd(u8 idx)
return DMAE_REG_GO_C0 + (idx << 2);
}
-static int
-qed_dmae_post_command(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
- struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+ struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
u8 idx_cmd = p_hwfn->dmae_info.channel, i;
int qed_status = 0;
/* verify address is not NULL */
- if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
- ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+ if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+ ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
DP_NOTICE(p_hwfn,
"source or destination address 0 idx_cmd=%d\n"
"opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
- idx_cmd,
- le32_to_cpu(command->opcode),
- le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length_dw),
- le32_to_cpu(command->src_addr_hi),
- le32_to_cpu(command->src_addr_lo),
- le32_to_cpu(command->dst_addr_hi),
- le32_to_cpu(command->dst_addr_lo));
+ idx_cmd,
+ le32_to_cpu(p_command->opcode),
+ le16_to_cpu(p_command->opcode_b),
+ le16_to_cpu(p_command->length_dw),
+ le32_to_cpu(p_command->src_addr_hi),
+ le32_to_cpu(p_command->src_addr_lo),
+ le32_to_cpu(p_command->dst_addr_hi),
+ le32_to_cpu(p_command->dst_addr_lo));
return -EINVAL;
}
@@ -459,13 +444,13 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
NETIF_MSG_HW,
"Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
idx_cmd,
- le32_to_cpu(command->opcode),
- le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length_dw),
- le32_to_cpu(command->src_addr_hi),
- le32_to_cpu(command->src_addr_lo),
- le32_to_cpu(command->dst_addr_hi),
- le32_to_cpu(command->dst_addr_lo));
+ le32_to_cpu(p_command->opcode),
+ le16_to_cpu(p_command->opcode_b),
+ le16_to_cpu(p_command->length_dw),
+ le32_to_cpu(p_command->src_addr_hi),
+ le32_to_cpu(p_command->src_addr_lo),
+ le32_to_cpu(p_command->dst_addr_hi),
+ le32_to_cpu(p_command->dst_addr_lo));
/* Copy the command to DMAE - need to do it before every call
* for source/dest address no reset.
@@ -475,7 +460,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
*/
for (i = 0; i < DMAE_CMD_SIZE; i++) {
u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
- *(((u32 *)command) + i) : 0;
+ *(((u32 *)p_command) + i) : 0;
qed_wr(p_hwfn, p_ptt,
DMAE_REG_CMD_MEM +
@@ -483,9 +468,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
(i * sizeof(u32)), data);
}
- qed_wr(p_hwfn, p_ptt,
- qed_dmae_idx_to_go_cmd(idx_cmd),
- DMAE_GO_VALUE);
+ qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
return qed_status;
}
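/* Editor's sketch of the post/complete handshake used here: the command
 * image is copied dword-by-dword into the per-channel DMAE command memory,
 * the GO register kicks the engine, and qed_dmae_operation_wait() (below)
 * then polls the completion word the engine writes back to host memory.
 */
static int dmae_post_and_wait(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_dmae_post_command(p_hwfn, p_ptt);	/* copy cmd + GO */
	if (rc)
		return rc;
	return qed_dmae_operation_wait(p_hwfn);		/* poll completion */
}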
@@ -498,31 +481,23 @@ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(u32),
- p_addr,
- GFP_KERNEL);
- if (!*p_comp) {
- DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+ sizeof(u32), p_addr, GFP_KERNEL);
+ if (!*p_comp)
goto err;
- }
p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct dmae_cmd),
p_addr, GFP_KERNEL);
- if (!*p_cmd) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+ if (!*p_cmd)
goto err;
- }
p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32) * DMAE_MAX_RW_SIZE,
p_addr, GFP_KERNEL);
- if (!*p_buff) {
- DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+ if (!*p_buff)
goto err;
- }
p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
@@ -543,8 +518,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32),
- p_hwfn->dmae_info.p_completion_word,
- p_phys);
+ p_hwfn->dmae_info.p_completion_word, p_phys);
p_hwfn->dmae_info.p_completion_word = NULL;
}
@@ -552,8 +526,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct dmae_cmd),
- p_hwfn->dmae_info.p_dmae_cmd,
- p_phys);
+ p_hwfn->dmae_info.p_dmae_cmd, p_phys);
p_hwfn->dmae_info.p_dmae_cmd = NULL;
}
@@ -571,9 +544,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
- u32 wait_cnt = 0;
- u32 wait_cnt_limit = 10000;
-
+ u32 wait_cnt_limit = 10000, wait_cnt = 0;
int qed_status = 0;
barrier();
@@ -606,7 +577,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
u64 dst_addr,
u8 src_type,
u8 dst_type,
- u32 length)
+ u32 length_dw)
{
dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
@@ -624,7 +595,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
(void *)(uintptr_t)src_addr,
- length * sizeof(u32));
+ length_dw * sizeof(u32));
break;
default:
return -EINVAL;
@@ -645,7 +616,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- cmd->length_dw = cpu_to_le16((u16)length);
+ cmd->length_dw = cpu_to_le16((u16)length_dw);
qed_dmae_post_command(p_hwfn, p_ptt);
@@ -654,16 +625,14 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
if (qed_status) {
DP_NOTICE(p_hwfn,
"qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
- src_addr,
- dst_addr,
- length);
+ src_addr, dst_addr, length_dw);
return qed_status;
}
if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
memcpy((void *)(uintptr_t)(dst_addr),
&p_hwfn->dmae_info.p_intermediate_buffer[0],
- length * sizeof(u32));
+ length_dw * sizeof(u32));
return 0;
}
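/* Editor's note on the pattern above: host virtual addresses are never
 * handed to the DMAE engine directly. A DMA-coherent intermediate buffer
 * acts as a bounce buffer in both directions (the caller guarantees
 * length_dw fits within one DMAE_MAX_RW_SIZE chunk):
 *
 *   host -> GRC: memcpy(intermediate, src), then DMAE intermediate -> GRC
 *   GRC -> host: DMAE GRC -> intermediate, then memcpy(dst, intermediate)
 */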
@@ -730,10 +699,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
if (qed_status) {
DP_NOTICE(p_hwfn,
"qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
- qed_status,
- src_addr,
- dst_addr,
- length_cur);
+ qed_status, src_addr, dst_addr, length_cur);
break;
}
}
@@ -743,10 +709,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u64 source_addr,
- u32 grc_addr,
- u32 size_in_dwords,
- u32 flags)
+ u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct qed_dmae_params params;
@@ -768,9 +731,10 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc;
}
-int
-qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
- dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct qed_dmae_params params;
@@ -791,12 +755,11 @@ qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
return rc;
}
-int
-qed_dmae_host2host(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- dma_addr_t source_addr,
- dma_addr_t dest_addr,
- u32 size_in_dwords, struct qed_dmae_params *p_params)
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params)
{
int rc;
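/* Editor's sketch of a typical call into the host2grc wrapper defined
 * above: grc_addr is a byte address that qed_dmae_host2grc() converts to
 * dwords internally, while the length is already given in dwords. 'buf'
 * and GRC_DEST_ADDR are hypothetical names used for illustration only.
 */
static int copy_table_to_chip(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			      u32 *buf, u32 num_dwords)
{
	return qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)buf,
				 GRC_DEST_ADDR, num_dwords, 0);
}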
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 9866a20d2128..d567ba94c8d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -59,17 +59,14 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
p_hwfn->rt_data.b_valid[i] = false;
}
-void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
- u32 rt_offset,
- u32 val)
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
- u32 rt_offset, u32 *p_val,
- size_t size)
+ u32 rt_offset, u32 *p_val, size_t size)
{
size_t i;
@@ -81,10 +78,7 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
static int qed_init_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u16 rt_offset,
- u16 size,
- bool b_must_dmae)
+ u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
@@ -102,8 +96,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
* simply write the data instead of using dmae.
*/
if (!b_must_dmae) {
- qed_wr(p_hwfn, p_ptt, addr + (i << 2),
- p_init_val[i]);
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
continue;
}
@@ -115,7 +108,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(p_init_val + i),
addr + (i << 2), segment, 0);
- if (rc != 0)
+ if (rc)
return rc;
/* Jump over the entire segment, including invalid entry */
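/* Editor's sketch of the segment scan performed here (the loop
 * bookkeeping is trimmed from this hunk): consecutive valid runtime
 * entries are batched into a single DMAE transaction, and invalid
 * entries are jumped over.
 */
static void rt_segments_sketch(const bool *p_valid, u16 size)
{
	u16 i, segment;

	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;	/* hole - nothing staged here */

		/* Count the run of consecutive valid entries */
		for (segment = 1; i + segment < size && p_valid[i + segment];)
			segment++;

		/* One DMAE would cover entries [i, i + segment); then jump
		 * over the whole run, including the invalid entry after it
		 * (the loop increment skips that one).
		 */
		i += segment;
	}
}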
@@ -182,9 +175,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
@@ -199,15 +190,12 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
- addr, fill_count,
- QED_DMAE_FLAG_RW_REPL_SRC);
+ addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
}
static void qed_init_fill(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill, u32 fill_count)
{
u32 i;
@@ -218,12 +206,12 @@ static void qed_init_fill(struct qed_hwfn *p_hwfn,
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_write_op *cmd,
- bool b_must_dmae,
- bool b_can_dmae)
+ bool b_must_dmae, bool b_can_dmae)
{
+ u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
u32 data = le32_to_cpu(cmd->data);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
- u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+
u32 offset, output_len, input_len, max_size;
struct qed_dev *cdev = p_hwfn->cdev;
union init_array_hdr *hdr;
@@ -233,8 +221,7 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
array_data = cdev->fw_data->arr_data;
- hdr = (union init_array_hdr *)(array_data +
- dmae_array_offset);
+ hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
data = le32_to_cpu(hdr->raw.data);
switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
case INIT_ARR_ZIPPED:
@@ -290,13 +277,12 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct init_write_op *cmd,
- bool b_can_dmae)
+ struct init_write_op *p_cmd, bool b_can_dmae)
{
- u32 data = le32_to_cpu(cmd->data);
- u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ u32 data = le32_to_cpu(p_cmd->data);
bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
- union init_write_args *arg = &cmd->args;
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ union init_write_args *arg = &p_cmd->args;
int rc = 0;
/* Sanitize */
@@ -309,20 +295,18 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
case INIT_SRC_INLINE:
- qed_wr(p_hwfn, p_ptt, addr,
- le32_to_cpu(arg->inline_val));
+ data = le32_to_cpu(p_cmd->args.inline_val);
+ qed_wr(p_hwfn, p_ptt, addr, data);
break;
case INIT_SRC_ZEROS:
- if (b_must_dmae ||
- (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
- rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
- le32_to_cpu(arg->zeros_count));
+ data = le32_to_cpu(p_cmd->args.zeros_count);
+ if (b_must_dmae || (b_can_dmae && (data >= 64)))
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
else
- qed_init_fill(p_hwfn, p_ptt, addr, 0,
- le32_to_cpu(arg->zeros_count));
+ qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
case INIT_SRC_ARRAY:
- rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+ rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
@@ -353,8 +337,7 @@ static inline bool comp_or(u32 val, u32 expected_val)
/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct init_read_op *cmd)
+ struct qed_ptt *p_ptt, struct init_read_op *cmd)
{
bool (*comp_check)(u32 val, u32 expected_val);
u32 delay = QED_INIT_POLL_PERIOD_US, val;
@@ -412,35 +395,33 @@ static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
}
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
- u16 *offset,
- int modes)
+ u16 *p_offset, int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
const u8 *modes_tree_buf;
u8 arg1, arg2, tree_val;
modes_tree_buf = cdev->fw_data->modes_tree_buf;
- tree_val = modes_tree_buf[(*offset)++];
+ tree_val = modes_tree_buf[(*p_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
- return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+ return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
case INIT_MODE_OP_OR:
- arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
- arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 | arg2;
case INIT_MODE_OP_AND:
- arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
- arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 & arg2;
default:
tree_val -= MAX_INIT_MODE_OPS;
- return (modes & (1 << tree_val)) ? 1 : 0;
+ return (modes & BIT(tree_val)) ? 1 : 0;
}
}
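/* Editor's worked example for the recursion above: the modes tree is a
 * prefix-encoded byte stream, where NOT consumes one subtree, OR/AND
 * consume two, and any other byte is a leaf naming mode bit
 * (tree_val - MAX_INIT_MODE_OPS). So the hypothetical stream
 *
 *   { INIT_MODE_OP_AND, leaf(A), INIT_MODE_OP_NOT, leaf(B) }
 *
 * evaluates to "modes has bit A set AND does not have bit B set".
 */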
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
- struct init_if_mode_op *p_cmd,
- int modes)
+ struct init_if_mode_op *p_cmd, int modes)
{
u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
@@ -453,8 +434,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
struct init_if_phase_op *p_cmd,
- u32 phase,
- u32 phase_id)
+ u32 phase, u32 phase_id)
{
u32 data = le32_to_cpu(p_cmd->phase_data);
u32 op_data = le32_to_cpu(p_cmd->op_data);
@@ -468,10 +448,7 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
}
int qed_init_run(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int phase,
- int phase_id,
- int modes)
+ struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
@@ -483,10 +460,8 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
init_ops = cdev->fw_data->init_ops;
p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
- if (!p_hwfn->unzip_buf) {
- DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+ if (!p_hwfn->unzip_buf)
return -ENOMEM;
- }
for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
union init_op *cmd = &init_ops[cmd_num];
@@ -557,7 +532,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
/* First Dword contains metadata and should be skipped */
buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
- offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+ offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 8fa50fa23c8d..2adedc6fb6cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1775,10 +1775,9 @@ struct qed_sb_attn_info {
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
- struct qed_sb_attn_info *p_sb_desc)
+ struct qed_sb_attn_info *p_sb_desc)
{
- u16 rc = 0;
- u16 index;
+ u16 rc = 0, index;
/* Make certain HW write took effect */
mmiowb();
@@ -1802,15 +1801,13 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
* @param asserted_bits newly asserted bits
* @return int
*/
-static int qed_int_assertion(struct qed_hwfn *p_hwfn,
- u16 asserted_bits)
+static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
u32 igu_mask;
/* Mask the source of the attention in the IGU */
- igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- IGU_REG_ATTENTION_ENABLE);
+ igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
@@ -2041,7 +2038,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
if ((p_bit->flags & ATTENTION_PARITY) &&
- !!(parities & (1 << bit_idx)))
+ !!(parities & BIT(bit_idx)))
qed_int_deassertion_parity(p_hwfn, p_bit,
bit_idx);
@@ -2114,8 +2111,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
~((u32)deasserted_bits));
/* Unmask deasserted attentions in IGU */
- aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- IGU_REG_ATTENTION_ENABLE);
+ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
@@ -2160,8 +2156,7 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
index, attn_bits, attn_acks, asserted_bits,
deasserted_bits, p_sb_attn_sw->known_attn);
} else if (asserted_bits == 0x100) {
- DP_INFO(p_hwfn,
- "MFW indication via attention\n");
+ DP_INFO(p_hwfn, "MFW indication via attention\n");
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"MFW indication [deassertion]\n");
@@ -2173,18 +2168,14 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
return rc;
}
- if (deasserted_bits) {
+ if (deasserted_bits)
rc = qed_int_deassertion(p_hwfn, deasserted_bits);
- if (rc)
- return rc;
- }
return rc;
}
static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
- void __iomem *igu_addr,
- u32 ack_cons)
+ void __iomem *igu_addr, u32 ack_cons)
{
struct igu_prod_cons_update igu_ack = { 0 };
@@ -2242,9 +2233,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
/* Gather Interrupts/Attentions information */
if (!sb_info->sb_virt) {
- DP_ERR(
- p_hwfn->cdev,
- "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+ DP_ERR(p_hwfn->cdev,
+ "Interrupt Status block is NULL - cannot check for new interrupts!\n");
} else {
u32 tmp_index = sb_info->sb_ack;
@@ -2255,9 +2245,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
}
if (!sb_attn || !sb_attn->sb_attn) {
- DP_ERR(
- p_hwfn->cdev,
- "Attentions Status block is NULL - cannot check for new attentions!\n");
+ DP_ERR(p_hwfn->cdev,
+ "Attentions Status block is NULL - cannot check for new attentions!\n");
} else {
u16 tmp_index = sb_attn->index;
@@ -2313,8 +2302,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
if (p_sb->sb_attn)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
SB_ATTN_ALIGNED_SIZE(p_hwfn),
- p_sb->sb_attn,
- p_sb->sb_phys);
+ p_sb->sb_attn, p_sb->sb_phys);
kfree(p_sb);
}
@@ -2337,8 +2325,7 @@ static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
int i, j, k;
@@ -2378,15 +2365,13 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
{
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_sb_attn_info *p_sb;
- void *p_virt;
dma_addr_t p_phys = 0;
+ void *p_virt;
/* SB struct */
p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
- if (!p_sb) {
- DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+ if (!p_sb)
return -ENOMEM;
- }
/* SB ring */
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
@@ -2394,7 +2379,6 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
&p_phys, GFP_KERNEL);
if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
kfree(p_sb);
return -ENOMEM;
}
@@ -2412,9 +2396,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
- u8 pf_id,
- u16 vf_number,
- u8 vf_valid)
+ u8 pf_id, u16 vf_number, u8 vf_valid)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cau_state;
@@ -2428,12 +2410,6 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
- /* setting the time resultion to a fixed value ( = 1) */
- SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
- QED_CAU_DEF_RX_TIMER_RES);
- SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
- QED_CAU_DEF_TX_TIMER_RES);
-
cau_state = CAU_HC_DISABLE_STATE;
if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
@@ -2468,9 +2444,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t sb_phys,
- u16 igu_sb_id,
- u16 vf_number,
- u8 vf_valid)
+ u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
struct cau_sb_entry sb_entry;
@@ -2514,8 +2488,7 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
timer_res = 2;
timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
- QED_COAL_RX_STATE_MACHINE,
- timeset);
+ QED_COAL_RX_STATE_MACHINE, timeset);
if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
timer_res = 0;
@@ -2541,8 +2514,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u8 timeset)
{
struct cau_pi_entry pi_entry;
- u32 sb_offset;
- u32 pi_offset;
+ u32 sb_offset, pi_offset;
if (IS_VF(p_hwfn->cdev))
return;
@@ -2569,8 +2541,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
}
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_sb_info *sb_info)
+ struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
/* zero status block and ack counter */
sb_info->sb_ack = 0;
@@ -2590,8 +2561,7 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
*
* @return u16
*/
-static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
- u16 sb_id)
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
u16 igu_sb_id;
@@ -2603,8 +2573,12 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
else
igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
- (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+ if (sb_id == QED_SP_SB_ID)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+ else
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
return igu_sb_id;
}
@@ -2612,9 +2586,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr,
- u16 sb_id)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
sb_info->sb_virt = sb_virt_addr;
sb_info->sb_phys = sb_phy_addr;
@@ -2650,8 +2622,7 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
}
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
if (sb_id == QED_SP_SB_ID) {
DP_ERR(p_hwfn, "Do Not free sp sb using this function");
@@ -2685,8 +2656,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
kfree(p_sb);
}
-static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_sb_sp_info *p_sb;
dma_addr_t p_phys = 0;
@@ -2694,17 +2664,14 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
/* SB struct */
p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
- if (!p_sb) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+ if (!p_sb)
return -ENOMEM;
- }
/* SB ring */
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
SB_ALIGNED_SIZE(p_hwfn),
&p_phys, GFP_KERNEL);
if (!p_virt) {
- DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
kfree(p_sb);
return -ENOMEM;
}
@@ -2721,9 +2688,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
- void *cookie,
- u8 *sb_idx,
- __le16 **p_fw_cons)
+ void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
int rc = -ENOMEM;
@@ -2764,8 +2729,7 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
}
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_int_mode int_mode)
+ struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
@@ -2809,7 +2773,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
rc = qed_slowpath_irq_req(p_hwfn);
- if (rc != 0) {
+ if (rc) {
DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
return -EINVAL;
}
@@ -2822,8 +2786,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
-void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
p_hwfn->b_int_enabled = 0;
@@ -2950,13 +2913,11 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.opaque_fid, b_set);
}
-static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 sb_id)
+static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 sb_id)
{
u32 val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY +
- sizeof(u32) * sb_id);
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
struct qed_igu_block *p_block;
p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2983,8 +2944,7 @@ out:
return val;
}
-int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
u32 val, min_vf = 0, max_vf = 0;
@@ -2993,7 +2953,6 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
-
if (!p_hwfn->hw_info.p_igu_info)
return -ENOMEM;
@@ -3104,22 +3063,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
*/
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
- u32 igu_pf_conf = 0;
-
- igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
- u64 intr_status = 0;
- u32 intr_status_lo = 0;
- u32 intr_status_hi = 0;
u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
IGU_CMD_INT_ACK_BASE;
u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
IGU_CMD_INT_ACK_BASE;
+ u32 intr_status_hi = 0, intr_status_lo = 0;
+ u64 intr_status = 0;
intr_status_lo = REG_RD(p_hwfn,
GTT_BAR0_MAP_REG_IGU_CMD +
@@ -3153,26 +3109,20 @@ static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
kfree(p_hwfn->sp_dpc);
}
-int qed_int_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int rc = 0;
rc = qed_int_sp_dpc_alloc(p_hwfn);
- if (rc) {
- DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+ if (rc)
return rc;
- }
+
rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
- if (rc) {
- DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+ if (rc)
return rc;
- }
+
rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
- if (rc) {
- DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
- return rc;
- }
+
return rc;
}
@@ -3183,8 +3133,7 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
qed_int_sp_dpc_free(p_hwfn);
}
-void qed_int_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
qed_int_sb_attn_setup(p_hwfn, p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 401e738543b5..ddd410a91e13 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -52,7 +52,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
u16 rx_mode = 0;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -80,8 +80,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
/* TPA related fields */
- memset(&p_ramrod->tpa_param, 0,
- sizeof(struct eth_vport_tpa_param));
+ memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
@@ -102,6 +101,9 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->tx_switching_en = p_params->tx_switching;
+ p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
+ p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
p_params->concrete_fid);
@@ -109,8 +111,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_start_params *p_params)
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
{
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
@@ -306,14 +308,14 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
memset(&p_ramrod->approx_mcast.bins, 0,
sizeof(p_ramrod->approx_mcast.bins));
- if (p_params->update_approx_mcast_flg) {
- p_ramrod->common.update_approx_mcast_flg = 1;
- for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
- u32 *p_bins = (u32 *)p_params->bins;
- __le32 val = cpu_to_le32(p_bins[i]);
+ if (!p_params->update_approx_mcast_flg)
+ return;
- p_ramrod->approx_mcast.bins[i] = val;
- }
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)p_params->bins;
+
+ p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
}
}
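/* Editor's sketch of how a bin gets set in the first place (based on the
 * CRC helpers later in this file; the exact seed and byte order are
 * omitted here): a CRC over the multicast MAC selects one of 256 bins,
 * and each bin is a single bit across the ETH_MULTICAST_MAC_BINS_IN_REGS
 * 32-bit words copied above.
 */
static void set_mcast_bin(u32 *p_bins, u8 bin)
{
	p_bins[bin / 32] |= 1 << (bin % 32);
}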
@@ -336,7 +338,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
}
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -361,8 +363,8 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
p_cmn->accept_any_vlan = p_params->accept_any_vlan;
- p_cmn->update_accept_any_vlan_flg =
- p_params->update_accept_any_vlan_flg;
+ val = p_params->update_accept_any_vlan_flg;
+ p_cmn->update_accept_any_vlan_flg = val;
p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
val = p_params->update_inner_vlan_removal_flg;
@@ -411,7 +413,7 @@ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
return qed_vf_pf_vport_stop(p_hwfn);
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -476,7 +478,7 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data);
- if (rc != 0) {
+ if (rc) {
DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
return rc;
}
@@ -511,11 +513,12 @@ static int qed_sp_release_queue_cid(
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
- struct qed_queue_start_common_params *params,
+ struct qed_queue_start_common_params *p_params,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, bool b_use_zone_a_prod)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -526,23 +529,23 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
int rc = -EINVAL;
/* Store information for the stop */
- p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
- p_rx_cid->cid = cid;
- p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = params->vport_id;
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+ p_rx_cid->cid = cid;
+ p_rx_cid->opaque_fid = opaque_fid;
+ p_rx_cid->vport_id = p_params->vport_id;
- rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
- if (rc != 0)
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
return rc;
- rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
- if (rc != 0)
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
+ if (rc)
return rc;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, cid, params->queue_id, params->vport_id,
- params->sb);
+ opaque_fid,
+ cid, p_params->queue_id, p_params->vport_id, p_params->sb);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -558,24 +561,28 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = cpu_to_le16(params->sb);
- p_ramrod->sb_index = params->sb_idx;
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = stats_id;
- p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
- p_ramrod->complete_cqe_flg = 0;
- p_ramrod->complete_event_flg = 1;
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
- p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
+ p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
- p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- p_ramrod->vf_rx_prod_index = params->vf_qid;
- if (params->vf_qid)
+ if (p_params->vf_qid || b_use_zone_a_prod) {
+ p_ramrod->vf_rx_prod_index = p_params->vf_qid;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
+ "Queue%s is meant for VF rxq[%02x]\n",
+ b_use_zone_a_prod ? " [legacy]" : "",
+ p_params->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+ }
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -583,7 +590,7 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
- struct qed_queue_start_common_params *params,
+ struct qed_queue_start_common_params *p_params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
@@ -597,20 +604,20 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_rxq_start(p_hwfn,
- params->queue_id,
- params->sb,
- params->sb_idx,
+ p_params->queue_id,
+ p_params->sb,
+ (u8)p_params->sb_idx,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr, cqe_pbl_size, pp_prod);
}
- rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
- if (rc != 0)
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
+ if (rc)
return rc;
- rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
- if (rc != 0)
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+ if (rc)
return rc;
*pp_prod = (u8 __iomem *)p_hwfn->regview +
@@ -622,9 +629,8 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
(u32 *)(&init_prod_val));
/* Allocate a CID for the queue */
- p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_rx_cid->cid);
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return rc;
@@ -634,14 +640,13 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
opaque_fid,
p_rx_cid->cid,
- params,
+ p_params,
abs_stats_id,
bd_max_bytes,
bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size);
+ cqe_pbl_addr, cqe_pbl_size, false);
- if (rc != 0)
+ if (rc)
qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
return rc;
@@ -788,21 +793,20 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_ramrod = &p_ent->ramrod.tx_queue_start;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = abs_vport_id;
- p_ramrod->sb_id = cpu_to_le16(p_params->sb);
- p_ramrod->sb_index = p_params->sb_idx;
- p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->stats_counter_id = stats_id;
- p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
- p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+ p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
+
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
- pq_id = qed_get_qm_pq(p_hwfn,
- PROTOCOLID_ETH,
- p_pq_params);
- p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+ pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -836,8 +840,7 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
memset(&pq_params, 0, sizeof(pq_params));
/* Allocate a CID for the queue */
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_tx_cid->cid);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return rc;
@@ -896,8 +899,7 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}
-static enum eth_filter_action
-qed_filter_action(enum qed_filter_opcode opcode)
+static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
@@ -1033,19 +1035,19 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
- p_second_filter->type = p_first_filter->type;
- p_second_filter->mac_msb = p_first_filter->mac_msb;
- p_second_filter->mac_mid = p_first_filter->mac_mid;
- p_second_filter->mac_lsb = p_first_filter->mac_lsb;
- p_second_filter->vlan_id = p_first_filter->vlan_id;
- p_second_filter->vni = p_first_filter->vni;
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
p_first_filter->vport_id = vport_to_remove_from;
- p_second_filter->action = ETH_FILTER_ACTION_ADD;
- p_second_filter->vport_id = vport_to_add_to;
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
p_first_filter->vport_id = vport_to_add_to;
memcpy(p_second_filter, p_first_filter,
@@ -1086,7 +1088,7 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
&p_ramrod, &p_ent,
comp_mode, p_comp_data);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
return rc;
}
@@ -1094,10 +1096,8 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
p_header->assert_on_error = p_filter_cmd->assert_on_error;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc != 0) {
- DP_ERR(p_hwfn,
- "Unicast filter ADD command failed %d\n",
- rc);
+ if (rc) {
+ DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
return rc;
}
@@ -1136,15 +1136,10 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
* Return:
******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
- u32 crc32_length,
- u32 crc32_seed,
- u8 complement)
+ u32 crc32_length, u32 crc32_seed, u8 complement)
{
- u32 byte = 0;
- u32 bit = 0;
- u8 msb = 0;
- u8 current_byte = 0;
- u32 crc32_result = crc32_seed;
+ u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+ u8 msb = 0, current_byte = 0;
if ((!crc32_packet) ||
(crc32_length == 0) ||
@@ -1164,9 +1159,7 @@ static u32 qed_calc_crc32c(u8 *crc32_packet,
return crc32_result;
}
-static inline u32 qed_crc32c_le(u32 seed,
- u8 *mac,
- u32 len)
+static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
u32 packet_buf[2] = { 0 };
@@ -1196,17 +1189,14 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0;
int rc, i;
- if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ if (p_filter_cmd->opcode == QED_FILTER_ADD)
rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
&abs_vport_id);
- if (rc)
- return rc;
- } else {
+ else
rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
&abs_vport_id);
- if (rc)
- return rc;
- }
+ if (rc)
+ return rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -1244,11 +1234,11 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
 	/* Convert to correct endianness */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ struct vport_update_ramrod_mcast *p_ramrod_bins;
u32 *p_bins = (u32 *)bins;
- struct vport_update_ramrod_mcast *approx_mcast;
- approx_mcast = &p_ramrod->approx_mcast;
- approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+ p_ramrod_bins = &p_ramrod->approx_mcast;
+ p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
}
}
@@ -1286,8 +1276,7 @@ static int qed_filter_mcast_cmd(struct qed_dev *cdev,
rc = qed_sp_eth_filter_mcast(p_hwfn,
opaque_fid,
p_filter_cmd,
- comp_mode,
- p_comp_data);
+ comp_mode, p_comp_data);
}
return rc;
}
@@ -1314,9 +1303,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
rc = qed_sp_eth_filter_ucast(p_hwfn,
opaque_fid,
p_filter_cmd,
- comp_mode,
- p_comp_data);
- if (rc != 0)
+ comp_mode, p_comp_data);
+ if (rc)
break;
}
@@ -1590,8 +1578,7 @@ out:
}
}
-void qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
u32 i;
@@ -1698,6 +1685,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
&info->num_vlan_filters);
qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+
+ info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
}
qed_fill_dev_info(cdev, &info->common);
@@ -1766,8 +1755,7 @@ static int qed_start_vport(struct qed_dev *cdev,
return 0;
}
-static int qed_stop_vport(struct qed_dev *cdev,
- u8 vport_id)
+static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
int rc, i;
@@ -1775,8 +1763,7 @@ static int qed_stop_vport(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
rc = qed_sp_vport_stop(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- vport_id);
+ p_hwfn->hw_info.opaque_fid, vport_id);
if (rc) {
DP_ERR(cdev, "Failed to stop VPORT\n");
@@ -1801,10 +1788,8 @@ static int qed_update_vport(struct qed_dev *cdev,
/* Translate protocol params into sp params */
sp_params.vport_id = params->vport_id;
- sp_params.update_vport_active_rx_flg =
- params->update_vport_active_flg;
- sp_params.update_vport_active_tx_flg =
- params->update_vport_active_flg;
+ sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
@@ -1817,8 +1802,7 @@ static int qed_update_vport(struct qed_dev *cdev,
* We need to re-fix the rss values per engine for CMT.
*/
if (cdev->num_hwfns > 1 && params->update_rss_flg) {
- struct qed_update_vport_rss_params *rss =
- &params->rss_params;
+ struct qed_update_vport_rss_params *rss = &params->rss_params;
int k, max = 0;
/* Find largest entry, since it's possible RSS needs to
@@ -1861,8 +1845,8 @@ static int qed_update_vport(struct qed_dev *cdev,
QED_RSS_IND_TABLE_SIZE * sizeof(u16));
memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
QED_RSS_KEY_SIZE * sizeof(u32));
+ sp_params.rss_params = &sp_rss_params;
}
- sp_params.rss_params = &sp_rss_params;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1893,8 +1877,8 @@ static int qed_start_rxq(struct qed_dev *cdev,
u16 cqe_pbl_size,
void __iomem **pp_prod)
{
- int rc, hwfn_index;
struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
hwfn_index = params->rss_id % cdev->num_hwfns;
p_hwfn = &cdev->hwfns[hwfn_index];
@@ -1935,8 +1919,7 @@ static int qed_stop_rxq(struct qed_dev *cdev,
rc = qed_sp_eth_rx_queue_stop(p_hwfn,
params->rx_queue_id / cdev->num_hwfns,
- params->eq_completion_only,
- false);
+ params->eq_completion_only, false);
if (rc) {
DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
return rc;
@@ -2047,11 +2030,11 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
memset(&accept_flags, 0, sizeof(accept_flags));
- accept_flags.update_rx_mode_config = 1;
- accept_flags.update_tx_mode_config = 1;
- accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
- QED_ACCEPT_MCAST_MATCHED |
- QED_ACCEPT_BCAST;
+ accept_flags.update_rx_mode_config = 1;
+ accept_flags.update_tx_mode_config = 1;
+ accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
QED_ACCEPT_MCAST_MATCHED |
QED_ACCEPT_BCAST;
@@ -2072,9 +2055,8 @@ static int qed_configure_filter_ucast(struct qed_dev *cdev,
struct qed_filter_ucast ucast;
if (!params->vlan_valid && !params->mac_valid) {
- DP_NOTICE(
- cdev,
- "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
+ DP_NOTICE(cdev,
+ "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
return -EINVAL;
}
@@ -2135,8 +2117,7 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev,
for (i = 0; i < mcast.num_mc_addrs; i++)
ether_addr_copy(mcast.mac[i], params->mac[i]);
- return qed_filter_mcast_cmd(cdev, &mcast,
- QED_SPQ_MODE_CB, NULL);
+ return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}
static int qed_configure_filter(struct qed_dev *cdev,
@@ -2153,15 +2134,13 @@ static int qed_configure_filter(struct qed_dev *cdev,
accept_flags = params->filter.accept_flags;
return qed_configure_filter_rx_mode(cdev, accept_flags);
default:
- DP_NOTICE(cdev, "Unknown filter type %d\n",
- (int)params->type);
+ DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
return -EINVAL;
}
}
static int qed_fp_cqe_completion(struct qed_dev *dev,
- u8 rss_id,
- struct eth_slow_path_rx_cqe *cqe)
+ u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
cqe);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 002114543451..e495d62fcc03 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -102,6 +102,8 @@ struct qed_sp_vport_start_params {
u16 opaque_fid;
u8 vport_id;
u16 mtu;
+ bool check_mac;
+ bool check_ethtype;
};
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
@@ -213,6 +215,8 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
+
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
@@ -223,7 +227,8 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, bool b_use_zone_a_prod);
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
new file mode 100644
index 000000000000..a6db10717d5c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -0,0 +1,1792 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <net/ipv6.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
+#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
+
+#define QED_LL2_TX_SIZE (256)
+#define QED_LL2_RX_SIZE (4096)
+
+struct qed_cb_ll2_info {
+ int rx_cnt;
+ u32 rx_size;
+ u8 handle;
+ bool frags_mapped;
+
+ /* Lock protecting LL2 buffer lists in sleepless context */
+ spinlock_t lock;
+ struct list_head list;
+
+ const struct qed_ll2_cb_ops *cbs;
+ void *cb_cookie;
+};
+
+struct qed_ll2_buffer {
+ struct list_head list;
+ void *data;
+ dma_addr_t phys_addr;
+};
+
+static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct sk_buff *skb = cookie;
+
+ /* All we need to do is release the mapping */
+ dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
+ cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
+ b_last_fragment);
+
+ if (cdev->ll2->frags_mapped)
+		/* Mapped frags were received; free the skb with nr_frags
+		 * set to 0 so the frag pages aren't released with it.
+		 */
+ skb_shinfo(skb)->nr_frags = 0;
+
+ dev_kfree_skb_any(skb);
+}
+
+static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
+ u8 **data, dma_addr_t *phys_addr)
+{
+ *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
+ if (!(*data)) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
+ return -ENOMEM;
+ }
+
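+	/* Map past the NET_SKB_PAD headroom; the skb later built around
+	 * this buffer reserves that headroom again.
+	 */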
+ *phys_addr = dma_map_single(&cdev->pdev->dev,
+ ((*data) + NET_SKB_PAD),
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
+ DP_INFO(cdev, "Failed to map LL2 buffer data\n");
+ kfree((*data));
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
+ struct qed_ll2_buffer *buffer)
+{
+ spin_lock_bh(&cdev->ll2->lock);
+
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ kfree(buffer->data);
+ list_del(&buffer->list);
+
+ cdev->ll2->rx_cnt--;
+ if (!cdev->ll2->rx_cnt)
+ DP_INFO(cdev, "All LL2 entries were removed\n");
+
+ spin_unlock_bh(&cdev->ll2->lock);
+
+ return 0;
+}
+
+static void qed_ll2_kill_buffers(struct qed_dev *cdev)
+{
+ struct qed_ll2_buffer *buffer, *tmp_buffer;
+
+ list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
+ qed_ll2_dealloc_buffer(cdev, buffer);
+}
+
+void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ struct qed_ll2_rx_packet *p_pkt,
+ struct core_rx_fast_path_cqe *p_cqe,
+ bool b_last_packet)
+{
+ u16 packet_length = le16_to_cpu(p_cqe->packet_length);
+ struct qed_ll2_buffer *buffer = p_pkt->cookie;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u16 vlan = le16_to_cpu(p_cqe->vlan);
+ u32 opaque_data_0, opaque_data_1;
+ u8 pad = p_cqe->placement_offset;
+ dma_addr_t new_phys_addr;
+ struct sk_buff *skb;
+ bool reuse = false;
+ int rc = -EINVAL;
+ u8 *new_data;
+
+ opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
+ opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
+ "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
+ (u64)p_pkt->rx_buf_addr, pad, packet_length,
+ le16_to_cpu(p_cqe->parse_flags.flags), vlan,
+ opaque_data_0, opaque_data_1);
+
+ if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buffer->data, packet_length, false);
+ }
+
+ /* Determine if data is valid */
+ if (packet_length < ETH_HLEN)
+ reuse = true;
+
+	/* Allocate a replacement buffer; reuse the current one upon failure */
+ if (!reuse)
+ rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
+ &new_phys_addr);
+
+	/* If the buffer is to be reused or no replacement was allocated, repost it */
+ if (rc)
+ goto out_post;
+
+ skb = build_skb(buffer->data, 0);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto out_post;
+ }
+
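+	/* The buffer was DMA-mapped NET_SKB_PAD bytes into the
+	 * allocation, so skip that headroom in addition to the HW
+	 * placement offset.
+	 */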
+ pad += NET_SKB_PAD;
+ skb_reserve(skb, pad);
+ skb_put(skb, packet_length);
+ skb_checksum_none_assert(skb);
+
+	/* Get partial Ethernet information instead of eth_type_trans(),
+	 * since we don't have an associated net_device.
+ */
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ /* Pass SKB onward */
+ if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
+ if (vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+ cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
+ opaque_data_0, opaque_data_1);
+ }
+
+ /* Update Buffer information and update FW producer */
+ buffer->data = new_data;
+ buffer->phys_addr = new_phys_addr;
+
+out_post:
+ rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
+
+ if (rc)
+ qed_ll2_dealloc_buffer(cdev, buffer);
+}
+
+static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ bool b_lock,
+ bool b_only_active)
+{
+ struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
+
+ if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
+ return NULL;
+
+ if (!p_hwfn->p_ll2_info)
+ return NULL;
+
+ p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+ if (b_only_active) {
+ if (b_lock)
+ mutex_lock(&p_ll2_conn->mutex);
+ if (p_ll2_conn->b_active)
+ p_ret = p_ll2_conn;
+ if (b_lock)
+ mutex_unlock(&p_ll2_conn->mutex);
+ } else {
+ p_ret = p_ll2_conn;
+ }
+
+ return p_ret;
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
+ *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
+}
+
+static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ bool b_last_packet = false, b_last_frag = false;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_tx_queue *p_tx;
+ dma_addr_t tx_frag;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ p_tx = &p_ll2_conn->tx_queue;
+
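+	/* Drain every still-active Tx packet, completing each back to its
+	 * owner as though the FW had consumed it.
+	 */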
+ while (!list_empty(&p_tx->active_descq)) {
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ b_last_packet = list_empty(&p_tx->active_descq);
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+ p_tx->cur_completing_packet = *p_pkt;
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ tx_frag = p_pkt->bds_set[0].tx_frag;
+ if (p_ll2_conn->gsi_enable)
+ qed_ll2b_release_tx_gsi_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+ else
+ qed_ll2b_complete_tx_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+	}
+}
+
+static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = p_cookie;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
+ struct qed_ll2_tx_packet *p_pkt;
+ bool b_last_frag = false;
+ unsigned long flags;
+ dma_addr_t tx_frag;
+ int rc = -EINVAL;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ if (p_tx->b_completing_packet) {
+ rc = -EBUSY;
+ goto out;
+ }
+
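+	/* The FW consumer is a cyclic u16 counter; signed 16-bit
+	 * subtraction yields the number of newly completed BDs even
+	 * across a wrap.
+	 */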
+ new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+ num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+ while (num_bds) {
+ if (list_empty(&p_tx->active_descq))
+ goto out;
+
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ goto out;
+
+ p_tx->b_completing_packet = true;
+ p_tx->cur_completing_packet = *p_pkt;
+ num_bds_in_packet = p_pkt->bd_used;
+ list_del(&p_pkt->list_entry);
+
+ if (num_bds < num_bds_in_packet) {
+			DP_NOTICE(p_hwfn,
+				  "Rest of BDs does not cover the whole packet\n");
+ goto out;
+ }
+
+ num_bds -= num_bds_in_packet;
+ p_tx->bds_idx += num_bds_in_packet;
+ while (num_bds_in_packet--)
+ qed_chain_consume(&p_tx->txq_chain);
+
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ tx_frag = p_pkt->bds_set[0].tx_frag;
+ if (p_ll2_conn->gsi_enable)
+ qed_ll2b_complete_tx_gsi_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag, !num_bds);
+ else
+ qed_ll2b_complete_tx_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag, !num_bds);
+ spin_lock_irqsave(&p_tx->lock, flags);
+ }
+
+ p_tx->b_completing_packet = false;
+ rc = 0;
+out:
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ return rc;
+}
+
+static int
+qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long lock_flags, bool b_last_cqe)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ u16 packet_length, parse_flags, vlan;
+ u32 src_mac_addrhi;
+ u16 src_mac_addrlo;
+
+ if (!list_empty(&p_rx->active_descq))
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt) {
+ DP_NOTICE(p_hwfn,
+ "GSI Rx completion but active_descq is empty\n");
+ return -EIO;
+ }
+
+ list_del(&p_pkt->list_entry);
+ parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
+ packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
+ vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
+ src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
+ src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
+ if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ DP_NOTICE(p_hwfn,
+ "Mismatch between active_descq and the LL2 Rx chain\n");
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ qed_ll2b_complete_rx_gsi_packet(p_hwfn,
+ p_ll2_info->my_id,
+ p_pkt->cookie,
+ p_pkt->rx_buf_addr,
+ packet_length,
+ p_cqe->rx_cqe_gsi.data_length_error,
+ parse_flags,
+ vlan,
+ src_mac_addrhi,
+ src_mac_addrlo, b_last_cqe);
+ spin_lock_irqsave(&p_rx->lock, lock_flags);
+
+ return 0;
+}
+
+static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long lock_flags,
+ bool b_last_cqe)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+
+ if (!list_empty(&p_rx->active_descq))
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt) {
+ DP_NOTICE(p_hwfn,
+ "LL2 Rx completion but active_descq is empty\n");
+ return -EIO;
+ }
+ list_del(&p_pkt->list_entry);
+
+ if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ DP_NOTICE(p_hwfn,
+ "Mismatch between active_descq and the LL2 Rx chain\n");
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
+ p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
+ spin_lock_irqsave(&p_rx->lock, lock_flags);
+
+ return 0;
+}
+
+static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = cookie;
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ union core_rx_cqe_union *cqe = NULL;
+ u16 cq_new_idx = 0, cq_old_idx = 0;
+ unsigned long flags = 0;
+ int rc = 0;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+ cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+ while (cq_new_idx != cq_old_idx) {
+ bool b_last_cqe = (cq_new_idx == cq_old_idx);
+
+ cqe = qed_chain_consume(&p_rx->rcq_chain);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
+ cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
+
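+		/* Slow-path CQEs are unexpected on this ring; GSI and
+		 * regular completions are dispatched to their handlers.
+		 */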
+ switch (cqe->rx_cqe_sp.type) {
+ case CORE_RX_CQE_TYPE_SLOW_PATH:
+ DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
+ rc = -EINVAL;
+ break;
+ case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
+ rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
+ cqe, flags, b_last_cqe);
+ break;
+ case CORE_RX_CQE_TYPE_REGULAR:
+ rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
+ cqe, flags, b_last_cqe);
+ break;
+ default:
+ rc = -EIO;
+ }
+ }
+
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return rc;
+}
+
+void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ struct qed_ll2_rx_queue *p_rx;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ p_rx = &p_ll2_conn->rx_queue;
+
+ while (!list_empty(&p_rx->active_descq)) {
+ dma_addr_t rx_buf_addr;
+ void *cookie;
+ bool b_last;
+
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ rx_buf_addr = p_pkt->rx_buf_addr;
+ cookie = p_pkt->cookie;
+
+ b_last = list_empty(&p_rx->active_descq);
+ }
+}
+
+static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ u8 action_on_error)
+{
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct core_rx_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 cqe_pbl_size;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_rx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+ p_ramrod->sb_index = p_rx->rx_sb_index;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ DMA_REGPAIR_LE(p_ramrod->bd_base,
+ p_rx->rxq_chain.p_phys_addr);
+ cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
+ qed_chain_get_pbl_phys(&p_rx->rcq_chain));
+
+ p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
+ p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+ p_ramrod->queue_id = p_ll2_conn->queue_id;
+ p_ramrod->main_func_queue = 1;
+
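+	/* In default/SI multi-function modes, a non-RoCE main-function
+	 * queue accepts all broadcast/multicast traffic.
+	 */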
+ if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
+ p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
+ p_ramrod->mf_si_bcast_accept_all = 1;
+ p_ramrod->mf_si_mcast_accept_all = 1;
+ } else {
+ p_ramrod->mf_si_bcast_accept_all = 0;
+ p_ramrod->mf_si_mcast_accept_all = 0;
+ }
+
+ p_ramrod->action_on_error.error_type = action_on_error;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct core_tx_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params pq_params;
+ u16 pq_id = 0, pbl_size;
+ int rc = -EINVAL;
+
+ if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_tx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+ p_ramrod->sb_index = p_tx->tx_sb_index;
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ p_ll2_conn->tx_stats_en = 1;
+ p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
+ p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
+
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
+ qed_chain_get_pbl_phys(&p_tx->txq_chain));
+ pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = p_ll2_conn->tx_tc;
+ pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+ switch (conn_type) {
+ case QED_LL2_TYPE_ISCSI:
+ case QED_LL2_TYPE_ISCSI_OOO:
+ p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ break;
+ case QED_LL2_TYPE_ROCE:
+ p_ramrod->conn_type = PROTOCOLID_ROCE;
+ break;
+ default:
+ p_ramrod->conn_type = PROTOCOLID_ETH;
+ DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
+ }
+
+ p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct core_rx_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
+
+ p_ramrod->complete_event_flg = 1;
+ p_ramrod->queue_id = p_ll2_conn->queue_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
+{
+ struct qed_ll2_rx_packet *p_descq;
+ u32 capacity;
+ int rc = 0;
+
+ if (!rx_num_desc)
+ goto out;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_NEXT_PTR,
+ QED_CHAIN_CNT_TYPE_U16,
+ rx_num_desc,
+ sizeof(struct core_rx_bd),
+ &p_ll2_info->rx_queue.rxq_chain);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
+ goto out;
+ }
+
+ capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
+ p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
+ GFP_KERNEL);
+ if (!p_descq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
+ goto out;
+ }
+ p_ll2_info->rx_queue.descq_array = p_descq;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ rx_num_desc,
+ sizeof(struct core_rx_fast_path_cqe),
+ &p_ll2_info->rx_queue.rcq_chain);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
+ p_ll2_info->conn_type, rx_num_desc);
+
+out:
+ return rc;
+}
+
+static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info,
+ u16 tx_num_desc)
+{
+ struct qed_ll2_tx_packet *p_descq;
+ u32 capacity;
+ int rc = 0;
+
+ if (!tx_num_desc)
+ goto out;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ tx_num_desc,
+ sizeof(struct core_tx_bd),
+ &p_ll2_info->tx_queue.txq_chain);
+ if (rc)
+ goto out;
+
+ capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
+ p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
+ GFP_KERNEL);
+ if (!p_descq) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ p_ll2_info->tx_queue.descq_array = p_descq;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
+ p_ll2_info->conn_type, tx_num_desc);
+
+out:
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
+ tx_num_desc);
+ return rc;
+}
+
+int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_params,
+ u16 rx_num_desc,
+ u16 tx_num_desc,
+ u8 *p_connection_handle)
+{
+ qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
+ struct qed_ll2_info *p_ll2_info = NULL;
+ int rc;
+ u8 i;
+
+ if (!p_connection_handle || !p_hwfn->p_ll2_info)
+ return -EINVAL;
+
+ /* Find a free connection to be used */
+ for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
+ mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
+ if (p_hwfn->p_ll2_info[i].b_active) {
+ mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+ continue;
+ }
+
+ p_hwfn->p_ll2_info[i].b_active = true;
+ p_ll2_info = &p_hwfn->p_ll2_info[i];
+ mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+ break;
+ }
+ if (!p_ll2_info)
+ return -EBUSY;
+
+ p_ll2_info->conn_type = p_params->conn_type;
+ p_ll2_info->mtu = p_params->mtu;
+ p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
+ p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
+ p_ll2_info->tx_tc = p_params->tx_tc;
+ p_ll2_info->tx_dest = p_params->tx_dest;
+ p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
+ p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
+ p_ll2_info->gsi_enable = p_params->gsi_enable;
+
+ rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
+ if (rc)
+ goto q_allocate_fail;
+
+ rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
+ if (rc)
+ goto q_allocate_fail;
+
+ /* Register callbacks for the Rx/Tx queues */
+ comp_rx_cb = qed_ll2_rxq_completion;
+ comp_tx_cb = qed_ll2_txq_completion;
+
+ if (rx_num_desc) {
+ qed_int_register_cb(p_hwfn, comp_rx_cb,
+ &p_hwfn->p_ll2_info[i],
+ &p_ll2_info->rx_queue.rx_sb_index,
+ &p_ll2_info->rx_queue.p_fw_cons);
+ p_ll2_info->rx_queue.b_cb_registred = true;
+ }
+
+ if (tx_num_desc) {
+ qed_int_register_cb(p_hwfn,
+ comp_tx_cb,
+ &p_hwfn->p_ll2_info[i],
+ &p_ll2_info->tx_queue.tx_sb_index,
+ &p_ll2_info->tx_queue.p_fw_cons);
+ p_ll2_info->tx_queue.b_cb_registred = true;
+ }
+
+ *p_connection_handle = i;
+ return rc;
+
+q_allocate_fail:
+ qed_ll2_release_connection(p_hwfn, i);
+ return -ENOMEM;
+}
+
+static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ u8 action_on_error = 0;
+
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+ return 0;
+
+ DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
+
+ SET_FIELD(action_on_error,
+ CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
+ p_ll2_conn->ai_err_packet_too_big);
+ SET_FIELD(action_on_error,
+ CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+
+ return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+}
+
+int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_rx_queue *p_rx;
+ struct qed_ll2_tx_queue *p_tx;
+ int rc = -EINVAL;
+ u32 i, capacity;
+ u8 qid;
+
+ p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_rx = &p_ll2_conn->rx_queue;
+ p_tx = &p_ll2_conn->tx_queue;
+
+ qed_chain_reset(&p_rx->rxq_chain);
+ qed_chain_reset(&p_rx->rcq_chain);
+ INIT_LIST_HEAD(&p_rx->active_descq);
+ INIT_LIST_HEAD(&p_rx->free_descq);
+ INIT_LIST_HEAD(&p_rx->posting_descq);
+ spin_lock_init(&p_rx->lock);
+ capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
+ for (i = 0; i < capacity; i++)
+ list_add_tail(&p_rx->descq_array[i].list_entry,
+ &p_rx->free_descq);
+ *p_rx->p_fw_cons = 0;
+
+ qed_chain_reset(&p_tx->txq_chain);
+ INIT_LIST_HEAD(&p_tx->active_descq);
+ INIT_LIST_HEAD(&p_tx->free_descq);
+ INIT_LIST_HEAD(&p_tx->sending_descq);
+ spin_lock_init(&p_tx->lock);
+ capacity = qed_chain_get_capacity(&p_tx->txq_chain);
+ for (i = 0; i < capacity; i++)
+ list_add_tail(&p_tx->descq_array[i].list_entry,
+ &p_tx->free_descq);
+ p_tx->cur_completing_bd_idx = 0;
+ p_tx->bds_idx = 0;
+ p_tx->b_completing_packet = false;
+ p_tx->cur_send_packet = NULL;
+ p_tx->cur_send_frag_num = 0;
+ p_tx->cur_completing_frag_num = 0;
+ *p_tx->p_fw_cons = 0;
+
+ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+
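+	/* Derive the absolute queue id from this function's LL2 queue
+	 * range, then compute the Rx producer address in TSDM RAM and
+	 * the Tx doorbell address for this CID.
+	 */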
+ qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
+ p_ll2_conn->queue_id = qid;
+ p_ll2_conn->tx_stats_id = qid;
+ p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(p_ll2_conn->cid,
+ DQ_DEMS_LEGACY);
+
+ rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+
+ rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
+
+ return rc;
+}
+
+static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_rx_queue *p_rx,
+ struct qed_ll2_rx_packet *p_curp)
+{
+ struct qed_ll2_rx_packet *p_posting_packet = NULL;
+ struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
+ bool b_notify_fw = false;
+ u16 bd_prod, cq_prod;
+
+ /* This handles the flushing of already posted buffers */
+ while (!list_empty(&p_rx->posting_descq)) {
+ p_posting_packet = list_first_entry(&p_rx->posting_descq,
+ struct qed_ll2_rx_packet,
+ list_entry);
+ list_del(&p_posting_packet->list_entry);
+ list_add_tail(&p_posting_packet->list_entry,
+ &p_rx->active_descq);
+ b_notify_fw = true;
+ }
+
+ /* This handles the supplied packet [if there is one] */
+ if (p_curp) {
+ list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
+ b_notify_fw = true;
+ }
+
+ if (!b_notify_fw)
+ return;
+
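+	/* Publish both BD and CQE producers to the FW in a single
+	 * 32-bit register write.
+	 */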
+ bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
+ cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
+ rx_prod.bd_prod = cpu_to_le16(bd_prod);
+ rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+ DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+}
+
+int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 buf_len, void *cookie, u8 notify_fw)
+{
+ struct core_rx_bd_with_buff_len *p_curb = NULL;
+ struct qed_ll2_rx_packet *p_curp = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_rx_queue *p_rx;
+ unsigned long flags;
+ void *p_data;
+ int rc = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_rx = &p_ll2_conn->rx_queue;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+ if (!list_empty(&p_rx->free_descq))
+ p_curp = list_first_entry(&p_rx->free_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (p_curp) {
+ if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
+ qed_chain_get_elem_left(&p_rx->rcq_chain)) {
+ p_data = qed_chain_produce(&p_rx->rxq_chain);
+ p_curb = (struct core_rx_bd_with_buff_len *)p_data;
+ qed_chain_produce(&p_rx->rcq_chain);
+ }
+ }
+
+	/* If we're lacking entries, let's try to flush buffers to FW */
+ if (!p_curp || !p_curb) {
+ rc = -EBUSY;
+ p_curp = NULL;
+ goto out_notify;
+ }
+
+ /* We have an Rx packet we can fill */
+ DMA_REGPAIR_LE(p_curb->addr, addr);
+ p_curb->buff_length = cpu_to_le16(buf_len);
+ p_curp->rx_buf_addr = addr;
+ p_curp->cookie = cookie;
+ p_curp->rxq_bd = p_curb;
+ p_curp->buf_length = buf_len;
+ list_del(&p_curp->list_entry);
+
+ /* Check if we only want to enqueue this packet without informing FW */
+ if (!notify_fw) {
+ list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
+ goto out;
+ }
+
+out_notify:
+ qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
+out:
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return rc;
+}
+
+static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_tx_queue *p_tx,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *p_cookie,
+ u8 notify_fw)
+{
+ list_del(&p_curp->list_entry);
+ p_curp->cookie = p_cookie;
+ p_curp->bd_used = num_of_bds;
+ p_curp->notify_fw = notify_fw;
+ p_tx->cur_send_packet = p_curp;
+ p_tx->cur_send_frag_num = 0;
+
+ p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
+ p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
+ p_tx->cur_send_frag_num++;
+}
+
+static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ enum core_tx_dest tx_dest,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum core_roce_flavor_type type,
+ dma_addr_t first_frag,
+ u16 first_frag_len)
+{
+ struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
+ u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
+ struct core_tx_bd *start_bd = NULL;
+ u16 frag_idx;
+
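+	/* The first BD carries the packet-wide metadata: VLAN, flags,
+	 * L4 header offset, total BD count and the first fragment.
+	 */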
+ start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+ start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
+ SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
+ cpu_to_le16(l4_hdr_offset_w));
+ SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
+ start_bd->bd_flags.as_bitfield = bd_flags;
+ start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
+ CORE_TX_BD_FLAGS_START_BD_SHIFT;
+ SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
+ DMA_REGPAIR_LE(start_bd->addr, first_frag);
+ start_bd->nbytes = cpu_to_le16(first_frag_len);
+
+ SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
+ type);
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+ "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
+ p_ll2->queue_id,
+ p_ll2->cid,
+ p_ll2->conn_type,
+ prod_idx,
+ first_frag_len,
+ num_of_bds,
+ le32_to_cpu(start_bd->addr.hi),
+ le32_to_cpu(start_bd->addr.lo));
+
+ if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
+ return;
+
+ /* Need to provide the packet with additional BDs for frags */
+ for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
+ frag_idx < num_of_bds; frag_idx++) {
+ struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
+
+ *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+ (*p_bd)->bd_flags.as_bitfield = 0;
+ (*p_bd)->bitfield1 = 0;
+ (*p_bd)->bitfield0 = 0;
+ p_curp->bds_set[frag_idx].tx_frag = 0;
+ p_curp->bds_set[frag_idx].frag_len = 0;
+ }
+}
+
+/* This should be called while the Txq spinlock is being held */
+static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct core_db_data db_msg = { 0, 0, 0 };
+ u16 bd_prod;
+
+ /* If there are missing BDs, don't do anything now */
+ if (p_ll2_conn->tx_queue.cur_send_frag_num !=
+ p_ll2_conn->tx_queue.cur_send_packet->bd_used)
+ return;
+
+ /* Push the current packet to the list and clean after it */
+ list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
+ &p_ll2_conn->tx_queue.sending_descq);
+ p_ll2_conn->tx_queue.cur_send_packet = NULL;
+ p_ll2_conn->tx_queue.cur_send_frag_num = 0;
+
+ /* Notify FW of packet only if requested to */
+ if (!b_notify)
+ return;
+
+ bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
+
+ while (!list_empty(&p_tx->sending_descq)) {
+ p_pkt = list_first_entry(&p_tx->sending_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
+ }
+
+ SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_TX_BD_PROD_CMD);
+ db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+ db_msg.spq_prod = cpu_to_le16(bd_prod);
+
+ /* Make sure the BDs data is updated before ringing the doorbell */
+ wmb();
+
+ DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+ "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
+ p_ll2_conn->queue_id,
+ p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
+}
+
+int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ u8 num_of_bds,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum qed_ll2_roce_flavor_type qed_roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *cookie, u8 notify_fw)
+{
+ struct qed_ll2_tx_packet *p_curp = NULL;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ enum core_roce_flavor_type roce_flavor;
+ struct qed_ll2_tx_queue *p_tx;
+ struct qed_chain *p_tx_chain;
+ unsigned long flags;
+ int rc = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_tx = &p_ll2_conn->tx_queue;
+ p_tx_chain = &p_tx->txq_chain;
+
+ if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+ return -EIO;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ if (p_tx->cur_send_packet) {
+ rc = -EEXIST;
+ goto out;
+ }
+
+ /* Get entry, but only if we have tx elements for it */
+ if (!list_empty(&p_tx->free_descq))
+ p_curp = list_first_entry(&p_tx->free_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
+ p_curp = NULL;
+
+ if (!p_curp) {
+ rc = -EBUSY;
+ goto out;
+ }
+
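+	/* Translate the API RoCE flavor into the FW enumeration */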
+ if (qed_roce_flavor == QED_LL2_ROCE) {
+ roce_flavor = CORE_ROCE;
+ } else if (qed_roce_flavor == QED_LL2_RROCE) {
+ roce_flavor = CORE_RROCE;
+ } else {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Prepare packet and BD, and perhaps send a doorbell to FW */
+ qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
+ num_of_bds, first_frag,
+ first_frag_len, cookie, notify_fw);
+ qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
+ num_of_bds, CORE_TX_DEST_NW,
+ vlan, bd_flags, l4_hdr_offset_w,
+ roce_flavor,
+ first_frag, first_frag_len);
+
+ qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+
+out:
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ return rc;
+}
+
+int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes)
+{
+ struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ u16 cur_send_frag_num = 0;
+ struct core_tx_bd *p_bd;
+ unsigned long flags;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+
+ if (!p_ll2_conn->tx_queue.cur_send_packet)
+ return -EINVAL;
+
+ p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
+ cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
+
+ if (cur_send_frag_num >= p_cur_send_packet->bd_used)
+ return -EINVAL;
+
+ /* Fill the BD information, and possibly notify FW */
+ p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
+ DMA_REGPAIR_LE(p_bd->addr, addr);
+ p_bd->nbytes = cpu_to_le16(nbytes);
+ p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
+ p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
+
+ p_ll2_conn->tx_queue.cur_send_frag_num++;
+
+ spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
+ qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+ spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
+
+ return 0;
+}
+
+int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ int rc = -EINVAL;
+
+ p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+
+ /* Stop Tx & Rx of connection, if needed */
+ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+ qed_ll2_txq_flush(p_hwfn, connection_handle);
+ }
+
+ if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+ qed_ll2_rxq_flush(p_hwfn, connection_handle);
+ }
+
+ return rc;
+}
+
+void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->rx_queue.b_cb_registred = false;
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
+ }
+
+ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->tx_queue.b_cb_registred = false;
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
+ }
+
+ kfree(p_ll2_conn->tx_queue.descq_array);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
+
+ kfree(p_ll2_conn->rx_queue.descq_array);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
+
+ qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
+
+ mutex_lock(&p_ll2_conn->mutex);
+ p_ll2_conn->b_active = false;
+ mutex_unlock(&p_ll2_conn->mutex);
+}
+
+struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ll2_info *p_ll2_connections;
+ u8 i;
+
+ /* Allocate LL2's set struct */
+ p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
+ sizeof(struct qed_ll2_info), GFP_KERNEL);
+ if (!p_ll2_connections) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
+ return NULL;
+ }
+
+ for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+ p_ll2_connections[i].my_id = i;
+
+ return p_ll2_connections;
+}
+
+void qed_ll2_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections)
+{
+ int i;
+
+ for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+ mutex_init(&p_ll2_connections[i].mutex);
+}
+
+void qed_ll2_free(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections)
+{
+ kfree(p_ll2_connections);
+}
+
+static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_tstorm_per_queue_stat tstats;
+ u8 qid = p_ll2_conn->queue_id;
+ u32 tstats_addr;
+
+ memset(&tstats, 0, sizeof(tstats));
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+ p_stats->packet_too_big_discard =
+ HILO_64_REGPAIR(tstats.packet_too_big_discard);
+ p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
+}
+
+static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_ustorm_per_queue_stat ustats;
+ u8 qid = p_ll2_conn->queue_id;
+ u32 ustats_addr;
+
+ memset(&ustats, 0, sizeof(ustats));
+ ustats_addr = BAR0_MAP_REG_USDM_RAM +
+ CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
+
+ p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_pstorm_per_queue_stat pstats;
+ u8 stats_id = p_ll2_conn->tx_stats_id;
+ u32 pstats_addr;
+
+ memset(&pstats, 0, sizeof(pstats));
+ pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+ CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+ p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+}
+
+int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+ u8 connection_handle, struct qed_ll2_stats *p_stats)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ptt *p_ptt;
+
+ memset(p_stats, 0, sizeof(*p_stats));
+
+ if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
+ !p_hwfn->p_ll2_info)
+ return -EINVAL;
+
+ p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ return -EINVAL;
+ }
+
+ _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+ _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+ if (p_ll2_conn->tx_stats_en)
+ _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+}
+
+static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
+ const struct qed_ll2_cb_ops *ops,
+ void *cookie)
+{
+ cdev->ll2->cbs = ops;
+ cdev->ll2->cb_cookie = cookie;
+}
+
+static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+{
+ struct qed_ll2_info ll2_info;
+ struct qed_ll2_buffer *buffer;
+ enum qed_ll2_conn_type conn_type;
+ struct qed_ptt *p_ptt;
+ int rc, i;
+
+ /* Initialize LL2 locks & lists */
+ INIT_LIST_HEAD(&cdev->ll2->list);
+ spin_lock_init(&cdev->ll2->lock);
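+	/* Rx buffer size covers skb headroom, the Ethernet header,
+	 * cacheline alignment slack and the configured MTU.
+	 */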
+ cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+ L1_CACHE_BYTES + params->mtu;
+ cdev->ll2->frags_mapped = params->frags_mapped;
+
+	/* Allocate memory for LL2 */
+ DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
+ cdev->ll2->rx_size);
+ for (i = 0; i < QED_LL2_RX_SIZE; i++) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
+ goto fail;
+ }
+
+ rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
+ &buffer->phys_addr);
+ if (rc) {
+ kfree(buffer);
+ goto fail;
+ }
+
+ list_add_tail(&buffer->list, &cdev->ll2->list);
+ }
+
+ switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ conn_type = QED_LL2_TYPE_ISCSI;
+ break;
+ case QED_PCI_ETH_ROCE:
+ conn_type = QED_LL2_TYPE_ROCE;
+ break;
+ default:
+ conn_type = QED_LL2_TYPE_TEST;
+ }
+
+ /* Prepare the temporary ll2 information */
+ memset(&ll2_info, 0, sizeof(ll2_info));
+ ll2_info.conn_type = conn_type;
+ ll2_info.mtu = params->mtu;
+ ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+ ll2_info.tx_tc = 0;
+ ll2_info.tx_dest = CORE_TX_DEST_NW;
+ ll2_info.gsi_enable = 1;
+
+ rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
+ QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
+ &cdev->ll2->handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to acquire LL2 connection\n");
+ goto fail;
+ }
+
+ rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to establish LL2 connection\n");
+ goto release_fail;
+ }
+
+ /* Post all Rx buffers to FW */
+ spin_lock_bh(&cdev->ll2->lock);
+ list_for_each_entry(buffer, &cdev->ll2->list, list) {
+ rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
+ if (rc) {
+ DP_INFO(cdev,
+ "Failed to post an Rx buffer; Deleting it\n");
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ kfree(buffer->data);
+ list_del(&buffer->list);
+ kfree(buffer);
+ } else {
+ cdev->ll2->rx_cnt++;
+ }
+ }
+ spin_unlock_bh(&cdev->ll2->lock);
+
+ if (!cdev->ll2->rx_cnt) {
+ DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
+ goto release_terminate;
+ }
+
+ if (!is_valid_ether_addr(params->ll2_mac_address)) {
+ DP_INFO(cdev, "Invalid Ethernet address\n");
+ goto release_terminate;
+ }
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_INFO(cdev, "Failed to acquire PTT\n");
+ goto release_terminate;
+ }
+
+ rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ params->ll2_mac_address);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ if (rc) {
+ DP_ERR(cdev, "Failed to allocate LLH filter\n");
+ goto release_terminate_all;
+ }
+
+ ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
+
+ return 0;
+
+release_terminate_all:
+
+release_terminate:
+ qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+release_fail:
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+fail:
+ qed_ll2_kill_buffers(cdev);
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+ return -EINVAL;
+}
+
+static int qed_ll2_stop(struct qed_dev *cdev)
+{
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
+ return 0;
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_INFO(cdev, "Failed to acquire PTT\n");
+ goto fail;
+ }
+
+ qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ cdev->ll2_mac_address);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ eth_zero_addr(cdev->ll2_mac_address);
+
+ rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle);
+ if (rc)
+ DP_INFO(cdev, "Failed to terminate LL2 connection\n");
+
+ qed_ll2_kill_buffers(cdev);
+
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+ return rc;
+fail:
+ return -EINVAL;
+}
+
+static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
+{
+ const skb_frag_t *frag;
+ int rc = -EINVAL, i;
+ dma_addr_t mapping;
+ u16 vlan = 0;
+ u8 flags = 0;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
+ DP_INFO(cdev, "Cannot transmit a checksumed packet\n");
+ return -EINVAL;
+ }
+
+ if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
+ 1 + skb_shinfo(skb)->nr_frags);
+ return -EINVAL;
+ }
+
+ mapping = dma_map_single(&cdev->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+ DP_NOTICE(cdev, "SKB mapping failed\n");
+ return -EINVAL;
+ }
+
+ /* Request HW to calculate IP csum */
+ if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
+ ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+
+ if (skb_vlan_tag_present(skb)) {
+ vlan = skb_vlan_tag_get(skb);
+ flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+ }
+
+ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ 1 + skb_shinfo(skb)->nr_frags,
+ vlan, flags, 0, 0 /* RoCE FLAVOR */,
+ mapping, skb->len, skb, 1);
+ if (rc)
+ goto err;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ if (!cdev->ll2->frags_mapped) {
+ mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev,
+ mapping))) {
+ DP_NOTICE(cdev,
+ "Unable to map frag - dropping packet\n");
+ goto err;
+ }
+ } else {
+ mapping = page_to_phys(skb_frag_page(frag)) |
+ frag->page_offset;
+ }
+
+ rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ mapping,
+ skb_frag_size(frag));
+
+ /* If this fails there is not much to do here: part of the packet
+ * has already been posted, so we can't free the memory; wait for
+ * the completion instead.
+ */
+ if (rc)
+ goto err2;
+ }
+
+ return 0;
+
+err:
+ dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
+
+err2:
+ return rc;
+}
+
+static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+ if (!cdev->ll2)
+ return -EINVAL;
+
+ return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle, stats);
+}
+
+const struct qed_ll2_ops qed_ll2_ops_pass = {
+ .start = &qed_ll2_start,
+ .stop = &qed_ll2_stop,
+ .start_xmit = &qed_ll2_start_xmit,
+ .register_cb_ops = &qed_ll2_register_cb_ops,
+ .get_stats = &qed_ll2_stats,
+};
+
+int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+ cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
+ return cdev->ll2 ? 0 : -ENOMEM;
+}
+
+void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+ kfree(cdev->ll2);
+ cdev->ll2 = NULL;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
new file mode 100644
index 000000000000..80a5dc2d652d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -0,0 +1,316 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_LL2_H
+#define _QED_LL2_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+
+#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
+
+enum qed_ll2_roce_flavor_type {
+ QED_LL2_ROCE,
+ QED_LL2_RROCE,
+ MAX_QED_LL2_ROCE_FLAVOR_TYPE
+};
+
+enum qed_ll2_conn_type {
+ QED_LL2_TYPE_RESERVED,
+ QED_LL2_TYPE_ISCSI,
+ QED_LL2_TYPE_TEST,
+ QED_LL2_TYPE_ISCSI_OOO,
+ QED_LL2_TYPE_RESERVED2,
+ QED_LL2_TYPE_ROCE,
+ QED_LL2_TYPE_RESERVED3,
+ MAX_QED_LL2_RX_CONN_TYPE
+};
+
+struct qed_ll2_rx_packet {
+ struct list_head list_entry;
+ struct core_rx_bd_with_buff_len *rxq_bd;
+ dma_addr_t rx_buf_addr;
+ u16 buf_length;
+ void *cookie;
+ u8 placement_offset;
+ u16 parse_flags;
+ u16 packet_length;
+ u16 vlan;
+ u32 opaque_data[2];
+};
+
+struct qed_ll2_tx_packet {
+ struct list_head list_entry;
+ u16 bd_used;
+ u16 vlan;
+ u16 l4_hdr_offset_w;
+ u8 bd_flags;
+ bool notify_fw;
+ void *cookie;
+
+ struct {
+ struct core_tx_bd *txq_bd;
+ dma_addr_t tx_frag;
+ u16 frag_len;
+ } bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET];
+};
+
+struct qed_ll2_rx_queue {
+ /* Lock protecting the Rx queue manipulation */
+ spinlock_t lock;
+ struct qed_chain rxq_chain;
+ struct qed_chain rcq_chain;
+ u8 rx_sb_index;
+ bool b_cb_registred;
+ __le16 *p_fw_cons;
+ struct list_head active_descq;
+ struct list_head free_descq;
+ struct list_head posting_descq;
+ struct qed_ll2_rx_packet *descq_array;
+ void __iomem *set_prod_addr;
+};
+
+struct qed_ll2_tx_queue {
+ /* Lock protecting the Tx queue manipulation */
+ spinlock_t lock;
+ struct qed_chain txq_chain;
+ u8 tx_sb_index;
+ bool b_cb_registred;
+ __le16 *p_fw_cons;
+ struct list_head active_descq;
+ struct list_head free_descq;
+ struct list_head sending_descq;
+ struct qed_ll2_tx_packet *descq_array;
+ struct qed_ll2_tx_packet *cur_send_packet;
+ struct qed_ll2_tx_packet cur_completing_packet;
+ u16 cur_completing_bd_idx;
+ void __iomem *doorbell_addr;
+ u16 bds_idx;
+ u16 cur_send_frag_num;
+ u16 cur_completing_frag_num;
+ bool b_completing_packet;
+};
+
+struct qed_ll2_info {
+ /* Lock protecting the state of LL2 */
+ struct mutex mutex;
+ enum qed_ll2_conn_type conn_type;
+ u32 cid;
+ u8 my_id;
+ u8 queue_id;
+ u8 tx_stats_id;
+ bool b_active;
+ u16 mtu;
+ u8 rx_drop_ttl0_flg;
+ u8 rx_vlan_removal_en;
+ u8 tx_tc;
+ enum core_tx_dest tx_dest;
+ enum core_error_handle ai_err_packet_too_big;
+ enum core_error_handle ai_err_no_buf;
+ u8 tx_stats_en;
+ struct qed_ll2_rx_queue rx_queue;
+ struct qed_ll2_tx_queue tx_queue;
+ u8 gsi_enable;
+};
+
+/**
+ * @brief qed_ll2_acquire_connection - allocates resources and
+ *        starts the Rx & Tx (if relevant) queue pair. Provides the
+ *        connection handle as an output parameter.
+ *
+ * @param p_hwfn
+ * @param p_params Contains various configuration properties
+ * @param rx_num_desc
+ * @param tx_num_desc
+ *
+ * @param p_connection_handle Output container for LL2 connection's handle
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_params,
+ u16 rx_num_desc,
+ u16 tx_num_desc,
+ u8 *p_connection_handle);
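+
+/* Illustrative only (not part of the API contract): a minimal bring-up
+ * sketch, assuming a valid p_hwfn and a zeroed struct qed_ll2_info; the
+ * descriptor counts are arbitrary and error handling is elided. See
+ * qed_ll2_start() in qed_ll2.c for the real flow:
+ *
+ *	u8 handle;
+ *
+ *	ll2_info.conn_type = QED_LL2_TYPE_TEST;
+ *	ll2_info.mtu = 1500;
+ *	if (!qed_ll2_acquire_connection(p_hwfn, &ll2_info, 64, 64, &handle))
+ *		qed_ll2_establish_connection(p_hwfn, handle);
+ */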
+
+/**
+ * @brief qed_ll2_establish_connection - starts a previously
+ *        allocated LL2 queue pair
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_post_rx_buffer - submits a buffer to the LL2 Rx queue.
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection
+ * @param addr physical address of the Rx buffer to submit
+ * @param buf_len length of the Rx buffer
+ * @param cookie
+ * @param notify_fw produce the corresponding Rx BD immediately
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 buf_len, void *cookie, u8 notify_fw);
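+
+/* Illustrative only: posting one DMA-mapped buffer, assuming 'mapping' came
+ * from dma_map_single() and 'buffer' is the caller's cookie (compare the
+ * posting loop in qed_ll2_start()):
+ *
+ *	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, mapping, 0, buffer, 1);
+ */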
+
+/**
+ * @brief qed_ll2_prepare_tx_packet - requests a start Tx BD
+ *        to prepare a Tx packet for submission to the FW.
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection
+ * @param num_of_bds number of requested BDs; equals the number of
+ * fragments in the Tx packet
+ * @param vlan VLAN to insert into the packet (if insertion is set)
+ * @param bd_flags
+ * @param l4_hdr_offset_w L4 Header Offset from start of packet
+ * (in words). This is needed if both l4_csum
+ * and ipv6_ext are set
+ * @param first_frag
+ * @param first_frag_len
+ * @param cookie
+ *
+ * @param notify_fw
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ u8 num_of_bds,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum qed_ll2_roce_flavor_type qed_roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *cookie, u8 notify_fw);
+
+/**
+ * @brief qed_ll2_release_connection - releases resources
+ * allocated for LL2 connection
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection
+ */
+void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_set_fragment_of_tx_packet - provides a fragment to fill
+ *        one of the Tx BDs requested by
+ *        qed_ll2_prepare_tx_packet
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle
+ * obtained from
+ * qed_ll2_acquire_connection
+ * @param addr
+ * @param nbytes
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes);
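+
+/* Illustrative only: a fragmented Tx submission pairs the two calls above --
+ * one qed_ll2_prepare_tx_packet() covering the first fragment, then one
+ * qed_ll2_set_fragment_of_tx_packet() per remaining fragment, mirroring
+ * qed_ll2_start_xmit() in qed_ll2.c. frag_mapping[]/frag_len[] are
+ * hypothetical per-fragment arrays:
+ *
+ *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, num_frags, vlan, flags,
+ *				       0, 0, first_mapping, first_len, skb, 1);
+ *	for (i = 1; !rc && i < num_frags; i++)
+ *		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
+ *						       frag_mapping[i],
+ *						       frag_len[i]);
+ */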
+
+/**
+ * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
+ *
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle
+ * obtained from
+ * qed_ll2_acquire_connection
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_get_stats - get LL2 queue's statistics
+ *
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection
+ * @param p_stats
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+ u8 connection_handle, struct qed_ll2_stats *p_stats);
+
+/**
+ * @brief qed_ll2_alloc - Allocates LL2 connections set
+ *
+ * @param p_hwfn
+ *
+ * @return pointer to allocated qed_ll2_info or NULL
+ */
+struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ll2_setup - Inits LL2 connections set
+ *
+ * @param p_hwfn
+ * @param p_ll2_connections
+ *
+ */
+void qed_ll2_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections);
+
+/**
+ * @brief qed_ll2_free - Releases LL2 connections set
+ *
+ * @param p_hwfn
+ * @param p_ll2_connections
+ *
+ */
+void qed_ll2_free(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections);
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c7dc34bfdd0a..4ee3151e80c2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -22,15 +22,22 @@
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
+#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#define QED_ROCE_QPS (8192)
+#define QED_ROCE_DPIS (8)
+#endif
+
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -51,8 +58,6 @@ MODULE_FIRMWARE(QED_FW_FILE_NAME);
static int __init qed_init(void)
{
- pr_notice("qed_init called\n");
-
pr_info("%s", version);
return 0;
@@ -106,8 +111,7 @@ static void qed_free_pci(struct qed_dev *cdev)
/* Performs PCI initializations as well as initializing PCI-related parameters
* in the device structure. Returns 0 in case of success.
*/
-static int qed_init_pci(struct qed_dev *cdev,
- struct pci_dev *pdev)
+static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
u8 rev_id;
int rc;
@@ -207,8 +211,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
- dev_info->rdma_supported =
- (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
+ dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
+ QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
@@ -263,8 +267,7 @@ static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
}
/* Sets the requested power state */
-static int qed_set_power_state(struct qed_dev *cdev,
- pci_power_t state)
+static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
if (!cdev)
return -ENODEV;
@@ -366,8 +369,8 @@ static int qed_enable_msix(struct qed_dev *cdev,
DP_NOTICE(cdev,
"Trying to enable MSI-X with less vectors (%d out of %d)\n",
cnt, int_params->in.num_vectors);
- rc = pci_enable_msix_exact(cdev->pdev,
- int_params->msix_table, cnt);
+ rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
+ cnt);
if (!rc)
rc = cnt;
}
@@ -439,6 +442,11 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
}
out:
+ if (!rc)
+ DP_INFO(cdev, "Using %s interrupts\n",
+ int_params->out.int_mode == QED_INT_MODE_INTA ?
+ "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
+ "MSI" : "MSIX");
cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
return rc;
@@ -514,19 +522,18 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
struct qed_dev *cdev = hwfn->cdev;
+ u32 int_mode;
int rc = 0;
u8 id;
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX) {
id = hwfn->my_id;
snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
id, cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
rc = request_irq(cdev->int_params.msix_table[id].vector,
qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
- if (!rc)
- DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
- "Requested slowpath MSI-X\n");
} else {
unsigned long flags = 0;
@@ -541,6 +548,13 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
flags, cdev->name, cdev);
}
+ if (rc)
+ DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
+ else
+ DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
+ "Requested slowpath %s\n",
+ (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
+
return rc;
}
@@ -581,6 +595,8 @@ static int qed_nic_stop(struct qed_dev *cdev)
}
}
+ qed_dbg_pf_exit(cdev);
+
return rc;
}
@@ -599,7 +615,16 @@ static int qed_nic_reset(struct qed_dev *cdev)
static int qed_nic_setup(struct qed_dev *cdev)
{
- int rc;
+ int rc, i;
+
+ /* Determine if interface is going to require LL2 */
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->using_ll2 = true;
+ }
+ }
rc = qed_resc_alloc(cdev);
if (rc)
@@ -657,6 +682,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
struct qed_sb_cnt_info sb_cnt_info;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ int num_l2_queues;
+#endif
int rc;
int i;
@@ -687,6 +715,31 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ num_l2_queues = 0;
+ for_each_hwfn(cdev, i)
+ num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA,
+ "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
+ cdev->int_params.fp_msix_cnt, num_l2_queues);
+
+ if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
+ cdev->int_params.rdma_msix_cnt =
+ (cdev->int_params.fp_msix_cnt - num_l2_queues)
+ / cdev->num_hwfns;
+ cdev->int_params.rdma_msix_base =
+ cdev->int_params.fp_msix_base + num_l2_queues;
+ cdev->int_params.fp_msix_cnt = num_l2_queues;
+ } else {
+ cdev->int_params.rdma_msix_cnt = 0;
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
+ cdev->int_params.rdma_msix_cnt,
+ cdev->int_params.rdma_msix_base);
+#endif
+
return 0;
}
@@ -790,6 +843,13 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
int i;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+ params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ /* divide the MRs by 3 to avoid MF ILT overflow */
+ params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+#endif
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -834,13 +894,13 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (IS_PF(cdev)) {
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(cdev);
- if (rc) {
- DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ if (rc)
goto err2;
- }
/* First Dword used to differentiate between various sources */
data = cdev->firmware->data + sizeof(u32);
+
+ qed_dbg_pf_init(cdev);
}
memset(&tunn_info, 0, sizeof(tunn_info));
@@ -864,6 +924,12 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
+ /* Allocate LL2 interface if needed */
+ if (QED_LEADING_HWFN(cdev)->using_ll2) {
+ rc = qed_ll2_alloc_if(cdev);
+ if (rc)
+ goto err3;
+ }
if (IS_PF(cdev)) {
hwfn = QED_LEADING_HWFN(cdev);
drv_version.version = (params->drv_major << 24) |
@@ -884,6 +950,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
return 0;
+err3:
+ qed_hw_stop(cdev);
err2:
qed_hw_timers_stop_all(cdev);
if (IS_PF(cdev))
@@ -906,6 +974,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
+ qed_ll2_dealloc_if(cdev);
+
if (IS_PF(cdev)) {
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
@@ -974,8 +1044,7 @@ static u32 qed_sb_init(struct qed_dev *cdev,
}
static u32 qed_sb_release(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
struct qed_hwfn *p_hwfn;
int hwfn_index;
@@ -1025,20 +1094,23 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
link_params->speed.autoneg = params->autoneg;
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
link_params->speed.advertised_speeds = 0;
- if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
- (params->adv_speeds & SUPPORTED_1000baseT_Full))
+ if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
+ (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
- if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
- if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
- if (params->adv_speeds & 0)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
- if (params->adv_speeds & 0)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+ if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
@@ -1168,50 +1240,56 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->link_up = true;
/* TODO - at the moment assume supported and advertised speed equal */
- if_link->supported_caps = SUPPORTED_FIBRE;
+ if_link->supported_caps = QED_LM_FIBRE_BIT;
if (params.speed.autoneg)
- if_link->supported_caps |= SUPPORTED_Autoneg;
+ if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
(params.pause.forced_rx && params.pause.forced_tx))
- if_link->supported_caps |= SUPPORTED_Asym_Pause;
+ if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
if (params.pause.autoneg || params.pause.forced_rx ||
params.pause.forced_tx)
- if_link->supported_caps |= SUPPORTED_Pause;
+ if_link->supported_caps |= QED_LM_Pause_BIT;
if_link->advertised_caps = if_link->supported_caps;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full;
+ if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
+ QED_LM_1000baseT_Full_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+ if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->advertised_caps |= 0;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->advertised_caps |= 0;
+ if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->supported_caps |= SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full;
+ if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
+ QED_LM_1000baseT_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+ if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->supported_caps |= 0;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->supported_caps |= 0;
+ if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link.link_up)
if_link->speed = link.speed;
@@ -1231,33 +1309,29 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
/* Link partner capabilities */
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_1G_HD)
- if_link->lp_caps |= SUPPORTED_1000baseT_Half;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_1G_FD)
- if_link->lp_caps |= SUPPORTED_1000baseT_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_10G)
- if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_40G)
- if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_50G)
- if_link->lp_caps |= 0;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_100G)
- if_link->lp_caps |= 0;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
+ if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
+ if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
+ if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
+ if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
+ if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
+ if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
+ if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link.an_complete)
- if_link->lp_caps |= SUPPORTED_Autoneg;
+ if_link->lp_caps |= QED_LM_Autoneg_BIT;
if (link.partner_adv_pause)
- if_link->lp_caps |= SUPPORTED_Pause;
+ if_link->lp_caps |= QED_LM_Pause_BIT;
if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
- if_link->lp_caps |= SUPPORTED_Asym_Pause;
+ if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
}
static void qed_get_current_link(struct qed_dev *cdev,
@@ -1385,9 +1459,32 @@ const struct qed_common_ops qed_common_ops_pass = {
.get_link = &qed_get_current_link,
.drain = &qed_drain,
.update_msglvl = &qed_init_dp,
+ .dbg_all_data = &qed_dbg_all_data,
+ .dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
.get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
};
+
+void qed_get_protocol_stats(struct qed_dev *cdev,
+ enum qed_mcp_protocol_type type,
+ union qed_mcp_protocol_stats *stats)
+{
+ struct qed_eth_stats eth_stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ switch (type) {
+ case QED_MCP_LAN_STATS:
+ qed_get_vport_stats(cdev, &eth_stats);
+ stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+ stats->lan_stats.fcs_err = -1;
+ break;
+ default:
+ DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+ return;
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index f776a77794c5..bdc9ba92f6d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -54,8 +54,7 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
return true;
}
-void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_PORT);
@@ -68,8 +67,7 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
-void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
u32 tmp, i;
@@ -99,8 +97,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
return 0;
}
-static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
u32 drv_mb_offsize, mfw_mb_offsize;
@@ -143,8 +140,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info;
u32 size;
@@ -165,9 +161,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
- p_info->mfw_mb_shadow =
- kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
- p_info->mfw_mb_length), GFP_KERNEL);
+ p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
@@ -177,7 +171,6 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
return 0;
err:
- DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
qed_mcp_free(p_hwfn);
return -ENOMEM;
}
@@ -189,8 +182,7 @@ err:
* access is achieved by setting a blocking flag, which causes competing
* contexts' mailbox sends to fail.
*/
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
- u32 cmd)
+static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
spin_lock_bh(&p_hwfn->mcp_info->lock);
@@ -221,15 +213,13 @@ static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn,
- u32 cmd)
+static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
{
if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
-int qed_mcp_reset(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
u8 delay = CHIP_MCP_RESP_ITER_US;
@@ -326,7 +316,8 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
} else {
/* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond!\n");
+ DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
+ cmd, param);
*o_mcp_resp = 0;
rc = -EAGAIN;
}
@@ -342,7 +333,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
}
@@ -398,9 +389,36 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
+{
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_dst = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ *o_txn_size = *o_mcp_param;
+ memcpy(o_buf, &union_data.raw_data, *o_txn_size);
+
+ return 0;
+}
+
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *p_load_code)
+ struct qed_ptt *p_ptt, u32 *p_load_code)
{
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_mcp_mb_params mb_params;
@@ -527,8 +545,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
"Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
transceiver_state,
(u32)(p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- transceiver_data)));
+ offsetof(struct public_port, transceiver_data)));
transceiver_state = GET_FIELD(transceiver_state,
ETH_TRANSCEIVER_STATE);
@@ -540,8 +557,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
}
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool b_reset)
+ struct qed_ptt *p_ptt, bool b_reset)
{
struct qed_mcp_link_state *p_link;
u8 max_bw, min_bw;
@@ -557,8 +573,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
"Received link update [0x%08x] from mfw [Addr 0x%x]\n",
status,
(u32)(p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- link_status)));
+ offsetof(struct public_port, link_status)));
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link indications\n");
@@ -635,6 +650,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
QED_LINK_PARTNER_SPEED_20G : 0;
p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_25G : 0;
+ p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
QED_LINK_PARTNER_SPEED_40G : 0;
p_link->partner_adv_speed |=
@@ -722,6 +740,48 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
return 0;
}
+static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum MFW_DRV_MSG_TYPE type)
+{
+ enum qed_mcp_protocol_type stats_type;
+ union qed_mcp_protocol_stats stats;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ u32 hsi_param;
+
+ switch (type) {
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ stats_type = QED_MCP_LAN_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+ break;
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ stats_type = QED_MCP_FCOE_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
+ break;
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ stats_type = QED_MCP_ISCSI_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
+ break;
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ stats_type = QED_MCP_RDMA_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
+ return;
+ }
+
+ qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_STATS;
+ mb_params.param = hsi_param;
+ memcpy(&union_data, &stats, sizeof(stats));
+ mb_params.p_data_src = &union_data;
+ qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
struct public_func *p_shmem_info)
{
@@ -752,8 +812,7 @@ static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
+ struct public_func *p_data, int pfid)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_FUNC);
@@ -763,51 +822,20 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
memset(p_data, 0, sizeof(*p_data));
- size = min_t(u32, sizeof(*p_data),
- QED_SECTION_SIZE(mfw_path_offsize));
+ size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
for (i = 0; i < size / sizeof(u32); i++)
((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
func_addr + (i << 2));
return size;
}
-int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_pf)
-{
- struct public_func shmem_info;
- int i;
-
- /* Find first Ethernet interface in port */
- for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
- i += p_hwfn->cdev->num_ports_in_engines) {
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID_BY_REL(p_hwfn, i));
-
- if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
- continue;
-
- if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
- FUNC_MF_CFG_PROTOCOL_ETHERNET) {
- *p_pf = (u8)i;
- return 0;
- }
- }
-
- DP_NOTICE(p_hwfn,
- "Failed to find on port an ethernet interface in MF_SI mode\n");
-
- return -EINVAL;
-}
-
-static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_function_info *p_info;
struct public_func shmem_info;
u32 resp = 0, param = 0;
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID(p_hwfn));
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
qed_read_pf_bandwidth(p_hwfn, &shmem_info);
@@ -867,6 +895,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+ break;
case MFW_DRV_MSG_BW_UPDATE:
qed_mcp_update_bw(p_hwfn, p_ptt);
break;
@@ -940,8 +974,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_get_media_type(struct qed_dev *cdev,
- u32 *p_media_type)
+int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
@@ -950,7 +983,7 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
}
@@ -1003,15 +1036,13 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_mcp_function_info *info;
struct public_func shmem_info;
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID(p_hwfn));
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
info = &p_hwfn->mcp_info->func_info;
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
- if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
- &info->protocol)) {
+ if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return -EINVAL;
@@ -1072,15 +1103,13 @@ struct qed_mcp_link_capabilities
return &p_hwfn->mcp_info->link_capabilities;
}
-int qed_mcp_drain(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 resp = 0, param = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NIG_DRAIN, 1000,
- &resp, &param);
+ DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
/* Wait for the drain to complete before returning */
msleep(1020);
@@ -1089,8 +1118,7 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
}
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *p_flash_size)
+ struct qed_ptt *p_ptt, u32 *p_flash_size)
{
u32 flash_size;
@@ -1168,8 +1196,35 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_led_mode mode)
+int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+ &param);
+ if (rc)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 value, cpu_mode;
+
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+ value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
+ cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+
+ return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+}
+
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
u32 resp = 0, param = 0, drv_mb_param;
int rc;
@@ -1195,6 +1250,27 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 mask_parities)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+ mask_parities, &resp, &param);
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "MCP response failure for mask parities, aborting\n");
+ } else if (resp != FW_MSG_CODE_OK) {
+ DP_ERR(p_hwfn,
+ "MCP did not acknowledge mask parity request. Old MFW?\n");
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 drv_mb_param = 0, rsp, param;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 7f319aa1b229..dff520ed069b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -60,9 +60,10 @@ struct qed_mcp_link_state {
#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
#define QED_LINK_PARTNER_SPEED_10G BIT(2)
#define QED_LINK_PARTNER_SPEED_20G BIT(3)
-#define QED_LINK_PARTNER_SPEED_40G BIT(4)
-#define QED_LINK_PARTNER_SPEED_50G BIT(5)
-#define QED_LINK_PARTNER_SPEED_100G BIT(6)
+#define QED_LINK_PARTNER_SPEED_25G BIT(4)
+#define QED_LINK_PARTNER_SPEED_40G BIT(5)
+#define QED_LINK_PARTNER_SPEED_50G BIT(6)
+#define QED_LINK_PARTNER_SPEED_100G BIT(7)
u32 partner_adv_speed;
bool partner_tx_flow_ctrl_en;
@@ -105,6 +106,47 @@ struct qed_mcp_drv_version {
u8 name[MCP_DRV_VER_STR_SIZE - 4];
};
+struct qed_mcp_lan_stats {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+};
+
+struct qed_mcp_fcoe_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct qed_mcp_iscsi_stats {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct qed_mcp_rdma_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+enum qed_mcp_protocol_type {
+ QED_MCP_LAN_STATS,
+ QED_MCP_FCOE_STATS,
+ QED_MCP_ISCSI_STATS,
+ QED_MCP_RDMA_STATS
+};
+
+union qed_mcp_protocol_stats {
+ struct qed_mcp_lan_stats lan_stats;
+ struct qed_mcp_fcoe_stats fcoe_stats;
+ struct qed_mcp_iscsi_stats iscsi_stats;
+ struct qed_mcp_rdma_stats rdma_stats;
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -426,6 +468,29 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief - Sends an NVM read command request to the MFW to get
+ * a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size - Buffer size output
+ * @param o_buf - Pointer to the buffer returned by the MFW.
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
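+
+/* Illustrative only: per the @param note above, 'param' packs the offset
+ * into bits [0:23] and the size into bits [24:31]. Reading 16 bytes at NVM
+ * offset 0x100 (values made up for the example) would use:
+ *
+ *	u32 param = (0x100 & 0x00ffffff) | (16 << 24);
+ */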
+
+/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
@@ -447,6 +512,26 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+/**
+ * @brief - Halt the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Wake up the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
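+
+/* Illustrative only: halt/resume would naturally bracket work that needs the
+ * MCP quiesced (a sketch, not a sequence mandated by this patch):
+ *
+ *	if (!qed_mcp_halt(p_hwfn, p_ptt)) {
+ *		... work with the MCP halted ...
+ *		qed_mcp_resume(p_hwfn, p_ptt);
+ *	}
+ */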
+
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
@@ -458,6 +543,7 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
u8 min_bw);
-int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_pf);
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 mask_parities);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index f6b86ca1ff79..b414a0542177 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -116,8 +116,14 @@
0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
+#define TCFC_REG_WEAK_ENABLE_VF \
+ 0x2d0704UL
#define TCFC_REG_STRONG_ENABLE_PF \
0x2d0708UL
+#define TCFC_REG_STRONG_ENABLE_VF \
+ 0x2d070cUL
+#define CCFC_REG_WEAK_ENABLE_VF \
+ 0x2e0704UL
#define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL
#define PGLUE_B_REG_PGL_ADDR_88_F0 \
@@ -202,6 +208,26 @@
0x50196cUL
#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
0x501964UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE \
+ 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \
+ 32
+#define NIG_REG_LLH_FUNC_FILTER_EN \
+ 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE \
+ 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE \
+ 0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL \
+ 0x501b40UL
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_SIZE \
+ 16
#define NCSI_REG_CONFIG \
0x040200UL
#define PBF_REG_INIT \
@@ -258,6 +284,8 @@
0x1f0a1cUL
#define PRS_REG_ROCE_DEST_QP_MAX_PF \
0x1f0430UL
+#define PRS_REG_USE_LIGHT_L2 \
+ 0x1f096cUL
#define PSDM_REG_ENABLE_IN1 \
0xfa0004UL
#define PSEM_REG_ENABLE_IN \
@@ -521,4 +549,910 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
+
+#define PGLCS_REG_DBG_SELECT \
+ 0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE \
+ 0x001d18UL
+#define PGLCS_REG_DBG_SHIFT \
+ 0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID \
+ 0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME \
+ 0x001d24UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
+ 0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 \
+ 0x008080UL
+#define MISC_REG_RESET_PL_PDA_VAUX \
+ 0x008090UL
+#define MISCS_REG_RESET_PL_UA \
+ 0x009050UL
+#define MISCS_REG_RESET_PL_HV \
+ 0x009060UL
+#define MISCS_REG_RESET_PL_HV_2 \
+ 0x009150UL
+#define DMAE_REG_DBG_SELECT \
+ 0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE \
+ 0x00c514UL
+#define DMAE_REG_DBG_SHIFT \
+ 0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID \
+ 0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME \
+ 0x00c520UL
+#define NCSI_REG_DBG_SELECT \
+ 0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE \
+ 0x040478UL
+#define NCSI_REG_DBG_SHIFT \
+ 0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID \
+ 0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME \
+ 0x040484UL
+#define GRC_REG_DBG_SELECT \
+ 0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE \
+ 0x0500a8UL
+#define GRC_REG_DBG_SHIFT \
+ 0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID \
+ 0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME \
+ 0x0500b4UL
+#define UMAC_REG_DBG_SELECT \
+ 0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE \
+ 0x051098UL
+#define UMAC_REG_DBG_SHIFT \
+ 0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID \
+ 0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME \
+ 0x0510a4UL
+#define MCP2_REG_DBG_SELECT \
+ 0x052400UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MCP2_REG_DBG_SHIFT \
+ 0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID \
+ 0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME \
+ 0x052444UL
+#define PCIE_REG_DBG_SELECT \
+ 0x0547e8UL
+#define PCIE_REG_DBG_DWORD_ENABLE \
+ 0x0547ecUL
+#define PCIE_REG_DBG_SHIFT \
+ 0x0547f0UL
+#define PCIE_REG_DBG_FORCE_VALID \
+ 0x0547f4UL
+#define PCIE_REG_DBG_FORCE_FRAME \
+ 0x0547f8UL
+#define DORQ_REG_DBG_SELECT \
+ 0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE \
+ 0x100ad4UL
+#define DORQ_REG_DBG_SHIFT \
+ 0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID \
+ 0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME \
+ 0x100ae0UL
+#define IGU_REG_DBG_SELECT \
+ 0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE \
+ 0x18157cUL
+#define IGU_REG_DBG_SHIFT \
+ 0x181580UL
+#define IGU_REG_DBG_FORCE_VALID \
+ 0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME \
+ 0x181588UL
+#define CAU_REG_DBG_SELECT \
+ 0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE \
+ 0x1c0eacUL
+#define CAU_REG_DBG_SHIFT \
+ 0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID \
+ 0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME \
+ 0x1c0eb8UL
+#define PRS_REG_DBG_SELECT \
+ 0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE \
+ 0x1f0b70UL
+#define PRS_REG_DBG_SHIFT \
+ 0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID \
+ 0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME \
+ 0x1f0ba4UL
+#define CNIG_REG_DBG_SELECT_K2 \
+ 0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+ 0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2 \
+ 0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2 \
+ 0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+ 0x218264UL
+#define PRM_REG_DBG_SELECT \
+ 0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE \
+ 0x2306acUL
+#define PRM_REG_DBG_SHIFT \
+ 0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID \
+ 0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME \
+ 0x2306b8UL
+#define SRC_REG_DBG_SELECT \
+ 0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE \
+ 0x238704UL
+#define SRC_REG_DBG_SHIFT \
+ 0x238708UL
+#define SRC_REG_DBG_FORCE_VALID \
+ 0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME \
+ 0x238710UL
+#define RSS_REG_DBG_SELECT \
+ 0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE \
+ 0x238c50UL
+#define RSS_REG_DBG_SHIFT \
+ 0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID \
+ 0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME \
+ 0x238c5cUL
+#define RPB_REG_DBG_SELECT \
+ 0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE \
+ 0x23c72cUL
+#define RPB_REG_DBG_SHIFT \
+ 0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID \
+ 0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME \
+ 0x23c738UL
+#define PSWRQ2_REG_DBG_SELECT \
+ 0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE \
+ 0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT \
+ 0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID \
+ 0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME \
+ 0x240110UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE \
+ 0x280024UL
+#define PSWRQ_REG_DBG_SHIFT \
+ 0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID \
+ 0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME \
+ 0x280030UL
+#define PSWWR_REG_DBG_SELECT \
+ 0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE \
+ 0x29a088UL
+#define PSWWR_REG_DBG_SHIFT \
+ 0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID \
+ 0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME \
+ 0x29a094UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD_REG_DBG_DWORD_ENABLE \
+ 0x29c044UL
+#define PSWRD_REG_DBG_SHIFT \
+ 0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID \
+ 0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME \
+ 0x29c050UL
+#define PSWRD2_REG_DBG_SELECT \
+ 0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE \
+ 0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT \
+ 0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID \
+ 0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME \
+ 0x29d410UL
+#define PSWHST2_REG_DBG_SELECT \
+ 0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE \
+ 0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT \
+ 0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID \
+ 0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME \
+ 0x29e068UL
+#define PSWHST_REG_DBG_SELECT \
+ 0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE \
+ 0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT \
+ 0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID \
+ 0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME \
+ 0x2a0110UL
+#define PGLUE_B_REG_DBG_SELECT \
+ 0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE \
+ 0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT \
+ 0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID \
+ 0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME \
+ 0x2a8410UL
+#define TM_REG_DBG_SELECT \
+ 0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE \
+ 0x2c07acUL
+#define TM_REG_DBG_SHIFT \
+ 0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID \
+ 0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME \
+ 0x2c07b8UL
+#define TCFC_REG_DBG_SELECT \
+ 0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE \
+ 0x2d0504UL
+#define TCFC_REG_DBG_SHIFT \
+ 0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID \
+ 0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME \
+ 0x2d0510UL
+#define CCFC_REG_DBG_SELECT \
+ 0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE \
+ 0x2e0504UL
+#define CCFC_REG_DBG_SHIFT \
+ 0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID \
+ 0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME \
+ 0x2e0510UL
+#define QM_REG_DBG_SELECT \
+ 0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE \
+ 0x2f2e78UL
+#define QM_REG_DBG_SHIFT \
+ 0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID \
+ 0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME \
+ 0x2f2e84UL
+#define RDIF_REG_DBG_SELECT \
+ 0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE \
+ 0x300504UL
+#define RDIF_REG_DBG_SHIFT \
+ 0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID \
+ 0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME \
+ 0x300510UL
+#define TDIF_REG_DBG_SELECT \
+ 0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE \
+ 0x310504UL
+#define TDIF_REG_DBG_SHIFT \
+ 0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID \
+ 0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME \
+ 0x310510UL
+#define BRB_REG_DBG_SELECT \
+ 0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE \
+ 0x340ed4UL
+#define BRB_REG_DBG_SHIFT \
+ 0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID \
+ 0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME \
+ 0x340ee0UL
+#define XYLD_REG_DBG_SELECT \
+ 0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE \
+ 0x4c1604UL
+#define XYLD_REG_DBG_SHIFT \
+ 0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID \
+ 0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME \
+ 0x4c1610UL
+#define YULD_REG_DBG_SELECT \
+ 0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE \
+ 0x4c9604UL
+#define YULD_REG_DBG_SHIFT \
+ 0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID \
+ 0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME \
+ 0x4c9610UL
+#define TMLD_REG_DBG_SELECT \
+ 0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE \
+ 0x4d1604UL
+#define TMLD_REG_DBG_SHIFT \
+ 0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID \
+ 0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME \
+ 0x4d1610UL
+#define MULD_REG_DBG_SELECT \
+ 0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE \
+ 0x4e1604UL
+#define MULD_REG_DBG_SHIFT \
+ 0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID \
+ 0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME \
+ 0x4e1610UL
+#define NIG_REG_DBG_SELECT \
+ 0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE \
+ 0x502144UL
+#define NIG_REG_DBG_SHIFT \
+ 0x502148UL
+#define NIG_REG_DBG_FORCE_VALID \
+ 0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME \
+ 0x502150UL
+#define BMB_REG_DBG_SELECT \
+ 0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE \
+ 0x540a80UL
+#define BMB_REG_DBG_SHIFT \
+ 0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID \
+ 0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME \
+ 0x540a8cUL
+#define PTU_REG_DBG_SELECT \
+ 0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE \
+ 0x560104UL
+#define PTU_REG_DBG_SHIFT \
+ 0x560108UL
+#define PTU_REG_DBG_FORCE_VALID \
+ 0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME \
+ 0x560110UL
+#define CDU_REG_DBG_SELECT \
+ 0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE \
+ 0x580708UL
+#define CDU_REG_DBG_SHIFT \
+ 0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID \
+ 0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME \
+ 0x580714UL
+#define WOL_REG_DBG_SELECT \
+ 0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE \
+ 0x600144UL
+#define WOL_REG_DBG_SHIFT \
+ 0x600148UL
+#define WOL_REG_DBG_FORCE_VALID \
+ 0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME \
+ 0x600150UL
+#define BMBN_REG_DBG_SELECT \
+ 0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE \
+ 0x610144UL
+#define BMBN_REG_DBG_SHIFT \
+ 0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID \
+ 0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME \
+ 0x610150UL
+#define NWM_REG_DBG_SELECT \
+ 0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE \
+ 0x8000f0UL
+#define NWM_REG_DBG_SHIFT \
+ 0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID \
+ 0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME \
+ 0x8000fcUL
+#define PBF_REG_DBG_SELECT \
+ 0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE \
+ 0xd80064UL
+#define PBF_REG_DBG_SHIFT \
+ 0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID \
+ 0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME \
+ 0xd80070UL
+#define PBF_PB1_REG_DBG_SELECT \
+ 0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE \
+ 0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT \
+ 0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID \
+ 0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME \
+ 0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT \
+ 0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE \
+ 0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT \
+ 0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID \
+ 0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME \
+ 0xda4738UL
+#define BTB_REG_DBG_SELECT \
+ 0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE \
+ 0xdb08ccUL
+#define BTB_REG_DBG_SHIFT \
+ 0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID \
+ 0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME \
+ 0xdb08d8UL
+#define XSDM_REG_DBG_SELECT \
+ 0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE \
+ 0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT \
+ 0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID \
+ 0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME \
+ 0xf80e38UL
+#define YSDM_REG_DBG_SELECT \
+ 0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE \
+ 0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT \
+ 0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID \
+ 0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME \
+ 0xf90e38UL
+#define PSDM_REG_DBG_SELECT \
+ 0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE \
+ 0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT \
+ 0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID \
+ 0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME \
+ 0xfa0e38UL
+#define TSDM_REG_DBG_SELECT \
+ 0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE \
+ 0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT \
+ 0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID \
+ 0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME \
+ 0xfb0e38UL
+#define MSDM_REG_DBG_SELECT \
+ 0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE \
+ 0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT \
+ 0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID \
+ 0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME \
+ 0xfc0e38UL
+#define USDM_REG_DBG_SELECT \
+ 0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE \
+ 0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT \
+ 0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID \
+ 0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME \
+ 0xfd0e38UL
+#define XCM_REG_DBG_SELECT \
+ 0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE \
+ 0x1000044UL
+#define XCM_REG_DBG_SHIFT \
+ 0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID \
+ 0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME \
+ 0x1000050UL
+#define YCM_REG_DBG_SELECT \
+ 0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE \
+ 0x1080044UL
+#define YCM_REG_DBG_SHIFT \
+ 0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID \
+ 0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME \
+ 0x1080050UL
+#define PCM_REG_DBG_SELECT \
+ 0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE \
+ 0x1100044UL
+#define PCM_REG_DBG_SHIFT \
+ 0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID \
+ 0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME \
+ 0x1100050UL
+#define TCM_REG_DBG_SELECT \
+ 0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE \
+ 0x1180044UL
+#define TCM_REG_DBG_SHIFT \
+ 0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID \
+ 0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME \
+ 0x1180050UL
+#define MCM_REG_DBG_SELECT \
+ 0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE \
+ 0x1200044UL
+#define MCM_REG_DBG_SHIFT \
+ 0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID \
+ 0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME \
+ 0x1200050UL
+#define UCM_REG_DBG_SELECT \
+ 0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE \
+ 0x1280054UL
+#define UCM_REG_DBG_SHIFT \
+ 0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID \
+ 0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME \
+ 0x1280060UL
+#define XSEM_REG_DBG_SELECT \
+ 0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE \
+ 0x140152cUL
+#define XSEM_REG_DBG_SHIFT \
+ 0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID \
+ 0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME \
+ 0x1401538UL
+#define YSEM_REG_DBG_SELECT \
+ 0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE \
+ 0x150152cUL
+#define YSEM_REG_DBG_SHIFT \
+ 0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID \
+ 0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME \
+ 0x1501538UL
+#define PSEM_REG_DBG_SELECT \
+ 0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE \
+ 0x160152cUL
+#define PSEM_REG_DBG_SHIFT \
+ 0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID \
+ 0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME \
+ 0x1601538UL
+#define TSEM_REG_DBG_SELECT \
+ 0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE \
+ 0x170152cUL
+#define TSEM_REG_DBG_SHIFT \
+ 0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID \
+ 0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME \
+ 0x1701538UL
+#define MSEM_REG_DBG_SELECT \
+ 0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE \
+ 0x180152cUL
+#define MSEM_REG_DBG_SHIFT \
+ 0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID \
+ 0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME \
+ 0x1801538UL
+#define USEM_REG_DBG_SELECT \
+ 0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE \
+ 0x190152cUL
+#define USEM_REG_DBG_SHIFT \
+ 0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID \
+ 0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME \
+ 0x1901538UL
+#define PCIE_REG_DBG_COMMON_SELECT \
+ 0x054398UL
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
+ 0x05439cUL
+#define PCIE_REG_DBG_COMMON_SHIFT \
+ 0x0543a0UL
+#define PCIE_REG_DBG_COMMON_FORCE_VALID \
+ 0x0543a4UL
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME \
+ 0x0543a8UL
+#define MISC_REG_RESET_PL_UA \
+ 0x008050UL
+#define MISC_REG_RESET_PL_HV \
+ 0x008060UL
+#define XCM_REG_CTX_RBC_ACCS \
+ 0x1001800UL
+#define XCM_REG_AGG_CON_CTX \
+ 0x1001804UL
+#define XCM_REG_SM_CON_CTX \
+ 0x1001808UL
+#define YCM_REG_CTX_RBC_ACCS \
+ 0x1081800UL
+#define YCM_REG_AGG_CON_CTX \
+ 0x1081804UL
+#define YCM_REG_AGG_TASK_CTX \
+ 0x1081808UL
+#define YCM_REG_SM_CON_CTX \
+ 0x108180cUL
+#define YCM_REG_SM_TASK_CTX \
+ 0x1081810UL
+#define PCM_REG_CTX_RBC_ACCS \
+ 0x1101440UL
+#define PCM_REG_SM_CON_CTX \
+ 0x1101444UL
+#define TCM_REG_CTX_RBC_ACCS \
+ 0x11814c0UL
+#define TCM_REG_AGG_CON_CTX \
+ 0x11814c4UL
+#define TCM_REG_AGG_TASK_CTX \
+ 0x11814c8UL
+#define TCM_REG_SM_CON_CTX \
+ 0x11814ccUL
+#define TCM_REG_SM_TASK_CTX \
+ 0x11814d0UL
+#define MCM_REG_CTX_RBC_ACCS \
+ 0x1201800UL
+#define MCM_REG_AGG_CON_CTX \
+ 0x1201804UL
+#define MCM_REG_AGG_TASK_CTX \
+ 0x1201808UL
+#define MCM_REG_SM_CON_CTX \
+ 0x120180cUL
+#define MCM_REG_SM_TASK_CTX \
+ 0x1201810UL
+#define UCM_REG_CTX_RBC_ACCS \
+ 0x1281700UL
+#define UCM_REG_AGG_CON_CTX \
+ 0x1281704UL
+#define UCM_REG_AGG_TASK_CTX \
+ 0x1281708UL
+#define UCM_REG_SM_CON_CTX \
+ 0x128170cUL
+#define UCM_REG_SM_TASK_CTX \
+ 0x1281710UL
+#define XSEM_REG_SLOW_DBG_EMPTY \
+ 0x1401140UL
+#define XSEM_REG_SYNC_DBG_EMPTY \
+ 0x1401160UL
+#define XSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE \
+ 0x1401404UL
+#define XSEM_REG_DBG_FRAME_MODE \
+ 0x1401408UL
+#define XSEM_REG_DBG_MODE1_CFG \
+ 0x1401420UL
+#define XSEM_REG_FAST_MEMORY \
+ 0x1440000UL
+#define YSEM_REG_SYNC_DBG_EMPTY \
+ 0x1501160UL
+#define YSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE \
+ 0x1501404UL
+#define YSEM_REG_DBG_FRAME_MODE \
+ 0x1501408UL
+#define YSEM_REG_DBG_MODE1_CFG \
+ 0x1501420UL
+#define YSEM_REG_FAST_MEMORY \
+ 0x1540000UL
+#define PSEM_REG_SLOW_DBG_EMPTY \
+ 0x1601140UL
+#define PSEM_REG_SYNC_DBG_EMPTY \
+ 0x1601160UL
+#define PSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE \
+ 0x1601404UL
+#define PSEM_REG_DBG_FRAME_MODE \
+ 0x1601408UL
+#define PSEM_REG_DBG_MODE1_CFG \
+ 0x1601420UL
+#define PSEM_REG_FAST_MEMORY \
+ 0x1640000UL
+#define TSEM_REG_SLOW_DBG_EMPTY \
+ 0x1701140UL
+#define TSEM_REG_SYNC_DBG_EMPTY \
+ 0x1701160UL
+#define TSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE \
+ 0x1701404UL
+#define TSEM_REG_DBG_FRAME_MODE \
+ 0x1701408UL
+#define TSEM_REG_DBG_MODE1_CFG \
+ 0x1701420UL
+#define TSEM_REG_FAST_MEMORY \
+ 0x1740000UL
+#define MSEM_REG_SLOW_DBG_EMPTY \
+ 0x1801140UL
+#define MSEM_REG_SYNC_DBG_EMPTY \
+ 0x1801160UL
+#define MSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE \
+ 0x1801404UL
+#define MSEM_REG_DBG_FRAME_MODE \
+ 0x1801408UL
+#define MSEM_REG_DBG_MODE1_CFG \
+ 0x1801420UL
+#define MSEM_REG_FAST_MEMORY \
+ 0x1840000UL
+#define USEM_REG_SLOW_DBG_EMPTY \
+ 0x1901140UL
+#define USEM_REG_SYNC_DBG_EMPTY \
+ 0x1901160UL
+#define USEM_REG_SLOW_DBG_ACTIVE \
+ 0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE \
+ 0x1901404UL
+#define USEM_REG_DBG_FRAME_MODE \
+ 0x1901408UL
+#define USEM_REG_DBG_MODE1_CFG \
+ 0x1901420UL
+#define USEM_REG_FAST_MEMORY \
+ 0x1940000UL
+#define SEM_FAST_REG_INT_RAM \
+ 0x020000UL
+#define SEM_FAST_REG_INT_RAM_SIZE \
+ 20480
+#define GRC_REG_TRACE_FIFO_VALID_DATA \
+ 0x050064UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \
+ 0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW \
+ 0x050500UL
+#define IGU_REG_ERROR_HANDLING_MEMORY \
+ 0x181520UL
+#define MCP_REG_CPU_MODE \
+ 0xe05000UL
+#define MCP_REG_CPU_MODE_SOFT_HALT \
+ (0x1 << 10)
+#define BRB_REG_BIG_RAM_ADDRESS \
+ 0x340800UL
+#define BRB_REG_BIG_RAM_DATA \
+ 0x341500UL
+#define SEM_FAST_REG_STALL_0 \
+ 0x000488UL
+#define SEM_FAST_REG_STALLED \
+ 0x000494UL
+#define BTB_REG_BIG_RAM_ADDRESS \
+ 0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA \
+ 0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS \
+ 0x540800UL
+#define BMB_REG_BIG_RAM_DATA \
+ 0x540f00UL
+#define SEM_FAST_REG_STORM_REG_FILE \
+ 0x008000UL
+#define RSS_REG_RSS_RAM_ADDR \
+ 0x238c30UL
+#define MISCS_REG_BLOCK_256B_EN \
+ 0x009074UL
+#define MCP_REG_SCRATCH_SIZE \
+ 57344
+#define MCP_REG_CPU_REG_FILE \
+ 0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE \
+ 32
+#define DBG_REG_DEBUG_TARGET \
+ 0x01005cUL
+#define DBG_REG_FULL_MODE \
+ 0x010060UL
+#define DBG_REG_CALENDAR_OUT_DATA \
+ 0x010480UL
+#define GRC_REG_TRACE_FIFO \
+ 0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID \
+ 0x181530UL
+#define DBG_REG_DBG_BLOCK_ON \
+ 0x010454UL
+#define DBG_REG_FRAMING_MODE \
+ 0x010058UL
+#define SEM_FAST_REG_VFC_DATA_WR \
+ 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR \
+ 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD \
+ 0x000b48UL
+#define RSS_REG_RSS_RAM_DATA \
+ 0x238c20UL
+#define MISC_REG_BLOCK_256B_EN \
+ 0x008c14UL
+#define NWS_REG_NWS_CMU \
+ 0x720000UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0 \
+ 0x000680UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8 \
+ 0x000684UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0 \
+ 0x0006c0UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8 \
+ 0x0006c4UL
+#define MS_REG_MS_CMU \
+ 0x6a4000UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130 \
+ 0x000208UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132 \
+ 0x000210UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131 \
+ 0x00020cUL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133 \
+ 0x000214UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130 \
+ 0x000208UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131 \
+ 0x00020cUL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132 \
+ 0x000210UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133 \
+ 0x000214UL
+#define PHY_PCIE_REG_PHY0 \
+ 0x620000UL
+#define PHY_PCIE_REG_PHY1 \
+ 0x624000UL
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define DORQ_REG_PF_DPM_ENABLE 0x100510UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
new file mode 100644
index 000000000000..23430059471c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -0,0 +1,2954 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_roce.h"
+#include "qed_ll2.h"
+
+void qed_async_roce_event(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
+ p_eqe->opcode, &p_eqe->data);
+}
+
+static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
+
+ bmap->max_count = max_count;
+
+ bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
+ GFP_KERNEL);
+ if (!bmap->bitmap) {
+ DP_NOTICE(p_hwfn,
+ "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
+ return -ENOMEM;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
+ bmap->bitmap);
+ return 0;
+}
+
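+/* Note: the bitmap helpers below use non-atomic bit ops (find_first_zero_bit,
+ * __set_bit), so callers are expected to serialize on p_rdma_info->lock,
+ * typically via spin_lock_bh(), as the rest of this file does.
+ */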
+static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
+
+ *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
+
+ if (*id_num >= bmap->max_count) {
+ DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
+ bmap->max_count);
+ return -EINVAL;
+ }
+
+ __set_bit(*id_num, bmap->bitmap);
+
+ return 0;
+}
+
+static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ bool b_acquired;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
+ if (id_num >= bmap->max_count)
+ return;
+
+ b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
+ if (!b_acquired) {
+ DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
+ return;
+ }
+}
+
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+{
+ /* First sb id for RoCE is after all the l2 sb */
+ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
+}
+
+u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
+{
+ return QED_CAU_DEF_RX_TIMER_RES;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_info *p_rdma_info;
+ u32 num_cons, num_tasks;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
+ /* Allocate a struct with current pf rdma info */
+ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
+ if (!p_rdma_info) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ p_hwfn->p_rdma_info = p_rdma_info;
+ p_rdma_info->proto = PROTOCOLID_ROCE;
+
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
+
+ p_rdma_info->num_qps = num_cons / 2;
+
+ num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
+
+ /* Each MR uses a single task */
+ p_rdma_info->num_mrs = num_tasks;
+
+ /* Queue zone lines are shared between RoCE and L2 in such a way that
+ * they can be used by each without obstructing the other.
+ */
+ p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+
+ /* Allocate a struct with device params and fill it */
+ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
+ if (!p_rdma_info->dev) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
+ rc);
+ goto free_rdma_info;
+ }
+
+ /* Allocate a struct with port params and fill it */
+ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
+ if (!p_rdma_info->port) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
+ rc);
+ goto free_rdma_dev;
+ }
+
+ /* Allocate bitmap for PDs */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate pd_map, rc = %d\n",
+ rc);
+ goto free_rdma_port;
+ }
+
+ /* Allocate DPI bitmap */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
+ p_hwfn->dpi_count);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate DPI bitmap, rc = %d\n", rc);
+ goto free_pd_map;
+ }
+
+ /* Allocate bitmap for CQs. The maximum number of CQs is bounded by
+ * twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
+ p_rdma_info->num_qps * 2);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cq bitmap, rc = %d\n", rc);
+ goto free_dpi_map;
+ }
+
+ /* Allocate bitmap for the toggle bit of CQ icids.
+ * We toggle the bit every time we create or resize a CQ for a given icid.
+ * The maximum number of CQs is bounded by twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
+ p_rdma_info->num_qps * 2);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate toogle bits, rc = %d\n", rc);
+ goto free_cq_map;
+ }
+
+ /* Allocate bitmap for itids */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
+ p_rdma_info->num_mrs);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate itids bitmaps, rc = %d\n", rc);
+ goto free_toggle_map;
+ }
+
+ /* Allocate bitmap for cids used for qps. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cid bitmap, rc = %d\n", rc);
+ goto free_tid_map;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
+ return 0;
+
+free_tid_map:
+ kfree(p_rdma_info->tid_map.bitmap);
+free_toggle_map:
+ kfree(p_rdma_info->toggle_bits.bitmap);
+free_cq_map:
+ kfree(p_rdma_info->cq_map.bitmap);
+free_dpi_map:
+ kfree(p_rdma_info->dpi_map.bitmap);
+free_pd_map:
+ kfree(p_rdma_info->pd_map.bitmap);
+free_rdma_port:
+ kfree(p_rdma_info->port);
+free_rdma_dev:
+ kfree(p_rdma_info->dev);
+free_rdma_info:
+ kfree(p_rdma_info);
+
+ return rc;
+}
+
+void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ kfree(p_rdma_info->cid_map.bitmap);
+ kfree(p_rdma_info->tid_map.bitmap);
+ kfree(p_rdma_info->toggle_bits.bitmap);
+ kfree(p_rdma_info->cq_map.bitmap);
+ kfree(p_rdma_info->dpi_map.bitmap);
+ kfree(p_rdma_info->pd_map.bitmap);
+
+ kfree(p_rdma_info->port);
+ kfree(p_rdma_info->dev);
+
+ kfree(p_rdma_info);
+}
+
+static void qed_rdma_free(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+
+ qed_rdma_resc_free(p_hwfn);
+}
+
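+/* The 64-bit GUID below is the EUI-64 expansion of the port MAC: the
+ * universal/local bit of the first octet is flipped (the "^ 2") and 0xff,
+ * 0xfe is spliced between the OUI and the NIC-specific bytes. For example,
+ * a (hypothetical) MAC aa:bb:cc:dd:ee:ff yields a8:bb:cc:ff:fe:dd:ee:ff.
+ */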
+static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
+{
+ guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
+ guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
+ guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
+ guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
+ guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
+}
+
+static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_events *events;
+
+ events = &p_hwfn->p_rdma_info->events;
+
+ events->unaffiliated_event = params->events->unaffiliated_event;
+ events->affiliated_event = params->events->affiliated_event;
+ events->context = params->events->context;
+}
+
+static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 pci_status_control;
+ u32 num_qps;
+
+ /* Vendor specific information */
+ dev->vendor_id = cdev->vendor_id;
+ dev->vendor_part_id = cdev->device_id;
+ dev->hw_ver = 0;
+ dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+ qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
+ dev->node_guid = dev->sys_image_guid;
+
+ dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
+ RDMA_MAX_SGE_PER_RQ_WQE);
+
+ if (cdev->rdma_max_sge)
+ dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+
+ dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+
+ dev->max_inline = (cdev->rdma_max_inline) ?
+ min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
+ dev->max_inline;
+
+ dev->max_wqe = QED_RDMA_MAX_WQE;
+ dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
+
+ /* The number of QPs may be higher than ROCE_MAX_QPS, because
+ * it is up-aligned to 16 and then to the ILT page size within qed cxt.
+ * This is OK in terms of ILT, but we don't want to configure the FW
+ * beyond its abilities.
+ */
+ num_qps = ROCE_MAX_QPS;
+ num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
+ dev->max_qp = num_qps;
+
+ /* CQs use the same icids that QPs use, hence they are limited by the
+ * number of icids. There are two icids per QP.
+ */
+ dev->max_cq = num_qps * 2;
+
+ /* The number of MRs is smaller by 1 since the first is reserved */
+ dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
+ dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
+
+ /* The maximum CQE capacity per CQ supported.
+ * The max number of CQEs assumes a two-layer PBL:
+ * 8 is the pointer size in bytes,
+ * 32 is the size of a CQ element in bytes.
+ */
+ if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
+ dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
+ else
+ dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
+
+ dev->max_mw = 0;
+ dev->max_fmr = QED_RDMA_MAX_FMR;
+ dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
+ dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
+ dev->max_pkey = QED_RDMA_MAX_P_KEY;
+
+ dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
+ dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ RDMA_REQ_RD_ATOMIC_ELM_SIZE;
+ dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
+ p_hwfn->p_rdma_info->num_qps;
+ dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
+ dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
+ dev->max_pd = RDMA_MAX_PDS;
+ dev->max_ah = p_hwfn->p_rdma_info->num_qps;
+ dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
+
+ /* Set capabilities */
+ dev->dev_caps = 0;
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
+
+ /* Check atomic operations support in PCI configuration space. */
+ pci_read_config_dword(cdev->pdev,
+ cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
+ &pci_status_control);
+
+ if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
+}
+
+static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ port->max_msg_size = min_t(u64,
+ (dev->max_mr_mw_fmr_size *
+ p_hwfn->cdev->rdma_max_sge),
+ BIT(31));
+
+ port->pkey_bad_counter = 0;
+}
+
+static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ll2_ethertype_en;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
+
+ /* We delay writing to this reg (PRS_REG_SEARCH_ROCE) until the first
+ * cid is allocated; see qed_cxt_dynamic_ilt_alloc() for more details.
+ */
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en | 0x01));
+
+ if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
+ DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
+ return 0;
+}
+
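+/* The FUNC_INIT ramrod carries one set of SB/PBL parameters per requested
+ * CNQ: CNQ i is bound to the status block right after the L2 queues (see
+ * qed_rdma_get_sb_id()) and to queue zone queue_zone_base + i.
+ */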
+static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ struct qed_ptt *p_ptt)
+{
+ struct rdma_init_func_ramrod_data *p_ramrod;
+ struct qed_rdma_cnq_params *p_cnq_pbl_list;
+ struct rdma_init_func_hdr *p_params_header;
+ struct rdma_cnq_params *p_cnq_params;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 cnq_id, sb_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
+
+ /* Save the number of cnqs for the function close ramrod */
+ p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+
+ p_params_header = &p_ramrod->params_header;
+ p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
+ QED_RDMA_CNQ_RAM);
+ p_params_header->num_cnqs = params->desired_cnq;
+
+ if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
+ p_params_header->cq_ring_mode = 1;
+ else
+ p_params_header->cq_ring_mode = 0;
+
+ for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
+ sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
+ p_cnq_params = &p_ramrod->cnq_params[cnq_id];
+ p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
+ p_cnq_params->sb_num =
+ cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
+
+ p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
+ p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
+
+ DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
+ p_cnq_pbl_list->pbl_ptr);
+
+ /* We assume here that cnq_id and qz_offset are the same */
+ p_cnq_params->queue_zone_num =
+ cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
+ cnq_id);
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ /* The first DPI is reserved for the kernel */
+ __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
+
+ /* Tid 0 will be used as the key for "reserved MR".
+ * The driver should allocate memory for it so it can be loaded but no
+ * ramrod should be passed on it.
+ */
+ qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
+ if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
+ DP_NOTICE(p_hwfn,
+ "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
+
+ spin_lock_init(&p_hwfn->p_rdma_info->lock);
+
+ qed_rdma_init_devinfo(p_hwfn, params);
+ qed_rdma_init_port(p_hwfn);
+ qed_rdma_init_events(p_hwfn, params);
+
+ rc = qed_rdma_reserve_lkey(p_hwfn);
+ if (rc)
+ return rc;
+
+ rc = qed_rdma_init_hw(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ return qed_rdma_start_fw(p_hwfn, params, p_ptt);
+}
+
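+/* Teardown mirrors qed_rdma_setup(): RoCE search in PRS is disabled and the
+ * light-L2 ethertype bit cleared, a FUNC_CLOSE ramrod is posted, and only
+ * then are the RDMA resources freed.
+ */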
+int qed_rdma_stop(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_close_func_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u32 ll2_ethertype_en;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ /* Disable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en & 0xFFFE));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Stop RoCE */
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto out;
+
+ p_ramrod = &p_ent->ramrod.rdma_close_func;
+
+ p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
+ p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+out:
+ qed_rdma_free(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
+ return rc;
+}
+
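+/* Each user gets a private DPI: a doorbell window of dpi_size bytes carved
+ * out of the doorbell BAR at dpi_start_offset + dpi * dpi_size. Both the
+ * ioremapped and the physical address of that window are returned.
+ */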
+int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 dpi_start_offset;
+ u32 returned_id = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
+
+ /* Allocate DPI */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ out_params->dpi = (u16)returned_id;
+
+ /* Calculate the corresponding DPI address */
+ dpi_start_offset = p_hwfn->dpi_start_offset;
+
+ out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size));
+
+ out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size);
+
+ out_params->dpi_size = p_hwfn->dpi_size;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
+ return rc;
+}
+
+struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
+
+ /* Link may have changed */
+ p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+
+ return p_port;
+}
+
+struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
+
+ /* Return struct with device parameters */
+ return p_hwfn->p_rdma_info->dev;
+}
+
+void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
+void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+{
+ struct qed_hwfn *p_hwfn;
+ u16 qz_num;
+ u32 addr;
+
+ p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ wmb();
+}
+
+static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ info->rdma_type = QED_RDMA_TYPE_ROCE;
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static int qed_rdma_get_sb_start(struct qed_dev *cdev)
+{
+ int feat_num;
+
+ if (cdev->num_hwfns > 1)
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
+ else
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
+ cdev->num_hwfns;
+
+ return feat_num;
+}
+
+static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
+{
+ int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
+ int n_msix = cdev->int_params.rdma_msix_cnt;
+
+ return min_t(int, n_cnq, n_msix);
+}
+
+static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
+ DP_ERR(cdev,
+ "qed roce supports only MSI-X interrupts (detected %d).\n",
+ cdev->int_params.out.int_mode);
+ return -EINVAL;
+ } else if (cdev->int_params.fp_msix_cnt) {
+ limit = cdev->int_params.rdma_msix_cnt;
+ }
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.rdma_msix_base;
+
+ info->msix_cnt = cdev->int_params.rdma_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
+ info->msix_cnt, msix_base);
+ }
+
+ return 0;
+}
+
+int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
+
+ /* Allocates an unused protection domain */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->pd_map, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ *pd = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
+ return rc;
+}
+
+void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
+
+ /* Returns a previously allocated protection domain for reuse */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static enum qed_rdma_toggle_bit
+qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ enum qed_rdma_toggle_bit toggle_bit;
+ u32 bmap_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
+
+ /* The function toggles the bit related to the given icid and
+ * returns the toggle bit's new value.
+ */
+ bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
+
+ spin_lock_bh(&p_info->lock);
+ toggle_bit = !test_and_change_bit(bmap_id,
+ p_info->toggle_bits.bitmap);
+ spin_unlock_bh(&p_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
+ toggle_bit);
+
+ return toggle_bit;
+}
+
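+/* CQ icids are offset by the protocol's first cid, so the bitmap index is
+ * icid - qed_cxt_get_proto_cid_start(); the same translation is undone in
+ * qed_rdma_destroy_cq() when the id is released.
+ */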
+int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params, u16 *icid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ struct rdma_create_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 returned_id, start_cid;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
+ params->cq_handle_hi, params->cq_handle_lo);
+
+ /* Allocate icid */
+ spin_lock_bh(&p_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_info->cq_map, &returned_id);
+ spin_unlock_bh(&p_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
+ return rc;
+ }
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_info->proto);
+ *icid = returned_id + start_cid;
+
+ /* Check if icid requires a page allocation */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
+ if (rc)
+ goto err;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = *icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send create CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_CQ,
+ p_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_cq;
+
+ p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
+ p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
+ p_ramrod->dpi = cpu_to_le16(params->dpi);
+ p_ramrod->is_two_level_pbl = params->pbl_two_level;
+ p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
+ p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
+ p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
+ params->cnq_id;
+ p_ramrod->int_timeout = params->int_timeout;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+
+ p_ramrod->toggle_bit = toggle_bit;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc) {
+ /* restore toggle bit */
+ qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+ goto err;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ /* release allocated icid */
+ qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
+ DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_rdma_resize_cq(void *rdma_cxt,
+ struct qed_rdma_resize_cq_in_params *in_params,
+ struct qed_rdma_resize_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_resize_cq_output_params *p_ramrod_res;
+ struct rdma_resize_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ u8 fw_return_code;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed resize cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_RESIZE_CQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_resize_cq;
+
+ p_ramrod->flags = 0;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
+ in_params->icid);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
+ in_params->pbl_two_level);
+
+ p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
+ p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
+ p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ goto err;
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
+ out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
+
+ return rc;
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+ DP_NOTICE(p_hwfn, "Resize CQ failed, rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_destroy_cq_output_params *p_ramrod_res;
+ struct rdma_destroy_cq_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send destroy CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_CQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ /* Free icid */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn,
+ &p_hwfn->p_rdma_info->cq_map,
+ (in_params->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto)));
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
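+/* Pack a 6-byte MAC into the three 16-bit words the FW expects: big-endian
+ * within each word, little-endian on the wire. E.g. aa:bb:cc:dd:ee:ff
+ * becomes { 0xaabb, 0xccdd, 0xeeff } before the cpu_to_le16() conversion.
+ */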
+static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
+{
+ p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
+ p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
+ p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+}
+
+static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
+ __le32 *dst_gid)
+{
+ u32 i;
+
+ if (qp->roce_mode == ROCE_V2_IPV4) {
+ /* The IPv4 addresses shall be aligned to the highest word.
+ * The lower words must be zero.
+ */
+ memset(src_gid, 0, sizeof(union qed_gid));
+ memset(dst_gid, 0, sizeof(union qed_gid));
+ src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
+ dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
+ } else {
+ /* GIDs and IPv6 addresses coincide in location and size */
+ for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
+ src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
+ dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
+ }
+ }
+}
+
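+/* Map the driver's roce_mode (RoCE v1 vs. the two routable RoCE v2
+ * variants) onto the FW's roce_flavor encoding; anything else falls back
+ * to an out-of-range sentinel.
+ */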
+static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
+{
+ enum roce_flavor flavor;
+
+ switch (roce_mode) {
+ case ROCE_V1:
+ flavor = PLAIN_ROCE;
+ break;
+ case ROCE_V2_IPV4:
+ flavor = RROCE_IPV4;
+ break;
+ case ROCE_V2_IPV6:
+ flavor = RROCE_IPV6;
+ break;
+ default:
+ flavor = MAX_ROCE_FLAVOR;
+ break;
+ }
+ return flavor;
+}
+
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 responder_icid;
+ u32 requester_icid;
+ int rc;
+
+ spin_lock_bh(&p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+ &responder_icid);
+ if (rc) {
+ spin_unlock_bh(&p_rdma_info->lock);
+ return rc;
+ }
+
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+ &requester_icid);
+
+ spin_unlock_bh(&p_rdma_info->lock);
+ if (rc)
+ goto err;
+
+ /* The two icids should be adjacent */
+ if ((requester_icid - responder_icid) != 1) {
+ DP_NOTICE(p_hwfn, "Failed to allocate two adjacent icids\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
+ requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
+
+ /* If these icids require a new ILT line allocate DMA-able context for
+ * an ILT page
+ */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
+ if (rc)
+ goto err;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
+ if (rc)
+ goto err;
+
+ *cid = (u16)responder_icid;
+ return rc;
+
+err:
+ spin_lock_bh(&p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
+
+ spin_unlock_bh(&p_rdma_info->lock);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Allocate CID - failed, rc = %d\n", rc);
+ return rc;
+}
+
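+/* A RoCE QP owns a pair of adjacent icids: the even icid carries the
+ * responder ramrods below and icid + 1 the requester ones, which is why
+ * qed_roce_alloc_cid() insists on adjacency and the QM qpid is icid >> 1.
+ */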
+static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_create_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params qm_params;
+ enum roce_flavor roce_flavor;
+ struct qed_spq_entry *p_ent;
+ u16 physical_queue0 = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* Allocate DMA-able memory for IRQ */
+ qp->irq_num_pages = 1;
+ qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ RDMA_RING_PAGE_SIZE,
+ &qp->irq_phys_addr, GFP_KERNEL);
+ if (!qp->irq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
+
+ p_ramrod->flags = 0;
+
+ roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+ qp->e2e_flow_control_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+ qp->min_rnr_nak_timer);
+
+ p_ramrod->max_ird = qp->max_rd_atomic_resp;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->irq_num_pages = qp->irq_num_pages;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
+ DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
+ p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
+ p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+ p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+ p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
+ p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ qp->rq_cq_id);
+
+ memset(&qm_params, 0, sizeof(qm_params));
+ qm_params.roce.qpid = qp->icid >> 1;
+ physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+
+ p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+ qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+ p_ramrod->udp_src_port = qp->udp_src_port;
+ p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
+
+ p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+ qp->stats_queue;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
+ rc, physical_queue0);
+
+ if (rc)
+ goto err;
+
+ qp->resp_offloaded = true;
+
+ return rc;
+
+err:
+ DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->irq, qp->irq_phys_addr);
+
+ return rc;
+}
+
+static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_create_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params qm_params;
+ enum roce_flavor roce_flavor;
+ struct qed_spq_entry *p_ent;
+ u16 physical_queue0 = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* Allocate DMA-able memory for ORQ */
+ qp->orq_num_pages = 1;
+ qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ RDMA_RING_PAGE_SIZE,
+ &qp->orq_phys_addr, GFP_KERNEL);
+ if (!qp->orq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_RAMROD_CREATE_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_create_qp_req;
+
+ p_ramrod->flags = 0;
+
+ roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+ qp->rnr_retry_cnt);
+
+ p_ramrod->max_ord = qp->max_rd_atomic_req;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->orq_num_pages = qp->orq_num_pages;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+ p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
+ DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
+ p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
+ p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+ p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+ p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
+ p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ qp->sq_cq_id);
+
+ memset(&qm_params, 0, sizeof(qm_params));
+ qm_params.roce.qpid = qp->icid >> 1;
+ physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+
+ p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+ qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+ p_ramrod->udp_src_port = qp->udp_src_port;
+ p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+ p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+ qp->stats_queue;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+
+ if (rc)
+ goto err;
+
+ qp->req_offloaded = true;
+
+ return rc;
+
+err:
+ DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->orq, qp->orq_phys_addr);
+ return rc;
+}
+
+static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ bool move_to_err, u32 modify_flags)
+{
+ struct roce_modify_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (move_to_err && !qp->resp_offloaded)
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_EVENT_MODIFY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
+
+ p_ramrod->flags = 0;
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+ qp->e2e_flow_control_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
+
+ p_ramrod->fields = 0;
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+ qp->min_rnr_nak_timer);
+
+ p_ramrod->max_ird = qp->max_rd_atomic_resp;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
+ return rc;
+}
+
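+/* Requester-side counterpart of the responder modify above; note the cid
+ * offset of + 1 and the additional move-to-SQD/ERR controls.
+ */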
+static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ bool move_to_sqd,
+ bool move_to_err, u32 modify_flags)
+{
+ struct roce_modify_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (move_to_err && !(qp->req_offloaded))
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_EVENT_MODIFY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
+
+ p_ramrod->flags = 0;
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
+ qp->sqd_async);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
+
+ p_ramrod->fields = 0;
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+ qp->rnr_retry_cnt);
+
+ p_ramrod->max_ord = qp->max_rd_atomic_req;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
+ return rc;
+}
+
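+/* Tear down the responder side. FW reports the number of invalidated
+ * memory windows through a DMA-coherent output buffer, which is freed on
+ * both the success and the failure path below.
+ */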
+static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ u32 *num_invalidated_mw)
+{
+ struct roce_destroy_qp_resp_output_params *p_ramrod_res;
+ struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (!qp->resp_offloaded)
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_RAMROD_DESTROY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
+
+ p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
+
+ if (!p_ramrod_res) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+
+ /* Free the IRQ only if the ramrod succeeded; otherwise FW may
+ * still be using it.
+ */
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->irq, qp->irq_phys_addr);
+
+ qp->resp_offloaded = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct roce_destroy_qp_resp_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
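+/* Requester teardown mirrors the responder flow; it reports the number of
+ * still-bound memory windows so callers can cross-check the two counts.
+ */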
+static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ u32 *num_bound_mw)
+{
+ struct roce_destroy_qp_req_output_params *p_ramrod_res;
+ struct roce_destroy_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (!qp->req_offloaded)
+ return 0;
+
+ p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy requester failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
+
+ /* Free the ORQ only if the ramrod succeeded; otherwise FW may
+ * still be using it.
+ */
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->orq, qp->orq_phys_addr);
+
+ qp->req_offloaded = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
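+/* Query the QP state from FW. If neither side has been offloaded yet the
+ * driver-tracked values are returned instead; otherwise a query ramrod is
+ * sent per offloaded side - responder first, then requester.
+ */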
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
+ struct roce_query_qp_req_output_params *p_req_ramrod_res;
+ struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
+ struct roce_query_qp_req_ramrod_data *p_req_ramrod;
+ struct qed_sp_init_data init_data;
+ dma_addr_t resp_ramrod_res_phys;
+ dma_addr_t req_ramrod_res_phys;
+ struct qed_spq_entry *p_ent;
+ bool rq_err_state;
+ bool sq_err_state;
+ bool sq_draining;
+ int rc = -ENOMEM;
+
+ if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
+ /* We can't send ramrod to the fw since this qp wasn't offloaded
+ * to the fw yet
+ */
+ out_params->draining = false;
+ out_params->rq_psn = qp->rq_psn;
+ out_params->sq_psn = qp->sq_psn;
+ out_params->state = qp->cur_state;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
+ return 0;
+ }
+
+ if (!(qp->resp_offloaded)) {
+ DP_NOTICE(p_hwfn,
+ "The responder's qp should be offloded before requester's\n");
+ return -EINVAL;
+ }
+
+ /* Send a query responder ramrod to FW to get RQ-PSN and state */
+ p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_resp_ramrod_res),
+ &resp_ramrod_res_phys, GFP_KERNEL);
+ if (!p_resp_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed query qp failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err_resp;
+
+ p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
+ DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err_resp;
+
+ /* Consume the output params before freeing their DMA buffer */
+ out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
+ rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
+ ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+
+ if (!(qp->req_offloaded)) {
+ /* Don't send query qp for the requester */
+ out_params->sq_psn = qp->sq_psn;
+ out_params->draining = false;
+
+ if (rq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_ERR;
+
+ out_params->state = qp->cur_state;
+
+ return 0;
+ }
+
+ /* Send a query requester ramrod to FW to get SQ-PSN and state */
+ p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_req_ramrod_res),
+ &req_ramrod_res_phys,
+ GFP_KERNEL);
+ if (!p_req_ramrod_res) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed query qp failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ init_data.cid = qp->icid + 1;
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err_req;
+
+ p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
+ DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err_req;
+
+ /* Consume the output params before freeing their DMA buffer */
+ out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
+ sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
+ sq_draining =
+ GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+
+ out_params->draining = false;
+
+ if (rq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_ERR;
+ else if (sq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_SQE;
+ else if (sq_draining)
+ out_params->draining = true;
+ out_params->state = qp->cur_state;
+
+ return 0;
+
+err_req:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+ return rc;
+err_resp:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+ return rc;
+}
+
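+/* Destroy both sides of a QP (valid only from the reset, error or init
+ * states) and release its cid pair. The invalidated and bound
+ * memory-window counts returned by the two teardown ramrods must match.
+ */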
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ u32 num_invalidated_mw = 0;
+ u32 num_bound_mw = 0;
+ u32 start_cid;
+ int rc;
+
+ /* Destroys the specified QP */
+ if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
+ DP_NOTICE(p_hwfn,
+ "QP must be in error, reset or init state before destroying it\n");
+ return -EINVAL;
+ }
+
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
+ if (rc)
+ return rc;
+
+ /* Send destroy requester ramrod */
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
+ if (rc)
+ return rc;
+
+ if (num_invalidated_mw != num_bound_mw) {
+ DP_NOTICE(p_hwfn,
+ "number of invalidate memory windows is different from bounded ones\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+
+ /* Release responder's icid */
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
+ qp->icid - start_cid);
+
+ /* Release requester's icid */
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
+ qp->icid + 1 - start_cid);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ return 0;
+}
+
+int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* The following fields are filled in from qp and not FW as they can't
+ * be modified by FW
+ */
+ out_params->mtu = qp->mtu;
+ out_params->dest_qp = qp->dest_qp;
+ out_params->incoming_atomic_en = qp->incoming_atomic_en;
+ out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
+ out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
+ out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
+ out_params->dgid = qp->dgid;
+ out_params->flow_label = qp->flow_label;
+ out_params->hop_limit_ttl = qp->hop_limit_ttl;
+ out_params->traffic_class_tos = qp->traffic_class_tos;
+ out_params->timeout = qp->ack_timeout;
+ out_params->rnr_retry = qp->rnr_retry_cnt;
+ out_params->retry_cnt = qp->retry_cnt;
+ out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
+ out_params->pkey_index = 0;
+ out_params->max_rd_atomic = qp->max_rd_atomic_req;
+ out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
+ out_params->sqd_async = qp->sqd_async;
+
+ rc = qed_roce_query_qp(p_hwfn, qp, out_params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ rc = qed_roce_destroy_qp(p_hwfn, qp);
+
+ /* free qp params struct */
+ kfree(qp);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
+ return rc;
+}
+
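+/* Allocate and initialize a driver QP object. No ramrod is sent here; the
+ * responder and requester are offloaded later, by qed_roce_modify_qp(),
+ * as the QP transitions towards RTR and RTS.
+ */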
+struct qed_rdma_qp *
+qed_rdma_create_qp(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *in_params,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_qp *qp;
+ u8 max_stats_queues;
+ int rc;
+
+ if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+ DP_ERR(p_hwfn->cdev,
+ "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ rdma_cxt, in_params, out_params);
+ return NULL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "qed rdma create qp called with qp_handle = %08x%08x\n",
+ in_params->qp_handle_hi, in_params->qp_handle_lo);
+
+ /* Some sanity checks... */
+ max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
+ if (in_params->stats_queue >= max_stats_queues) {
+ DP_ERR(p_hwfn->cdev,
+ "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
+ in_params->stats_queue, max_stats_queues);
+ return NULL;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
+ return NULL;
+ }
+
+ rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
+ if (rc) {
+ kfree(qp);
+ return NULL;
+ }
+
+ /* Use the icid only once the cid allocation has succeeded */
+ qp->qpid = ((0xFF << 16) | qp->icid);
+
+ DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
+
+ qp->cur_state = QED_ROCE_QP_STATE_RESET;
+ qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
+ qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
+ qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
+ qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
+ qp->use_srq = in_params->use_srq;
+ qp->signal_all = in_params->signal_all;
+ qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
+ qp->pd = in_params->pd;
+ qp->dpi = in_params->dpi;
+ qp->sq_cq_id = in_params->sq_cq_id;
+ qp->sq_num_pages = in_params->sq_num_pages;
+ qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
+ qp->rq_cq_id = in_params->rq_cq_id;
+ qp->rq_num_pages = in_params->rq_num_pages;
+ qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
+ qp->srq_id = in_params->srq_id;
+ qp->req_offloaded = false;
+ qp->resp_offloaded = false;
+ qp->e2e_flow_control_en = !qp->use_srq;
+ qp->stats_queue = in_params->stats_queue;
+
+ out_params->icid = qp->icid;
+ out_params->qp_id = qp->qpid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
+ return qp;
+}
+
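+/* Dispatch the ramrods implied by a state transition: offload creation on
+ * INIT/RESET->RTR and RTR->RTS, plain modifies for the RTS/SQD moves,
+ * move-to-error for ERR/SQE, and a full teardown on a move back to RESET.
+ */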
+static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ u32 num_invalidated_mw = 0, num_bound_mw = 0;
+ int rc = 0;
+
+ /* Perform additional operations according to the current state and the
+ * next state
+ */
+ if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
+ (prev_state == QED_ROCE_QP_STATE_RESET)) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
+ /* Init->RTR or Reset->RTR */
+ rc = qed_roce_sp_create_responder(p_hwfn, qp);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* RTR-> RTS */
+ rc = qed_roce_sp_create_requester(p_hwfn, qp);
+ if (rc)
+ return rc;
+
+ /* Send modify responder ramrod */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* RTS->RTS */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+ /* RTS->SQD */
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+ /* SQD->SQD */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* SQD->RTS */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+
+ return rc;
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
+ qp->cur_state == QED_ROCE_QP_STATE_SQE) {
+ /* ->ERR */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
+ params->modify_flags);
+ return rc;
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
+ /* Any state -> RESET */
+
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
+ &num_invalidated_mw);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
+ &num_bound_mw);
+
+ if (num_invalidated_mw != num_bound_mw) {
+ DP_NOTICE(p_hwfn,
+ "number of invalidate memory windows is different from bounded ones\n");
+ return -EINVAL;
+ }
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+ }
+
+ return rc;
+}
+
+int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ enum qed_roce_qp_state prev_state;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
+ qp->icid, params->new_state);
+
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
+ qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
+ qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
+ qp->incoming_atomic_en = params->incoming_atomic_en;
+ }
+
+ /* Update QP structure with the updated values */
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
+ qp->roce_mode = params->roce_mode;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
+ qp->pkey = params->pkey;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
+ qp->e2e_flow_control_en = params->e2e_flow_control_en;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
+ qp->dest_qp = params->dest_qp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
+ /* Indicates that the following parameters have changed:
+ * Traffic class, flow label, hop limit, source GID,
+ * destination GID, loopback indicator
+ */
+ qp->traffic_class_tos = params->traffic_class_tos;
+ qp->flow_label = params->flow_label;
+ qp->hop_limit_ttl = params->hop_limit_ttl;
+
+ qp->sgid = params->sgid;
+ qp->dgid = params->dgid;
+ qp->udp_src_port = 0;
+ qp->vlan_id = params->vlan_id;
+ qp->mtu = params->mtu;
+ qp->lb_indication = params->lb_indication;
+ memcpy(qp->remote_mac_addr, params->remote_mac_addr, ETH_ALEN);
+ if (params->use_local_mac)
+ memcpy(qp->local_mac_addr,
+ params->local_mac_addr, ETH_ALEN);
+ else
+ memcpy(qp->local_mac_addr,
+ p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+ }
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
+ qp->rq_psn = params->rq_psn;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
+ qp->sq_psn = params->sq_psn;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
+ qp->max_rd_atomic_req = params->max_rd_atomic_req;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
+ qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
+ qp->ack_timeout = params->ack_timeout;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
+ qp->retry_cnt = params->retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
+ qp->rnr_retry_cnt = params->rnr_retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
+ qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
+
+ qp->sqd_async = params->sqd_async;
+
+ prev_state = qp->cur_state;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
+ qp->cur_state = params->new_state;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
+ qp->cur_state);
+ }
+
+ rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
+ return rc;
+}
+
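+/* Register a memory region (TID) with FW. The ramrod carries the access
+ * rights, page-size and PBL layout bits, and the VA/length of the region;
+ * success is judged by fw_return_code rather than by the ramrod rc alone.
+ */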
+int qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_register_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ enum rdma_tid_type tid_type;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (p_hwfn->p_rdma_info->last_tid < params->itid)
+ p_hwfn->p_rdma_info->last_tid = params->itid;
+
+ p_ramrod = &p_ent->ramrod.rdma_register_tid;
+
+ p_ramrod->flags = 0;
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+ params->pbl_two_level);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+
+ /* Don't initialize the D/C field, as it may overwrite other bits. */
+ if (params->tid_type != QED_RDMA_TID_FMR && !params->dma_mr)
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
+ params->page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
+ p_hwfn->p_rdma_info->last_tid);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
+ params->remote_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
+ params->remote_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
+ params->remote_atomic);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
+ params->local_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
+ params->mw_bind);
+
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
+ params->pbl_page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
+
+ switch (params->tid_type) {
+ case QED_RDMA_TID_REGISTERED_MR:
+ tid_type = RDMA_TID_REGISTERED_MR;
+ break;
+ case QED_RDMA_TID_FMR:
+ tid_type = RDMA_TID_FMR;
+ break;
+ case QED_RDMA_TID_MW_TYPE1:
+ tid_type = RDMA_TID_MW_TYPE1;
+ break;
+ case QED_RDMA_TID_MW_TYPE2A:
+ tid_type = RDMA_TID_MW_TYPE2A;
+ break;
+ default:
+ rc = -EINVAL;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
+
+ p_ramrod->itid = cpu_to_le32(params->itid);
+ p_ramrod->key = params->key;
+ p_ramrod->pd = cpu_to_le16(params->pd);
+ p_ramrod->length_hi = (u8)(params->length >> 32);
+ p_ramrod->length_lo = DMA_LO_LE(params->length);
+ if (params->zbva) {
+ /* Lower 32 bits of the registered MR address.
+ * In case of zero based MR, will hold FBO
+ */
+ p_ramrod->va.hi = 0;
+ p_ramrod->va.lo = cpu_to_le32(params->fbo);
+ } else {
+ DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
+ }
+ DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
+
+ /* DIF */
+ if (params->dif_enabled) {
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
+ DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
+ params->dif_error_addr);
+ DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ return rc;
+
+ /* fw_return_code is only valid once the ramrod itself succeeded */
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
+ return rc;
+}
+
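+/* Deregister a TID. If FW reports that the TID is still in use it asks
+ * for a NIG drain first, after which the deregister ramrod is resent.
+ */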
+int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_deregister_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
+ p_ramrod->itid = cpu_to_le32(itid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
+ /* Bit indicating that the TID is in use and a nig drain is
+ * required before sending the ramrod again
+ */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ rc = -EBUSY;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Drain failed\n");
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Resend the ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto,
+ &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to init sp-element\n");
+ return rc;
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Ramrod failed\n");
+ return rc;
+ }
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
+ fw_return_code);
+ return -EINVAL;
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
+ return rc;
+}
+
+static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+{
+ return QED_LEADING_HWFN(cdev);
+}
+
+static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 val;
+
+ val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
+ DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
+ "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
+ val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
+}
+
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ p_hwfn->db_bar_no_edpm = true;
+
+ qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
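+/* Bring up the RDMA engine for this hwfn: allocate the resource maps,
+ * then run the HW/FW setup. The PTT is released on every path where it
+ * was acquired.
+ */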
+int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_ptt *p_ptt;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "desired_cnq = %08x\n", params->desired_cnq);
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ goto err;
+
+ rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err1;
+
+ rc = qed_rdma_setup(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err2;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+
+err2:
+ qed_rdma_free(p_hwfn);
+err1:
+ qed_ptt_release(p_hwfn, p_ptt);
+err:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_init(struct qed_dev *cdev,
+ struct qed_rdma_start_in_params *params)
+{
+ return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
+}
+
+void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
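+/* LL2 GSI completion callbacks - hand completed TX/RX GSI packets back to
+ * the callbacks registered by the RoCE LL2 user.
+ */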
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ struct qed_roce_ll2_packet *packet = cookie;
+ struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+
+ roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
+}
+
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
+ cookie, first_frag_addr,
+ b_last_fragment, b_last_packet);
+}
+
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet)
+{
+ struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+ struct qed_roce_ll2_rx_params params;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_roce_ll2_packet pkt;
+
+ DP_VERBOSE(cdev,
+ QED_MSG_LL2,
+ "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
+ (void *)(uintptr_t)rx_buf_addr,
+ data_length, data_length_error);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.n_seg = 1;
+ pkt.payload[0].baddr = rx_buf_addr;
+ pkt.payload[0].len = data_length;
+
+ memset(&params, 0, sizeof(params));
+ params.vlan_id = vlan;
+ *((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
+ *((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
+
+ if (data_length_error) {
+ DP_ERR(cdev,
+ "roce ll2 rx complete: data length error %d, length=%d\n",
+ data_length_error, data_length);
+ params.rc = -EINVAL;
+ }
+
+ roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
+}
+
+static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
+ DP_ERR(cdev,
+ "qed roce mac filter failed - roce_info/ll2 NULL\n");
+ return -EINVAL;
+ }
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to acquire PTT\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwfn->ll2->lock);
+ if (old_mac_address)
+ qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ old_mac_address);
+ if (new_mac_address)
+ rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ new_mac_address);
+ mutex_unlock(&hwfn->ll2->lock);
+
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to add mac filter\n");
+
+ return rc;
+}
+
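+/* Start the light-L2 (LL2) connection used for RoCE traffic: acquire and
+ * establish the LL2 connection, then install the MAC filter; errors
+ * unwind in reverse order.
+ */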
+static int qed_roce_ll2_start(struct qed_dev *cdev,
+ struct qed_roce_ll2_params *params)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2;
+ struct qed_ll2_info ll2_params;
+ int rc;
+
+ if (!params) {
+ DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
+ return -EINVAL;
+ }
+ if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
+ params->cbs.tx_cb, params->cbs.rx_cb);
+ return -EINVAL;
+ }
+ if (!is_valid_ether_addr(params->mac_address)) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
+ params->mac_address);
+ return -EINVAL;
+ }
+
+ /* Initialize */
+ roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
+ if (!roce_ll2) {
+ DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
+ return -ENOMEM;
+ }
+ roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+ roce_ll2->cbs = params->cbs;
+ roce_ll2->cb_cookie = params->cb_cookie;
+ mutex_init(&roce_ll2->lock);
+
+ memset(&ll2_params, 0, sizeof(ll2_params));
+ ll2_params.conn_type = QED_LL2_TYPE_ROCE;
+ ll2_params.mtu = params->mtu;
+ ll2_params.rx_drop_ttl0_flg = true;
+ ll2_params.rx_vlan_removal_en = false;
+ ll2_params.tx_dest = CORE_TX_DEST_NW;
+ ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
+ ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
+ ll2_params.gsi_enable = true;
+
+ rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
+ params->max_rx_buffers,
+ params->max_tx_buffers,
+ &roce_ll2->handle);
+ if (rc) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+ rc);
+ goto err;
+ }
+
+ rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle);
+ if (rc) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
+ rc);
+ goto err1;
+ }
+
+ hwfn->ll2 = roce_ll2;
+
+ rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
+ if (rc) {
+ hwfn->ll2 = NULL;
+ goto err2;
+ }
+ ether_addr_copy(roce_ll2->mac_address, params->mac_address);
+
+ return 0;
+
+err2:
+ qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err1:
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err:
+ kfree(roce_ll2);
+ return rc;
+}
+
+static int qed_roce_ll2_stop(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+ int rc;
+
+ if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
+ DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
+ return -EINVAL;
+ }
+
+ /* remove LL2 MAC address filter */
+ rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
+ eth_zero_addr(roce_ll2->mac_address);
+
+ rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle);
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
+ rc);
+
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+
+ roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+ kfree(roce_ll2);
+
+ return rc;
+}
+
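+/* Transmit a RoCE LL2 packet: the header BD is posted first (selecting
+ * the RoCE v1/v2 flavor and the IP-csum flag for v2-IPv4), followed by
+ * one fragment per payload segment.
+ */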
+static int qed_roce_ll2_tx(struct qed_dev *cdev,
+ struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_tx_params *params)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+ enum qed_ll2_roce_flavor_type qed_roce_flavor;
+ u8 flags = 0;
+ int rc;
+ int i;
+
+ if (!cdev || !pkt || !params) {
+ DP_ERR(cdev,
+ "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
+ cdev, pkt, params);
+ return -EINVAL;
+ }
+
+ qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
+ : QED_LL2_RROCE;
+
+ if (pkt->roce_mode == ROCE_V2_IPV4)
+ flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+
+ /* Tx header */
+ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
+ 1 + pkt->n_seg, 0, flags, 0,
+ qed_roce_flavor, pkt->header.baddr,
+ pkt->header.len, pkt, 1);
+ if (rc) {
+ DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+ return QED_ROCE_TX_HEAD_FAILURE;
+ }
+
+ /* Tx payload */
+ for (i = 0; i < pkt->n_seg; i++) {
+ rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle,
+ pkt->payload[i].baddr,
+ pkt->payload[i].len);
+ if (rc) {
+ /* If this fails there is not much to do: a partial packet has
+ * already been posted and we can't free its memory until the
+ * completion arrives.
+ */
+ DP_ERR(cdev,
+ "roce ll2 tx: payload failed (rc=%d)\n", rc);
+ return QED_ROCE_TX_FRAG_FAILURE;
+ }
+ }
+
+ return 0;
+}
+
+static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
+ struct qed_roce_ll2_buffer *buf,
+ u64 cookie, u8 notify_fw)
+{
+ return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->ll2->handle,
+ buf->baddr, buf->len,
+ (void *)(uintptr_t)cookie, notify_fw);
+}
+
+static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+
+ return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle, stats);
+}
+
+static const struct qed_rdma_ops qed_rdma_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_rdma_dev_info,
+ .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
+ .rdma_init = &qed_rdma_init,
+ .rdma_add_user = &qed_rdma_add_user,
+ .rdma_remove_user = &qed_rdma_remove_user,
+ .rdma_stop = &qed_rdma_stop,
+ .rdma_query_port = &qed_rdma_query_port,
+ .rdma_query_device = &qed_rdma_query_device,
+ .rdma_get_start_sb = &qed_rdma_get_sb_start,
+ .rdma_get_rdma_int = &qed_rdma_get_int,
+ .rdma_set_rdma_int = &qed_rdma_set_int,
+ .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
+ .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
+ .rdma_alloc_pd = &qed_rdma_alloc_pd,
+ .rdma_dealloc_pd = &qed_rdma_free_pd,
+ .rdma_create_cq = &qed_rdma_create_cq,
+ .rdma_destroy_cq = &qed_rdma_destroy_cq,
+ .rdma_create_qp = &qed_rdma_create_qp,
+ .rdma_modify_qp = &qed_rdma_modify_qp,
+ .rdma_query_qp = &qed_rdma_query_qp,
+ .rdma_destroy_qp = &qed_rdma_destroy_qp,
+ .rdma_alloc_tid = &qed_rdma_alloc_tid,
+ .rdma_free_tid = &qed_rdma_free_tid,
+ .rdma_register_tid = &qed_rdma_register_tid,
+ .rdma_deregister_tid = &qed_rdma_deregister_tid,
+ .roce_ll2_start = &qed_roce_ll2_start,
+ .roce_ll2_stop = &qed_roce_ll2_stop,
+ .roce_ll2_tx = &qed_roce_ll2_tx,
+ .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
+ .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+ .roce_ll2_stats = &qed_roce_ll2_stats,
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
+{
+ return &qed_rdma_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_rdma_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
new file mode 100644
index 000000000000..2f091e8a0f40
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -0,0 +1,216 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_ROCE_H
+#define _QED_ROCE_H
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_ll2.h"
+
+#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
+#define QED_RDMA_MAX_P_KEY (1)
+#define QED_RDMA_MAX_WQE (0x7FFF)
+#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
+#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
+#define QED_RDMA_ACK_DELAY (15)
+#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
+#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
+#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
+/* Add 1 for header element */
+#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
+#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
+#define QED_RDMA_MAX_SRQS (32 * 1024)
+
+#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
+#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
+
+enum qed_rdma_toggle_bit {
+ QED_RDMA_TOGGLE_BIT_CLEAR = 0,
+ QED_RDMA_TOGGLE_BIT_SET = 1
+};
+
+struct qed_bmap {
+ unsigned long *bitmap;
+ u32 max_count;
+};
+
+struct qed_rdma_info {
+ /* spin lock to protect bitmaps */
+ spinlock_t lock;
+
+ struct qed_bmap cq_map;
+ struct qed_bmap pd_map;
+ struct qed_bmap tid_map;
+ struct qed_bmap qp_map;
+ struct qed_bmap srq_map;
+ struct qed_bmap cid_map;
+ struct qed_bmap dpi_map;
+ struct qed_bmap toggle_bits;
+ struct qed_rdma_events events;
+ struct qed_rdma_device *dev;
+ struct qed_rdma_port *port;
+ u32 last_tid;
+ u8 num_cnqs;
+ u32 num_qps;
+ u32 num_mrs;
+ u16 queue_zone_base;
+ enum protocol_type proto;
+};
+
+struct qed_rdma_resize_cq_in_params {
+ u16 icid;
+ u32 cq_size;
+ bool pbl_two_level;
+ u64 pbl_ptr;
+ u16 pbl_num_pages;
+ u8 pbl_page_size_log;
+};
+
+struct qed_rdma_resize_cq_out_params {
+ u32 prod;
+ u32 cons;
+};
+
+struct qed_rdma_resize_cnq_in_params {
+ u32 cnq_id;
+ u32 pbl_page_size_log;
+ u64 pbl_ptr;
+};
+
+struct qed_rdma_qp {
+ struct regpair qp_handle;
+ struct regpair qp_handle_async;
+ u32 qpid;
+ u16 icid;
+ enum qed_roce_qp_state cur_state;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+
+ u16 pd;
+ u16 pkey;
+ u32 dest_qp;
+ u16 mtu;
+ u16 srq_id;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u16 dpi;
+ u32 flow_label;
+ bool lb_indication;
+ u16 vlan_id;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ enum roce_mode roce_mode;
+ u16 udp_src_port;
+ u8 stats_queue;
+
+ /* requester */
+ u8 max_rd_atomic_req;
+ u32 sq_psn;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ dma_addr_t sq_pbl_ptr;
+ void *orq;
+ dma_addr_t orq_phys_addr;
+ u8 orq_num_pages;
+ bool req_offloaded;
+
+ /* responder */
+ u8 max_rd_atomic_resp;
+ u32 rq_psn;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ dma_addr_t rq_pbl_ptr;
+ void *irq;
+ dma_addr_t irq_phys_addr;
+ u8 irq_num_pages;
+ bool resp_offloaded;
+
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+
+ void *shared_queue;
+ dma_addr_t shared_queue_phys_addr;
+};
+
+int
+qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params);
+int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
+int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
+int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
+void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
+struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
+struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
+int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params);
+void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
+int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
+int qed_rdma_stop(void *rdma_cxt);
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
+u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
+void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
+void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
+void qed_async_roce_event(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe);
+int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
+int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params);
+int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params);
+
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#else
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+#endif
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
index a342bfe4280d..9b7678f26909 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -2,6 +2,7 @@
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_sp.h"
+#include "qed_selftest.h"
int qed_selftest_memory(struct qed_dev *cdev)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index a548504c3420..652c90819758 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -61,6 +61,10 @@ union ramrod_data {
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update;
+ struct core_rx_start_ramrod_data core_rx_queue_start;
+ struct core_rx_stop_ramrod_data core_rx_queue_stop;
+ struct core_tx_start_ramrod_data core_tx_queue_start;
+ struct core_tx_stop_ramrod_data core_tx_queue_stop;
struct vport_filter_update_ramrod_data vport_filter_update;
struct rdma_init_func_ramrod_data rdma_init_func;
@@ -81,6 +85,7 @@ union ramrod_data {
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
struct rdma_srq_modify_ramrod_data rdma_modify_srq;
+ struct roce_init_func_ramrod_data roce_init_func;
struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index a52f3fc051f5..2888eb0628f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -25,9 +25,7 @@
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
- u8 cmd,
- u8 protocol,
- struct qed_sp_init_data *p_data)
+ u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct qed_spq_entry *p_ent = NULL;
@@ -38,7 +36,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
rc = qed_spq_get_entry(p_hwfn, pp_ent);
- if (rc != 0)
+ if (rc)
return rc;
p_ent = *pp_ent;
@@ -321,8 +319,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_START,
- PROTOCOLID_COMMON,
- &init_data);
+ PROTOCOLID_COMMON, &init_data);
if (rc)
return rc;
@@ -356,8 +353,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
- qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
- &p_ramrod->tunnel_config);
+ qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
@@ -389,8 +385,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
- sb, sb_index,
- p_ramrod->outer_tag);
+ sb, sb_index, p_ramrod->outer_tag);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index d73456eab1d7..caff41544898 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,6 +28,9 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#include "qed_roce.h"
+#endif
/***************************************************************************
* Structures & Definitions
@@ -41,8 +44,7 @@
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
void *cookie,
- union event_ring_data *data,
- u8 fw_return_code)
+ union event_ring_data *data, u8 fw_return_code)
{
struct qed_spq_comp_done *comp_done;
@@ -109,9 +111,8 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
-static int
-qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent)
+static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
{
p_ent->flags = 0;
@@ -189,8 +190,7 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
- struct qed_spq *p_spq,
- struct qed_spq_entry *p_ent)
+ struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
u16 echo = qed_chain_get_prod_idx(p_chain);
@@ -240,6 +240,11 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ case PROTOCOLID_ROCE:
+ qed_async_roce_event(p_hwfn, p_eqe);
+ return 0;
+#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
@@ -255,8 +260,7 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
/***************************************************************************
* EQ API
***************************************************************************/
-void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
- u16 prod)
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
@@ -267,9 +271,7 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
mmiowb();
}
-int qed_eq_completion(struct qed_hwfn *p_hwfn,
- void *cookie)
-
+int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
struct qed_eq *p_eq = cookie;
struct qed_chain *p_chain = &p_eq->chain;
@@ -323,17 +325,14 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn,
return rc;
}
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
- u16 num_elem)
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
struct qed_eq *p_eq;
/* Allocate EQ struct */
p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
- if (!p_eq) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+ if (!p_eq)
return NULL;
- }
/* Allocate and initialize EQ chain*/
if (qed_chain_alloc(p_hwfn->cdev,
@@ -342,17 +341,12 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
QED_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element),
- &p_eq->chain)) {
- DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+ &p_eq->chain))
goto eq_allocate_fail;
- }
/* register EQ completion on the SP SB */
- qed_int_register_cb(p_hwfn,
- qed_eq_completion,
- p_eq,
- &p_eq->eq_sb_index,
- &p_eq->p_fw_cons);
+ qed_int_register_cb(p_hwfn, qed_eq_completion,
+ p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
return p_eq;
@@ -361,14 +355,12 @@ eq_allocate_fail:
return NULL;
}
-void qed_eq_setup(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq)
+void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
qed_chain_reset(&p_eq->chain);
}
-void qed_eq_free(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq)
+void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
if (!p_eq)
return;
@@ -379,10 +371,9 @@ void qed_eq_free(struct qed_hwfn *p_hwfn,
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
-static int qed_cqe_completion(
- struct qed_hwfn *p_hwfn,
- struct eth_slow_path_rx_cqe *cqe,
- enum protocol_type protocol)
+static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe,
+ enum protocol_type protocol)
{
if (IS_VF(p_hwfn->cdev))
return 0;
@@ -463,12 +454,9 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
u32 capacity;
/* SPQ struct */
- p_spq =
- kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
- if (!p_spq) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+ p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+ if (!p_spq)
return -ENOMEM;
- }
/* SPQ ring */
if (qed_chain_alloc(p_hwfn->cdev,
@@ -477,18 +465,14 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
QED_CHAIN_CNT_TYPE_U16,
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
- &p_spq->chain)) {
- DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+ &p_spq->chain))
goto spq_allocate_fail;
- }
/* allocate and fill the SPQ elements (incl. ramrod data list) */
capacity = qed_chain_get_capacity(&p_spq->chain);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- capacity *
- sizeof(struct qed_spq_entry),
+ capacity * sizeof(struct qed_spq_entry),
&p_phys, GFP_KERNEL);
-
if (!p_virt)
goto spq_allocate_fail;
@@ -525,9 +509,7 @@ void qed_spq_free(struct qed_hwfn *p_hwfn)
kfree(p_spq);
}
-int
-qed_spq_get_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry **pp_ent)
+int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
@@ -538,14 +520,15 @@ qed_spq_get_entry(struct qed_hwfn *p_hwfn,
if (list_empty(&p_spq->free_pool)) {
p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
if (!p_ent) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate an SPQ entry for a pending ramrod\n");
rc = -ENOMEM;
goto out_unlock;
}
p_ent->queue = &p_spq->unlimited_pending;
} else {
p_ent = list_first_entry(&p_spq->free_pool,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
list_del(&p_ent->list);
p_ent->queue = &p_spq->pending;
}
@@ -564,8 +547,7 @@ static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
-void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent)
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
spin_lock_bh(&p_hwfn->p_spq->lock);
__qed_spq_return_entry(p_hwfn, p_ent);
@@ -586,10 +568,9 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
*
* @return int
*/
-static int
-qed_spq_add_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent,
- enum spq_priority priority)
+static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ enum spq_priority priority)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
@@ -604,8 +585,7 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_en2;
p_en2 = list_first_entry(&p_spq->free_pool,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
list_del(&p_en2->list);
/* Copy the ring element physical pointer to the new
@@ -655,8 +635,7 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
- struct list_head *head,
- u32 keep_reserve)
+ struct list_head *head, u32 keep_reserve)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
int rc;
@@ -690,8 +669,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
break;
p_ent = list_first_entry(&p_spq->unlimited_pending,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
if (!p_ent)
return -EINVAL;
@@ -705,8 +683,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
}
int qed_spq_post(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent,
- u8 *fw_return_code)
+ struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
@@ -803,8 +780,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
return -EINVAL;
spin_lock_bh(&p_spq->lock);
- list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
- list) {
+ list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
if (p_ent->elem.hdr.echo == echo) {
u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
@@ -846,15 +822,22 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
if (!found) {
DP_NOTICE(p_hwfn,
- "Failed to find an entry this EQE completes\n");
+ "Failed to find an entry this EQE [echo %04x] completes\n",
+ le16_to_cpu(echo));
return -EEXIST;
}
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Complete EQE [echo %04x]: func %p cookie %p)\n",
+ le16_to_cpu(echo),
p_ent->comp_cb.function, p_ent->comp_cb.cookie);
if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
+ else
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Got a completion without a callback function\n");
if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
(found->queue == &p_spq->unlimited_pending))
@@ -878,10 +861,8 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
/* Allocate ConsQ struct */
p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
- if (!p_consq) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+ if (!p_consq)
return NULL;
- }
/* Allocate and initialize EQ chain*/
if (qed_chain_alloc(p_hwfn->cdev,
@@ -889,10 +870,8 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
QED_CHAIN_PAGE_SIZE / 0x80,
- 0x80, &p_consq->chain)) {
- DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
+ 0x80, &p_consq->chain))
goto consq_allocate_fail;
- }
return p_consq;
@@ -901,14 +880,12 @@ consq_allocate_fail:
return NULL;
}
-void qed_consq_setup(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq)
+void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
qed_chain_reset(&p_consq->chain);
}
-void qed_consq_free(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq)
+void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
if (!p_consq)
return;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 15399da268d9..d2d6621fe0e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -60,7 +60,8 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
}
fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
- if (fp_minor > ETH_HSI_VER_MINOR) {
+ if (fp_minor > ETH_HSI_VER_MINOR &&
+ fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
@@ -107,8 +108,8 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
- int rel_vf_id, bool b_enabled_only)
+static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -185,8 +186,8 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
return false;
}
-int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
- int vfid, struct qed_ptt *p_ptt)
+static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+ int vfid, struct qed_ptt *p_ptt)
{
struct qed_bulletin_content *p_bulletin;
int crc_size = sizeof(p_bulletin->crc);
@@ -454,10 +455,8 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn)
}
p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
- if (!p_sriov) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ if (!p_sriov)
return -ENOMEM;
- }
p_hwfn->pf_iov_info = p_sriov;
@@ -506,10 +505,9 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
/* Allocate a new struct for IOV information */
cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
- if (!cdev->p_iov_info) {
- DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
+ if (!cdev->p_iov_info)
return -ENOMEM;
- }
+
cdev->p_iov_info->pos = pos;
rc = qed_iov_pci_cfg_info(cdev);
@@ -575,7 +573,7 @@ static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
}
}
-void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
u16 i;
@@ -699,7 +697,7 @@ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
&qzone_id);
reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
- val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
qed_wr(p_hwfn, p_ptt, reg_addr, val);
}
}
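
The (1 << n) -> BIT(n) conversions scattered through this patch are cosmetic. For reference, BIT() comes from include/linux/bitops.h and expands roughly as sketched below (simplified; the real header differs in guards), so BIT(8) and (1 << 8) yield the same mask for the u32 writes here. The one subtlety is the 1UL type: ~BIT(nr) keeps the high bits set on 64-bit fields, unlike ~(1 << nr).

/* Simplified from include/linux/bitops.h: BIT(nr) yields an unsigned
 * long, so complementing it behaves correctly against wide bitmaps.
 */
#define BIT(nr) (1UL << (nr))
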
@@ -1090,13 +1088,13 @@ static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
- if (!(tlvs_mask & (1 << i)))
+ if (!(tlvs_mask & BIT(i)))
continue;
resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
qed_iov_vport_to_tlv(p_hwfn, i), size);
- if (tlvs_accepted & (1 << i))
+ if (tlvs_accepted & BIT(i))
resp->hdr.status = status;
else
resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
@@ -1132,9 +1130,10 @@ static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
-struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
- u16 relative_vf_id,
- bool b_enabled_only)
+static struct qed_public_vf_info *
+qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id, bool b_enabled_only)
{
struct qed_vf_info *vf = NULL;
@@ -1145,7 +1144,7 @@ struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
return &vf->p_vf_info;
}
-void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
struct qed_public_vf_info *vf_info;
@@ -1241,6 +1240,16 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* Some legacy OSes are incapable of correctly handling this
+ * failure.
+ */
+ if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+ (p_vf->acquire.vfdev_info.os_type ==
+ VFPF_ACQUIRE_OS_WINDOWS))
+ return PFVF_STATUS_SUCCESS;
+
return PFVF_STATUS_NO_RESOURCE;
}
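
As a self-contained illustration of the quirk documented in the comment above, the following userspace sketch models the PF's decision; the struct and the constant values are placeholders standing in for p_vf->acquire.vfdev_info and the qed header definitions, not the driver's actual layout.

#include <stdbool.h>
#include <stdint.h>

/* Assumed values - see ETH_HSI_VER_NO_PKT_LEN_TUNN and the os_type
 * definitions in the qed headers; they are placeholders here.
 */
#define ETH_HSI_VER_NO_PKT_LEN_TUNN	5
#define VFPF_ACQUIRE_OS_WINDOWS		2

struct vf_acquire_info {	/* hypothetical stand-in */
	uint8_t eth_fp_hsi_minor;
	uint8_t os_type;
};

/* A legacy Windows VF cannot parse a NO_RESOURCE reply to ACQUIRE, so
 * the PF reports SUCCESS instead and lets the VF run with the reduced
 * resources it was actually granted.
 */
static bool ack_resc_shortage_as_success(const struct vf_acquire_info *acq)
{
	return acq->eth_fp_hsi_minor == ETH_HSI_VER_NO_PKT_LEN_TUNN &&
	       acq->os_type == VFPF_ACQUIRE_OS_WINDOWS;
}
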
@@ -1280,22 +1289,42 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
memset(resp, 0, sizeof(*resp));
+ /* Write the PF version so that the VF knows which version is
+ * supported - it might be overridden later. This guarantees that
+ * the VF can recognize a legacy PF by the lack of versions in the
+ * reply.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+ if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+ vf->abs_vf_id, vf->state);
+ goto out;
+ }
+
/* Validate FW compatibility */
if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
- DP_INFO(p_hwfn,
- "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
- vf->abs_vf_id,
- req->vfdev_info.eth_fp_hsi_major,
- req->vfdev_info.eth_fp_hsi_minor,
- ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
-
- /* Write the PF version so that VF would know which version
- * is supported.
- */
- pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
- pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
- goto out;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] is pre-fastpath HSI\n",
+ vf->abs_vf_id);
+ p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+ vf->abs_vf_id,
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ goto out;
+ }
}
/* On 100g PFs, prevent old VFs from loading */
@@ -1334,8 +1363,11 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
pfdev_info->fw_minor = FW_MINOR_VERSION;
pfdev_info->fw_rev = FW_REVISION_VERSION;
pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
- pfdev_info->minor_fp_hsi = min_t(u8,
- ETH_HSI_VER_MINOR,
+
+ /* Incorrect for legacy VFs, but it doesn't matter since legacy
+ * VFs don't read this field.
+ */
+ pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
req->vfdev_info.eth_fp_hsi_minor);
pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
@@ -1438,14 +1470,11 @@ static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
filter.type = QED_FILTER_VLAN;
filter.vlan = p_vf->shadow_config.vlans[i].vid;
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
filter.vlan, p_vf->relative_vf_id);
- rc = qed_sp_eth_filter_ucast(p_hwfn,
- p_vf->opaque_fid,
- &filter,
- QED_SPQ_MODE_CB, NULL);
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to configure VLAN [%04x] to VF [%04x]\n",
@@ -1463,7 +1492,7 @@ qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
{
int rc = 0;
- if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ if ((events & BIT(VLAN_ADDR_FORCED)) &&
!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
@@ -1479,7 +1508,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (!p_vf->vport_instance)
return -EINVAL;
- if (events & (1 << MAC_ADDR_FORCED)) {
+ if (events & BIT(MAC_ADDR_FORCED)) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1502,7 +1531,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
}
- if (events & (1 << VLAN_ADDR_FORCED)) {
+ if (events & BIT(VLAN_ADDR_FORCED)) {
struct qed_sp_vport_update_params vport_update;
u8 removal;
int i;
@@ -1572,7 +1601,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (filter.vlan)
p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
else
- p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
}
/* If forced features are terminated, we need to configure the shadow
@@ -1619,8 +1648,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
qed_int_cau_conf_sb(p_hwfn, p_ptt,
start->sb_addr[sb_id],
- vf->igu_sbs[sb_id],
- vf->abs_vf_id, 1);
+ vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
}
qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
@@ -1632,7 +1660,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
* vfs that would still be fine, since they passed '0' as padding].
*/
p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
- if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
u8 vf_req = start->only_untagged;
vf_info->bulletin.p_virt->default_only_untagged = vf_req;
@@ -1650,9 +1678,10 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
+ params.check_mac = true;
rc = qed_sp_eth_vport_start(p_hwfn, &params);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn,
"qed_iov_vf_mbx_start_vport returned error %d\n", rc);
status = PFVF_STATUS_FAILURE;
@@ -1679,7 +1708,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->spoof_chk = false;
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
rc);
status = PFVF_STATUS_FAILURE;
@@ -1695,21 +1724,32 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_vf_info *vf, u8 status)
+ struct qed_vf_info *vf,
+ u8 status, bool b_legacy)
{
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
struct vfpf_start_rxq_tlv *req;
+ u16 length;
mbx->offset = (u8 *)mbx->reply_virt;
+ /* Using a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one we're now stuck with, since some older clients
+ * assume the size of the previous response.
+ */
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
- sizeof(*p_tlv));
+ length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if (status == PFVF_STATUS_SUCCESS) {
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
req = &mbx->req_virt->start_rxq;
p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
offsetof(struct mstorm_vf_zone,
@@ -1717,7 +1757,7 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
sizeof(struct eth_rx_prod_data) * req->rx_qid;
}
- qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+ qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
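
The length juggling above is the backwards-compatibility crux: a legacy VF sized its receive buffer for the old default response, so the PF must not reply with the larger start-queue TLV. A compilable sketch of the size selection follows; the struct layouts are illustrative only (the real TLVs live in qed_vf.h), what matters is that the legacy reply is a strict prefix of the new one.

#include <stdint.h>

struct pfvf_tlv_hdr { uint16_t type, length; };		/* illustrative */
struct pfvf_def_resp_tlv { struct pfvf_tlv_hdr hdr; uint8_t status, pad[3]; };
struct pfvf_start_queue_resp_tlv {
	struct pfvf_def_resp_tlv def;
	uint32_t offset;	/* producer/doorbell offset, the new field */
};

/* Legacy clients validate the reply against the old, smaller layout, so
 * the PF caps the TLV length and skips the new offset field for them.
 */
static uint16_t start_queue_resp_length(int b_legacy)
{
	return b_legacy ? sizeof(struct pfvf_def_resp_tlv)
			: sizeof(struct pfvf_start_queue_resp_tlv);
}
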
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
@@ -1728,6 +1768,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_rxq_tlv *req;
+ bool b_legacy_vf = false;
int rc;
memset(&params, 0, sizeof(params));
@@ -1743,13 +1784,27 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
+ /* Legacy VFs keep their producers in a different location, which
+ * they calculate on their own and clean prior to this.
+ */
+ if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+ b_legacy_vf = true;
+ } else {
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+ 0);
+ }
+
rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
vf->vf_queues[req->rx_qid].fw_cid,
&params,
vf->abs_vf_id + 0x10,
req->bd_max_bytes,
req->rxq_addr,
- req->cqe_pbl_addr, req->cqe_pbl_size);
+ req->cqe_pbl_addr, req->cqe_pbl_size,
+ b_legacy_vf);
if (rc) {
status = PFVF_STATUS_FAILURE;
@@ -1760,7 +1815,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
}
out:
- qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
@@ -1769,23 +1824,38 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
{
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
+ bool b_legacy = false;
+ u16 length;
mbx->offset = (u8 *)mbx->reply_virt;
+ /* Using a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one we're now stuck with, since some older clients
+ * assume the size of the previous response.
+ */
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy = true;
+
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
- sizeof(*p_tlv));
+ length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if (status == PFVF_STATUS_SUCCESS) {
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
u16 qid = mbx->req_virt->start_txq.tx_qid;
- p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
- DQ_DEMS_LEGACY);
+ p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
+ DQ_DEMS_LEGACY);
}
- qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
@@ -2045,7 +2115,7 @@ qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
/* Ignore the VF request if we're forcing a vlan */
- if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
p_data->update_inner_vlan_removal_flg = 1;
p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
}
@@ -2340,7 +2410,7 @@ static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
/* In forced mode, we're willing to remove entries - but we don't add
* new ones.
*/
- if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
return 0;
if (p_params->opcode == QED_FILTER_ADD ||
@@ -2374,7 +2444,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
int i;
/* If we're in forced-mode, we don't allow any change */
- if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
return 0;
/* First remove entries and then add new ones */
@@ -2441,8 +2511,8 @@ qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
- int vfid, struct qed_filter_ucast *params)
+static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+ int vfid, struct qed_filter_ucast *params)
{
struct qed_public_vf_info *vf;
@@ -2509,7 +2579,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
}
/* Determine if the unicast filtering is acceptable by the PF */
- if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
/* Once VLAN is forced or PVID is set, do not allow
@@ -2521,7 +2591,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
goto out;
}
- if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
(params.type == QED_FILTER_MAC ||
params.type == QED_FILTER_MAC_VLAN)) {
if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
@@ -2749,7 +2819,7 @@ cleanup:
/* Mark VF for ack and clean pending state */
if (p_vf->state == VF_RESET)
p_vf->state = VF_STOPPED;
- ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ ack_vfs[vfid / 32] |= BIT(vfid % 32);
p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
~(1ULL << (rel_vf_id % 64));
p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
@@ -2759,7 +2829,8 @@ cleanup:
return rc;
}
-int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+static int
+qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 ack_vfs[VF_MAX_STATIC / 32];
int rc = 0;
@@ -2805,7 +2876,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
continue;
vfid = p_vf->abs_vf_id;
- if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ if (BIT(vfid % 32) & p_disabled_vfs[vfid / 32]) {
u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
u16 rel_vf_id = p_vf->relative_vf_id;
@@ -2946,7 +3017,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
}
}
-void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
u64 add_bit = 1ULL << (vfid % 64);
@@ -3064,14 +3135,13 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
vf_info->bulletin.p_virt->valid_bitmap |= feature;
/* Forced MAC will disable MAC_ADDR */
- vf_info->bulletin.p_virt->valid_bitmap &=
- ~(1 << VFPF_BULLETIN_MAC_ADDR);
+ vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
-void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
- u16 pvid, int vfid)
+static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+ u16 pvid, int vfid)
{
struct qed_vf_info *vf_info;
u64 feature;
@@ -3104,7 +3174,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
return !!p_vf_info->vport_instance;
}
-bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
@@ -3126,7 +3196,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
return vf_info->spoof_chk;
}
-int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
struct qed_vf_info *vf;
int rc = -EINVAL;
@@ -3163,13 +3233,14 @@ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
if (!p_vf || !p_vf->bulletin.p_virt)
return NULL;
- if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
return NULL;
return p_vf->bulletin.p_virt->mac;
}
-u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+static u16
+qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
struct qed_vf_info *p_vf;
@@ -3177,7 +3248,7 @@ u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
if (!p_vf || !p_vf->bulletin.p_virt)
return 0;
- if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
return 0;
return p_vf->bulletin.p_virt->pvid;
@@ -3201,7 +3272,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
-int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+static int
+qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
struct qed_vf_info *vf;
u8 vport_id;
@@ -3760,7 +3832,8 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
qed_ptt_release(hwfn, ptt);
}
-void qed_iov_pf_task(struct work_struct *work)
+static void qed_iov_pf_task(struct work_struct *work)
{
struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
iov_task.work);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 9b780b31b15c..abf5bf11f865 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -46,6 +46,17 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
return p_tlv;
}
+static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
+{
+ union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF request status = 0x%x, PF reply status = 0x%x\n",
+ req_status, resp->default_resp.hdr.status);
+
+ mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+}
+
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
@@ -103,16 +114,12 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
"VF <-- PF Timeout [Type %d]\n",
p_req->first_tlv.tl.type);
rc = -EBUSY;
- goto exit;
} else {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"PF response: %d [Type %d]\n",
*done, p_req->first_tlv.tl.type);
}
-exit:
- mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
-
return rc;
}
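
The qed_vf_pf_req_end() helper introduced above centralizes the unlock that qed_send_msg2pf() used to perform on timeout; every request in this file is then converted to funnel through a single exit label, so the mutex taken in qed_vf_pf_prep() is released exactly once on every path. A compilable model of the pattern follows - all names are stand-ins playing the same roles as the driver functions, with simplified signatures.

#include <stdint.h>

#define PFVF_STATUS_SUCCESS	1	/* assumed value */

struct channel {
	int locked;
	uint8_t reply_status;
};

static void req_prep(struct channel *ch)    { ch->locked = 1; }
static void req_end(struct channel *ch)     { ch->locked = 0; /* + log */ }
static int  send_msg2pf(struct channel *ch) { (void)ch; return 0; }

static int some_vf_request(struct channel *ch)
{
	int rc;

	req_prep(ch);			/* channel lock taken here */

	rc = send_msg2pf(ch);
	if (rc)
		goto exit;		/* no early return: lock still held */

	if (ch->reply_status != PFVF_STATUS_SUCCESS) {
		rc = -1;		/* driver uses -EINVAL or -EAGAIN */
		goto exit;
	}

exit:
	req_end(ch);			/* unlock + log on every path */
	return rc;
}
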
@@ -191,6 +198,9 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn,
QED_MSG_IOV, "attempting to acquire resources\n");
+ /* Clear response buffer, as this might be a re-send */
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
@@ -205,9 +215,12 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* PF agrees to allocate our resources */
if (!(resp->pfdev_info.capabilities &
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
- DP_INFO(p_hwfn,
- "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
- return -EINVAL;
+ /* It's possible a legacy PF mistakenly accepted;
+ * we don't care - simply mark it as legacy
+ * and continue.
+ */
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
resources_acquired = true;
@@ -215,27 +228,55 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
attempts < VF_ACQUIRE_THRESH) {
qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
&resp->resc);
+ } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
+ if (pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn,
+ "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR,
+ ETH_HSI_VER_MINOR,
+ pfdev_info->major_fp_hsi);
+ rc = -EINVAL;
+ goto exit;
+ }
- /* Clear response buffer */
- memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
- } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
- pfdev_info->major_fp_hsi &&
- (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
- DP_NOTICE(p_hwfn,
- "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
- pfdev_info->major_fp_hsi,
- pfdev_info->minor_fp_hsi,
- ETH_HSI_VER_MAJOR,
- ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
- return -EINVAL;
+ if (!pfdev_info->major_fp_hsi) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ DP_NOTICE(p_hwfn,
+ "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
+ rc = -EINVAL;
+ goto exit;
+ } else {
+ DP_INFO(p_hwfn,
+ "PF is old - try re-acquire to see if it supports FW-version override\n");
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ continue;
+ }
+ }
+
+ /* If the PF/VF are using the same major version, the PF
+ * must have had its reasons. Simply fail.
+ */
+ DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
+ rc = -EINVAL;
+ goto exit;
} else {
DP_ERR(p_hwfn,
"PF returned error %d to VF acquisition request\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto exit;
}
}
+ /* Mark the PF as legacy, if needed */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
+ p_iov->b_pre_fp_hsi = true;
+
/* Update bulletin board size with response from PF */
p_iov->bulletin.size = resp->bulletin_size;
@@ -253,14 +294,18 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
}
- if (ETH_HSI_VER_MINOR &&
+ if (!p_iov->b_pre_fp_hsi &&
+ ETH_HSI_VER_MINOR &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
DP_INFO(p_hwfn,
"PF is using older fastpath HSI; %02x.%02x is configured\n",
ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
}
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
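
The retry loop above effectively negotiates the VF downward: on PFVF_STATUS_NOT_SUPPORTED it distinguishes a truly incompatible major version, a pre-HSI PF that reports major_fp_hsi == 0 (retry once with VFPF_ACQUIRE_CAP_PRE_FP_HSI set), and a same-major rejection. A compilable model of that decision, with an assumed major-version value and illustrative names:

#include <stdint.h>

#define ETH_HSI_VER_MAJOR	3	/* assumed; see the qed HSI headers */

enum acquire_action { ACQ_FAIL, ACQ_RETRY_AS_LEGACY };

/* Model of the PFVF_STATUS_NOT_SUPPORTED handling in the loop above */
static enum acquire_action on_not_supported(uint8_t pf_major_fp_hsi,
					    int already_offered_pre_fp_hsi)
{
	/* Genuinely incompatible major version: nothing to negotiate */
	if (pf_major_fp_hsi && pf_major_fp_hsi != ETH_HSI_VER_MAJOR)
		return ACQ_FAIL;

	/* Pre-HSI PF: no version in the reply; retry once as legacy */
	if (!pf_major_fp_hsi)
		return already_offered_pre_fp_hsi ? ACQ_FAIL
						  : ACQ_RETRY_AS_LEGACY;

	/* Same major, yet the PF refused - it must have had its reasons */
	return ACQ_FAIL;
}
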
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
@@ -286,31 +331,23 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
/* Allocate vf sriov info */
p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
- if (!p_iov) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ if (!p_iov)
return -ENOMEM;
- }
/* Allocate vf2pf msg */
p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
&p_iov->vf2pf_request_phys,
GFP_KERNEL);
- if (!p_iov->vf2pf_request) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate `vf2pf_request' DMA memory\n");
+ if (!p_iov->vf2pf_request)
goto free_p_iov;
- }
p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union pfvf_tlvs),
&p_iov->pf2vf_reply_phys,
GFP_KERNEL);
- if (!p_iov->pf2vf_reply) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate `pf2vf_reply' DMA memory\n");
+ if (!p_iov->pf2vf_reply)
goto free_vf2pf_request;
- }
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
@@ -347,6 +384,9 @@ free_p_iov:
return -ENOMEM;
}
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_qid,
@@ -374,6 +414,21 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1;
+ /* If the PF is legacy, we'll need to calculate the producers
+ * ourselves as well as clean them.
+ */
+ if (pp_prod && p_iov->b_pre_fp_hsi) {
+ u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview +
+ MSTORM_QZONE_START(p_hwfn->cdev) +
+ hw_qid * MSTORM_QZONE_SIZE;
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)(&init_prod_val));
+ }
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -381,13 +436,15 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->queue_start;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
/* Learn the address of the producer from the response */
- if (pp_prod) {
+ if (pp_prod && !p_iov->b_pre_fp_hsi) {
u32 init_prod_val = 0;
*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
@@ -399,6 +456,8 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)&init_prod_val);
}
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
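
A legacy PF never returns the producer offset in its START_RXQ reply, so the VF derives it itself from the MSTORM queue-zone layout, as the MSTORM_QZONE_START() macro above encodes, and zeroes the producers before the queue starts. A sketch of the same arithmetic with illustrative constant values (the real ones come from the qed register/HSI headers):

#include <stdint.h>

#define PXP_VF_BAR0_START_SDM_ZONE_A	0x4000u	/* assumed */
#define TSTORM_QZONE_SIZE		0x40u	/* assumed */
#define MSTORM_QZONE_SIZE		0x40u	/* assumed */

/* BAR0 offset of an Rx queue's producers under a legacy PF: walk past
 * all TSTORM queue zones, then index the MSTORM zones by the absolute
 * hardware queue id. The VF writes 0 there to clean the rcq/bd
 * producers before enabling the queue.
 */
static uint32_t legacy_rx_prod_offset(uint32_t num_l2_queues, uint8_t hw_qid)
{
	uint32_t mstorm_qzone_start = PXP_VF_BAR0_START_SDM_ZONE_A +
				      TSTORM_QZONE_SIZE * num_l2_queues;

	return mstorm_qzone_start + (uint32_t)hw_qid * MSTORM_QZONE_SIZE;
}
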
@@ -424,10 +483,15 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -470,13 +534,27 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
}
if (pp_doorbell) {
- *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
+ /* Modern PFs provide the actual offset, while legacy
+ * PFs provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+ u32 db_addr;
+
+ db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ db_addr;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
tx_queue_id, *pp_doorbell, resp->offset);
}
exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
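
Tx mirrors the Rx case: a modern PF returns the doorbell offset in the START_TXQ response, while a legacy PF implied it, so the VF derives the offset from the queue's CID. A minimal sketch of the selection; db_addr_from_cid() is a placeholder standing in for the driver's qed_db_addr_vf(), and its arithmetic is not the real mapping.

#include <stdint.h>

static uint32_t db_addr_from_cid(uint8_t cid)
{
	return (uint32_t)cid * 8;	/* placeholder arithmetic */
}

/* Choose the doorbell offset: trust the PF's reply unless it is a
 * legacy PF, in which case compute it locally from the CID.
 */
static uint32_t tx_doorbell_offset(int pf_is_legacy,
				   uint32_t resp_offset, uint8_t cid)
{
	return pf_is_legacy ? db_addr_from_cid(cid) : resp_offset;
}
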
@@ -501,10 +579,15 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -543,10 +626,15 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -567,10 +655,15 @@ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -770,13 +863,18 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
return rc;
}
@@ -797,14 +895,19 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EAGAIN;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EAGAIN;
+ goto exit;
+ }
p_hwfn->b_int_enabled = 0;
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
@@ -828,6 +931,8 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
rc = -EAGAIN;
+ qed_vf_pf_req_end(p_hwfn, rc);
+
p_hwfn->b_int_enabled = 0;
if (p_iov->vf2pf_request)
@@ -896,12 +1001,17 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EAGAIN;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EAGAIN;
+ goto exit;
+ }
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
@@ -920,12 +1030,17 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
@@ -1071,8 +1186,8 @@ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
return false;
}
-bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
- u8 *dst_mac, u8 *p_is_forced)
+static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+ u8 *dst_mac, u8 *p_is_forced)
{
struct qed_bulletin_content *bulletin;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index b23ce58e932f..35db7a28aa13 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -86,7 +86,7 @@ struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
-#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
u64 capabilities;
u8 fw_major;
@@ -551,6 +551,11 @@ struct qed_vf_iov {
/* we set aside a copy of the acquire response */
struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* Set when the PF predates the fp-hsi version scheme; this has
+ * to be propagated as it affects the fastpath.
+ */
+ bool b_pre_fp_hsi;
};
#ifdef CONFIG_QED_SRIOV