Diffstat (limited to 'drivers/net/ethernet/qlogic/qed')
27 files changed, 4801 insertions, 692 deletions
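Two bit-manipulation idioms recur in the hunks below: qed_hw_init() now packs the Storm firmware version into a single 32-bit mailbox parameter, one byte per component, and qed_hw_set_resc_info() aligns the PQ resource count and start offset down to the hardware granularity of 8 with & ~0x7. A minimal, self-contained C sketch of both follows; the FW_*_VERSION values here are illustrative stand-ins, not the real macros from qed_hsi.h.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the FW_*_VERSION macros in qed_hsi.h;
 * the real values come from the firmware headers.
 */
#define FW_MAJOR_VERSION       8
#define FW_MINOR_VERSION       10
#define FW_REVISION_VERSION    5
#define FW_ENGINEERING_VERSION 0

int main(void)
{
	/* One byte per component, major version in the most significant
	 * byte, as qed_hw_init() builds drv_mb_param for
	 * DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER.
	 */
	uint32_t drv_mb_param = (FW_MAJOR_VERSION << 24) |
				(FW_MINOR_VERSION << 16) |
				(FW_REVISION_VERSION << 8) |
				FW_ENGINEERING_VERSION;

	/* PQs are granted in units of 8; qed_hw_set_resc_info() rounds
	 * both the count and the start offset down with & ~0x7 when
	 * either is misaligned.
	 */
	uint32_t resc_num = 77, resc_start = 19;	/* example values */

	if ((resc_num % 8) || (resc_start % 8)) {
		resc_num &= ~0x7;	/* 77 -> 72 */
		resc_start &= ~0x7;	/* 19 -> 16 */
	}

	printf("drv_mb_param=0x%08x num=%u start=%u\n",
	       drv_mb_param, resc_num, resc_start);
	return 0;
}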
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 967acf322c09..729e43768e99 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -6,3 +6,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_RDMA) += qed_roce.o +qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 653bb5735f0c..44c184ebe3b0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -35,6 +35,7 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_WFQ_UNIT 100 +#define ISCSI_BDQ_ID(_port_id) (_port_id) #define QED_WID_SIZE (1024) #define QED_PF_DEMS_SIZE (4) @@ -154,7 +155,10 @@ struct qed_qm_iids { u32 tids; }; -enum QED_RESOURCES { +/* HW / FW resources, output of features supported below, most information + * is received from MFW. + */ +enum qed_resources { QED_SB, QED_L2_QUEUE, QED_VPORT, @@ -166,6 +170,7 @@ enum QED_RESOURCES { QED_RDMA_CNQ_RAM, QED_ILT, QED_LL2_QUEUE, + QED_CMDQS_CQS, QED_RDMA_STATS_QUEUE, QED_MAX_RESC, }; @@ -174,6 +179,7 @@ enum QED_FEATURE { QED_PF_L2_QUE, QED_VF, QED_RDMA_CNQ, + QED_VF_L2_QUE, QED_MAX_FEATURES, }; @@ -195,6 +201,11 @@ enum qed_dev_cap { QED_DEV_CAP_ROCE, }; +enum qed_wol_support { + QED_WOL_SUPPORT_NONE, + QED_WOL_SUPPORT_PME, +}; + struct qed_hw_info { /* PCI personality */ enum qed_pci_personality personality; @@ -226,15 +237,9 @@ struct qed_hw_info { u32 port_mode; u32 hw_mode; unsigned long device_capabilities; -}; + u16 mtu; -struct qed_hw_cid_data { - u32 cid; - bool b_cid_allocated; - - /* Additional identifiers */ - u16 opaque_fid; - u8 vport_id; + enum qed_wol_support b_wol_support; }; /* maximun size of read/write commands (HW limit) */ @@ -378,7 +383,9 @@ struct qed_hwfn { /* Protocol related */ bool using_ll2; struct qed_ll2_info *p_ll2_info; + struct qed_ooo_info *p_ooo_info; struct qed_rdma_info *p_rdma_info; + struct qed_iscsi_info *p_iscsi_info; struct qed_pf_params pf_params; bool b_rdma_enabled_in_prs; @@ -403,9 +410,6 @@ struct qed_hwfn { struct qed_dcbx_info *p_dcbx_info; - struct qed_hw_cid_data *p_tx_cids; - struct qed_hw_cid_data *p_rx_cids; - struct qed_dmae_info dmae_info; /* QM init */ @@ -538,7 +542,9 @@ struct qed_dev { u8 mcp_rev; u8 boot_mode; - u8 wol; + /* WoL related configurations */ + u8 wol_config; + u8 wol_mac[ETH_ALEN]; u32 int_mode; enum qed_coalescing_mode int_coalescing_mode; @@ -578,6 +584,8 @@ struct qed_dev { /* Linux specific here */ struct qede_dev *edev; struct pci_dev *pdev; + u32 flags; +#define QED_FLAG_STORAGE_STARTED (BIT(0)) int msg_enable; struct pci_params pci_params; @@ -591,6 +599,7 @@ struct qed_dev { union { struct qed_common_cb_ops *common; struct qed_eth_cb_ops *eth; + struct qed_iscsi_cb_ops *iscsi; } protocol_ops; void *ops_cookie; @@ -600,7 +609,7 @@ struct qed_dev { struct qed_cb_ll2_info *ll2; u8 ll2_mac_address[ETH_ALEN]; #endif - + DECLARE_HASHTABLE(connections, 10); const struct firmware *firmware; u32 rdma_max_sge; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index edae5fc5fccd..3b2250021c5f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -29,8 +29,10 @@ #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" +#include 
"qed_iscsi.h" #include "qed_ll2.h" #include "qed_mcp.h" +#include "qed_ooo.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" @@ -137,15 +139,6 @@ void qed_resc_free(struct qed_dev *cdev) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - kfree(p_hwfn->p_tx_cids); - p_hwfn->p_tx_cids = NULL; - kfree(p_hwfn->p_rx_cids); - p_hwfn->p_rx_cids = NULL; - } - - for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - qed_cxt_mngr_free(p_hwfn); qed_qm_info_free(p_hwfn); qed_spq_free(p_hwfn); @@ -155,6 +148,10 @@ void qed_resc_free(struct qed_dev *cdev) #ifdef CONFIG_QED_LL2 qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info); #endif + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info); + qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info); + } qed_iov_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info); @@ -411,6 +408,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) int qed_resc_alloc(struct qed_dev *cdev) { + struct qed_iscsi_info *p_iscsi_info; + struct qed_ooo_info *p_ooo_info; #ifdef CONFIG_QED_LL2 struct qed_ll2_info *p_ll2_info; #endif @@ -425,23 +424,6 @@ int qed_resc_alloc(struct qed_dev *cdev) if (!cdev->fw_data) return -ENOMEM; - /* Allocate Memory for the Queue->CID mapping */ - for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - int tx_size = sizeof(struct qed_hw_cid_data) * - RESC_NUM(p_hwfn, QED_L2_QUEUE); - int rx_size = sizeof(struct qed_hw_cid_data) * - RESC_NUM(p_hwfn, QED_L2_QUEUE); - - p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL); - if (!p_hwfn->p_tx_cids) - goto alloc_no_mem; - - p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL); - if (!p_hwfn->p_rx_cids) - goto alloc_no_mem; - } - for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; u32 n_eqes, num_cons; @@ -533,6 +515,16 @@ int qed_resc_alloc(struct qed_dev *cdev) p_hwfn->p_ll2_info = p_ll2_info; } #endif + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + p_iscsi_info = qed_iscsi_alloc(p_hwfn); + if (!p_iscsi_info) + goto alloc_no_mem; + p_hwfn->p_iscsi_info = p_iscsi_info; + p_ooo_info = qed_ooo_alloc(p_hwfn); + if (!p_ooo_info) + goto alloc_no_mem; + p_hwfn->p_ooo_info = p_ooo_info; + } /* DMA info initialization */ rc = qed_dmae_info_alloc(p_hwfn); @@ -586,6 +578,10 @@ void qed_resc_setup(struct qed_dev *cdev) if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info); #endif + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info); + qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info); + } } } @@ -1057,8 +1053,10 @@ int qed_hw_init(struct qed_dev *cdev, bool allow_npar_tx_switch, const u8 *bin_fw_data) { - u32 load_code, param; - int rc, mfw_rc, i; + u32 load_code, param, drv_mb_param; + bool b_default_mtu = true; + struct qed_hwfn *p_hwfn; + int rc = 0, mfw_rc, i; if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); @@ -1074,6 +1072,12 @@ int qed_hw_init(struct qed_dev *cdev, for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + /* If management didn't provide a default, set one of our own */ + if (!p_hwfn->hw_info.mtu) { + p_hwfn->hw_info.mtu = 1500; + b_default_mtu = false; + } + if (IS_VF(cdev)) { p_hwfn->b_int_enabled = 1; continue; @@ -1157,6 +1161,38 @@ int qed_hw_init(struct qed_dev *cdev, p_hwfn->hw_init_done = true; } + if (IS_PF(cdev)) { + p_hwfn = QED_LEADING_HWFN(cdev); + drv_mb_param = 
(FW_MAJOR_VERSION << 24) | + (FW_MINOR_VERSION << 16) | + (FW_REVISION_VERSION << 8) | + (FW_ENGINEERING_VERSION); + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, + drv_mb_param, &load_code, ¶m); + if (rc) + DP_INFO(p_hwfn, "Failed to update firmware version\n"); + + if (!b_default_mtu) { + rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.mtu); + if (rc) + DP_INFO(p_hwfn, + "Failed to update default mtu\n"); + } + + rc = qed_mcp_ov_update_driver_state(p_hwfn, + p_hwfn->p_main_ptt, + QED_OV_DRIVER_STATE_DISABLED); + if (rc) + DP_INFO(p_hwfn, "Failed to update driver state\n"); + + rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, + QED_OV_ESWITCH_VEB); + if (rc) + DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); + } + return 0; } @@ -1324,8 +1360,24 @@ int qed_hw_reset(struct qed_dev *cdev) { int rc = 0; u32 unload_resp, unload_param; + u32 wol_param; int i; + switch (cdev->wol_config) { + case QED_OV_WOL_DISABLED: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED; + break; + case QED_OV_WOL_ENABLED: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED; + break; + default: + DP_NOTICE(cdev, + "Unknown WoL configuration %02x\n", cdev->wol_config); + /* Fallthrough */ + case QED_OV_WOL_DEFAULT: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; + } + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -1354,8 +1406,7 @@ int qed_hw_reset(struct qed_dev *cdev) /* Send unload command to MCP */ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, - DRV_MSG_CODE_UNLOAD_REQ, - DRV_MB_PARAM_UNLOAD_WOL_MCP, + DRV_MSG_CODE_UNLOAD_REQ, wol_param, &unload_resp, &unload_param); if (rc) { DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n"); @@ -1421,6 +1472,7 @@ static void get_function_id(struct qed_hwfn *p_hwfn) static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) { u32 *feat_num = p_hwfn->hw_info.feat_num; + struct qed_sb_cnt_info sb_cnt_info; int num_features = 1; if (IS_ENABLED(CONFIG_QED_RDMA) && @@ -1439,53 +1491,257 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, RESC_NUM(p_hwfn, QED_L2_QUEUE)); - DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n", - feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB), - num_features); + + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); + feat_num[QED_VF_L2_QUE] = + min_t(u32, + RESC_NUM(p_hwfn, QED_L2_QUEUE) - + FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt); + + DP_VERBOSE(p_hwfn, + NETIF_MSG_PROBE, + "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n", + (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), + (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), + (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), + RESC_NUM(p_hwfn, QED_SB), num_features); } -static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) +static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id) +{ + enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; + + switch (res_id) { + case QED_SB: + mfw_res_id = RESOURCE_NUM_SB_E; + break; + case QED_L2_QUEUE: + mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; + break; + case QED_VPORT: + mfw_res_id = RESOURCE_NUM_VPORT_E; + break; + case QED_RSS_ENG: + mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; + break; + case QED_PQ: + mfw_res_id = RESOURCE_NUM_PQ_E; + break; + case QED_RL: + mfw_res_id = RESOURCE_NUM_RL_E; + break; + case QED_MAC: + case QED_VLAN: + /* Each VFC resource can accommodate both a MAC and a VLAN */ + mfw_res_id 
= RESOURCE_VFC_FILTER_E; + break; + case QED_ILT: + mfw_res_id = RESOURCE_ILT_E; + break; + case QED_LL2_QUEUE: + mfw_res_id = RESOURCE_LL2_QUEUE_E; + break; + case QED_RDMA_CNQ_RAM: + case QED_CMDQS_CQS: + /* CNQ/CMDQS are the same resource */ + mfw_res_id = RESOURCE_CQS_E; + break; + case QED_RDMA_STATS_QUEUE: + mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; + break; + default: + break; + } + + return mfw_res_id; +} + +static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn, + enum qed_resources res_id) { - u8 enabled_func_idx = p_hwfn->enabled_func_idx; - u32 *resc_start = p_hwfn->hw_info.resc_start; u8 num_funcs = p_hwfn->num_funcs_on_engine; - u32 *resc_num = p_hwfn->hw_info.resc_num; struct qed_sb_cnt_info sb_cnt_info; - int i, max_vf_vlan_filters; + u32 dflt_resc_num = 0; - memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + switch (res_id) { + case QED_SB: + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); + dflt_resc_num = sb_cnt_info.sb_cnt; + break; + case QED_L2_QUEUE: + dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs; + break; + case QED_VPORT: + dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs; + break; + case QED_RSS_ENG: + dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs; + break; + case QED_PQ: + /* The granularity of the PQs is 8 */ + dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs; + dflt_resc_num &= ~0x7; + break; + case QED_RL: + dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; + break; + case QED_MAC: + case QED_VLAN: + /* Each VFC resource can accommodate both a MAC and a VLAN */ + dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; + break; + case QED_ILT: + dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs; + break; + case QED_LL2_QUEUE: + dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; + break; + case QED_RDMA_CNQ_RAM: + case QED_CMDQS_CQS: + /* CNQ/CMDQS are the same resource */ + dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs; + break; + case QED_RDMA_STATS_QUEUE: + dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs; + break; + default: + break; + } -#ifdef CONFIG_QED_SRIOV - max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS; -#else - max_vf_vlan_filters = 0; -#endif + return dflt_resc_num; +} + +static const char *qed_hw_get_resc_name(enum qed_resources res_id) +{ + switch (res_id) { + case QED_SB: + return "SB"; + case QED_L2_QUEUE: + return "L2_QUEUE"; + case QED_VPORT: + return "VPORT"; + case QED_RSS_ENG: + return "RSS_ENG"; + case QED_PQ: + return "PQ"; + case QED_RL: + return "RL"; + case QED_MAC: + return "MAC"; + case QED_VLAN: + return "VLAN"; + case QED_RDMA_CNQ_RAM: + return "RDMA_CNQ_RAM"; + case QED_ILT: + return "ILT"; + case QED_LL2_QUEUE: + return "LL2_QUEUE"; + case QED_CMDQS_CQS: + return "CMDQS_CQS"; + case QED_RDMA_STATS_QUEUE: + return "RDMA_STATS_QUEUE"; + default: + return "UNKNOWN_RESOURCE"; + } +} - qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); +static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, + enum qed_resources res_id) +{ + u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param; + u32 *p_resc_num, *p_resc_start; + struct resource_info resc_info; + int rc; + + p_resc_num = &RESC_NUM(p_hwfn, res_id); + p_resc_start = &RESC_START(p_hwfn, res_id); + + /* Default values assumes that each function received equal share */ + dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id); + if (!dflt_resc_num) { + DP_ERR(p_hwfn, + "Failed to get default amount for resource %d [%s]\n", + res_id, qed_hw_get_resc_name(res_id)); + return -EINVAL; + } + dflt_resc_start = dflt_resc_num * 
p_hwfn->enabled_func_idx; + + memset(&resc_info, 0, sizeof(resc_info)); + resc_info.res_id = qed_hw_get_mfw_res_id(res_id); + if (resc_info.res_id == RESOURCE_NUM_INVALID) { + DP_ERR(p_hwfn, + "Failed to match resource %d [%s] with the MFW resources\n", + res_id, qed_hw_get_resc_name(res_id)); + return -EINVAL; + } + + rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info, + &mcp_resp, &mcp_param); + if (rc) { + DP_NOTICE(p_hwfn, + "MFW response failure for an allocation request for resource %d [%s]\n", + res_id, qed_hw_get_resc_name(res_id)); + return rc; + } - resc_num[QED_SB] = min_t(u32, - (MAX_SB_PER_PATH_BB / num_funcs), - sb_cnt_info.sb_cnt); - resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs; - resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs; - resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs; - resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs; - resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]); - resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs; - resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) / - num_funcs; - resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs; - resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs; - resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs; - resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB / - num_funcs; - - for (i = 0; i < QED_MAX_RESC; i++) - resc_start[i] = resc_num[i] * enabled_func_idx; + /* Default driver values are applied in the following cases: + * - The resource allocation MB command is not supported by the MFW + * - There is an internal error in the MFW while processing the request + * - The resource ID is unknown to the MFW + */ + if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK && + mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) { + DP_NOTICE(p_hwfn, + "Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n", + res_id, + qed_hw_get_resc_name(res_id), + mcp_resp, dflt_resc_num, dflt_resc_start); + *p_resc_num = dflt_resc_num; + *p_resc_start = dflt_resc_start; + goto out; + } + + /* Special handling for status blocks; Would be revised in future */ + if (res_id == QED_SB) { + resc_info.size -= 1; + resc_info.offset -= p_hwfn->enabled_func_idx; + } + + *p_resc_num = resc_info.size; + *p_resc_start = resc_info.offset; + +out: + /* PQs have to divide by 8 [that's the HW granularity]. + * Reduce number so it would fit. 
+ */ + if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) { + DP_INFO(p_hwfn, + "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", + *p_resc_num, + (*p_resc_num) & ~0x7, + *p_resc_start, (*p_resc_start) & ~0x7); + *p_resc_num &= ~0x7; + *p_resc_start &= ~0x7; + } + + return 0; +} + +static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) +{ + u8 res_id; + int rc; + + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { + rc = qed_hw_set_resc_info(p_hwfn, res_id); + if (rc) + return rc; + } /* Sanity for ILT */ - if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) { + if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) { DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n", RESC_START(p_hwfn, QED_ILT), RESC_END(p_hwfn, QED_ILT) - 1); @@ -1495,34 +1751,12 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) qed_hw_set_feat(p_hwfn); DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "The numbers for each resource are:\n" - "SB = %d start = %d\n" - "L2_QUEUE = %d start = %d\n" - "VPORT = %d start = %d\n" - "PQ = %d start = %d\n" - "RL = %d start = %d\n" - "MAC = %d start = %d\n" - "VLAN = %d start = %d\n" - "ILT = %d start = %d\n" - "LL2_QUEUE = %d start = %d\n", - p_hwfn->hw_info.resc_num[QED_SB], - p_hwfn->hw_info.resc_start[QED_SB], - p_hwfn->hw_info.resc_num[QED_L2_QUEUE], - p_hwfn->hw_info.resc_start[QED_L2_QUEUE], - p_hwfn->hw_info.resc_num[QED_VPORT], - p_hwfn->hw_info.resc_start[QED_VPORT], - p_hwfn->hw_info.resc_num[QED_PQ], - p_hwfn->hw_info.resc_start[QED_PQ], - p_hwfn->hw_info.resc_num[QED_RL], - p_hwfn->hw_info.resc_start[QED_RL], - p_hwfn->hw_info.resc_num[QED_MAC], - p_hwfn->hw_info.resc_start[QED_MAC], - p_hwfn->hw_info.resc_num[QED_VLAN], - p_hwfn->hw_info.resc_start[QED_VLAN], - p_hwfn->hw_info.resc_num[QED_ILT], - p_hwfn->hw_info.resc_start[QED_ILT], - RESC_NUM(p_hwfn, QED_LL2_QUEUE), - RESC_START(p_hwfn, QED_LL2_QUEUE)); + "The numbers for each resource are:\n"); + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n", + qed_hw_get_resc_name(res_id), + RESC_NUM(p_hwfn, res_id), + RESC_START(p_hwfn, res_id)); return 0; } @@ -1801,6 +2035,9 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_get_num_funcs(p_hwfn, p_ptt); + if (qed_mcp_is_init(p_hwfn)) + p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; + return qed_hw_get_resc(p_hwfn); } @@ -1975,8 +2212,13 @@ int qed_hw_prepare(struct qed_dev *cdev, void qed_hw_remove(struct qed_dev *cdev) { + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); int i; + if (IS_PF(cdev)) + qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, + QED_OV_DRIVER_STATE_NOT_LOADED); + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -2037,12 +2279,12 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) { void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; u32 page_cnt = p_chain->page_cnt, i, pbl_size; - u8 *p_pbl_virt = p_chain->pbl.p_virt_table; + u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table; if (!pp_virt_addr_tbl) return; - if (!p_chain->pbl.p_virt_table) + if (!p_pbl_virt) goto out; for (i = 0; i < page_cnt; i++) { @@ -2060,7 +2302,8 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; dma_free_coherent(&cdev->pdev->dev, pbl_size, - p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table); + p_chain->pbl_sp.p_virt_table, + p_chain->pbl_sp.p_phys_table); out: vfree(p_chain->pbl.pp_virt_addr_tbl); } diff --git 
a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 72eee29c677f..785ab03683eb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -727,9 +727,6 @@ struct core_tx_bd_flags { #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 -#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1 -#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12 - }; struct core_tx_bd { @@ -8529,6 +8526,41 @@ struct mdump_config_stc { u32 valid_logs; }; +enum resource_id_enum { + RESOURCE_NUM_SB_E = 0, + RESOURCE_NUM_L2_QUEUE_E = 1, + RESOURCE_NUM_VPORT_E = 2, + RESOURCE_NUM_VMQ_E = 3, + RESOURCE_FACTOR_NUM_RSS_PF_E = 4, + RESOURCE_FACTOR_RSS_PER_VF_E = 5, + RESOURCE_NUM_RL_E = 6, + RESOURCE_NUM_PQ_E = 7, + RESOURCE_NUM_VF_E = 8, + RESOURCE_VFC_FILTER_E = 9, + RESOURCE_ILT_E = 10, + RESOURCE_CQS_E = 11, + RESOURCE_GFT_PROFILES_E = 12, + RESOURCE_NUM_TC_E = 13, + RESOURCE_NUM_RSS_ENGINES_E = 14, + RESOURCE_LL2_QUEUE_E = 15, + RESOURCE_RDMA_STATS_QUEUE_E = 16, + RESOURCE_MAX_NUM, + RESOURCE_NUM_INVALID = 0xFFFFFFFF +}; + +/* Resource ID is to be filled by the driver in the MB request + * Size, offset & flags to be filled by the MFW in the MB response + */ +struct resource_info { + enum resource_id_enum res_id; + u32 size; /* number of allocated resources */ + u32 offset; /* Offset of the 1st resource */ + u32 vf_size; + u32 vf_offset; + u32 flags; +#define RESOURCE_ELEMENT_STRICT (1 << 0) +}; + union drv_union_data { u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; struct mcp_mac wol_mac; @@ -8546,9 +8578,9 @@ union drv_union_data { struct drv_version_stc drv_version; struct lan_stats_stc lan_stats; - u64 reserved_stats[11]; struct ocbb_data_stc ocbb_info; struct temperature_status_stc temp_info; + struct resource_info resource; struct bist_nvm_image_att nvm_image_att; struct mdump_config_stc mdump_config; }; @@ -8564,9 +8596,19 @@ struct public_drv_mb { #define DRV_MSG_CODE_INIT_PHY 0x22000000 #define DRV_MSG_CODE_LINK_RESET 0x23000000 #define DRV_MSG_CODE_SET_DCBX 0x25000000 +#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000 +#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000 +#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000 +#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000 +#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 +#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000 +#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 +#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 +#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 #define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 #define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 @@ -8574,6 +8616,13 @@ struct public_drv_mb { #define DRV_MSG_CODE_MCP_RESET 0x00090000 #define DRV_MSG_CODE_SET_VERSION 0x000f0000 #define DRV_MSG_CODE_MCP_HALT 0x00100000 +#define DRV_MSG_CODE_SET_VMAC 0x00110000 +#define DRV_MSG_CODE_GET_VMAC 0x00120000 +#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 +#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30 +#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 +#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 +#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 #define DRV_MSG_CODE_GET_STATS 0x00130000 #define DRV_MSG_CODE_STATS_TYPE_LAN 1 @@ -8585,11 +8634,16 @@ struct public_drv_mb { #define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define 
DRV_MSG_CODE_SET_LED_MODE 0x00200000 +#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000 +#define DRV_MSG_CODE_OS_WOL 0x002e0000 #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; -#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 +#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 #define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 @@ -8602,13 +8656,59 @@ struct public_drv_mb { #define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 #define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 +#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0 +#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F +#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0 +#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 +#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 +#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 + +#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF +#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 +#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF + +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 + +#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0 +#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF + +#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \ + DRV_MB_PARAM_WOL_DISABLED | \ + DRV_MB_PARAM_WOL_ENABLED) +#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP +#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED +#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED + +#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \ + DRV_MB_PARAM_ESWITCH_MODE_VEB | \ + DRV_MB_PARAM_ESWITCH_MODE_VEPA) +#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0 +#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 +#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + /* Resource Allocation params - Driver version support */ +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + #define DRV_MB_PARAM_BIST_REGISTER_TEST 1 #define DRV_MB_PARAM_BIST_CLOCK_TEST 2 +#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 +#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 #define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 #define DRV_MB_PARAM_BIST_RC_PASSED 1 @@ -8617,6 +8717,8 @@ struct public_drv_mb { #define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 #define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00 u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 @@ -8631,15 +8733,27 @@ struct public_drv_mb { #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 #define FW_MSG_CODE_DRV_UNLOAD_DONE 
0x21100000 +#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000 +#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000 +#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000 #define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 #define FW_MSG_CODE_NVM_OK 0x00010000 #define FW_MSG_CODE_OK 0x00160000 +#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000 +#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000 + #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 fw_mb_param; + /* get pf rdma protocol command responce */ +#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 +#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 +#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 +#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 + u32 drv_pulse_mb; #define DRV_PULSE_SEQ_MASK 0x00007fff #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 2adedc6fb6cf..c68dbf7092b1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1377,7 +1377,7 @@ static const char *attn_master_to_str(u8 master) case 9: return "DBU"; case 10: return "DMAE"; default: - return "Unkown"; + return "Unknown"; } } @@ -1555,7 +1555,7 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) DORQ_REG_DB_DROP_DETAILS); DP_INFO(p_hwfn->cdev, - "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", + "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_DETAILS_ADDRESS), (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK), @@ -3030,6 +3030,31 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } } } + + /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect + * the number of VF SBs [especially for first VF on engine, as we can't + * diffrentiate between empty entries and its entries]. + * Since we don't really support more SBs than VFs today, prevent any + * such configuration by sanitizing the number of SBs to equal the + * number of VFs. + */ + if (IS_PF_SRIOV(p_hwfn)) { + u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs; + + if (total_vfs < p_igu_info->free_blks) { + DP_VERBOSE(p_hwfn, + (NETIF_MSG_INTR | QED_MSG_IOV), + "Limiting number of SBs for IOV - %04x --> %04x\n", + p_igu_info->free_blks, + p_hwfn->cdev->p_iov_info->total_vfs); + p_igu_info->free_blks = total_vfs; + } else if (total_vfs > p_igu_info->free_blks) { + DP_NOTICE(p_hwfn, + "IGU has only %04x SBs for VFs while the device has %04x VFs\n", + p_igu_info->free_blks, total_vfs); + return -EINVAL; + } + } p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; DP_VERBOSE( @@ -3163,7 +3188,12 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) return sb_id - p_info->igu_base_sb; } else if ((sb_id >= p_info->igu_base_sb_iov) && (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { - return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt; + /* We want the first VF queue to be adjacent to the + * last PF queue. Since L2 queues can be partial to + * SBs, we'll use the feature instead. 
+ */ + return sb_id - p_info->igu_base_sb_iov + + FEAT_NUM(p_hwfn, QED_PF_L2_QUE); } else { DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id); return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c new file mode 100644 index 000000000000..17a70122df05 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -0,0 +1,1277 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <asm/param.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/string.h> +#include <linux/version.h> +#include <linux/workqueue.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_iscsi_if.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_iscsi.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_reg_addr.h" + +struct qed_iscsi_conn { + struct list_head list_entry; + bool free_on_delete; + + u16 conn_id; + u32 icid; + u32 fw_cid; + + u8 layer_code; + u8 offl_flags; + u8 connect_mode; + u32 initial_ack; + dma_addr_t sq_pbl_addr; + struct qed_chain r2tq; + struct qed_chain xhq; + struct qed_chain uhq; + + struct tcp_upload_params *tcp_upload_params_virt_addr; + dma_addr_t tcp_upload_params_phys_addr; + struct scsi_terminate_extra_params *queue_cnts_virt_addr; + dma_addr_t queue_cnts_phys_addr; + dma_addr_t syn_phy_addr; + + u16 syn_ip_payload_length; + u8 local_mac[6]; + u8 remote_mac[6]; + u16 vlan_id; + u8 tcp_flags; + u8 ip_version; + u32 remote_ip[4]; + u32 local_ip[4]; + u8 ka_max_probe_cnt; + u8 dup_ack_theshold; + u32 rcv_next; + u32 snd_una; + u32 snd_next; + u32 snd_max; + u32 snd_wnd; + u32 rcv_wnd; + u32 snd_wl1; + u32 cwnd; + u32 ss_thresh; + u16 srtt; + u16 rtt_var; + u32 ts_time; + u32 ts_recent; + u32 ts_recent_age; + u32 total_rt; + u32 ka_timeout_delta; + u32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + u32 flow_label; + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; + u32 initial_rcv_wnd; + u8 ttl; + u8 tos_or_tc; + u16 remote_port; + u16 local_port; + u16 mss; + u8 snd_wnd_scale; + u8 rcv_wnd_scale; + u32 ts_ticks_per_second; + u16 da_timeout_value; + u8 ack_frequency; + + u8 update_flag; + u8 default_cq; + u32 max_seq_size; + u32 max_recv_pdu_length; + u32 max_send_pdu_length; + u32 first_seq_length; + u32 exp_stat_sn; + u32 stat_sn; + u16 physical_q0; + u16 physical_q1; + u8 abortive_dsconnect; +}; + +static int +qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr, + void *event_context, iscsi_event_cb_t async_event_cb) +{ + struct iscsi_init_ramrod_params *p_ramrod = NULL; + struct scsi_init_func_queues *p_queue = NULL; + struct qed_iscsi_pf_params *p_params = NULL; + struct iscsi_spe_func_init *p_init = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = 
0; + u32 dval; + u16 val; + u8 i; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_INIT_FUNC, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_init; + p_init = &p_ramrod->iscsi_init_spe; + p_params = &p_hwfn->pf_params.iscsi_pf_params; + p_queue = &p_init->q_params; + + SET_FIELD(p_init->hdr.flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE); + p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC; + + val = p_params->half_way_close_timeout; + p_init->half_way_close_timeout = cpu_to_le16(val); + p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; + p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; + p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; + p_init->func_params.log_page_size = p_params->log_page_size; + val = p_params->num_tasks; + p_init->func_params.num_tasks = cpu_to_le16(val); + p_init->debug_mode.flags = p_params->debug_mode; + + DMA_REGPAIR_LE(p_queue->glbl_q_params_addr, + p_params->glbl_q_params_addr); + + val = p_params->cq_num_entries; + p_queue->cq_num_entries = cpu_to_le16(val); + val = p_params->cmdq_num_entries; + p_queue->cmdq_num_entries = cpu_to_le16(val); + p_queue->num_queues = p_params->num_queues; + dval = (u8)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; + p_queue->queue_relative_offset = (u8)dval; + p_queue->cq_sb_pi = p_params->gl_rq_pi; + p_queue->cmdq_sb_pi = p_params->gl_cmd_pi; + + for (i = 0; i < p_params->num_queues; i++) { + val = p_hwfn->sbs_info[i]->igu_sb_id; + p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val); + } + + p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id); + + DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ], + p_params->bdq_pbl_base_addr[BDQ_ID_RQ]); + p_queue->bdq_pbl_num_entries[BDQ_ID_RQ] = + p_params->bdq_pbl_num_entries[BDQ_ID_RQ]; + val = p_params->bdq_xoff_threshold[BDQ_ID_RQ]; + p_queue->bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(val); + val = p_params->bdq_xon_threshold[BDQ_ID_RQ]; + p_queue->bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(val); + + DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_IMM_DATA], + p_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); + p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = + p_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; + val = p_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]; + p_queue->bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val); + val = p_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]; + p_queue->bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val); + val = p_params->rq_buffer_size; + p_queue->rq_buffer_size = cpu_to_le16(val); + if (p_params->is_target) { + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + if (p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]) + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1); + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1); + } else { + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + } + p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer); + val = p_params->tx_sws_timer; + p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val); + p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt; + + p_hwfn->p_iscsi_info->event_context = event_context; + p_hwfn->p_iscsi_info->event_cb = 
async_event_cb; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_conn_offload *p_ramrod = NULL; + struct tcp_offload_params_opt2 *p_tcp2 = NULL; + struct tcp_offload_params *p_tcp = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + union qed_qm_pq_params pq_params; + u16 pq0_id = 0, pq1_id = 0; + dma_addr_t r2tq_pbl_addr; + dma_addr_t xhq_pbl_addr; + dma_addr_t uhq_pbl_addr; + int rc = 0; + u32 dval; + u16 wval; + u8 i; + u16 *p; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_offload; + + /* Transmission PQ is the first of the PF */ + memset(&pq_params, 0, sizeof(pq_params)); + pq0_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params); + p_conn->physical_q0 = cpu_to_le16(pq0_id); + p_ramrod->iscsi.physical_q0 = cpu_to_le16(pq0_id); + + /* iSCSI Pure-ACK PQ */ + pq_params.iscsi.q_idx = 1; + pq1_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params); + p_conn->physical_q1 = cpu_to_le16(pq1_id); + p_ramrod->iscsi.physical_q1 = cpu_to_le16(pq1_id); + + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN; + SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE, + p_conn->layer_code); + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); + + DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr); + + r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq); + DMA_REGPAIR_LE(p_ramrod->iscsi.r2tq_pbl_addr, r2tq_pbl_addr); + + xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq); + DMA_REGPAIR_LE(p_ramrod->iscsi.xhq_pbl_addr, xhq_pbl_addr); + + uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq); + DMA_REGPAIR_LE(p_ramrod->iscsi.uhq_pbl_addr, uhq_pbl_addr); + + p_ramrod->iscsi.initial_ack = cpu_to_le32(p_conn->initial_ack); + p_ramrod->iscsi.flags = p_conn->offl_flags; + p_ramrod->iscsi.default_cq = p_conn->default_cq; + p_ramrod->iscsi.stat_sn = cpu_to_le32(p_conn->stat_sn); + + if (!GET_FIELD(p_ramrod->iscsi.flags, + ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B)) { + p_tcp = &p_ramrod->tcp; + + p = (u16 *)p_conn->local_mac; + p_tcp->local_mac_addr_hi = swab16(get_unaligned(p)); + p_tcp->local_mac_addr_mid = swab16(get_unaligned(p + 1)); + p_tcp->local_mac_addr_lo = swab16(get_unaligned(p + 2)); + + p = (u16 *)p_conn->remote_mac; + p_tcp->remote_mac_addr_hi = swab16(get_unaligned(p)); + p_tcp->remote_mac_addr_mid = swab16(get_unaligned(p + 1)); + p_tcp->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); + + p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); + + p_tcp->flags = p_conn->tcp_flags; + p_tcp->ip_version = p_conn->ip_version; + for (i = 0; i < 4; i++) { + dval = p_conn->remote_ip[i]; + p_tcp->remote_ip[i] = cpu_to_le32(dval); + dval = p_conn->local_ip[i]; + p_tcp->local_ip[i] = cpu_to_le32(dval); + } + p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt; + p_tcp->dup_ack_theshold = p_conn->dup_ack_theshold; + + p_tcp->rcv_next = cpu_to_le32(p_conn->rcv_next); + p_tcp->snd_una = cpu_to_le32(p_conn->snd_una); + p_tcp->snd_next = cpu_to_le32(p_conn->snd_next); + 
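+ /* The remaining assignments mirror the connection's full TCP state,
+  * i.e. sequence space, window sizes, RTT estimates and the
+  * keep-alive/retransmit timers, so the firmware can take over an
+  * already-established connection mid-stream.
+  */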
p_tcp->snd_max = cpu_to_le32(p_conn->snd_max); + p_tcp->snd_wnd = cpu_to_le32(p_conn->snd_wnd); + p_tcp->rcv_wnd = cpu_to_le32(p_conn->rcv_wnd); + p_tcp->snd_wl1 = cpu_to_le32(p_conn->snd_wl1); + p_tcp->cwnd = cpu_to_le32(p_conn->cwnd); + p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh); + p_tcp->srtt = cpu_to_le16(p_conn->srtt); + p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var); + p_tcp->ts_time = cpu_to_le32(p_conn->ts_time); + p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent); + p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age); + p_tcp->total_rt = cpu_to_le32(p_conn->total_rt); + dval = p_conn->ka_timeout_delta; + p_tcp->ka_timeout_delta = cpu_to_le32(dval); + dval = p_conn->rt_timeout_delta; + p_tcp->rt_timeout_delta = cpu_to_le32(dval); + p_tcp->dup_ack_cnt = p_conn->dup_ack_cnt; + p_tcp->snd_wnd_probe_cnt = p_conn->snd_wnd_probe_cnt; + p_tcp->ka_probe_cnt = p_conn->ka_probe_cnt; + p_tcp->rt_cnt = p_conn->rt_cnt; + p_tcp->flow_label = cpu_to_le32(p_conn->flow_label); + p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout); + p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval); + p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time); + dval = p_conn->initial_rcv_wnd; + p_tcp->initial_rcv_wnd = cpu_to_le32(dval); + p_tcp->ttl = p_conn->ttl; + p_tcp->tos_or_tc = p_conn->tos_or_tc; + p_tcp->remote_port = cpu_to_le16(p_conn->remote_port); + p_tcp->local_port = cpu_to_le16(p_conn->local_port); + p_tcp->mss = cpu_to_le16(p_conn->mss); + p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale; + p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale; + dval = p_conn->ts_ticks_per_second; + p_tcp->ts_ticks_per_second = cpu_to_le32(dval); + wval = p_conn->da_timeout_value; + p_tcp->da_timeout_value = cpu_to_le16(wval); + p_tcp->ack_frequency = p_conn->ack_frequency; + p_tcp->connect_mode = p_conn->connect_mode; + } else { + p_tcp2 = + &((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp; + + p = (u16 *)p_conn->local_mac; + p_tcp2->local_mac_addr_hi = swab16(get_unaligned(p)); + p_tcp2->local_mac_addr_mid = swab16(get_unaligned(p + 1)); + p_tcp2->local_mac_addr_lo = swab16(get_unaligned(p + 2)); + + p = (u16 *)p_conn->remote_mac; + p_tcp2->remote_mac_addr_hi = swab16(get_unaligned(p)); + p_tcp2->remote_mac_addr_mid = swab16(get_unaligned(p + 1)); + p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); + + p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id); + p_tcp2->flags = p_conn->tcp_flags; + + p_tcp2->ip_version = p_conn->ip_version; + for (i = 0; i < 4; i++) { + dval = p_conn->remote_ip[i]; + p_tcp2->remote_ip[i] = cpu_to_le32(dval); + dval = p_conn->local_ip[i]; + p_tcp2->local_ip[i] = cpu_to_le32(dval); + } + + p_tcp2->flow_label = cpu_to_le32(p_conn->flow_label); + p_tcp2->ttl = p_conn->ttl; + p_tcp2->tos_or_tc = p_conn->tos_or_tc; + p_tcp2->remote_port = cpu_to_le16(p_conn->remote_port); + p_tcp2->local_port = cpu_to_le16(p_conn->local_port); + p_tcp2->mss = cpu_to_le16(p_conn->mss); + p_tcp2->rcv_wnd_scale = p_conn->rcv_wnd_scale; + p_tcp2->connect_mode = p_conn->connect_mode; + wval = p_conn->syn_ip_payload_length; + p_tcp2->syn_ip_payload_length = cpu_to_le16(wval); + p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr); + p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr); + } + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_conn_update_ramrod_params *p_ramrod = NULL; + struct qed_spq_entry *p_ent = 
NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + u32 dval; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_UPDATE_CONN, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_update; + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN; + SET_FIELD(p_ramrod->hdr.flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); + p_ramrod->flags = p_conn->update_flag; + p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size); + dval = p_conn->max_recv_pdu_length; + p_ramrod->max_recv_pdu_length = cpu_to_le32(dval); + dval = p_conn->max_send_pdu_length; + p_ramrod->max_send_pdu_length = cpu_to_le32(dval); + dval = p_conn->first_seq_length; + p_ramrod->first_seq_length = cpu_to_le32(dval); + p_ramrod->exp_stat_sn = cpu_to_le32(p_conn->exp_stat_sn); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_conn_termination *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_TERMINATION_CONN, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_terminate; + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN; + SET_FIELD(p_ramrod->hdr.flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); + p_ramrod->abortive = p_conn->abortive_dsconnect; + + DMA_REGPAIR_LE(p_ramrod->query_params_addr, + p_conn->tcp_upload_params_phys_addr); + DMA_REGPAIR_LE(p_ramrod->queue_cnts_addr, p_conn->queue_cnts_phys_addr); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_slow_path_hdr *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_CLEAR_SQ, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_empty; + p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ; + SET_FIELD(p_ramrod->flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_func_dstry *p_ramrod = NULL; + 
struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_DESTROY_FUNC, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_destroy; + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) +{ + return (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(cid, DQ_DEMS_LEGACY); +} + +static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id); + + return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, + bdq_id); +} + +static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id); + + return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM + + TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, + bdq_id); +} + +static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn) +{ + if (!p_conn->queue_cnts_virt_addr) + goto nomem; + memset(p_conn->queue_cnts_virt_addr, 0, + sizeof(*p_conn->queue_cnts_virt_addr)); + + if (!p_conn->tcp_upload_params_virt_addr) + goto nomem; + memset(p_conn->tcp_upload_params_virt_addr, 0, + sizeof(*p_conn->tcp_upload_params_virt_addr)); + + if (!p_conn->r2tq.p_virt_addr) + goto nomem; + qed_chain_pbl_zero_mem(&p_conn->r2tq); + + if (!p_conn->uhq.p_virt_addr) + goto nomem; + qed_chain_pbl_zero_mem(&p_conn->uhq); + + if (!p_conn->xhq.p_virt_addr) + goto nomem; + qed_chain_pbl_zero_mem(&p_conn->xhq); + + return 0; +nomem: + return -ENOMEM; +} + +static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn **p_out_conn) +{ + u16 uhq_num_elements = 0, xhq_num_elements = 0, r2tq_num_elements = 0; + struct scsi_terminate_extra_params *p_q_cnts = NULL; + struct qed_iscsi_pf_params *p_params = NULL; + struct tcp_upload_params *p_tcp = NULL; + struct qed_iscsi_conn *p_conn = NULL; + int rc = 0; + + /* Try finding a free connection that can be used */ + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + if (!list_empty(&p_hwfn->p_iscsi_info->free_list)) + p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list, + struct qed_iscsi_conn, list_entry); + if (p_conn) { + list_del(&p_conn->list_entry); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + *p_out_conn = p_conn; + return 0; + } + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + + /* Need to allocate a new connection */ + p_params = &p_hwfn->pf_params.iscsi_pf_params; + + p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); + if (!p_conn) + return -ENOMEM; + + p_q_cnts = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*p_q_cnts), + &p_conn->queue_cnts_phys_addr, + GFP_KERNEL); + if (!p_q_cnts) + goto nomem_queue_cnts_param; + p_conn->queue_cnts_virt_addr = p_q_cnts; + + p_tcp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*p_tcp), + &p_conn->tcp_upload_params_phys_addr, + GFP_KERNEL); + if (!p_tcp) + goto nomem_upload_param; + p_conn->tcp_upload_params_virt_addr = p_tcp; + + r2tq_num_elements = p_params->num_r2tq_pages_in_ring * + 
QED_CHAIN_PAGE_SIZE / 0x80; + rc = qed_chain_alloc(p_hwfn->cdev, + QED_CHAIN_USE_TO_CONSUME_PRODUCE, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, + r2tq_num_elements, 0x80, &p_conn->r2tq); + if (rc) + goto nomem_r2tq; + + uhq_num_elements = p_params->num_uhq_pages_in_ring * + QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe); + rc = qed_chain_alloc(p_hwfn->cdev, + QED_CHAIN_USE_TO_CONSUME_PRODUCE, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, + uhq_num_elements, + sizeof(struct iscsi_uhqe), &p_conn->uhq); + if (rc) + goto nomem_uhq; + + xhq_num_elements = uhq_num_elements; + rc = qed_chain_alloc(p_hwfn->cdev, + QED_CHAIN_USE_TO_CONSUME_PRODUCE, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, + xhq_num_elements, + sizeof(struct iscsi_xhqe), &p_conn->xhq); + if (rc) + goto nomem; + + p_conn->free_on_delete = true; + *p_out_conn = p_conn; + return 0; + +nomem: + qed_chain_free(p_hwfn->cdev, &p_conn->uhq); +nomem_uhq: + qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); +nomem_r2tq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct tcp_upload_params), + p_conn->tcp_upload_params_virt_addr, + p_conn->tcp_upload_params_phys_addr); +nomem_upload_param: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct scsi_terminate_extra_params), + p_conn->queue_cnts_virt_addr, + p_conn->queue_cnts_phys_addr); +nomem_queue_cnts_param: + kfree(p_conn); + + return -ENOMEM; +} + +static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_in_conn, + struct qed_iscsi_conn **p_out_conn) +{ + struct qed_iscsi_conn *p_conn = NULL; + int rc = 0; + u32 icid; + + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ISCSI, &icid); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + if (rc) + return rc; + + /* Use input connection or allocate a new one */ + if (p_in_conn) + p_conn = p_in_conn; + else + rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn); + + if (!rc) + rc = qed_iscsi_setup_connection(p_hwfn, p_conn); + + if (rc) { + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + qed_cxt_release_cid(p_hwfn, icid); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + return rc; + } + + p_conn->icid = icid; + p_conn->conn_id = (u16)icid; + p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; + + *p_out_conn = p_conn; + + return rc; +} + +static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn) +{ + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + list_add_tail(&p_conn->list_entry, &p_hwfn->p_iscsi_info->free_list); + qed_cxt_release_cid(p_hwfn, p_conn->icid); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); +} + +struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_iscsi_info *p_iscsi_info; + + p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL); + if (!p_iscsi_info) + return NULL; + + INIT_LIST_HEAD(&p_iscsi_info->free_list); + return p_iscsi_info; +} + +void qed_iscsi_setup(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info) +{ + spin_lock_init(&p_iscsi_info->lock); +} + +void qed_iscsi_free(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info) +{ + kfree(p_iscsi_info); +} + +static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct tstorm_iscsi_stats_drv tstats; + u32 tstats_addr; + + memset(&tstats, 0, sizeof(tstats)); + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, 
sizeof(tstats)); + + p_stats->iscsi_rx_bytes_cnt = + HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt); + p_stats->iscsi_rx_packet_cnt = + HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt); + p_stats->iscsi_cmdq_threshold_cnt = + le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt); + p_stats->iscsi_rq_threshold_cnt = + le32_to_cpu(tstats.iscsi_rq_threshold_cnt); + p_stats->iscsi_immq_threshold_cnt = + le32_to_cpu(tstats.iscsi_immq_threshold_cnt); +} + +static void _qed_iscsi_get_mstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct mstorm_iscsi_stats_drv mstats; + u32 mstats_addr; + + memset(&mstats, 0, sizeof(mstats)); + mstats_addr = BAR0_MAP_REG_MSDM_RAM + + MSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, sizeof(mstats)); + + p_stats->iscsi_rx_dropped_pdus_task_not_valid = + HILO_64_REGPAIR(mstats.iscsi_rx_dropped_pdus_task_not_valid); +} + +static void _qed_iscsi_get_ustats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct ustorm_iscsi_stats_drv ustats; + u32 ustats_addr; + + memset(&ustats, 0, sizeof(ustats)); + ustats_addr = BAR0_MAP_REG_USDM_RAM + + USTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats)); + + p_stats->iscsi_rx_data_pdu_cnt = + HILO_64_REGPAIR(ustats.iscsi_rx_data_pdu_cnt); + p_stats->iscsi_rx_r2t_pdu_cnt = + HILO_64_REGPAIR(ustats.iscsi_rx_r2t_pdu_cnt); + p_stats->iscsi_rx_total_pdu_cnt = + HILO_64_REGPAIR(ustats.iscsi_rx_total_pdu_cnt); +} + +static void _qed_iscsi_get_xstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct xstorm_iscsi_stats_drv xstats; + u32 xstats_addr; + + memset(&xstats, 0, sizeof(xstats)); + xstats_addr = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &xstats, xstats_addr, sizeof(xstats)); + + p_stats->iscsi_tx_go_to_slow_start_event_cnt = + HILO_64_REGPAIR(xstats.iscsi_tx_go_to_slow_start_event_cnt); + p_stats->iscsi_tx_fast_retransmit_event_cnt = + HILO_64_REGPAIR(xstats.iscsi_tx_fast_retransmit_event_cnt); +} + +static void _qed_iscsi_get_ystats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct ystorm_iscsi_stats_drv ystats; + u32 ystats_addr; + + memset(&ystats, 0, sizeof(ystats)); + ystats_addr = BAR0_MAP_REG_YSDM_RAM + + YSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &ystats, ystats_addr, sizeof(ystats)); + + p_stats->iscsi_tx_data_pdu_cnt = + HILO_64_REGPAIR(ystats.iscsi_tx_data_pdu_cnt); + p_stats->iscsi_tx_r2t_pdu_cnt = + HILO_64_REGPAIR(ystats.iscsi_tx_r2t_pdu_cnt); + p_stats->iscsi_tx_total_pdu_cnt = + HILO_64_REGPAIR(ystats.iscsi_tx_total_pdu_cnt); +} + +static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct pstorm_iscsi_stats_drv pstats; + u32 pstats_addr; + + memset(&pstats, 0, sizeof(pstats)); + pstats_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); + + p_stats->iscsi_tx_bytes_cnt = + HILO_64_REGPAIR(pstats.iscsi_tx_bytes_cnt); + p_stats->iscsi_tx_packet_cnt = + HILO_64_REGPAIR(pstats.iscsi_tx_packet_cnt); +} + +static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn, + struct qed_iscsi_stats *stats) +{ + struct qed_ptt *p_ptt; + + memset(stats, 0, sizeof(*stats)); + + 
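+ /* Each per-Storm helper above copies its counters directly out of
+  * the Storm's SDM RAM region over BAR0, which is why a PTT window
+  * must be acquired before the statistics can be read.
+  */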
p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + return -EAGAIN; + } + + _qed_iscsi_get_tstats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_mstats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_ustats(p_hwfn, p_ptt, stats); + + _qed_iscsi_get_xstats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_ystats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_pstats(p_hwfn, p_ptt, stats); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +}
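Each per-storm helper above ends the same way: a statistics block is copied out of SDM RAM with qed_memcpy_from() and the split 64-bit counters are folded with HILO_64_REGPAIR(). A minimal sketch of that folding, assuming the usual regpair layout of two little-endian 32-bit halves with the low word first; the sample_* names are stand-ins for illustration, not the driver's actual types or macro:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Stand-in for the firmware regpair: low half first, then high half,
 * both little-endian as written by the storm firmware.
 */
struct sample_regpair {
	__le32 lo;
	__le32 hi;
};

/* Fold the halves into one host-order 64-bit counter; the same idea
 * HILO_64_REGPAIR() expresses as a macro.
 */
static inline u64 sample_hilo_64(struct sample_regpair rp)
{
	return ((u64)le32_to_cpu(rp.hi) << 32) | le32_to_cpu(rp.lo);
}

The firmware exposes wide counters as two 32-bit halves because the storm RAM is written in 32-bit words, so the driver reassembles them on the host side.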
+ +struct qed_hash_iscsi_con { + struct hlist_node node; + struct qed_iscsi_conn *con; +}; + +static int qed_fill_iscsi_dev_info(struct qed_dev *cdev, + struct qed_dev_iscsi_info *info) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + + int rc; + + memset(info, 0, sizeof(*info)); + rc = qed_fill_dev_info(cdev, &info->common); + + info->primary_dbq_rq_addr = + qed_iscsi_get_primary_bdq_prod(hwfn, BDQ_ID_RQ); + info->secondary_bdq_rq_addr = + qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + + return rc; +} + +static void qed_register_iscsi_ops(struct qed_dev *cdev, + struct qed_iscsi_cb_ops *ops, void *cookie) +{ + cdev->protocol_ops.iscsi = ops; + cdev->ops_cookie = cookie; +} + +static struct qed_hash_iscsi_con *qed_iscsi_get_hash(struct qed_dev *cdev, + u32 handle) +{ + struct qed_hash_iscsi_con *hash_con = NULL; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) + return NULL; + + hash_for_each_possible(cdev->connections, hash_con, node, handle) { + if (hash_con->con->icid == handle) + break; + } + + if (!hash_con || (hash_con->con->icid != handle)) + return NULL; + + return hash_con; +} + +static int qed_iscsi_stop(struct qed_dev *cdev) +{ + int rc; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { + DP_NOTICE(cdev, "iscsi already stopped\n"); + return 0; + } + + if (!hash_empty(cdev->connections)) { + DP_NOTICE(cdev, + "Can't stop iscsi - not all connections were returned\n"); + return -EINVAL; + } + + /* Stop the iscsi */ + rc = qed_sp_iscsi_func_stop(QED_LEADING_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL); + cdev->flags &= ~QED_FLAG_STORAGE_STARTED; + + return rc; +} + +static int qed_iscsi_start(struct qed_dev *cdev, + struct qed_iscsi_tid *tasks, + void *event_context, + iscsi_event_cb_t async_event_cb) +{ + int rc; + struct qed_tid_mem *tid_info; + + if (cdev->flags & QED_FLAG_STORAGE_STARTED) { + DP_NOTICE(cdev, "iscsi already started\n"); + return 0; + } + + rc = qed_sp_iscsi_func_start(QED_LEADING_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL, event_context, + async_event_cb); + if (rc) { + DP_NOTICE(cdev, "Failed to start iscsi\n"); + return rc; + } + + cdev->flags |= QED_FLAG_STORAGE_STARTED; + hash_init(cdev->connections); + + if (!tasks) + return 0; + + tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL); + + if (!tid_info) { + qed_iscsi_stop(cdev); + return -ENOMEM; + } + + rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), + tid_info); + if (rc) { + DP_NOTICE(cdev, "Failed to gather task information\n"); + qed_iscsi_stop(cdev); + kfree(tid_info); + return rc; + } + + /* Fill task information */ + tasks->size = tid_info->tid_size; + tasks->num_tids_per_block = tid_info->num_tids_per_block; + memcpy(tasks->blocks, tid_info->blocks, + MAX_TID_BLOCKS_ISCSI * sizeof(u8 *)); + + kfree(tid_info); + + return 0; +} + +static int qed_iscsi_acquire_conn(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell) +{ + struct qed_hash_iscsi_con *hash_con; + int rc; + + /* Allocate a hashed connection */ + hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC); + if (!hash_con) + return -ENOMEM; + + /* Acquire the connection */ + rc = qed_iscsi_acquire_connection(QED_LEADING_HWFN(cdev), NULL, + &hash_con->con); + if (rc) { + DP_NOTICE(cdev, "Failed to acquire Connection\n"); + kfree(hash_con); + return rc; + } + + /* Add the connection to the hash table */ + *handle = hash_con->con->icid; + *fw_cid = hash_con->con->fw_cid; + hash_add(cdev->connections, &hash_con->node, *handle); + + if (p_doorbell) + *p_doorbell = qed_iscsi_get_db_addr(QED_LEADING_HWFN(cdev), + *handle); + + return 0; +} + +static int qed_iscsi_release_conn(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + hlist_del(&hash_con->node); + qed_iscsi_release_connection(QED_LEADING_HWFN(cdev), hash_con->con); + kfree(hash_con); + + return 0; +} + +static int qed_iscsi_offload_conn(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_offload *conn_info) +{ + struct qed_hash_iscsi_con *hash_con; + struct qed_iscsi_conn *con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + + ether_addr_copy(con->local_mac, conn_info->src.mac); + ether_addr_copy(con->remote_mac, conn_info->dst.mac); + memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip)); + memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip)); + con->local_port = conn_info->src.port; + con->remote_port = conn_info->dst.port; + + con->layer_code = conn_info->layer_code; + con->sq_pbl_addr = conn_info->sq_pbl_addr; + con->initial_ack = conn_info->initial_ack; + con->vlan_id = conn_info->vlan_id; + con->tcp_flags = conn_info->tcp_flags; + con->ip_version = conn_info->ip_version; + con->default_cq = conn_info->default_cq; + con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt; + con->dup_ack_theshold = conn_info->dup_ack_theshold; + con->rcv_next = conn_info->rcv_next; + con->snd_una = conn_info->snd_una; + con->snd_next = conn_info->snd_next; + con->snd_max = conn_info->snd_max; + con->snd_wnd = conn_info->snd_wnd; + con->rcv_wnd = conn_info->rcv_wnd; + con->snd_wl1 = conn_info->snd_wl1; + con->cwnd = conn_info->cwnd; + con->ss_thresh = conn_info->ss_thresh; + con->srtt = conn_info->srtt; + con->rtt_var = conn_info->rtt_var; + con->ts_time = conn_info->ts_time; + con->ts_recent = conn_info->ts_recent; + con->ts_recent_age = conn_info->ts_recent_age; + con->total_rt = conn_info->total_rt; + con->ka_timeout_delta = conn_info->ka_timeout_delta; + con->rt_timeout_delta = conn_info->rt_timeout_delta; + con->dup_ack_cnt = conn_info->dup_ack_cnt; + con->snd_wnd_probe_cnt = conn_info->snd_wnd_probe_cnt; + con->ka_probe_cnt = conn_info->ka_probe_cnt; + con->rt_cnt = conn_info->rt_cnt; + con->flow_label = conn_info->flow_label; + con->ka_timeout = conn_info->ka_timeout; + con->ka_interval = conn_info->ka_interval; + con->max_rt_time = conn_info->max_rt_time; + con->initial_rcv_wnd = conn_info->initial_rcv_wnd; + con->ttl = conn_info->ttl; + con->tos_or_tc = conn_info->tos_or_tc; + con->remote_port = conn_info->remote_port; + con->local_port = conn_info->local_port; + con->mss = conn_info->mss; + con->snd_wnd_scale = conn_info->snd_wnd_scale; + con->rcv_wnd_scale = conn_info->rcv_wnd_scale; + con->ts_ticks_per_second = conn_info->ts_ticks_per_second; + con->da_timeout_value = 
conn_info->da_timeout_value; + con->ack_frequency = conn_info->ack_frequency; + + /* Set default values on other connection fields */ + con->offl_flags = 0x1; + + return qed_sp_iscsi_conn_offload(QED_LEADING_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_update_conn(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_update *conn_info) +{ + struct qed_hash_iscsi_con *hash_con; + struct qed_iscsi_conn *con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + con->update_flag = conn_info->update_flag; + con->max_seq_size = conn_info->max_seq_size; + con->max_recv_pdu_length = conn_info->max_recv_pdu_length; + con->max_send_pdu_length = conn_info->max_send_pdu_length; + con->first_seq_length = conn_info->first_seq_length; + con->exp_stat_sn = conn_info->exp_stat_sn; + + return qed_sp_iscsi_conn_update(QED_LEADING_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_clear_conn_sq(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + return qed_sp_iscsi_conn_clear_sq(QED_LEADING_HWFN(cdev), + hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_destroy_conn(struct qed_dev *cdev, + u32 handle, u8 abrt_conn) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + hash_con->con->abortive_dsconnect = abrt_conn; + + return qed_sp_iscsi_conn_terminate(QED_LEADING_HWFN(cdev), + hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) +{ + return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats); +} + +static const struct qed_iscsi_ops qed_iscsi_ops_pass = { + .common = &qed_common_ops_pass, + .ll2 = &qed_ll2_ops_pass, + .fill_dev_info = &qed_fill_iscsi_dev_info, + .register_ops = &qed_register_iscsi_ops, + .start = &qed_iscsi_start, + .stop = &qed_iscsi_stop, + .acquire_conn = &qed_iscsi_acquire_conn, + .release_conn = &qed_iscsi_release_conn, + .offload_conn = &qed_iscsi_offload_conn, + .update_conn = &qed_iscsi_update_conn, + .destroy_conn = &qed_iscsi_destroy_conn, + .clear_sq = &qed_iscsi_clear_conn_sq, + .get_stats = &qed_iscsi_stats, +}; + +const struct qed_iscsi_ops *qed_get_iscsi_ops(void) +{ + return &qed_iscsi_ops_pass; +} +EXPORT_SYMBOL(qed_get_iscsi_ops); + +void qed_put_iscsi_ops(void) +{ +} +EXPORT_SYMBOL(qed_put_iscsi_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h new file mode 100644 index 000000000000..67c25f3db4d5 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -0,0 +1,52 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef _QED_ISCSI_H +#define _QED_ISCSI_H +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/tcp_common.h> +#include <linux/qed/qed_iscsi_if.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_mcp.h" +#include "qed_sp.h" + +struct qed_iscsi_info { + spinlock_t lock; /* Connection resources. */ + struct list_head free_list; + u16 max_num_outstanding_tasks; + void *event_context; + iscsi_event_cb_t event_cb; +}; + +#ifdef CONFIG_QED_LL2 +extern const struct qed_ll2_ops qed_ll2_ops_pass; +#endif + +#if IS_ENABLED(CONFIG_QED_ISCSI) +struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn); + +void qed_iscsi_setup(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info); + +void qed_iscsi_free(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info); +#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ +static inline struct qed_iscsi_info *qed_iscsi_alloc( + struct qed_hwfn *p_hwfn) { return NULL; } +static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info) {} +static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info) {} +#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index ddd410a91e13..6a3727c4c0c6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -23,6 +23,7 @@ #include <linux/workqueue.h> #include <linux/bitops.h> #include <linux/bug.h> +#include <linux/vmalloc.h> #include "qed.h" #include <linux/qed/qed_chain.h> #include "qed_cxt.h" @@ -41,6 +42,124 @@ #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 +void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */ + if (!p_cid->is_vf && IS_PF(p_hwfn->cdev)) + qed_cxt_release_cid(p_hwfn, p_cid->cid); + vfree(p_cid); +} + +/* The internal variant is only meant to be called directly by PFs initializing + * CIDs for their VFs. + */ +struct qed_queue_cid * +_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + u8 vf_qid, + struct qed_queue_start_common_params *p_params) +{ + bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid); + struct qed_queue_cid *p_cid; + int rc; + + p_cid = vmalloc(sizeof(*p_cid)); + if (!p_cid) + return NULL; + memset(p_cid, 0, sizeof(*p_cid)); + + p_cid->opaque_fid = opaque_fid; + p_cid->cid = cid; + p_cid->vf_qid = vf_qid; + p_cid->rel = *p_params; + + /* Don't try calculating the absolute indices for VFs */ + if (IS_VF(p_hwfn->cdev)) { + p_cid->abs = p_cid->rel; + goto out; + } + + /* Calculate the engine-absolute indices of the resources. + * This would guarantee they're valid later on. + * In some cases [SBs] we already have the right values. + */ + rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id); + if (rc) + goto fail; + + rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id); + if (rc) + goto fail; + + /* In case of a PF configuring its VF's queues, the stats-id is already + * absolute [since there's a single index that's suitable per-VF]. + */ + if (b_is_same) { + rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id, + &p_cid->abs.stats_id); + if (rc) + goto fail; + } else { + p_cid->abs.stats_id = p_cid->rel.stats_id; + } + + /* SBs relevant information was already provided as absolute */ + p_cid->abs.sb = p_cid->rel.sb; + p_cid->abs.sb_idx = p_cid->rel.sb_idx; + + /* This is tricky - we're actually interested in whether this is a PF + * entry meant for the VF. + */ + if (!b_is_same) + p_cid->is_vf = true; +out: + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n", + p_cid->opaque_fid, + p_cid->cid, + p_cid->rel.vport_id, + p_cid->abs.vport_id, + p_cid->rel.queue_id, + p_cid->abs.queue_id, + p_cid->rel.stats_id, + p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx); + + return p_cid; + +fail: + vfree(p_cid); + return NULL; +}
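The helper above resolves every PF-relative index to its engine-absolute form once, at cid-creation time, so later ramrods can use the cached values without revalidating. A toy model of such a base-plus-range translation; the example_* names and the base/num fields are illustrative and do not reflect the driver's actual RESC_* resource bookkeeping:

#include <linux/types.h>
#include <linux/errno.h>

struct example_resc {
	u8 base;	/* first engine-absolute index owned by this PF */
	u8 num;		/* number of indices owned by this PF */
};

static int example_rel_to_abs(const struct example_resc *resc,
			      u8 rel_id, u8 *p_abs_id)
{
	/* A relative index outside the PF's range can never be valid */
	if (rel_id >= resc->num)
		return -EINVAL;

	*p_abs_id = resc->base + rel_id;
	return 0;
}

Doing the range check once here is what lets qed_eth_rxq_start_ramrod() and friends below consume p_cid->abs fields without further validation.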
+ +static struct qed_queue_cid *qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, struct + qed_queue_start_common_params + *p_params) +{ + struct qed_queue_cid *p_cid; + u32 cid = 0; + + /* Get a unique firmware CID for this queue, in case it's a PF. + * VF's don't need a CID as the queue configuration will be done + * by PF. + */ + if (IS_PF(p_hwfn->cdev)) { + if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) { + DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); + return NULL; + } + } + + p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params); + if (!p_cid && IS_PF(p_hwfn->cdev)) + qed_cxt_release_cid(p_hwfn, cid); + + return p_cid; +} + int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params) { @@ -496,61 +615,26 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, return 0; } -static int qed_sp_release_queue_cid( - struct qed_hwfn *p_hwfn, - struct qed_hw_cid_data *p_cid_data) -{ - if (!p_cid_data->b_cid_allocated) - return 0; - - qed_cxt_release_cid(p_hwfn, p_cid_data->cid); - - p_cid_data->b_cid_allocated = false; - - return 0; -} - -int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *p_params, - u8 stats_id, - u16 bd_max_bytes, - dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, - u16 cqe_pbl_size, bool b_use_zone_a_prod) +int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size) { struct rx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - struct qed_hw_cid_data *p_rx_cid; - u16 abs_rx_q_id = 0; - u8 abs_vport_id = 0; int rc = -EINVAL; - /* Store information for the stop */ - p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id]; - p_rx_cid->cid = cid; - p_rx_cid->opaque_fid = opaque_fid; - p_rx_cid->vport_id = p_params->vport_id; - - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc) - return rc; - - rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id); - if (rc) - return rc; - DP_VERBOSE(p_hwfn, QED_MSG_SP, - "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", - opaque_fid, - cid, p_params->queue_id, p_params->vport_id, p_params->sb); + "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", + p_cid->opaque_fid, p_cid->cid, + p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); - init_data.cid = cid; - init_data.opaque_fid = opaque_fid; + 
init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -561,11 +645,11 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.rx_queue_start; - p_ramrod->sb_id = cpu_to_le16(p_params->sb); - p_ramrod->sb_index = p_params->sb_idx; - p_ramrod->vport_id = abs_vport_id; - p_ramrod->stats_counter_id = stats_id; - p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb); + p_ramrod->sb_index = p_cid->abs.sb_idx; + p_ramrod->vport_id = p_cid->abs.vport_id; + p_ramrod->stats_counter_id = p_cid->abs.stats_id; + p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); p_ramrod->complete_cqe_flg = 0; p_ramrod->complete_event_flg = 1; @@ -575,85 +659,85 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); - if (p_params->vf_qid || b_use_zone_a_prod) { - p_ramrod->vf_rx_prod_index = p_params->vf_qid; + if (p_cid->is_vf) { + p_ramrod->vf_rx_prod_index = p_cid->vf_qid; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Queue%s is meant for VF rxq[%02x]\n", - b_use_zone_a_prod ? " [legacy]" : "", - p_params->vf_qid); - p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod; + !!p_cid->b_legacy_vf ? " [legacy]" : "", + p_cid->vf_qid); + p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf; } return qed_spq_post(p_hwfn, p_ent, NULL); } static int -qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - struct qed_queue_start_common_params *p_params, +qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, void __iomem **pp_prod) { - struct qed_hw_cid_data *p_rx_cid; u32 init_prod_val = 0; - u16 abs_l2_queue = 0; - u8 abs_stats_id = 0; - int rc; - if (IS_VF(p_hwfn->cdev)) { - return qed_vf_pf_rxq_start(p_hwfn, - p_params->queue_id, - p_params->sb, - (u8)p_params->sb_idx, - bd_max_bytes, - bd_chain_phys_addr, - cqe_pbl_addr, cqe_pbl_size, pp_prod); - } - - rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue); - if (rc) - return rc; - - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); - if (rc) - return rc; - - *pp_prod = (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue); + *pp_prod = p_hwfn->regview + + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)(&init_prod_val)); + return qed_eth_rxq_start_ramrod(p_hwfn, p_cid, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size); +} + +static int +qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + struct qed_rxq_start_ret_params *p_ret_params) +{ + struct qed_queue_cid *p_cid; + int rc; + /* Allocate a CID for the queue */ - p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id]; - rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid); - if (rc) { - DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); - return rc; - } - p_rx_cid->b_cid_allocated = true; + p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params); + if (!p_cid) + return -ENOMEM; - rc = 
qed_sp_eth_rxq_start_ramrod(p_hwfn, - opaque_fid, - p_rx_cid->cid, - p_params, - abs_stats_id, + if (IS_PF(p_hwfn->cdev)) { + rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size, + &p_ret_params->p_prod); + } else { + rc = qed_vf_pf_rxq_start(p_hwfn, p_cid, bd_max_bytes, bd_chain_phys_addr, - cqe_pbl_addr, cqe_pbl_size, false); + cqe_pbl_addr, + cqe_pbl_size, &p_ret_params->p_prod); + } + /* Provide the caller with a reference to the queue handle */ if (rc) - qed_sp_release_queue_cid(p_hwfn, p_rx_cid); + qed_eth_queue_cid_release(p_hwfn, p_cid); + else + p_ret_params->p_handle = (void *)p_cid; return rc; } int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, - u16 rx_queue_id, + void **pp_rxq_handles, u8 num_rxqs, u8 complete_cqe_flg, u8 complete_event_flg, @@ -663,8 +747,7 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, struct rx_queue_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - struct qed_hw_cid_data *p_rx_cid; - u16 qid, abs_rx_q_id = 0; + struct qed_queue_cid *p_cid; int rc = -EINVAL; u8 i; @@ -673,12 +756,11 @@ init_data.p_comp_data = p_comp_data; for (i = 0; i < num_rxqs; i++) { - qid = rx_queue_id + i; - p_rx_cid = &p_hwfn->p_rx_cids[qid]; + p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i]; /* Get SPQ entry */ - init_data.cid = p_rx_cid->cid; - init_data.opaque_fid = p_rx_cid->opaque_fid; + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_RX_QUEUE_UPDATE, @@ -687,10 +769,9 @@ return rc; p_ramrod = &p_ent->ramrod.rx_queue_update; + p_ramrod->vport_id = p_cid->abs.vport_id; - qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); - qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); - p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); p_ramrod->complete_cqe_flg = complete_cqe_flg; p_ramrod->complete_event_flg = complete_event_flg; @@ -702,24 +783,19 @@ return rc; } -int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, - u16 rx_queue_id, - bool eq_completion_only, bool cqe_completion) +static int +qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + bool b_eq_completion_only, bool b_cqe_completion) { - struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; struct rx_queue_stop_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - u16 abs_rx_q_id = 0; - int rc = -EINVAL; - - if (IS_VF(p_hwfn->cdev)) - return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion); + int rc; - /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); - init_data.cid = p_rx_cid->cid; - init_data.opaque_fid = p_rx_cid->opaque_fid; + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -729,62 +805,53 @@ return rc; p_ramrod = &p_ent->ramrod.rx_queue_stop; - - qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); - qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id); - p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->vport_id = p_cid->abs.vport_id; + p_ramrod->rx_queue_id = 
cpu_to_le16(p_cid->abs.queue_id); /* Cleaning the queue requires the completion to arrive there. * In addition, VFs require the answer to come as eqe to PF. */ - p_ramrod->complete_cqe_flg = - (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) && - !eq_completion_only) || cqe_completion; - p_ramrod->complete_event_flg = - !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) || - eq_completion_only; + p_ramrod->complete_cqe_flg = (!p_cid->is_vf && + !b_eq_completion_only) || + b_cqe_completion; + p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only; - rc = qed_spq_post(p_hwfn, p_ent, NULL); - if (rc) - return rc; + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + void *p_rxq, + bool eq_completion_only, bool cqe_completion) +{ + struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq; + int rc = -EINVAL; + + if (IS_PF(p_hwfn->cdev)) + rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid, + eq_completion_only, + cqe_completion); + else + rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion); - return qed_sp_release_queue_cid(p_hwfn, p_rx_cid); + if (!rc) + qed_eth_queue_cid_release(p_hwfn, p_cid); + return rc; } -int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *p_params, - u8 stats_id, - dma_addr_t pbl_addr, - u16 pbl_size, - union qed_qm_pq_params *p_pq_params) +int +qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id) { struct tx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - struct qed_hw_cid_data *p_tx_cid; - u16 pq_id, abs_tx_q_id = 0; int rc = -EINVAL; - u8 abs_vport_id; - - /* Store information for the stop */ - p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; - p_tx_cid->cid = cid; - p_tx_cid->opaque_fid = opaque_fid; - - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc) - return rc; - - rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id); - if (rc) - return rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); - init_data.cid = cid; - init_data.opaque_fid = opaque_fid; + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -794,96 +861,92 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, return rc; p_ramrod = &p_ent->ramrod.tx_queue_start; - p_ramrod->vport_id = abs_vport_id; + p_ramrod->vport_id = p_cid->abs.vport_id; - p_ramrod->sb_id = cpu_to_le16(p_params->sb); - p_ramrod->sb_index = p_params->sb_idx; - p_ramrod->stats_counter_id = stats_id; + p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb); + p_ramrod->sb_index = p_cid->abs.sb_idx; + p_ramrod->stats_counter_id = p_cid->abs.stats_id; - p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); + p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id); + p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id); p_ramrod->pbl_size = cpu_to_le16(pbl_size); DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); - pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params); p_ramrod->qm_pq_id = cpu_to_le16(pq_id); return qed_spq_post(p_hwfn, p_ent, NULL); } static int -qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - struct qed_queue_start_common_params *p_params, +qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u8 tc, dma_addr_t pbl_addr, 
u16 pbl_size, void __iomem **pp_doorbell) { - struct qed_hw_cid_data *p_tx_cid; union qed_qm_pq_params pq_params; - u8 abs_stats_id = 0; int rc; - if (IS_VF(p_hwfn->cdev)) { - return qed_vf_pf_txq_start(p_hwfn, - p_params->queue_id, - p_params->sb, - p_params->sb_idx, - pbl_addr, pbl_size, pp_doorbell); - } + memset(&pq_params, 0, sizeof(pq_params)); - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); + rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, + pbl_addr, pbl_size, + qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, + &pq_params)); if (rc) return rc; - p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; - memset(p_tx_cid, 0, sizeof(*p_tx_cid)); - memset(&pq_params, 0, sizeof(pq_params)); + /* Provide the caller with the necessary return values */ + *pp_doorbell = p_hwfn->doorbells + + qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY); - /* Allocate a CID for the queue */ - rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid); - if (rc) { - DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); - return rc; - } - p_tx_cid->b_cid_allocated = true; + return 0; +} - DP_VERBOSE(p_hwfn, QED_MSG_SP, - "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", - opaque_fid, p_tx_cid->cid, - p_params->queue_id, p_params->vport_id, p_params->sb); - - rc = qed_sp_eth_txq_start_ramrod(p_hwfn, - opaque_fid, - p_tx_cid->cid, - p_params, - abs_stats_id, - pbl_addr, - pbl_size, - &pq_params); - - *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + - qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY); +static int +qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + u8 tc, + dma_addr_t pbl_addr, + u16 pbl_size, + struct qed_txq_start_ret_params *p_ret_params) +{ + struct qed_queue_cid *p_cid; + int rc; + + p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params); + if (!p_cid) + return -EINVAL; + + if (IS_PF(p_hwfn->cdev)) + rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, + pbl_addr, pbl_size, + &p_ret_params->p_doorbell); + else + rc = qed_vf_pf_txq_start(p_hwfn, p_cid, + pbl_addr, pbl_size, + &p_ret_params->p_doorbell); if (rc) - qed_sp_release_queue_cid(p_hwfn, p_tx_cid); + qed_eth_queue_cid_release(p_hwfn, p_cid); + else + p_ret_params->p_handle = (void *)p_cid; return rc; } -int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) +static int +qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { - struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - int rc = -EINVAL; - - if (IS_VF(p_hwfn->cdev)) - return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id); + int rc; - /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); - init_data.cid = p_tx_cid->cid; - init_data.opaque_fid = p_tx_cid->opaque_fid; + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -892,11 +955,22 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) if (rc) return rc; - rc = qed_spq_post(p_hwfn, p_ent, NULL); - if (rc) - return rc; + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle) +{ + struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle; + int rc; + + if (IS_PF(p_hwfn->cdev)) + rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid); + else + rc = qed_vf_pf_txq_stop(p_hwfn, p_cid); - return qed_sp_release_queue_cid(p_hwfn, p_tx_cid); + if (!rc) + 
qed_eth_queue_cid_release(p_hwfn, p_cid); + return rc; } static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode) @@ -1652,6 +1726,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, if (IS_PF(cdev)) { int max_vf_vlan_filters = 0; + int max_vf_mac_filters = 0; if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { for_each_hwfn(cdev, i) @@ -1665,11 +1740,18 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, info->num_queues = cdev->num_hwfns; } - if (IS_QED_SRIOV(cdev)) + if (IS_QED_SRIOV(cdev)) { max_vf_vlan_filters = cdev->p_iov_info->total_vfs * QED_ETH_VF_NUM_VLAN_FILTERS; - info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) - + max_vf_mac_filters = cdev->p_iov_info->total_vfs * + QED_ETH_VF_NUM_MAC_FILTERS; + } + info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev), + QED_VLAN) - max_vf_vlan_filters; + info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev), + QED_MAC) - + max_vf_mac_filters; ether_addr_copy(info->port_mac, cdev->hwfns[0].hw_info.hw_mac_addr); @@ -1683,7 +1765,9 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, } qed_vf_get_num_vlan_filters(&cdev->hwfns[0], - &info->num_vlan_filters); + (u8 *)&info->num_vlan_filters); + qed_vf_get_num_mac_filters(&cdev->hwfns[0], + (u8 *)&info->num_mac_filters); qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi; @@ -1870,58 +1954,53 @@ static int qed_update_vport(struct qed_dev *cdev, } static int qed_start_rxq(struct qed_dev *cdev, - struct qed_queue_start_common_params *params, + u8 rss_num, + struct qed_queue_start_common_params *p_params, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, - void __iomem **pp_prod) + struct qed_rxq_start_ret_params *ret_params) { struct qed_hwfn *p_hwfn; int rc, hwfn_index; - hwfn_index = params->rss_id % cdev->num_hwfns; + hwfn_index = rss_num % cdev->num_hwfns; p_hwfn = &cdev->hwfns[hwfn_index]; - /* Fix queue ID in 100g mode */ - params->queue_id /= cdev->num_hwfns; - - rc = qed_sp_eth_rx_queue_start(p_hwfn, - p_hwfn->hw_info.opaque_fid, - params, - bd_max_bytes, - bd_chain_phys_addr, - cqe_pbl_addr, - cqe_pbl_size, - pp_prod); + p_params->queue_id = p_params->queue_id / cdev->num_hwfns; + p_params->stats_id = p_params->vport_id; + rc = qed_eth_rx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + p_params, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size, ret_params); if (rc) { - DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id); + DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id); return rc; } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n", - params->queue_id, params->rss_id, params->vport_id, - params->sb); + "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n", + p_params->queue_id, rss_num, p_params->vport_id, + p_params->sb); return 0; } -static int qed_stop_rxq(struct qed_dev *cdev, - struct qed_stop_rxq_params *params) +static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle) { int rc, hwfn_index; struct qed_hwfn *p_hwfn; - hwfn_index = params->rss_id % cdev->num_hwfns; - p_hwfn = &cdev->hwfns[hwfn_index]; + hwfn_index = rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; - rc = qed_sp_eth_rx_queue_stop(p_hwfn, - params->rx_queue_id / cdev->num_hwfns, - params->eq_completion_only, false); + rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false); if (rc) { - DP_ERR(cdev, "Failed to stop RXQ#%d\n", 
params->rx_queue_id); + DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id); return rc; } @@ -1929,26 +2008,24 @@ static int qed_stop_rxq(struct qed_dev *cdev, } static int qed_start_txq(struct qed_dev *cdev, + u8 rss_num, struct qed_queue_start_common_params *p_params, dma_addr_t pbl_addr, u16 pbl_size, - void __iomem **pp_doorbell) + struct qed_txq_start_ret_params *ret_params) { struct qed_hwfn *p_hwfn; int rc, hwfn_index; - hwfn_index = p_params->rss_id % cdev->num_hwfns; - p_hwfn = &cdev->hwfns[hwfn_index]; - - /* Fix queue ID in 100g mode */ - p_params->queue_id /= cdev->num_hwfns; + hwfn_index = rss_num % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + p_params->queue_id = p_params->queue_id / cdev->num_hwfns; + p_params->stats_id = p_params->vport_id; - rc = qed_sp_eth_tx_queue_start(p_hwfn, - p_hwfn->hw_info.opaque_fid, - p_params, - pbl_addr, - pbl_size, - pp_doorbell); + rc = qed_eth_tx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + p_params, 0, + pbl_addr, pbl_size, ret_params); if (rc) { DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id); @@ -1956,8 +2033,8 @@ static int qed_start_txq(struct qed_dev *cdev, } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n", - p_params->queue_id, p_params->rss_id, p_params->vport_id, + "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n", + p_params->queue_id, rss_num, p_params->vport_id, p_params->sb); return 0; @@ -1971,19 +2048,17 @@ static int qed_fastpath_stop(struct qed_dev *cdev) return 0; } -static int qed_stop_txq(struct qed_dev *cdev, - struct qed_stop_txq_params *params) +static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle) { struct qed_hwfn *p_hwfn; int rc, hwfn_index; - hwfn_index = params->rss_id % cdev->num_hwfns; - p_hwfn = &cdev->hwfns[hwfn_index]; + hwfn_index = rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; - rc = qed_sp_eth_tx_queue_stop(p_hwfn, - params->tx_queue_id / cdev->num_hwfns); + rc = qed_eth_tx_queue_stop(p_hwfn, handle); if (rc) { - DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id); + DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index e495d62fcc03..48c9bfc28140 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -78,11 +78,34 @@ struct qed_filter_mcast { unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN]; }; -int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, - u16 rx_queue_id, - bool eq_completion_only, bool cqe_completion); +/** + * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue + * + * @param p_hwfn + * @param p_rxq Handle of the queue to close + * @param eq_completion_only If True, completion will be on + * EQe; if False, completion will be + * on EQe if the p_hwfn opaque + * differs from the RXQ opaque, + * otherwise on CQe. + * @param cqe_completion If True, completion will be + * received on CQe. + * @return int + */ +int +qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + void *p_rxq, + bool eq_completion_only, bool cqe_completion); -int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id); +/** + * @brief qed_eth_tx_queue_stop - closes a Tx queue + * + * @param p_hwfn + * @param p_txq - handle of the Tx queue to be closed + * + * @return int + */ +int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
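A hedged sketch of the start/stop contract these prototypes define: the caller keeps only the opaque handle returned in the ret-params at start time and hands it back at teardown, never dereferencing the queue-cid itself. It assumes the sketch sits next to the static helpers in qed_l2.c as introduced by this patch; the example_* name is made up:

/* Start an Rx queue, then immediately stop it again via the opaque
 * handle. Error handling beyond propagation is trimmed.
 */
static int example_rxq_cycle(struct qed_hwfn *p_hwfn, u16 opaque_fid,
			     struct qed_queue_start_common_params *p_params,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct qed_rxq_start_ret_params ret_params = {};
	int rc;

	rc = qed_eth_rx_queue_start(p_hwfn, opaque_fid, p_params,
				    bd_max_bytes, bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size,
				    &ret_params);
	if (rc)
		return rc;

	/* ret_params.p_prod is the producer address; p_handle is the
	 * only token the caller must retain for later teardown.
	 */
	return qed_eth_rx_queue_stop(p_hwfn, ret_params.p_handle,
				     false, false);
}

Replacing the old queue-id based API with an opaque handle is what lets the same caller code work for PF and VF queues, since the PF/VF split is resolved inside the start/stop helpers.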
 enum qed_tpa_mode { QED_TPA_MODE_NONE, @@ -196,19 +219,19 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, * @note At the moment - only used by non-linux VFs. * * @param p_hwfn - * @param rx_queue_id RX Queue ID - * @param num_rxqs Allow to update multiple rx - * queues, from rx_queue_id to - * (rx_queue_id + num_rxqs) + * @param pp_rxq_handlers An array of queue handlers to be updated. + * @param num_rxqs number of queues to update. * @param complete_cqe_flg Post completion to the CQE Ring if set * @param complete_event_flg Post completion to the Event Ring if set + * @param comp_mode + * @param p_comp_data * * @return int */ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, - u16 rx_queue_id, + void **pp_rxq_handlers, u8 num_rxqs, u8 complete_cqe_flg, u8 complete_event_flg, @@ -217,27 +240,79 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); -int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, - struct qed_sp_vport_start_params *p_params); +void qed_reset_vport_stats(struct qed_dev *cdev); + +struct qed_queue_cid { + /* 'Relative' is a relative term ;-). Usually the indices [not counting + * SBs] would be PF-relative, but there are some cases where that isn't + * the case - specifically for a PF configuring its VF indices it's + * possible some fields [E.g., stats-id] in 'rel' would already be abs. + */ + struct qed_queue_start_common_params rel; + struct qed_queue_start_common_params abs; + u32 cid; + u16 opaque_fid; + + /* VFs queues are mapped differently, so we need to know the + * relative queue associated with them [0-based]. + * Notice this is relevant on the *PF* queue-cid of its VF's queues, + * and not on the VF itself. 
+ */ + bool is_vf; + u8 vf_qid; + + /* Legacy VFs might have Rx producer located elsewhere */ + bool b_legacy_vf; +}; -int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *params, - u8 stats_id, - u16 bd_max_bytes, - dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, - u16 cqe_pbl_size, bool b_use_zone_a_prod); - -int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *p_params, - u8 stats_id, - dma_addr_t pbl_addr, - u16 pbl_size, - union qed_qm_pq_params *p_pq_params); +void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid); + +struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + u8 vf_qid, + struct qed_queue_start_common_params + *p_params); + +int +qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params); + +/** + * @brief - Starts an Rx queue, when queue_cid is already prepared + * + * @param p_hwfn + * @param p_cid + * @param bd_max_bytes + * @param bd_chain_phys_addr + * @param cqe_pbl_addr + * @param cqe_pbl_size + * + * @return int + */ +int +qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); + +/** + * @brief - Starts a Tx queue, where queue_cid is already prepared + * + * @param p_hwfn + * @param p_cid + * @param pbl_addr + * @param pbl_size + * @param p_pq_params - parameters for choosing the PQ for this Tx queue + * + * @return int + */ +int +qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id); u8 qed_mcast_bin_from_mac(u8 *mac); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 63e1a1b0ef8e..8e5cb7605b0f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -36,6 +36,7 @@ #include "qed_int.h" #include "qed_ll2.h" #include "qed_mcp.h" +#include "qed_ooo.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_roce.h" @@ -296,25 +297,34 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_del(&p_pkt->list_entry); b_last_packet = list_empty(&p_tx->active_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); - p_tx->cur_completing_packet = *p_pkt; - p_tx->cur_completing_bd_idx = 1; - b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; - tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->gsi_enable) - qed_ll2b_release_tx_gsi_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, - b_last_packet); - else - qed_ll2b_complete_tx_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, - b_last_packet); + if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + struct qed_ooo_buffer *p_buffer; + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + } else { + p_tx->cur_completing_packet = *p_pkt; + p_tx->cur_completing_bd_idx = 1; + b_last_frag = + p_tx->cur_completing_bd_idx == p_pkt->bd_used; + tx_frag = p_pkt->bds_set[0].tx_frag; + if (p_ll2_conn->gsi_enable) + qed_ll2b_release_tx_gsi_packet(p_hwfn, + p_ll2_conn-> + my_id, + p_pkt->cookie, + tx_frag, + b_last_frag, + b_last_packet); + else + qed_ll2b_complete_tx_packet(p_hwfn, + 
p_ll2_conn->my_id, + p_pkt->cookie, + tx_frag, + b_last_frag, + b_last_packet); + } } } @@ -540,12 +550,457 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); - rx_buf_addr = p_pkt->rx_buf_addr; - cookie = p_pkt->cookie; + if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + struct qed_ooo_buffer *p_buffer; + + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + } else { + rx_buf_addr = p_pkt->rx_buf_addr; + cookie = p_pkt->cookie; + + b_last = list_empty(&p_rx->active_descq); + } + } +} + +#if IS_ENABLED(CONFIG_QED_ISCSI) +static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags) +{ + u8 bd_flags = 0; + + if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST)) + SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1); + + return bd_flags; +} + +static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; + u16 packet_length = 0, parse_flags = 0, vlan = 0; + struct qed_ll2_rx_packet *p_pkt = NULL; + u32 num_ooo_add_to_peninsula = 0, cid; + union core_rx_cqe_union *cqe = NULL; + u16 cq_new_idx = 0, cq_old_idx = 0; + struct qed_ooo_buffer *p_buffer; + struct ooo_opaque *iscsi_ooo; + u8 placement_offset = 0; + u8 cqe_type; + + cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); + cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); + if (cq_new_idx == cq_old_idx) + return 0; + + while (cq_new_idx != cq_old_idx) { + struct core_rx_fast_path_cqe *p_cqe_fp; + + cqe = qed_chain_consume(&p_rx->rcq_chain); + cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); + cqe_type = cqe->rx_cqe_sp.type; + + if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { + DP_NOTICE(p_hwfn, + "Got a non-regular LB LL2 completion [type 0x%02x]\n", + cqe_type); + return -EINVAL; + } + p_cqe_fp = &cqe->rx_cqe_fp; + + placement_offset = p_cqe_fp->placement_offset; + parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags); + packet_length = le16_to_cpu(p_cqe_fp->packet_length); + vlan = le16_to_cpu(p_cqe_fp->vlan); + iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data; + qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, + iscsi_ooo); + cid = le32_to_cpu(iscsi_ooo->cid); + + /* Process delete isle first */ + if (iscsi_ooo->drop_size) + qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid, + iscsi_ooo->drop_isle, + iscsi_ooo->drop_size); + + if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP) + continue; + + /* Now process create/add/join isles */ + if (list_empty(&p_rx->active_descq)) { + DP_NOTICE(p_hwfn, + "LL2 OOO RX chain has no submitted buffers\n" + ); + return -EIO; + } + + p_pkt = list_first_entry(&p_rx->active_descq, + struct qed_ll2_rx_packet, list_entry); + + if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) { + if (!p_pkt) { + DP_NOTICE(p_hwfn, + "LL2 OOO RX packet is not valid\n"); + return -EIO; + } + list_del(&p_pkt->list_entry); + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + p_buffer->packet_length = packet_length; + p_buffer->parse_flags = parse_flags; + p_buffer->vlan = vlan; + p_buffer->placement_offset = placement_offset; + qed_chain_consume(&p_rx->rxq_chain); + list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); + + switch 
(iscsi_ooo->ooo_opcode) { + case TCP_EVENT_ADD_NEW_ISLE: + qed_ooo_add_new_isle(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle, + p_buffer); + break; + case TCP_EVENT_ADD_ISLE_RIGHT: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle, + p_buffer, + QED_OOO_RIGHT_BUF); + break; + case TCP_EVENT_ADD_ISLE_LEFT: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle, + p_buffer, + QED_OOO_LEFT_BUF); + break; + case TCP_EVENT_JOIN: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle + + 1, + p_buffer, + QED_OOO_LEFT_BUF); + qed_ooo_join_isles(p_hwfn, + p_hwfn->p_ooo_info, + cid, iscsi_ooo->ooo_isle); + break; + case TCP_EVENT_ADD_PEN: + num_ooo_add_to_peninsula++; + qed_ooo_put_ready_buffer(p_hwfn, + p_hwfn->p_ooo_info, + p_buffer, true); + break; + } + } else { + DP_NOTICE(p_hwfn, + "Unexpected event (%d) TX OOO completion\n", + iscsi_ooo->ooo_opcode); + } + } + + return 0; +} + +static void +qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + int rc; + u16 l4_hdr_offset_w; + dma_addr_t first_frag; + u16 parse_flags; + u8 bd_flags; + + /* Submit Tx buffers here */ + while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + l4_hdr_offset_w = 0; + bd_flags = 0; + + first_frag = p_buffer->rx_buffer_phys_addr + + p_buffer->placement_offset; + parse_flags = p_buffer->parse_flags; + bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags); + SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1); + SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1); + + rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, + p_buffer->vlan, bd_flags, + l4_hdr_offset_w, + p_ll2_conn->tx_dest, 0, + first_frag, + p_buffer->packet_length, + p_buffer, true); + if (rc) { + qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer, false); + break; + } + } +} + +static void +qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + int rc; + + while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + rc = qed_ll2_post_rx_buffer(p_hwfn, + p_ll2_conn->my_id, + p_buffer->rx_buffer_phys_addr, + 0, p_buffer, true); + if (rc) { + qed_ooo_put_free_buffer(p_hwfn, + p_hwfn->p_ooo_info, p_buffer); + break; + } + } +} + +static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) +{ + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; + int rc; + + rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); + if (rc) + return rc; + + qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); + qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); + + return 0; +} + +static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) +{ + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; + struct qed_ll2_tx_packet *p_pkt = NULL; + struct qed_ooo_buffer *p_buffer; + bool b_dont_submit_rx = false; + u16 new_idx = 0, num_bds = 0; + int rc; + + new_idx = le16_to_cpu(*p_tx->p_fw_cons); + num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); + + if (!num_bds) + return 0; + + while (num_bds) { + if (list_empty(&p_tx->active_descq)) + return -EINVAL; + + p_pkt = list_first_entry(&p_tx->active_descq, + struct qed_ll2_tx_packet, list_entry); + if (!p_pkt) + return -EINVAL; + + if (p_pkt->bd_used != 1) { + DP_NOTICE(p_hwfn, + "Unexpectedly many BDs(%d) in TX 
OOO completion\n", + p_pkt->bd_used); + return -EINVAL; + } + + list_del(&p_pkt->list_entry); + + num_bds--; + p_tx->bds_idx++; + qed_chain_consume(&p_tx->txq_chain); + + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); + + if (b_dont_submit_rx) { + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + continue; + } + + rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id, + p_buffer->rx_buffer_phys_addr, 0, + p_buffer, true); + if (rc != 0) { + qed_ooo_put_free_buffer(p_hwfn, + p_hwfn->p_ooo_info, p_buffer); + b_dont_submit_rx = true; + } + } + + qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); + + return 0; +} + +static int +qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info, + u16 rx_num_ooo_buffers, u16 mtu) +{ + struct qed_ooo_buffer *p_buf = NULL; + void *p_virt; + u16 buf_idx; + int rc = 0; + + if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO) + return rc; + + if (!rx_num_ooo_buffers) + return -EINVAL; + + for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) { + p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL); + if (!p_buf) { + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; + p_buf->rx_buffer_size = (p_buf->rx_buffer_size + + ETH_CACHE_LINE_SIZE - 1) & + ~(ETH_CACHE_LINE_SIZE - 1); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_buf->rx_buffer_size, + &p_buf->rx_buffer_phys_addr, + GFP_KERNEL); + if (!p_virt) { + kfree(p_buf); + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_virt_addr = p_virt; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf); + } + + DP_VERBOSE(p_hwfn, QED_MSG_LL2, + "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", + rx_num_ooo_buffers, p_buf->rx_buffer_size); + +out: + return rc; +} + +static void +qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) + return; + + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); +} + +static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + + if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) + return; + + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_buffer->rx_buffer_size, + p_buffer->rx_buffer_virt_addr, + p_buffer->rx_buffer_phys_addr); + kfree(p_buffer); + } +} + +static void qed_ll2_stop_ooo(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; + + DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n", + *handle); + + qed_ll2_terminate_connection(hwfn, *handle); + qed_ll2_release_connection(hwfn, *handle); + *handle = QED_LL2_UNUSED_HANDLE; +} + +static int qed_ll2_start_ooo(struct qed_dev *cdev, + struct qed_ll2_params *params) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; + struct qed_ll2_info *ll2_info; + int rc; + + ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL); + if (!ll2_info) + return -ENOMEM; + ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO; + ll2_info->mtu = params->mtu; + ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets; + ll2_info->rx_vlan_removal_en = 
params->rx_vlan_stripping; + ll2_info->tx_tc = OOO_LB_TC; + ll2_info->tx_dest = CORE_TX_DEST_LB; + + rc = qed_ll2_acquire_connection(hwfn, ll2_info, + QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, + handle); + kfree(ll2_info); + if (rc) { + DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); + goto out; + } - b_last = list_empty(&p_rx->active_descq); + rc = qed_ll2_establish_connection(hwfn, *handle); + if (rc) { + DP_INFO(cdev, "Failed to establish LL2 OOO connection\n"); + goto fail; } + + return 0; + +fail: + qed_ll2_release_connection(hwfn, *handle); +out: + *handle = QED_LL2_UNUSED_HANDLE; + return rc; } +#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ +static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, + void *p_cookie) { return -EINVAL; } +static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, + void *p_cookie) { return -EINVAL; } +static inline int +qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info, + u16 rx_num_ooo_buffers, u16 mtu) { return 0; } +static inline void +qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) { return; } +static inline void +qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) { return; } +static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; } +static inline int qed_ll2_start_ooo(struct qed_dev *cdev, + struct qed_ll2_params *params) + { return -EINVAL; } +#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
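The Rx buffer sizing in qed_ll2_acquire_connection_ooo() above reserves the MTU plus a 26-byte headroom constant taken verbatim from the patch, then rounds the total up to the cache-line size. A standalone sketch of that round-up; it relies on the alignment being a power of two, and EXAMPLE_CACHE_LINE is a stand-in for ETH_CACHE_LINE_SIZE:

#include <linux/types.h>

#define EXAMPLE_CACHE_LINE 64u	/* stand-in for ETH_CACHE_LINE_SIZE */

static inline u32 example_ooo_buf_size(u32 mtu)
{
	/* MTU plus the patch's 26-byte headroom, plus one extra cache
	 * line so a non-zero placement offset still fits.
	 */
	u32 size = mtu + 26 + EXAMPLE_CACHE_LINE;

	/* Round up to the next EXAMPLE_CACHE_LINE boundary; valid only
	 * because EXAMPLE_CACHE_LINE is a power of two.
	 */
	return (size + EXAMPLE_CACHE_LINE - 1) & ~(EXAMPLE_CACHE_LINE - 1);
}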
 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, @@ -588,7 +1043,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg; p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en; p_ramrod->queue_id = p_ll2_conn->queue_id; - p_ramrod->main_func_queue = 1; + p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 + : 1; if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) { @@ -619,6 +1075,11 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) return 0; + if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) + p_ll2_conn->tx_stats_en = 0; + else + p_ll2_conn->tx_stats_en = 1; + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_ll2_conn->cid; @@ -636,7 +1097,6 @@ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); p_ramrod->sb_index = p_tx->tx_sb_index; p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); - p_ll2_conn->tx_stats_en = 1; p_ramrod->stats_en = p_ll2_conn->tx_stats_en; p_ramrod->stats_id = p_ll2_conn->tx_stats_id; @@ -860,9 +1320,19 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, if (rc) goto q_allocate_fail; + rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, + rx_num_desc * 2, p_params->mtu); + if (rc) + goto q_allocate_fail; + /* Register callbacks for the Rx/Tx queues */ - comp_rx_cb = qed_ll2_rxq_completion; - comp_tx_cb = qed_ll2_txq_completion; + if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + comp_rx_cb = qed_ll2_lb_rxq_completion; + comp_tx_cb = qed_ll2_lb_txq_completion; + } else { + comp_rx_cb = qed_ll2_rxq_completion; + comp_tx_cb = qed_ll2_txq_completion; + } if (rx_num_desc) { qed_int_register_cb(p_hwfn, comp_rx_cb, @@ -975,6 +1445,8 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1); + qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); + return rc; } @@ -1119,6 +1591,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << CORE_TX_BD_FLAGS_START_BD_SHIFT; SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); + SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type); DMA_REGPAIR_LE(start_bd->addr, first_frag); start_bd->nbytes = cpu_to_le16(first_frag_len); @@ -1212,6 +1685,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, u16 vlan, u8 bd_flags, u16 l4_hdr_offset_w, + enum qed_ll2_tx_dest e_tx_dest, enum qed_ll2_roce_flavor_type qed_roce_flavor, dma_addr_t first_frag, u16 first_frag_len, void *cookie, u8 notify_fw) @@ -1221,6 +1695,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, enum core_roce_flavor_type roce_flavor; struct qed_ll2_tx_queue *p_tx; struct qed_chain *p_tx_chain; + enum core_tx_dest tx_dest; unsigned long flags; int rc = 0; @@ -1251,6 +1726,8 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, goto out; } + tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? 
CORE_TX_DEST_NW : + CORE_TX_DEST_LB; if (qed_roce_flavor == QED_LL2_ROCE) { roce_flavor = CORE_ROCE; } else if (qed_roce_flavor == QED_LL2_RROCE) { @@ -1265,7 +1742,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, num_of_bds, first_frag, first_frag_len, cookie, notify_fw); qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, - num_of_bds, CORE_TX_DEST_NW, + num_of_bds, tx_dest, vlan, bd_flags, l4_hdr_offset_w, roce_flavor, first_frag, first_frag_len); @@ -1340,6 +1817,9 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_ll2_rxq_flush(p_hwfn, connection_handle); } + if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + return rc; } @@ -1370,6 +1850,8 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid); + qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn); + mutex_lock(&p_ll2_conn->mutex); p_ll2_conn->b_active = false; mutex_unlock(&p_ll2_conn->mutex); @@ -1516,6 +1998,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) enum qed_ll2_conn_type conn_type; struct qed_ptt *p_ptt; int rc, i; + u8 gsi_enable = 1; /* Initialize LL2 locks & lists */ INIT_LIST_HEAD(&cdev->ll2->list); @@ -1547,6 +2030,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) switch (QED_LEADING_HWFN(cdev)->hw_info.personality) { case QED_PCI_ISCSI: conn_type = QED_LL2_TYPE_ISCSI; + gsi_enable = 0; break; case QED_PCI_ETH_ROCE: conn_type = QED_LL2_TYPE_ROCE; @@ -1563,7 +2047,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping; ll2_info.tx_tc = 0; ll2_info.tx_dest = CORE_TX_DEST_NW; - ll2_info.gsi_enable = 1; + ll2_info.gsi_enable = gsi_enable; rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info, QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, @@ -1610,6 +2094,17 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) goto release_terminate; } + if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && + cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) { + DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n"); + rc = qed_ll2_start_ooo(cdev, params); + if (rc) { + DP_INFO(cdev, + "Failed to initialize the OOO LL2 queue\n"); + goto release_terminate; + } + } + p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); if (!p_ptt) { DP_INFO(cdev, "Failed to acquire PTT\n"); @@ -1659,6 +2154,10 @@ static int qed_ll2_stop(struct qed_dev *cdev) qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); eth_zero_addr(cdev->ll2_mac_address); + if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && + cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) + qed_ll2_stop_ooo(cdev); + rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle); if (rc) @@ -1713,7 +2212,8 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), cdev->ll2->handle, 1 + skb_shinfo(skb)->nr_frags, - vlan, flags, 0, 0 /* RoCE FLAVOR */, + vlan, flags, 0, QED_LL2_TX_DEST_NW, + 0 /* RoCE FLAVOR */, mapping, skb->len, skb, 1); if (rc) goto err; @@ -1729,6 +2229,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) mapping))) { DP_NOTICE(cdev, "Unable to map frag - dropping packet\n"); + rc = -ENOMEM; goto err; } } else { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h 
b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 4e3d62a16cab..6625a3ae5a33 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -41,6 +41,12 @@ enum qed_ll2_conn_type { MAX_QED_LL2_RX_CONN_TYPE }; +enum qed_ll2_tx_dest { + QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ + QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ + QED_LL2_TX_DEST_MAX +}; + struct qed_ll2_rx_packet { struct list_head list_entry; struct core_rx_bd_with_buff_len *rxq_bd; @@ -192,6 +198,8 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, * @param l4_hdr_offset_w L4 Header Offset from start of packet * (in words). This is needed if both l4_csum * and ipv6_ext are set + * @param e_tx_dest indicates if the packet is to be transmitted via + * loopback or to the network * @param first_frag * @param first_frag_len * @param cookie @@ -206,6 +214,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, u16 vlan, u8 bd_flags, u16 l4_hdr_offset_w, + enum qed_ll2_tx_dest e_tx_dest, enum qed_ll2_roce_flavor_type qed_roce_flavor, dma_addr_t first_frag, u16 first_frag_len, void *cookie, u8 notify_fw); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c418360ba02a..aeb98d8c5626 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -221,6 +221,10 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->fw_eng = FW_ENGINEERING_VERSION; dev_info->mf_mode = cdev->mf_mode; dev_info->tx_switching = true; + + if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support == + QED_WOL_SUPPORT_PME) + dev_info->wol_support = true; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, &dev_info->fw_minor, &dev_info->fw_rev, @@ -243,6 +247,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, &dev_info->mfw_rev, NULL); } + dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu; + return 0; } @@ -839,20 +845,19 @@ static void qed_update_pf_params(struct qed_dev *cdev, { int i; + if (IS_ENABLED(CONFIG_QED_RDMA)) { + params->rdma_pf_params.num_qps = QED_ROCE_QPS; + params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; + /* divide by 3 the MRs to avoid MF ILT overflow */ + params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; + params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; + } + for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; p_hwfn->pf_params = *params; } - - if (!IS_ENABLED(CONFIG_QED_RDMA)) - return; - - params->rdma_pf_params.num_qps = QED_ROCE_QPS; - params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; - /* divide by 3 the MRs to avoid MF ILT overflow */ - params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; - params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } static int qed_slowpath_start(struct qed_dev *cdev, @@ -1431,11 +1436,106 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) return status; } +static int qed_update_wol(struct qed_dev *cdev, bool enabled) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? 
QED_OV_WOL_ENABLED + : QED_OV_WOL_DISABLED); + if (rc) + goto out; + rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); + +out: + qed_ptt_release(hwfn, ptt); + return rc; +} + +static int qed_update_drv_state(struct qed_dev *cdev, bool active) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? + QED_OV_DRIVER_STATE_ACTIVE : + QED_OV_DRIVER_STATE_DISABLED); + + qed_ptt_release(hwfn, ptt); + + return status; +} + +static int qed_update_mac(struct qed_dev *cdev, u8 *mac) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_ov_update_mac(hwfn, ptt, mac); + if (status) + goto out; + + status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); + +out: + qed_ptt_release(hwfn, ptt); + return status; +} + +static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); + if (status) + goto out; + + status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); + +out: + qed_ptt_release(hwfn, ptt); + return status; +} + static struct qed_selftest_ops qed_selftest_ops_pass = { .selftest_memory = &qed_selftest_memory, .selftest_interrupt = &qed_selftest_interrupt, .selftest_register = &qed_selftest_register, .selftest_clock = &qed_selftest_clock, + .selftest_nvram = &qed_selftest_nvram, }; const struct qed_common_ops qed_common_ops_pass = { @@ -1465,6 +1565,10 @@ const struct qed_common_ops qed_common_ops_pass = { .get_coalesce = &qed_get_coalesce, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, + .update_drv_state = &qed_update_drv_state, + .update_mac = &qed_update_mac, + .update_mtu = &qed_update_mtu, + .update_wol = &qed_update_wol, }; void qed_get_protocol_stats(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index bdc9ba92f6d4..6dd3ce443484 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> +#include <linux/etherdevice.h> #include "qed.h" #include "qed_dcbx.h" #include "qed_hsi.h" @@ -329,6 +330,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_mcp_mb_params *p_mb_params) { u32 union_data_addr; + int rc; /* MCP not initialized */ @@ -374,11 +376,32 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param) { struct qed_mcp_mb_params mb_params; + union drv_union_data data_src; int rc; memset(&mb_params, 0, sizeof(mb_params)); + memset(&data_src, 0, sizeof(data_src)); mb_params.cmd = cmd; mb_params.param = param; + + /* In case of UNLOAD_DONE, set the primary MAC */ + if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) && + (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) { + u8 *p_mac = p_hwfn->cdev->wol_mac; + + data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1]; + data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 | + p_mac[4] << 8 | p_mac[5]; + + DP_VERBOSE(p_hwfn, + (QED_MSG_SP | NETIF_MSG_IFDOWN), + "Setting 
WoL MAC: %pM --> [%08x,%08x]\n", + p_mac, data_src.wol_mac.mac_upper, + data_src.wol_mac.mac_lower); + + mb_params.p_data_src = &data_src; + } + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; @@ -1001,28 +1024,89 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) return 0; } +/* Old MFW has a global configuration for all PFs regarding RDMA support */ +static void +qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn, + enum qed_pci_personality *p_proto) +{ + /* There wasn't ever a legacy MFW that published iwarp. + * So at this point, this is either plain l2 or RoCE. + */ + if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities)) + *p_proto = QED_PCI_ETH_ROCE; + else + *p_proto = QED_PCI_ETH; + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "According to Legacy capabilities, L2 personality is %08x\n", + (u32) *p_proto); } + +static int +qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_pci_personality *p_proto) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param); + if (rc) + return rc; + if (resp != FW_MSG_CODE_OK) { + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "MFW lacks support for command; Returns %08x\n", + resp); + return -EINVAL; + } + + switch (param) { + case FW_MB_PARAM_GET_PF_RDMA_NONE: + *p_proto = QED_PCI_ETH; + break; + case FW_MB_PARAM_GET_PF_RDMA_ROCE: + *p_proto = QED_PCI_ETH_ROCE; + break; + case FW_MB_PARAM_GET_PF_RDMA_BOTH: + DP_NOTICE(p_hwfn, + "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n"); + *p_proto = QED_PCI_ETH_ROCE; + break; + case FW_MB_PARAM_GET_PF_RDMA_IWARP: + default: + DP_NOTICE(p_hwfn, + "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n", + param); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, + NETIF_MSG_IFUP, + "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", + (u32) *p_proto, resp, param); + return 0; +} + static int qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, struct public_func *p_info, + struct qed_ptt *p_ptt, enum qed_pci_personality *p_proto) { int rc = 0; switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { case FUNC_MF_CFG_PROTOCOL_ETHERNET: - if (test_bit(QED_DEV_CAP_ROCE, - &p_hwfn->hw_info.device_capabilities)) - *p_proto = QED_PCI_ETH_ROCE; - else - *p_proto = QED_PCI_ETH; + if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) + qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); break; case FUNC_MF_CFG_PROTOCOL_ISCSI: *p_proto = QED_PCI_ISCSI; break; case FUNC_MF_CFG_PROTOCOL_ROCE: DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); - rc = -EINVAL; - break; + /* Fallthrough */ default: rc = -EINVAL; } @@ -1042,7 +1126,8 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, info->pause_on_host = (shmem_info.config & FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 
1 : 0; - if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) { + if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, + &info->protocol)) { DP_ERR(p_hwfn, "Unknown personality %08x\n", (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); return -EINVAL; @@ -1057,6 +1142,9 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, info->mac[3] = (u8)(shmem_info.mac_lower >> 16); info->mac[4] = (u8)(shmem_info.mac_lower >> 8); info->mac[5] = (u8)(shmem_info.mac_lower); + + /* Store primary MAC for later possible WoL */ + memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN); } else { DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n"); } @@ -1068,13 +1156,30 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); + info->mtu = (u16)shmem_info.mtu_size; + + p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE; + p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT; + if (qed_mcp_is_init(p_hwfn)) { + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_OS_WOL, 0, &resp, &param); + if (rc) + return rc; + if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED) + p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME; + } + DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), - "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n", + "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n", info->pause_on_host, info->protocol, info->bandwidth_min, info->bandwidth_max, info->mac[0], info->mac[1], info->mac[2], info->mac[3], info->mac[4], info->mac[5], - info->wwn_port, info->wwn_node, info->ovlan); + info->wwn_port, info->wwn_node, + info->ovlan, (u8)p_hwfn->hw_info.b_wol_support); return 0; } @@ -1223,6 +1328,178 @@ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? 
-EAGAIN : 0; } +int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_client client) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + int rc; + + switch (client) { + case QED_OV_CLIENT_DRV: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; + break; + case QED_OV_CLIENT_USER: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; + break; + case QED_OV_CLIENT_VENDOR_SPEC: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC; + break; + default: + DP_NOTICE(p_hwfn, "Invalid client type %d\n", client); + return -EINVAL; + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, + drv_mb_param, &resp, &param); + if (rc) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_driver_state drv_state) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + int rc; + + switch (drv_state) { + case QED_OV_DRIVER_STATE_NOT_LOADED: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; + break; + case QED_OV_DRIVER_STATE_DISABLED: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; + break; + case QED_OV_DRIVER_STATE_ACTIVE: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; + break; + default: + DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state); + return -EINVAL; + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, + drv_mb_param, &resp, &param); + if (rc) + DP_ERR(p_hwfn, "Failed to send driver state\n"); + + return rc; +} + +int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 mtu) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + int rc; + + drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT; + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU, + drv_mb_param, &resp, &param); + if (rc) + DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc); + + return rc; +} + +int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *mac) +{ + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_SET_VMAC; + mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC << + DRV_MSG_CODE_VMAC_TYPE_SHIFT; + mb_params.param |= MCP_PF_ID(p_hwfn); + ether_addr_copy(&union_data.raw_data[0], mac); + mb_params.p_data_src = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc); + + /* Store primary MAC for later possible WoL */ + memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN); + + return rc; +} + +int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_ov_wol wol) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + int rc; + + if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Can't change WoL configuration when WoL isn't supported\n"); + return -EINVAL; + } + + switch (wol) { + case QED_OV_WOL_DEFAULT: + drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT; + break; + case QED_OV_WOL_DISABLED: + drv_mb_param = DRV_MB_PARAM_WOL_DISABLED; + break; + case QED_OV_WOL_ENABLED: + drv_mb_param = DRV_MB_PARAM_WOL_ENABLED; + break; + default: + DP_ERR(p_hwfn, "Invalid wol state %d\n", wol); + return -EINVAL; + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL, + drv_mb_param, &resp, &param); + if (rc) + DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc); + + /* Store the WoL update for a future 
unload */ + p_hwfn->cdev->wol_config = (u8)wol; + + return rc; +} + +int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_eswitch eswitch) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + int rc; + + switch (eswitch) { + case QED_OV_ESWITCH_NONE: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE; + break; + case QED_OV_ESWITCH_VEB: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB; + break; + case QED_OV_ESWITCH_VEPA: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA; + break; + default: + DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch); + return -EINVAL; + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE, + drv_mb_param, &resp, &param); + if (rc) + DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc); + + return rc; +} + int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_led_mode mode) { @@ -1271,6 +1548,52 @@ int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, return rc; } +int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) +{ + u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0; + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + u32 resp = 0, resp_param = 0; + struct qed_ptt *p_ptt; + int rc = 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + while (bytes_left > 0) { + bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN); + + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NVM_READ_NVRAM, + addr + offset + + (bytes_to_copy << + DRV_MB_PARAM_NVM_LEN_SHIFT), + &resp, &resp_param, + &read_len, + (u32 *)(p_buf + offset)); + + if (rc || (resp != FW_MSG_CODE_NVM_OK)) { + DP_NOTICE(cdev, "MCP command rc = %d\n", rc); + break; + } + + /* This can be a lengthy process, and it's possible scheduler + * isn't preemptible. Sleep a bit to prevent CPU hogging. 
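+ * (The modulo check below fires whenever the read crosses a 4KB
+ * (0x1000) boundary of the remaining length, i.e. roughly one short
+ * sleep per page of NVM data read.)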
+ */ + if (bytes_left % 0x1000 < + (bytes_left - read_len) % 0x1000) + usleep_range(1000, 2000); + + offset += read_len; + bytes_left -= read_len; + } + + cdev->mcp_nvm_resp = resp; + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 drv_mb_param = 0, rsp, param; @@ -1312,3 +1635,101 @@ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return rc; } + +int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *num_images) +{ + u32 drv_mb_param = 0, rsp; + int rc = 0; + + drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, num_images); + if (rc) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) + rc = -EINVAL; + + return rc; +} + +int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct bist_nvm_image_att *p_image_att, + u32 image_index) +{ + u32 buf_size = 0, param, resp = 0, resp_param = 0; + int rc; + + param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT; + param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT; + + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_BIST_TEST, param, + &resp, &resp_param, + &buf_size, + (u32 *)p_image_att); + if (rc) + return rc; + + if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (p_image_att->return_code != 1)) + rc = -EINVAL; + + return rc; +} + +#define QED_RESC_ALLOC_VERSION_MAJOR 1 +#define QED_RESC_ALLOC_VERSION_MINOR 0 +#define QED_RESC_ALLOC_VERSION \ + ((QED_RESC_ALLOC_VERSION_MAJOR << \ + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \ + (QED_RESC_ALLOC_VERSION_MINOR << \ + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT)) +int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct resource_info *p_resc_info, + u32 *p_mcp_resp, u32 *p_mcp_param) +{ + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + memset(&union_data, 0, sizeof(union_data)); + mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; + mb_params.param = QED_RESC_ALLOC_VERSION; + + /* Need to have a sufficiently large struct, as the cmd_and_union + * is going to do memcpy from and to it. 
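+ * union drv_union_data is the full-sized mailbox payload; the
+ * resource_info struct travels through its 'resource' member in
+ * both directions.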
+ */ + memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info)); + + mb_params.p_data_src = &union_data; + mb_params.p_data_dst = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + /* Copy the data back */ + memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info)); + *p_mcp_resp = mb_params.mcp_resp; + *p_mcp_param = mb_params.mcp_param; + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n", + *p_mcp_param, + p_resc_info->res_id, + p_resc_info->size, + p_resc_info->offset, + p_resc_info->vf_size, + p_resc_info->vf_offset, p_resc_info->flags); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index dff520ed069b..407a2c1830fb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -92,6 +92,8 @@ struct qed_mcp_function_info { #define QED_MCP_VLAN_UNSET (0xffff) u16 ovlan; + + u16 mtu; }; struct qed_mcp_nvm_common { @@ -147,6 +149,30 @@ union qed_mcp_protocol_stats { struct qed_mcp_rdma_stats rdma_stats; }; +enum qed_ov_eswitch { + QED_OV_ESWITCH_NONE, + QED_OV_ESWITCH_VEB, + QED_OV_ESWITCH_VEPA +}; + +enum qed_ov_client { + QED_OV_CLIENT_DRV, + QED_OV_CLIENT_USER, + QED_OV_CLIENT_VENDOR_SPEC +}; + +enum qed_ov_driver_state { + QED_OV_DRIVER_STATE_NOT_LOADED, + QED_OV_DRIVER_STATE_DISABLED, + QED_OV_DRIVER_STATE_ACTIVE +}; + +enum qed_ov_wol { + QED_OV_WOL_DEFAULT, + QED_OV_WOL_DISABLED, + QED_OV_WOL_ENABLED +}; + /** * @brief - returns the link params of the hw function * @@ -278,6 +304,69 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_mcp_drv_version *p_ver); /** + * @brief Notify MFW about the change in base device properties + * + * @param p_hwfn + * @param p_ptt + * @param client - qed client type + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_client client); + +/** + * @brief Notify MFW about the driver state + * + * @param p_hwfn + * @param p_ptt + * @param drv_state - Driver state + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_driver_state drv_state); + +/** + * @brief Send MTU size to MFW + * + * @param p_hwfn + * @param p_ptt + * @param mtu - MTU size + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 mtu); + +/** + * @brief Send MAC address to MFW + * + * @param p_hwfn + * @param p_ptt + * @param mac - MAC address + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *mac); + +/** + * @brief Send WOL mode to MFW + * + * @param p_hwfn + * @param p_ptt + * @param wol - WOL mode + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_wol wol); + +/** * @brief Set LED status * * @param p_hwfn @@ -291,6 +380,18 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, enum qed_led_mode mode); /** + * @brief Read from nvm + * + * @param cdev + * @param addr - nvm offset + * @param p_buf - nvm read buffer + * @param len - buffer len + * + * @return int - 0 - operation was successful. 
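+ *
+ * Note: the read is issued in MCP_DRV_NVM_BUF_LEN-sized mailbox
+ * transactions and may sleep, so it must not be called from atomic
+ * context.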
+ */ +int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); + +/** * @brief Bist register test * * @param p_hwfn - hw function @@ -312,6 +413,35 @@ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Bist nvm test - get number of images + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param num_images - number of images if operation was + * successful. 0 if not. + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *num_images); + +/** + * @brief Bist nvm test - get image attributes by index + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param p_image_att - Attributes of image + * @param image_index - Index of image to get information for + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct bist_nvm_image_att *p_image_att, + u32 image_index); + /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn * TODO - this shouldn't really be in .h file, but until all fields @@ -546,4 +676,32 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 mask_parities); +/** + * @brief Send eswitch mode to MFW + * + * @param p_hwfn + * @param p_ptt + * @param eswitch - eswitch mode + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_eswitch eswitch); + +/** + * @brief - Gets the MFW allocation info for the given resource + * + * @param p_hwfn + * @param p_ptt + * @param p_resc_info - descriptor of requested resource + * @param p_mcp_resp + * @param p_mcp_param + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct resource_info *p_resc_info, + u32 *p_mcp_resp, u32 *p_mcp_param); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c new file mode 100644 index 000000000000..155abcb507fd --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -0,0 +1,501 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#include <linux/types.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_iscsi.h" +#include "qed_ll2.h" +#include "qed_ooo.h" + +static struct qed_ooo_archipelago +*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn, + struct qed_ooo_info + *p_ooo_info, + u32 cid) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + + list_for_each_entry(p_archipelago, + &p_ooo_info->archipelagos_list, list_entry) { + if (p_archipelago->cid == cid) + return p_archipelago; + } + + return NULL; +} + +static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 isle) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + struct qed_ooo_isle *p_isle = NULL; + u8 the_num_of_isle = 1; + + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + if (!p_archipelago) { + DP_NOTICE(p_hwfn, + "Connection %d is not found in OOO list\n", cid); + return NULL; + } + + list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) { + if (the_num_of_isle == isle) + return p_isle; + the_num_of_isle++; + } + + return NULL; +} + +void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct ooo_opaque *p_cqe) +{ + struct qed_ooo_history *p_history = &p_ooo_info->ooo_history; + + if (p_history->head_idx == p_history->num_of_cqes) + p_history->head_idx = 0; + p_history->p_cqes[p_history->head_idx] = *p_cqe; + p_history->head_idx++; +} + +struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_ooo_info *p_ooo_info; + u16 max_num_archipelagos = 0; + u16 max_num_isles = 0; + u32 i; + + if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) { + DP_NOTICE(p_hwfn, + "Failed to allocate qed_ooo_info: unknown personality\n"); + return NULL; + } + + max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons; + max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos; + + if (!max_num_archipelagos) { + DP_NOTICE(p_hwfn, + "Failed to allocate qed_ooo_info: unknown amount of connections\n"); + return NULL; + } + + p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL); + if (!p_ooo_info) + return NULL; + + INIT_LIST_HEAD(&p_ooo_info->free_buffers_list); + INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list); + INIT_LIST_HEAD(&p_ooo_info->free_isles_list); + INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list); + INIT_LIST_HEAD(&p_ooo_info->archipelagos_list); + + p_ooo_info->p_isles_mem = kcalloc(max_num_isles, + sizeof(struct qed_ooo_isle), + GFP_KERNEL); + if (!p_ooo_info->p_isles_mem) + goto no_isles_mem; + + for (i = 0; i < max_num_isles; i++) { + INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list); + list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry, + &p_ooo_info->free_isles_list); + } + + p_ooo_info->p_archipelagos_mem = + kcalloc(max_num_archipelagos, + sizeof(struct qed_ooo_archipelago), + GFP_KERNEL); + if (!p_ooo_info->p_archipelagos_mem) + goto no_archipelagos_mem; + + for (i = 0; i < max_num_archipelagos; i++) { + INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list); + list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry, + &p_ooo_info->free_archipelagos_list); + } + + p_ooo_info->ooo_history.p_cqes = + kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES, + sizeof(struct ooo_opaque), + GFP_KERNEL); + if (!p_ooo_info->ooo_history.p_cqes) + goto no_history_mem; + + return p_ooo_info; + +no_history_mem: + 
kfree(p_ooo_info->p_archipelagos_mem); +no_archipelagos_mem: + kfree(p_ooo_info->p_isles_mem); +no_isles_mem: + kfree(p_ooo_info); + return NULL; +} + +void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid) +{ + struct qed_ooo_archipelago *p_archipelago; + struct qed_ooo_buffer *p_buffer; + struct qed_ooo_isle *p_isle; + bool b_found = false; + + if (list_empty(&p_ooo_info->archipelagos_list)) + return; + + list_for_each_entry(p_archipelago, + &p_ooo_info->archipelagos_list, list_entry) { + if (p_archipelago->cid == cid) { + list_del(&p_archipelago->list_entry); + b_found = true; + break; + } + } + + if (!b_found) + return; + + while (!list_empty(&p_archipelago->isles_list)) { + p_isle = list_first_entry(&p_archipelago->isles_list, + struct qed_ooo_isle, list_entry); + + list_del(&p_isle->list_entry); + + while (!list_empty(&p_isle->buffers_list)) { + p_buffer = list_first_entry(&p_isle->buffers_list, + struct qed_ooo_buffer, + list_entry); + + if (!p_buffer) + break; + + list_del(&p_buffer->list_entry); + list_add_tail(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); + } + list_add_tail(&p_isle->list_entry, + &p_ooo_info->free_isles_list); + } + + list_add_tail(&p_archipelago->list_entry, + &p_ooo_info->free_archipelagos_list); +} + +void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_archipelago *p_arch; + struct qed_ooo_buffer *p_buffer; + struct qed_ooo_isle *p_isle; + + while (!list_empty(&p_ooo_info->archipelagos_list)) { + p_arch = list_first_entry(&p_ooo_info->archipelagos_list, + struct qed_ooo_archipelago, + list_entry); + + list_del(&p_arch->list_entry); + + while (!list_empty(&p_arch->isles_list)) { + p_isle = list_first_entry(&p_arch->isles_list, + struct qed_ooo_isle, + list_entry); + + list_del(&p_isle->list_entry); + + while (!list_empty(&p_isle->buffers_list)) { + p_buffer = + list_first_entry(&p_isle->buffers_list, + struct qed_ooo_buffer, + list_entry); + + if (!p_buffer) + break; + + list_del(&p_buffer->list_entry); + list_add_tail(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); + } + list_add_tail(&p_isle->list_entry, + &p_ooo_info->free_isles_list); + } + list_add_tail(&p_arch->list_entry, + &p_ooo_info->free_archipelagos_list); + } + if (!list_empty(&p_ooo_info->ready_buffers_list)) + list_splice_tail_init(&p_ooo_info->ready_buffers_list, + &p_ooo_info->free_buffers_list); +} + +void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) +{ + qed_ooo_release_all_isles(p_hwfn, p_ooo_info); + memset(p_ooo_info->ooo_history.p_cqes, 0, + p_ooo_info->ooo_history.num_of_cqes * + sizeof(struct ooo_opaque)); + p_ooo_info->ooo_history.head_idx = 0; +} + +void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_buffer *p_buffer; + + qed_ooo_release_all_isles(p_hwfn, p_ooo_info); + while (!list_empty(&p_ooo_info->free_buffers_list)) { + p_buffer = list_first_entry(&p_ooo_info->free_buffers_list, + struct qed_ooo_buffer, list_entry); + + if (!p_buffer) + break; + + list_del(&p_buffer->list_entry); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_buffer->rx_buffer_size, + p_buffer->rx_buffer_virt_addr, + p_buffer->rx_buffer_phys_addr); + kfree(p_buffer); + } + + kfree(p_ooo_info->p_isles_mem); + kfree(p_ooo_info->p_archipelagos_mem); + kfree(p_ooo_info->ooo_history.p_cqes); + kfree(p_ooo_info); +} + +void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct 
qed_ooo_buffer *p_buffer) +{ + list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list); +} + +struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_buffer *p_buffer = NULL; + + if (!list_empty(&p_ooo_info->free_buffers_list)) { + p_buffer = list_first_entry(&p_ooo_info->free_buffers_list, + struct qed_ooo_buffer, list_entry); + + list_del(&p_buffer->list_entry); + } + + return p_buffer; +} + +void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer, u8 on_tail) +{ + if (on_tail) + list_add_tail(&p_buffer->list_entry, + &p_ooo_info->ready_buffers_list); + else + list_add(&p_buffer->list_entry, + &p_ooo_info->ready_buffers_list); +} + +struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_buffer *p_buffer = NULL; + + if (!list_empty(&p_ooo_info->ready_buffers_list)) { + p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list, + struct qed_ooo_buffer, list_entry); + + list_del(&p_buffer->list_entry); + } + + return p_buffer; +} + +void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 drop_isle, u8 drop_size) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + struct qed_ooo_isle *p_isle = NULL; + u8 isle_idx; + + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + for (isle_idx = 0; isle_idx < drop_size; isle_idx++) { + p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle); + if (!p_isle) { + DP_NOTICE(p_hwfn, + "Isle %d is not found(cid %d)\n", + drop_isle, cid); + return; + } + if (list_empty(&p_isle->buffers_list)) + DP_NOTICE(p_hwfn, + "Isle %d is empty(cid %d)\n", drop_isle, cid); + else + list_splice_tail_init(&p_isle->buffers_list, + &p_ooo_info->free_buffers_list); + + list_del(&p_isle->list_entry); + p_ooo_info->cur_isles_number--; + list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list); + } + + if (list_empty(&p_archipelago->isles_list)) { + list_del(&p_archipelago->list_entry); + list_add(&p_archipelago->list_entry, + &p_ooo_info->free_archipelagos_list); + } +} + +void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 ooo_isle, + struct qed_ooo_buffer *p_buffer) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + struct qed_ooo_isle *p_prev_isle = NULL; + struct qed_ooo_isle *p_isle = NULL; + + if (ooo_isle > 1) { + p_prev_isle = qed_ooo_seek_isle(p_hwfn, + p_ooo_info, cid, ooo_isle - 1); + if (!p_prev_isle) { + DP_NOTICE(p_hwfn, + "Isle %d is not found(cid %d)\n", + ooo_isle - 1, cid); + return; + } + } + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + if (!p_archipelago && (ooo_isle != 1)) { + DP_NOTICE(p_hwfn, + "Connection %d is not found in OOO list\n", cid); + return; + } + + if (!list_empty(&p_ooo_info->free_isles_list)) { + p_isle = list_first_entry(&p_ooo_info->free_isles_list, + struct qed_ooo_isle, list_entry); + + list_del(&p_isle->list_entry); + if (!list_empty(&p_isle->buffers_list)) { + DP_NOTICE(p_hwfn, "Free isle is not empty\n"); + INIT_LIST_HEAD(&p_isle->buffers_list); + } + } else { + DP_NOTICE(p_hwfn, "No more free isles\n"); + return; + } + + if (!p_archipelago && + !list_empty(&p_ooo_info->free_archipelagos_list)) { + p_archipelago = + list_first_entry(&p_ooo_info->free_archipelagos_list, + struct qed_ooo_archipelago, list_entry); + + list_del(&p_archipelago->list_entry); + if 
(!list_empty(&p_archipelago->isles_list)) { + DP_NOTICE(p_hwfn, + "Free OOO connection is not empty\n"); + INIT_LIST_HEAD(&p_archipelago->isles_list); + } + p_archipelago->cid = cid; + list_add(&p_archipelago->list_entry, + &p_ooo_info->archipelagos_list); + } else if (!p_archipelago) { + DP_NOTICE(p_hwfn, "No more free OOO connections\n"); + list_add(&p_isle->list_entry, + &p_ooo_info->free_isles_list); + list_add(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); + return; + } + + list_add(&p_buffer->list_entry, &p_isle->buffers_list); + p_ooo_info->cur_isles_number++; + p_ooo_info->gen_isles_number++; + + if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number) + p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number; + + if (!p_prev_isle) + list_add(&p_isle->list_entry, &p_archipelago->isles_list); + else + list_add(&p_isle->list_entry, &p_prev_isle->list_entry); +} + +void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, + u8 ooo_isle, + struct qed_ooo_buffer *p_buffer, u8 buffer_side) +{ + struct qed_ooo_isle *p_isle = NULL; + + p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle); + if (!p_isle) { + DP_NOTICE(p_hwfn, + "Isle %d is not found(cid %d)\n", ooo_isle, cid); + return; + } + + if (buffer_side == QED_OOO_LEFT_BUF) + list_add(&p_buffer->list_entry, &p_isle->buffers_list); + else + list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list); +} + +void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + struct qed_ooo_isle *p_right_isle = NULL; + struct qed_ooo_isle *p_left_isle = NULL; + + p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, + left_isle + 1); + if (!p_right_isle) { + DP_NOTICE(p_hwfn, + "Right isle %d is not found(cid %d)\n", + left_isle + 1, cid); + return; + } + + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + list_del(&p_right_isle->list_entry); + p_ooo_info->cur_isles_number--; + if (left_isle) { + p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, + left_isle); + if (!p_left_isle) { + DP_NOTICE(p_hwfn, + "Left isle %d is not found(cid %d)\n", + left_isle, cid); + return; + } + list_splice_tail_init(&p_right_isle->buffers_list, + &p_left_isle->buffers_list); + } else { + list_splice_tail_init(&p_right_isle->buffers_list, + &p_ooo_info->ready_buffers_list); + if (list_empty(&p_archipelago->isles_list)) { + list_del(&p_archipelago->list_entry); + list_add(&p_archipelago->list_entry, + &p_ooo_info->free_archipelagos_list); + } + } + list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h new file mode 100644 index 000000000000..7a0670a9a074 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -0,0 +1,176 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef _QED_OOO_H +#define _QED_OOO_H +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include "qed.h" + +#define QED_MAX_NUM_ISLES 256 +#define QED_MAX_NUM_OOO_HISTORY_ENTRIES 512 + +#define QED_OOO_LEFT_BUF 0 +#define QED_OOO_RIGHT_BUF 1 + +struct qed_ooo_buffer { + struct list_head list_entry; + void *rx_buffer_virt_addr; + dma_addr_t rx_buffer_phys_addr; + u32 rx_buffer_size; + u16 packet_length; + u16 parse_flags; + u16 vlan; + u8 placement_offset; +}; + +struct qed_ooo_isle { + struct list_head list_entry; + struct list_head buffers_list; +}; + +struct qed_ooo_archipelago { + struct list_head list_entry; + struct list_head isles_list; + u32 cid; +}; + +struct qed_ooo_history { + struct ooo_opaque *p_cqes; + u32 head_idx; + u32 num_of_cqes; +}; + +struct qed_ooo_info { + struct list_head free_buffers_list; + struct list_head ready_buffers_list; + struct list_head free_isles_list; + struct list_head free_archipelagos_list; + struct list_head archipelagos_list; + struct qed_ooo_archipelago *p_archipelagos_mem; + struct qed_ooo_isle *p_isles_mem; + struct qed_ooo_history ooo_history; + u32 cur_isles_number; + u32 max_isles_number; + u32 gen_isles_number; +}; + +#if IS_ENABLED(CONFIG_QED_ISCSI) +void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct ooo_opaque *p_cqe); + +struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn); + +void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid); + +void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info); + +void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info); + +void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info); + +void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer); + +struct qed_ooo_buffer * +qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info); + +void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer, u8 on_tail); + +struct qed_ooo_buffer * +qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info); + +void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 drop_isle, u8 drop_size); + +void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, + u8 ooo_isle, struct qed_ooo_buffer *p_buffer); + +void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, + u8 ooo_isle, + struct qed_ooo_buffer *p_buffer, u8 buffer_side); + +void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid, + u8 left_isle); +#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ +static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct ooo_opaque *p_cqe) {} + +static inline struct qed_ooo_info *qed_ooo_alloc( + struct qed_hwfn *p_hwfn) { return NULL; } + +static inline void +qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid) {} + +static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) + {} + +static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) {} + +static inline void qed_ooo_free(struct qed_hwfn *p_hwfn, + struct 
qed_ooo_info *p_ooo_info) {} + +static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer) {} + +static inline struct qed_ooo_buffer * +qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) { return NULL; } + +static inline void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer, + u8 on_tail) {} + +static inline struct qed_ooo_buffer * +qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) { return NULL; } + +static inline void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 drop_isle, u8 drop_size) {} + +static inline void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 ooo_isle, + struct qed_ooo_buffer *p_buffer) {} + +static inline void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 ooo_isle, + struct qed_ooo_buffer *p_buffer, + u8 buffer_side) {} + +static inline void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid, + u8 left_isle) {} +#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index b414a0542177..97544205a8c1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -82,6 +82,8 @@ 0x1c80000UL #define BAR0_MAP_REG_XSDM_RAM \ 0x1e00000UL +#define BAR0_MAP_REG_YSDM_RAM \ + 0x1e80000UL #define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ 0x5011f4UL #define PRS_REG_SEARCH_TCP \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index f3a825a8f8d5..2a16547c8966 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -2658,7 +2658,6 @@ static int qed_roce_ll2_start(struct qed_dev *cdev, DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n"); return -ENOMEM; } - memset(roce_ll2, 0, sizeof(*roce_ll2)); roce_ll2->handle = QED_LL2_UNUSED_HANDLE; roce_ll2->cbs = params->cbs; roce_ll2->cb_cookie = params->cb_cookie; @@ -2772,6 +2771,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev, /* Tx header */ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle, 1 + pkt->n_seg, 0, flags, 0, + QED_LL2_TX_DEST_NW, qed_roce_flavor, pkt->header.baddr, pkt->header.len, pkt, 1); if (rc) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c index 9b7678f26909..48bfaecaf6dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -1,3 +1,4 @@ +#include <linux/crc32.h> #include "qed.h" #include "qed_dev_api.h" #include "qed_mcp.h" @@ -75,3 +76,103 @@ int qed_selftest_clock(struct qed_dev *cdev) return rc; } + +int qed_selftest_nvram(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u32 num_images, i, j, nvm_crc, calc_crc; + struct bist_nvm_image_att image_att; + u8 *buf = NULL; + __be32 val; + int rc; + + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + + /* Acquire from MFW the amount of available images */ + rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images); + if (rc || !num_images) { + 
DP_ERR(p_hwfn, "Failed getting number of images\n"); + return -EINVAL; + } + + /* Iterate over images and validate CRC */ + for (i = 0; i < num_images; i++) { + /* This mailbox returns information about the image required for + * reading it. + */ + rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt, + &image_att, i); + if (rc) { + DP_ERR(p_hwfn, + "Failed getting image index %d attributes\n", + i); + goto err0; + } + + /* After MFW crash dump is collected - the image's CRC stops + * being valid. + */ + if (image_att.image_type == NVM_TYPE_MDUMP) + continue; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", + i, image_att.len); + + /* Allocate a buffer for holding the nvram image */ + buf = kzalloc(image_att.len, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto err0; + } + + /* Read image into buffer */ + rc = qed_mcp_nvm_read(p_hwfn->cdev, image_att.nvm_start_addr, + buf, image_att.len); + if (rc) { + DP_ERR(p_hwfn, + "Failed reading image index %d from nvm.\n", i); + goto err1; + } + + /* Convert the buffer into big-endian format (excluding the + * closing 4 bytes of CRC). + */ + for (j = 0; j < image_att.len - 4; j += 4) { + val = cpu_to_be32(*(u32 *)&buf[j]); + *(u32 *)&buf[j] = (__force u32)val; + } + + /* Calc CRC for the "actual" image buffer, i.e. not including + * the last 4 CRC bytes. + */ + nvm_crc = *(u32 *)(buf + image_att.len - 4); + calc_crc = crc32(0xffffffff, buf, image_att.len - 4); + calc_crc = (__force u32)~cpu_to_be32(calc_crc); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "nvm crc 0x%x, calc_crc 0x%x\n", nvm_crc, calc_crc); + + if (calc_crc != nvm_crc) { + rc = -EINVAL; + goto err1; + } + + /* Done with this image; Free to prevent double release + * on subsequent failure. + */ + kfree(buf); + buf = NULL; + } + + qed_ptt_release(p_hwfn, p_ptt); + return 0; + +err1: + kfree(buf); +err0: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index 50eb0b49950f..739ddb730967 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -37,4 +37,14 @@ int qed_selftest_register(struct qed_dev *cdev); * @return int */ int qed_selftest_clock(struct qed_dev *cdev); + +/** + * @brief qed_selftest_nvram - Perform nvram test + * + * @param cdev + * + * @return int + */ +int qed_selftest_nvram(struct qed_dev *cdev); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index b2c08e4d2a9b..9c897bc68d05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -110,8 +110,8 @@ union qed_spq_req_comp { }; struct qed_spq_comp_done { - u64 done; - u8 fw_return_code; + unsigned int done; + u8 fw_return_code; }; struct qed_spq_entry { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 2888eb0628f8..a39ef2e7a9a6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -347,11 +347,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, - p_hwfn->p_eq->chain.pbl.p_phys_table); + p_hwfn->p_eq->chain.pbl_sp.p_phys_table); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); p_ramrod->event_ring_num_pages = page_cnt; DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, - p_hwfn->p_consq->chain.pbl.p_phys_table); + 
p_hwfn->p_consq->chain.pbl_sp.p_phys_table); qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); @@ -369,7 +369,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; break; default: - DP_NOTICE(p_hwfn, "Unkown personality %d\n", + DP_NOTICE(p_hwfn, "Unknown personality %d\n", p_hwfn->hw_info.personality); p_ramrod->personality = PERSONALITY_ETH; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 9fbaf9429fd0..f022469bdcf8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -24,7 +24,9 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_iscsi.h" #include "qed_mcp.h" +#include "qed_ooo.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" @@ -35,7 +37,11 @@ ***************************************************************************/ #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) -#define SPQ_BLOCK_SLEEP_LENGTH (1000) + +#define SPQ_BLOCK_DELAY_MAX_ITER (10) +#define SPQ_BLOCK_DELAY_US (10) +#define SPQ_BLOCK_SLEEP_MAX_ITER (1000) +#define SPQ_BLOCK_SLEEP_MS (5) /*************************************************************************** * Blocking Imp. (BLOCK/EBLOCK mode) @@ -48,60 +54,88 @@ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, comp_done = (struct qed_spq_comp_done *)cookie; - comp_done->done = 0x1; - comp_done->fw_return_code = fw_return_code; + comp_done->fw_return_code = fw_return_code; - /* make update visible to waiting thread */ - smp_wmb(); + /* Make sure completion done is visible on waiting thread */ + smp_store_release(&comp_done->done, 0x1); } -static int qed_spq_block(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent, - u8 *p_fw_ret) +static int __qed_spq_block(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *p_fw_ret, bool sleep_between_iter) { - int sleep_count = SPQ_BLOCK_SLEEP_LENGTH; struct qed_spq_comp_done *comp_done; - int rc; + u32 iter_cnt; comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; - while (sleep_count) { - /* validate we receive completion update */ - smp_rmb(); - if (comp_done->done == 1) { + iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER + : SPQ_BLOCK_DELAY_MAX_ITER; + + while (iter_cnt--) { + /* Validate we receive completion update */ + if (READ_ONCE(comp_done->done) == 1) { + /* Read updated FW return value */ + smp_read_barrier_depends(); if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; return 0; } - usleep_range(5000, 10000); - sleep_count--; + + if (sleep_between_iter) + msleep(SPQ_BLOCK_SLEEP_MS); + else + udelay(SPQ_BLOCK_DELAY_US); + } + + return -EBUSY; +} + +static int qed_spq_block(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *p_fw_ret, bool skip_quick_poll) +{ + struct qed_spq_comp_done *comp_done; + int rc; + + /* A relatively short polling period w/o sleeping, to allow the FW to + * complete the ramrod and thus possibly to avoid the following sleeps. 
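+ * The quick poll busy-waits for at most SPQ_BLOCK_DELAY_MAX_ITER
+ * iterations of SPQ_BLOCK_DELAY_US each (~100us in total) before
+ * falling back to msleep()-based polling.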
+ */ + if (!skip_quick_poll) { + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false); + if (!rc) + return 0; } + /* Move to polling with a sleeping period between iterations */ + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); + if (!rc) + return 0; + DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt); - if (rc != 0) + if (rc) { DP_NOTICE(p_hwfn, "MCP drain failed\n"); + goto err; + } /* Retry after drain */ - sleep_count = SPQ_BLOCK_SLEEP_LENGTH; - while (sleep_count) { - /* validate we receive completion update */ - smp_rmb(); - if (comp_done->done == 1) { - if (p_fw_ret) - *p_fw_ret = comp_done->fw_return_code; - return 0; - } - usleep_range(5000, 10000); - sleep_count--; - } + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); + if (!rc) + return 0; + comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; if (comp_done->done == 1) { if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; return 0; } - - DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n"); +err: + DP_NOTICE(p_hwfn, + "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", + le32_to_cpu(p_ent->elem.hdr.cid), + p_ent->elem.hdr.cmd_id, + p_ent->elem.hdr.protocol_id, + le16_to_cpu(p_ent->elem.hdr.echo)); return -EBUSY; } @@ -245,6 +279,28 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, return qed_sriov_eqe_event(p_hwfn, p_eqe->opcode, p_eqe->echo, &p_eqe->data); + case PROTOCOLID_ISCSI: + if (!IS_ENABLED(CONFIG_QED_ISCSI)) + return -EINVAL; + if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) { + u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid); + + qed_ooo_release_connection_isles(p_hwfn, + p_hwfn->p_ooo_info, + cid); + return 0; + } + + if (p_hwfn->p_iscsi_info->event_cb) { + struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; + + return p_iscsi->event_cb(p_iscsi->event_context, + p_eqe->opcode, &p_eqe->data); + } else { + DP_NOTICE(p_hwfn, + "iSCSI async completion is not set\n"); + return -EINVAL; + } default: DP_NOTICE(p_hwfn, "Unknown Async completion for protocol: %d\n", @@ -725,7 +781,8 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, * access p_ent here to see whether it's successful or not. * Thus, after gaining the answer perform the cleanup here. 
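 * Entries taken from the unlimited-pending list skip the quick poll,
 * as they were queued behind other pending ramrods and an immediate
 * completion is unlikely.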
*/ - rc = qed_spq_block(p_hwfn, p_ent, fw_return_code); + rc = qed_spq_block(p_hwfn, p_ent, fw_return_code, + p_ent->queue == &p_spq->unlimited_pending); if (p_ent->queue == &p_spq->unlimited_pending) { /* This is an allocated p_ent which does not need to diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index d2d6621fe0e5..85b09dd1787a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -109,7 +109,8 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, } static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, - int rel_vf_id, bool b_enabled_only) + int rel_vf_id, + bool b_enabled_only, bool b_non_malicious) { if (!p_hwfn->pf_iov_info) { DP_NOTICE(p_hwfn->cdev, "No iov info\n"); @@ -124,6 +125,10 @@ static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, b_enabled_only) return false; + if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && + b_non_malicious) + return false; + return true; } @@ -138,7 +143,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, return NULL; } - if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only)) + if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, + b_enabled_only, false)) vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; else DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", @@ -542,7 +548,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) return 0; } -static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) +bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, + int vfid, bool b_fail_malicious) { /* Check PF supports sriov */ if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || @@ -550,12 +557,17 @@ static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) return false; /* Check VF validity */ - if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true)) + if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious)) return false; return true; } +bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) +{ + return _qed_iov_pf_sanity_check(p_hwfn, vfid, true); +} + static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, u16 rel_vf_id, u8 to_disable) { @@ -652,6 +664,9 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + /* It's possible VF was previously considered malicious */ + vf->b_malicious = false; + rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); if (rc) return rc; @@ -793,37 +808,70 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 rel_vf_id, u16 num_rx_queues) + struct qed_iov_vf_init_params *p_params) { u8 num_of_vf_avaiable_chains = 0; struct qed_vf_info *vf = NULL; + u16 qid, num_irqs; int rc = 0; u32 cids; u8 i; - vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); + vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); if (!vf) { DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); return -EINVAL; } if (vf->b_init) { - DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id); + DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", + p_params->rel_vf_id); return -EINVAL; } + /* Perform sanity checking on the requested queue_id */ + for (i = 0; i < p_params->num_queues; i++) { + u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE); + u16 max_vf_qzone = min_vf_qzone + + FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1; + + qid = p_params->req_rx_queue[i]; + if 
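
Note the new caller-side decision above: entries from the unlimited-pending queue skip the quick poll, since they were queued behind others and are unlikely to complete within the busy-wait window. Separately, qed_iov_is_valid_vfid() gains a b_non_malicious filter, so each call site chooses whether a VF flagged as malicious still counts as valid, and qed_iov_pf_sanity_check() becomes a thin wrapper that always filters malicious VFs. A small sketch of the layered predicate (the struct fields and counts are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define NUM_VFS 8

struct vf { bool enabled; bool malicious; };
static struct vf vfs[NUM_VFS];

static bool vfid_is_valid(int id, bool enabled_only, bool non_malicious)
{
	if (id < 0 || id >= NUM_VFS)
		return false;
	if (enabled_only && !vfs[id].enabled)
		return false;
	if (non_malicious && vfs[id].malicious)
		return false;
	return true;
}

/* Strict variant used on most PF control paths. */
static bool pf_sanity_check(int id)
{
	return vfid_is_valid(id, true, true);
}

int main(void)
{
	vfs[0].enabled = true;
	vfs[0].malicious = true;
	printf("vf0 valid: %d, passes sanity: %d\n",
	       vfid_is_valid(0, true, false), pf_sanity_check(0));
	return 0;
}
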
(qid < min_vf_qzone || qid > max_vf_qzone) { + DP_NOTICE(p_hwfn, + "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n", + qid, + p_params->rel_vf_id, + min_vf_qzone, max_vf_qzone); + return -EINVAL; + } + + qid = p_params->req_tx_queue[i]; + if (qid > max_vf_qzone) { + DP_NOTICE(p_hwfn, + "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n", + qid, p_params->rel_vf_id, max_vf_qzone); + return -EINVAL; + } + + /* If client *really* wants, Tx qid can be shared with PF */ + if (qid < min_vf_qzone) + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n", + p_params->rel_vf_id, qid, i); + } + /* Limit number of queues according to number of CIDs */ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", - vf->relative_vf_id, num_rx_queues, (u16) cids); - num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids)); + vf->relative_vf_id, p_params->num_queues, (u16)cids); + num_irqs = min_t(u16, p_params->num_queues, ((u16)cids)); num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, p_ptt, - vf, - num_rx_queues); + vf, num_irqs); if (!num_of_vf_avaiable_chains) { DP_ERR(p_hwfn, "no available igu sbs\n"); return -ENOMEM; @@ -834,25 +882,22 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, vf->num_txqs = num_of_vf_avaiable_chains; for (i = 0; i < vf->num_rxqs; i++) { - u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn, - vf->igu_sbs[i]); + struct qed_vf_q_info *p_queue = &vf->vf_queues[i]; - if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) { - DP_NOTICE(p_hwfn, - "VF[%d] will require utilizing of out-of-bounds queues - %04x\n", - vf->relative_vf_id, queue_id); - return -EINVAL; - } + p_queue->fw_rx_qid = p_params->req_rx_queue[i]; + p_queue->fw_tx_qid = p_params->req_tx_queue[i]; /* CIDs are per-VF, so no problem having them 0-based. 
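
The sanity loop above is asymmetric on purpose: Rx qids must fall inside the VF zone [min_vf_qzone, max_vf_qzone], i.e. after the PF's QED_PF_L2_QUE block, while Tx qids may dip below the VF zone to share a PF queue zone, but may never exceed its top. A sketch of that check with invented feature counts:

#include <errno.h>
#include <stdio.h>

#define PF_L2_QUE 16	/* invented: queue zones owned by the PF */
#define VF_L2_QUE 64	/* invented: queue zones available to VFs */

static int check_queues(const unsigned short *rx, const unsigned short *tx,
			int n)
{
	const unsigned short min_vf = PF_L2_QUE;
	const unsigned short max_vf = PF_L2_QUE + VF_L2_QUE - 1;

	for (int i = 0; i < n; i++) {
		if (rx[i] < min_vf || rx[i] > max_vf)
			return -EINVAL;	/* Rx must live in the VF zone */
		if (tx[i] > max_vf)
			return -EINVAL;	/* Tx may never exceed the VF zone */
		if (tx[i] < min_vf)	/* Tx below it is allowed: PF sharing */
			printf("Tx qid %d shared with a PF zone\n", (int)tx[i]);
	}
	return 0;
}

int main(void)
{
	unsigned short rx[2] = { 16, 17 }, tx[2] = { 3, 17 };

	return check_queues(rx, tx, 2) ? 1 : 0;
}
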
*/ - vf->vf_queues[i].fw_rx_qid = queue_id; - vf->vf_queues[i].fw_tx_qid = queue_id; - vf->vf_queues[i].fw_cid = i; + p_queue->fw_cid = i; DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n", - vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i); + "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n", + vf->relative_vf_id, + i, vf->igu_sbs[i], + p_queue->fw_rx_qid, + p_queue->fw_tx_qid, p_queue->fw_cid); } + rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); if (!rc) { vf->b_init = true; @@ -1172,8 +1217,19 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, p_vf->num_active_rxqs = 0; - for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) - p_vf->vf_queues[i].rxq_active = 0; + for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { + struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i]; + + if (p_queue->p_rx_cid) { + qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid); + p_queue->p_rx_cid = NULL; + } + + if (p_queue->p_tx_cid) { + qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid); + p_queue->p_tx_cid = NULL; + } + } memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); @@ -1579,21 +1635,21 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, /* Update all the Rx queues */ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { - u16 qid; + struct qed_queue_cid *p_cid; - if (!p_vf->vf_queues[i].rxq_active) + p_cid = p_vf->vf_queues[i].p_rx_cid; + if (!p_cid) continue; - qid = p_vf->vf_queues[i].fw_rx_qid; - - rc = qed_sp_eth_rx_queues_update(p_hwfn, qid, + rc = qed_sp_eth_rx_queues_update(p_hwfn, + (void **)&p_cid, 1, 0, 1, QED_SPQ_MODE_EBLOCK, NULL); if (rc) { DP_NOTICE(p_hwfn, "Failed to send Rx update fo queue[0x%04x]\n", - qid); + p_cid->rel.queue_id); return rc; } } @@ -1767,23 +1823,34 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, struct qed_queue_start_common_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_NO_RESOURCE; + struct qed_vf_q_info *p_queue; struct vfpf_start_rxq_tlv *req; bool b_legacy_vf = false; int rc; - memset(¶ms, 0, sizeof(params)); req = &mbx->req_virt->start_rxq; if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) || !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; - params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid; - params.vf_qid = req->rx_qid; + /* Acquire a new queue-cid */ + p_queue = &vf->vf_queues[req->rx_qid]; + + memset(¶ms, 0, sizeof(params)); + params.queue_id = p_queue->fw_rx_qid; params.vport_id = vf->vport_id; + params.stats_id = vf->abs_vf_id + 0x10; params.sb = req->hw_sb; params.sb_idx = req->sb_index; + p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn, + vf->opaque_fid, + p_queue->fw_cid, + req->rx_qid, ¶ms); + if (!p_queue->p_rx_cid) + goto out; + /* Legacy VFs have their Producers in a different location, which they * calculate on their own and clean the producer prior to this. 
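
The rxq_active/txq_active booleans are gone: a VF queue is now "active" exactly when its qed_queue_cid pointer is non-NULL, and cleanup both releases the handle and NULLs the pointer, so the state flag and the underlying resource can no longer disagree. A generic sketch of this pointer-as-state pattern (types are illustrative):

#include <stdlib.h>

struct cid { int id; };
struct q_info { struct cid *rx; struct cid *tx; };

static struct cid *cid_acquire(int id)
{
	struct cid *c = malloc(sizeof(*c));

	if (c)
		c->id = id;
	return c;
}

static void cid_release(struct cid *c)
{
	free(c);
}

static void vf_cleanup(struct q_info *q, int n)
{
	for (int i = 0; i < n; i++) {
		if (q[i].rx) {
			cid_release(q[i].rx);
			q[i].rx = NULL;	/* NULL doubles as "inactive" */
		}
		if (q[i].tx) {
			cid_release(q[i].tx);
			q[i].tx = NULL;
		}
	}
}

int main(void)
{
	struct q_info qs[2] = { { NULL, NULL } };

	qs[0].rx = cid_acquire(1);
	vf_cleanup(qs, 2);
	return 0;
}
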
*/ @@ -1796,21 +1863,19 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), 0); } + p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf; - rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid, - vf->vf_queues[req->rx_qid].fw_cid, - ¶ms, - vf->abs_vf_id + 0x10, - req->bd_max_bytes, - req->rxq_addr, - req->cqe_pbl_addr, req->cqe_pbl_size, - b_legacy_vf); - + rc = qed_eth_rxq_start_ramrod(p_hwfn, + p_queue->p_rx_cid, + req->bd_max_bytes, + req->rxq_addr, + req->cqe_pbl_addr, req->cqe_pbl_size); if (rc) { status = PFVF_STATUS_FAILURE; + qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid); + p_queue->p_rx_cid = NULL; } else { status = PFVF_STATUS_SUCCESS; - vf->vf_queues[req->rx_qid].rxq_active = true; vf->num_active_rxqs++; } @@ -1867,7 +1932,9 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, u8 status = PFVF_STATUS_NO_RESOURCE; union qed_qm_pq_params pq_params; struct vfpf_start_txq_tlv *req; + struct qed_vf_q_info *p_queue; int rc; + u16 pq; /* Prepare the parameters which would choose the right PQ */ memset(&pq_params, 0, sizeof(pq_params)); @@ -1881,24 +1948,31 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; - params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid; + /* Acquire a new queue-cid */ + p_queue = &vf->vf_queues[req->tx_qid]; + + params.queue_id = p_queue->fw_tx_qid; params.vport_id = vf->vport_id; + params.stats_id = vf->abs_vf_id + 0x10; params.sb = req->hw_sb; params.sb_idx = req->sb_index; - rc = qed_sp_eth_txq_start_ramrod(p_hwfn, - vf->opaque_fid, - vf->vf_queues[req->tx_qid].fw_cid, - ¶ms, - vf->abs_vf_id + 0x10, - req->pbl_addr, - req->pbl_size, &pq_params); + p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn, + vf->opaque_fid, + p_queue->fw_cid, + req->tx_qid, ¶ms); + if (!p_queue->p_tx_cid) + goto out; + pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params); + rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid, + req->pbl_addr, req->pbl_size, pq); if (rc) { status = PFVF_STATUS_FAILURE; + qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid); + p_queue->p_tx_cid = NULL; } else { status = PFVF_STATUS_SUCCESS; - vf->vf_queues[req->tx_qid].txq_active = true; } out: @@ -1909,6 +1983,7 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, u16 rxq_id, u8 num_rxqs, bool cqe_completion) { + struct qed_vf_q_info *p_queue; int rc = 0; int qid; @@ -1916,16 +1991,18 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, return -EINVAL; for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) { - if (vf->vf_queues[qid].rxq_active) { - rc = qed_sp_eth_rx_queue_stop(p_hwfn, - vf->vf_queues[qid]. - fw_rx_qid, false, - cqe_completion); + p_queue = &vf->vf_queues[qid]; - if (rc) - return rc; - } - vf->vf_queues[qid].rxq_active = false; + if (!p_queue->p_rx_cid) + continue; + + rc = qed_eth_rx_queue_stop(p_hwfn, + p_queue->p_rx_cid, + false, cqe_completion); + if (rc) + return rc; + + vf->vf_queues[qid].p_rx_cid = NULL; vf->num_active_rxqs--; } @@ -1936,22 +2013,24 @@ static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, u16 txq_id, u8 num_txqs) { int rc = 0; + struct qed_vf_q_info *p_queue; int qid; if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues)) return -EINVAL; for (qid = txq_id; qid < txq_id + num_txqs; qid++) { - if (vf->vf_queues[qid].txq_active) { - rc = qed_sp_eth_tx_queue_stop(p_hwfn, - vf->vf_queues[qid]. 
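
Both mbx_start_rxq and mbx_start_txq now follow the same shape: acquire the queue-cid, attempt the start ramrod, and on failure release the cid and NULL the pointer before reporting PFVF_STATUS_FAILURE, so a failed start leaves no half-claimed state behind. A condensed userspace sketch of that acquire-try-rollback flow (ramrod_start() is a hypothetical placeholder for the ramrod call):

#include <errno.h>
#include <stdlib.h>

struct cid { int id; };
struct q_info { struct cid *rx; };

static struct cid *cid_acquire(int id)
{
	struct cid *c = malloc(sizeof(*c));

	if (c)
		c->id = id;
	return c;
}

static void cid_release(struct cid *c)
{
	free(c);
}

static int ramrod_start(struct cid *c)
{
	(void)c;
	return -EIO;	/* placeholder; pretend the ramrod failed */
}

static int start_queue(struct q_info *q, int qid)
{
	int rc;

	q->rx = cid_acquire(qid);
	if (!q->rx)
		return -ENOMEM;

	rc = ramrod_start(q->rx);
	if (rc) {
		/* roll back: a failed start leaves no half-claimed state */
		cid_release(q->rx);
		q->rx = NULL;
		return rc;
	}
	return 0;
}

int main(void)
{
	struct q_info q = { NULL };

	return start_queue(&q, 5) ? 1 : 0;
}
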
- fw_tx_qid); + p_queue = &vf->vf_queues[qid]; + if (!p_queue->p_tx_cid) + continue; - if (rc) - return rc; - } - vf->vf_queues[qid].txq_active = false; + rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid); + if (rc) + return rc; + + p_queue->p_tx_cid = NULL; } + return rc; } @@ -2006,10 +2085,11 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { + struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF]; u16 length = sizeof(struct pfvf_def_resp_tlv); struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct vfpf_update_rxq_tlv *req; - u8 status = PFVF_STATUS_SUCCESS; + u8 status = PFVF_STATUS_FAILURE; u8 complete_event_flg; u8 complete_cqe_flg; u16 qid; @@ -2020,29 +2100,36 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); + /* Validate inputs */ + if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF || + !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) { + DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", + vf->relative_vf_id, req->rx_qid, req->num_rxqs); + goto out; + } + for (i = 0; i < req->num_rxqs; i++) { qid = req->rx_qid + i; - - if (!vf->vf_queues[qid].rxq_active) { - DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n", - qid); - status = PFVF_STATUS_FAILURE; - break; + if (!vf->vf_queues[qid].p_rx_cid) { + DP_INFO(p_hwfn, + "VF[%d] rx_qid = %d isn`t active!\n", + vf->relative_vf_id, qid); + goto out; } - rc = qed_sp_eth_rx_queues_update(p_hwfn, - vf->vf_queues[qid].fw_rx_qid, - 1, - complete_cqe_flg, - complete_event_flg, - QED_SPQ_MODE_EBLOCK, NULL); - - if (rc) { - status = PFVF_STATUS_FAILURE; - break; - } + handlers[i] = vf->vf_queues[qid].p_rx_cid; } + rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers, + req->num_rxqs, + complete_cqe_flg, + complete_event_flg, + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) + goto out; + + status = PFVF_STATUS_SUCCESS; +out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, length, status); } @@ -2253,7 +2340,7 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "rss_ind_table[%d] = %d, rxq is out of range\n", i, q_idx); - else if (!vf->vf_queues[q_idx].rxq_active) + else if (!vf->vf_queues[q_idx].p_rx_cid) DP_NOTICE(p_hwfn, "rss_ind_table[%d] = %d, rxq is not active\n", i, q_idx); @@ -2804,6 +2891,13 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, return rc; } + /* Workaround to make VF-PF channel ready, as FW + * doesn't do that as a part of FLR. + */ + REG_WR(p_hwfn, + GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); + /* VF_STOPPED has to be set only after final cleanup * but prior to re-enabling the VF. 
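
qed_iov_vf_mbx_update_rxqs() used to issue one ramrod per queue inside its loop; it now validates the whole [rx_qid, rx_qid + num_rxqs) range up front, gathers the active cid handles into a local array, and sends a single qed_sp_eth_rx_queues_update() for the batch. A sketch of the validate-gather-batch pattern (rx_queues_update() is a hypothetical placeholder):

#include <errno.h>
#include <stdio.h>

#define MAX_CHAINS 16

struct q_info { void *rx; };

/* placeholder for qed_sp_eth_rx_queues_update(): one ramrod per batch */
static int rx_queues_update(void **handlers, int count)
{
	(void)handlers;
	printf("updating %d queues in one ramrod\n", count);
	return 0;
}

static int update_rxqs(struct q_info *qs, int total, int first, int count)
{
	void *handlers[MAX_CHAINS];

	/* Validate the whole range before touching anything. */
	if (first + count > total || count > MAX_CHAINS)
		return -EINVAL;

	for (int i = 0; i < count; i++) {
		if (!qs[first + i].rx)
			return -EINVAL;	/* inactive queue inside the range */
		handlers[i] = qs[first + i].rx;
	}

	/* One batched call instead of one per queue. */
	return rx_queues_update(handlers, count);
}

int main(void)
{
	int dummy[3];
	struct q_info qs[4] = {
		{ &dummy[0] }, { &dummy[1] }, { &dummy[2] }, { NULL }
	};

	return update_rxqs(qs, 4, 0, 3) ? 1 : 0;
}
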
*/ @@ -2942,7 +3036,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, mbx->first_tlv = mbx->req_virt->first_tlv; /* check if tlv type is known */ - if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { + if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && + !p_vf->b_malicious) { switch (mbx->first_tlv.tl.type) { case CHANNEL_TLV_ACQUIRE: qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); @@ -2984,6 +3079,15 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); break; } + } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", + p_vf->abs_vf_id, mbx->first_tlv.tl.type); + + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + mbx->first_tlv.tl.type, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_MALICIOUS); } else { /* unknown TLV - this may belong to a VF driver from the future * - a version written after this PF driver was written, which @@ -3033,20 +3137,30 @@ static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn, memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); } -static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, - u16 abs_vfid, struct regpair *vf_msg) +static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, + u16 abs_vfid) { - u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; - struct qed_vf_info *p_vf; + u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; - if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) { + if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n", + "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n", abs_vfid); - return 0; + return NULL; } - p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; + + return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; +} + +static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, + u16 abs_vfid, struct regpair *vf_msg) +{ + struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, + abs_vfid); + + if (!p_vf) + return 0; /* List the physical address of the request so that handler * could later on copy the message from it. 
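
The mailbox dispatcher above now has three outcomes instead of two: a known TLV from a clean VF is handled, a known TLV from a VF flagged malicious gets only a default response carrying the new PFVF_STATUS_MALICIOUS code, and an unknown TLV keeps the existing forward-compatibility path. A sketch of that three-way gate (the supported range is invented):

#include <stdbool.h>
#include <stdio.h>

enum status { ST_OK, ST_MALICIOUS, ST_NOT_SUPPORTED };

struct vf { bool malicious; };

static bool tlv_supported(int tlv)
{
	return tlv >= 1 && tlv <= 10;	/* invented range of known TLVs */
}

static enum status handle_tlv(const struct vf *v, int tlv)
{
	if (tlv_supported(tlv) && !v->malicious) {
		/* dispatch to the real per-TLV handler here */
		return ST_OK;
	}
	if (tlv_supported(tlv))
		return ST_MALICIOUS;	/* known TLV, quarantined sender */
	return ST_NOT_SUPPORTED;	/* unknown TLV: maybe a newer VF driver */
}

int main(void)
{
	struct vf v = { .malicious = true };

	printf("status = %d\n", handle_tlv(&v, 2));
	return 0;
}
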
@@ -3060,6 +3174,23 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, return 0; } +static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct malicious_vf_eqe_data *p_data) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); + + if (!p_vf) + return; + + DP_INFO(p_hwfn, + "VF [%d] - Malicious behavior [%02x]\n", + p_vf->abs_vf_id, p_data->err_id); + + p_vf->b_malicious = true; +} + int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data) { @@ -3067,6 +3198,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, case COMMON_EVENT_VF_PF_CHANNEL: return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), &data->vf_pf_channel.msg_addr); + case COMMON_EVENT_MALICIOUS_VF: + qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); + return 0; default: DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", opcode); @@ -3083,7 +3217,7 @@ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) goto out; for (i = rel_vf_id; i < p_iov->total_vfs; i++) - if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true)) + if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false)) return i; out: @@ -3130,6 +3264,12 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, return; } + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->cdev, + "Can't set forced MAC to malicious VF [%d]\n", vfid); + return; + } + feature = 1 << MAC_ADDR_FORCED; memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); @@ -3153,6 +3293,12 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, return; } + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->cdev, + "Can't set forced vlan to malicious VF [%d]\n", vfid); + return; + } + feature = 1 << VLAN_ADDR_FORCED; vf_info->bulletin.p_virt->pvid = pvid; if (pvid) @@ -3367,7 +3513,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) qed_for_each_vf(hwfn, j) { int k; - if (!qed_iov_is_valid_vfid(hwfn, j, true)) + if (!qed_iov_is_valid_vfid(hwfn, j, true, false)) continue; /* Wait until VF is disabled before releasing */ @@ -3394,9 +3540,28 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) return 0; } +static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, + u16 vfid, + struct qed_iov_vf_init_params *params) +{ + u16 base, i; + + /* Since we have an equal resource distribution per-VF, and we assume + * PF has acquired the QED_PF_L2_QUE first queues, we start setting + * sequentially from there. 
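
The EQE paths are refactored around qed_sriov_get_vf_from_absid(), which converts an engine-absolute VF id into a PF-relative array index (subtracting first_vf_in_pf) and returns NULL when the non-strict sanity check fails; the new COMMON_EVENT_MALICIOUS_VF handler uses it to latch b_malicious. A sketch of the conversion and latch (the base and count are invented):

#include <stdbool.h>
#include <stdio.h>

#define FIRST_VF_IN_PF 8	/* invented engine-absolute base */
#define NUM_VFS 4

struct vf { bool malicious; };
static struct vf vfs[NUM_VFS];

static struct vf *vf_from_absid(unsigned int abs_id)
{
	if (abs_id < FIRST_VF_IN_PF || abs_id - FIRST_VF_IN_PF >= NUM_VFS)
		return NULL;	/* indication this PF cannot handle */
	return &vfs[abs_id - FIRST_VF_IN_PF];
}

static void mark_malicious(unsigned int abs_id, int err_id)
{
	struct vf *v = vf_from_absid(abs_id);

	if (!v)
		return;
	printf("VF [abs %u] - malicious behavior [%02x]\n", abs_id, err_id);
	v->malicious = true;	/* latched until the VF is re-initialized */
}

int main(void)
{
	mark_malicious(9, 0x1);
	return 0;
}

Per the qed_iov_enable_vf_access() hunk earlier in this patch, the latch is cleared again when the VF is re-enabled.
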
+ */ + base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues; + + params->rel_vf_id = vfid; + for (i = 0; i < params->num_queues; i++) { + params->req_rx_queue[i] = base + i; + params->req_tx_queue[i] = base + i; + } +} + static int qed_sriov_enable(struct qed_dev *cdev, int num) { - struct qed_sb_cnt_info sb_cnt_info; + struct qed_iov_vf_init_params params; int i, j, rc; if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { @@ -3405,11 +3570,17 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) return -EINVAL; } + memset(¶ms, 0, sizeof(params)); + /* Initialize HW for VF access */ for_each_hwfn(cdev, j) { struct qed_hwfn *hwfn = &cdev->hwfns[j]; struct qed_ptt *ptt = qed_ptt_acquire(hwfn); - int num_sbs = 0, limit = 16; + + /* Make sure not to use more than 16 queues per VF */ + params.num_queues = min_t(int, + FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, + 16); if (!ptt) { DP_ERR(hwfn, "Failed to acquire ptt\n"); @@ -3417,19 +3588,12 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) goto err; } - if (IS_MF_DEFAULT(hwfn)) - limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine; - - memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); - qed_int_get_num_sbs(hwfn, &sb_cnt_info); - num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit); - for (i = 0; i < num; i++) { - if (!qed_iov_is_valid_vfid(hwfn, i, false)) + if (!qed_iov_is_valid_vfid(hwfn, i, false, true)) continue; - rc = qed_iov_init_hw_for_vf(hwfn, - ptt, i, num_sbs / num); + qed_sriov_enable_qid_config(hwfn, i, ¶ms); + rc = qed_iov_init_hw_for_vf(hwfn, ptt, ¶ms); if (rc) { DP_ERR(cdev, "Failed to enable VF[%d]\n", i); qed_ptt_release(hwfn, ptt); @@ -3477,7 +3641,7 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) return -EINVAL; } - if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) { + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { DP_VERBOSE(cdev, QED_MSG_IOV, "Cannot set VF[%d] MAC (VF is not active)\n", vfid); return -EINVAL; @@ -3509,7 +3673,7 @@ static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) return -EINVAL; } - if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) { + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { DP_VERBOSE(cdev, QED_MSG_IOV, "Cannot set VF[%d] MAC (VF is not active)\n", vfid); return -EINVAL; @@ -3543,7 +3707,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, if (IS_VF(cdev)) return -EINVAL; - if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) { + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) { DP_VERBOSE(cdev, QED_MSG_IOV, "VF index [%d] isn't active\n", vf_id); return -EINVAL; @@ -3647,7 +3811,7 @@ static int qed_set_vf_link_state(struct qed_dev *cdev, if (IS_VF(cdev)) return -EINVAL; - if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) { + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) { DP_VERBOSE(cdev, QED_MSG_IOV, "VF index [%d] isn't active\n", vf_id); return -EINVAL; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 0dd23e409b3f..509c02b4772e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -58,6 +58,23 @@ struct qed_public_vf_info { int tx_rate; }; +struct qed_iov_vf_init_params { + u16 rel_vf_id; + + /* Number of requested Queues; Currently, don't support different + * number of Rx/Tx queues. 
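
qed_sriov_enable_qid_config() distributes queue zones evenly: VF i receives the contiguous block starting at FEAT_NUM(QED_PF_L2_QUE) + i * num_queues, and num_queues itself is capped at min(FEAT_NUM(QED_VF_L2_QUE) / num_vfs, 16). A worked sketch of the resulting layout (the feature counts are invented for the example):

#include <stdio.h>

#define PF_L2_QUE 16	/* invented: zones reserved for the PF */
#define VF_L2_QUE 64	/* invented: zones available to VFs */

int main(void)
{
	int num_vfs = 4;
	int per_vf = VF_L2_QUE / num_vfs;

	if (per_vf > 16)
		per_vf = 16;	/* never more than 16 queues per VF */

	for (int vf = 0; vf < num_vfs; vf++) {
		int base = PF_L2_QUE + vf * per_vf;

		printf("VF%d: qzones [%d..%d]\n",
		       vf, base, base + per_vf - 1);
	}
	return 0;
}

With these numbers VF0 gets zones [16..31], VF1 [32..47], and so on, matching the "PF first, then VFs sequentially" assumption stated in the comment above.
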
+ */ + + u16 num_queues; + + /* Allow the client to choose which qzones to use for Rx/Tx, + * and which queue_base to use for Tx queues on a per-queue basis. + * Notice values should be relative to the PF resources. + */ + u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF]; + u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF]; +}; + /* This struct is part of qed_dev and contains data relevant to all hwfns; * Initialized only if SR-IOV cpabability is exposed in PCIe config space. */ @@ -99,10 +116,10 @@ struct qed_iov_vf_mbx { struct qed_vf_q_info { u16 fw_rx_qid; + struct qed_queue_cid *p_rx_cid; u16 fw_tx_qid; + struct qed_queue_cid *p_tx_cid; u8 fw_cid; - u8 rxq_active; - u8 txq_active; }; enum vf_state { @@ -132,6 +149,7 @@ struct qed_vf_info { struct qed_iov_vf_mbx vf_mbx; enum vf_state state; bool b_init; + bool b_malicious; u8 to_disable; struct qed_bulletin bulletin; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index abf5bf11f865..60b31a8ede73 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -388,18 +388,18 @@ free_p_iov: #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) -int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, - u8 rx_qid, - u16 sb, - u8 sb_index, - u16 bd_max_bytes, - dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, - u16 cqe_pbl_size, void __iomem **pp_prod) +int +qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_start_queue_resp_tlv *resp; struct vfpf_start_rxq_tlv *req; + u8 rx_qid = p_cid->rel.queue_id; int rc; /* clear mailbox and prep first tlv */ @@ -409,21 +409,22 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, req->cqe_pbl_addr = cqe_pbl_addr; req->cqe_pbl_size = cqe_pbl_size; req->rxq_addr = bd_chain_phys_addr; - req->hw_sb = sb; - req->sb_index = sb_index; + req->hw_sb = p_cid->rel.sb; + req->sb_index = p_cid->rel.sb_idx; req->bd_max_bytes = bd_max_bytes; req->stat_id = -1; /* If PF is legacy, we'll need to calculate producers ourselves * as well as clean them. 
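
On the VF side, qed_vf_pf_rxq_start() now takes a qed_queue_cid handle instead of loose (rx_qid, sb, sb_index) parameters, and the request fields are derived from the handle's relative coordinates. A sketch of bundling those coordinates so call sites cannot pass mismatched values (the struct layout is illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct queue_rel {
	unsigned short queue_id;
	unsigned short sb;
	unsigned char sb_idx;
};

struct queue_cid {
	struct queue_rel rel;
	bool b_legacy_vf;
};

struct start_rxq_req {
	unsigned short rx_qid;
	unsigned short hw_sb;
	unsigned char sb_index;
};

static void fill_req(struct start_rxq_req *req, const struct queue_cid *cid)
{
	/* every field comes from one handle, so they cannot drift apart */
	req->rx_qid = cid->rel.queue_id;
	req->hw_sb = cid->rel.sb;
	req->sb_index = cid->rel.sb_idx;
}

int main(void)
{
	struct queue_cid cid = { { 3, 0x10, 2 }, false };
	struct start_rxq_req req;

	fill_req(&req, &cid);
	printf("qid %u sb %u idx %u\n", req.rx_qid, req.hw_sb, req.sb_index);
	return 0;
}
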
*/ - if (pp_prod && p_iov->b_pre_fp_hsi) { + if (p_iov->b_pre_fp_hsi) { u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; u32 init_prod_val = 0; - *pp_prod = (u8 __iomem *)p_hwfn->regview + - MSTORM_QZONE_START(p_hwfn->cdev) + - hw_qid * MSTORM_QZONE_SIZE; + *pp_prod = (u8 __iomem *) + p_hwfn->regview + + MSTORM_QZONE_START(p_hwfn->cdev) + + hw_qid * MSTORM_QZONE_SIZE; /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), @@ -444,7 +445,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, } /* Learn the address of the producer from the response */ - if (pp_prod && !p_iov->b_pre_fp_hsi) { + if (!p_iov->b_pre_fp_hsi) { u32 init_prod_val = 0; *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; @@ -462,7 +463,8 @@ exit: return rc; } -int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) +int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, bool cqe_completion) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_stop_rxqs_tlv *req; @@ -472,7 +474,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); - req->rx_qid = rx_qid; + req->rx_qid = p_cid->rel.queue_id; req->num_rxqs = 1; req->cqe_completion = cqe_completion; @@ -496,28 +498,28 @@ exit: return rc; } -int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, - u16 tx_queue_id, - u16 sb, - u8 sb_index, - dma_addr_t pbl_addr, - u16 pbl_size, void __iomem **pp_doorbell) +int +qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_start_queue_resp_tlv *resp; struct vfpf_start_txq_tlv *req; + u16 qid = p_cid->rel.queue_id; int rc; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); - req->tx_qid = tx_queue_id; + req->tx_qid = qid; /* Tx */ req->pbl_addr = pbl_addr; req->pbl_size = pbl_size; - req->hw_sb = sb; - req->sb_index = sb_index; + req->hw_sb = p_cid->rel.sb; + req->sb_index = p_cid->rel.sb_idx; /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, @@ -533,33 +535,29 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, goto exit; } - if (pp_doorbell) { - /* Modern PFs provide the actual offsets, while legacy - * provided only the queue id. - */ - if (!p_iov->b_pre_fp_hsi) { - *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + - resp->offset; - } else { - u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; - u32 db_addr; - - db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); - *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + - db_addr; - } + /* Modern PFs provide the actual offsets, while legacy + * provided only the queue id. 
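
For a legacy (pre-fast-path-HSI) PF, the VF computes its Rx producer address itself: MSTORM_QZONE_START(dev) + hw_qid * MSTORM_QZONE_SIZE inside the mapped regview BAR, and zeroes it before starting the queue; a modern PF instead returns the offset in its response. A plain arithmetic sketch of the legacy indexing (the constants and BAR size are invented):

#include <stdint.h>
#include <string.h>

#define MSTORM_QZONE_BASE 0x1000	/* invented base inside the BAR */
#define MSTORM_QZONE_STRIDE 0x80	/* invented per-zone stride */

static uint8_t regview[0x10000];	/* stand-in for the mapped BAR */

static void *legacy_rx_prod(unsigned int hw_qid)
{
	return regview + MSTORM_QZONE_BASE + hw_qid * MSTORM_QZONE_STRIDE;
}

static void init_producer(unsigned int hw_qid)
{
	uint32_t init_prod_val = 0;

	/* init the rcq/bd producers to 0 before the queue starts */
	memcpy(legacy_rx_prod(hw_qid), &init_prod_val,
	       sizeof(init_prod_val));
}

int main(void)
{
	init_producer(2);
	return 0;
}
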
+ */ + if (!p_iov->b_pre_fp_hsi) { + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset; + } else { + u8 cid = p_iov->acquire_resp.resc.cid[qid]; - DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", - tx_queue_id, *pp_doorbell, resp->offset); + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr_vf(cid, + DQ_DEMS_LEGACY); } + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", + qid, *pp_doorbell, resp->offset); exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } -int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) +int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_stop_txqs_tlv *req; @@ -569,7 +567,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); - req->tx_qid = tx_qid; + req->tx_qid = p_cid->rel.queue_id; req->num_txqs = 1; /* add list termination tlv */ @@ -1171,6 +1169,13 @@ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters) *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; } +void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters) +{ + struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info; + + *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters; +} + bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) { struct qed_bulletin_content *bulletin; @@ -1230,8 +1235,8 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac, &is_mac_forced); - if (is_mac_exist && is_mac_forced && cookie) - ops->force_mac(cookie, mac); + if (is_mac_exist && cookie) + ops->force_mac(cookie, mac, !!is_mac_forced); /* Always update link configuration according to bulletin */ qed_link_update(hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 35db7a28aa13..11eb3854e6f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -40,6 +40,7 @@ enum { PFVF_STATUS_NOT_SUPPORTED, PFVF_STATUS_NO_RESOURCE, PFVF_STATUS_FORCED, + PFVF_STATUS_MALICIOUS, }; /* vf pf channel tlvs */ @@ -622,6 +623,14 @@ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters); /** + * @brief Get number of MAC filters allocated for VF by qed + * + * @param p_hwfn + * @param num_rxqs - allocated MAC filters + */ +void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters); + +/** * @brief Check if VF can set a MAC address * * @param p_hwfn @@ -657,10 +666,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); /** * @brief VF - start the RX Queue by sending a message to the PF * @param p_hwfn - * @param cid - zero based within the VF - * @param rx_queue_id - zero based within the VF - * @param sb - VF status block for this queue - * @param sb_index - Index within the status block + * @param p_cid - Only relative fields are relevant * @param bd_max_bytes - maximum number of bytes per bd * @param bd_chain_phys_addr - physical address of bd chain * @param cqe_pbl_addr - physical address of pbl @@ -671,9 +677,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); * @return int */ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, - u8 rx_queue_id, - u16 sb, - u8 sb_index, + struct qed_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, @@ -693,24 
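
The Tx doorbell selection loses its pp_doorbell NULL-check and keeps only the two-way split: a modern PF returns the doorbell offset in the response, while a legacy PF returns only the queue id and the VF derives the address from the cid via qed_db_addr_vf(cid, DQ_DEMS_LEGACY). A sketch of that selection (the BAR, offsets, and helper are illustrative stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t doorbells[0x4000];	/* stand-in for the doorbell BAR */

/* invented stand-in for qed_db_addr_vf(cid, DQ_DEMS_LEGACY) */
static uint32_t db_addr_vf(uint8_t cid)
{
	return (uint32_t)cid * 0x20;
}

static void *tx_doorbell(bool pre_fp_hsi, uint32_t resp_offset, uint8_t cid)
{
	if (!pre_fp_hsi)
		return doorbells + resp_offset;	/* modern PF sent the offset */
	return doorbells + db_addr_vf(cid);	/* legacy PF: derive from cid */
}

int main(void)
{
	printf("legacy doorbell at %p\n", tx_doorbell(true, 0, 5));
	return 0;
}
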
+697,23 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, * * @return int */ -int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, - u16 tx_queue_id, - u16 sb, - u8 sb_index, - dma_addr_t pbl_addr, - u16 pbl_size, void __iomem **pp_doorbell); +int +qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell); /** * @brief VF - stop the RX queue by sending a message to the PF * * @param p_hwfn - * @param rx_qid + * @param p_cid * @param cqe_completion * * @return int */ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, - u16 rx_qid, bool cqe_completion); + struct qed_queue_cid *p_cid, bool cqe_completion); /** * @brief VF - stop the TX queue by sending a message to the PF @@ -720,7 +723,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, * * @return int */ -int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid); +int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid); /** * @brief VF - send a vport update command @@ -871,6 +874,11 @@ static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, { } +static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, + u8 *num_mac_filters) +{ +} + static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) { return false; @@ -888,9 +896,7 @@ static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) } static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, - u8 rx_queue_id, - u16 sb, - u8 sb_index, + struct qed_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_adr, dma_addr_t cqe_pbl_addr, @@ -900,9 +906,7 @@ static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, } static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, - u16 tx_queue_id, - u16 sb, - u8 sb_index, + struct qed_queue_cid *p_cid, dma_addr_t pbl_addr, u16 pbl_size, void __iomem **pp_doorbell) { @@ -910,12 +914,14 @@ static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, } static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, - u16 rx_qid, bool cqe_completion) + struct qed_queue_cid *p_cid, + bool cqe_completion) { return -EINVAL; } -static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) +static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) { return -EINVAL; } |
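
The !CONFIG_QED_SRIOV tail of qed_vf.h illustrates the usual pattern for compiling a feature out: each API gets a static inline stub with the same signature that returns -EINVAL (or does nothing), so callers compile and link either way without sprinkling #ifdefs at every call site. A minimal sketch of the pattern outside the driver (the config symbol and function name are invented):

#include <errno.h>
#include <stdio.h>

struct queue_cid;

#ifdef CONFIG_FEATURE
int feature_txq_stop(struct queue_cid *cid);
#else
/* stub keeps call sites ifdef-free when the feature is compiled out */
static inline int feature_txq_stop(struct queue_cid *cid)
{
	(void)cid;
	return -EINVAL;
}
#endif

int main(void)
{
	printf("txq_stop -> %d\n", feature_txq_stop(NULL));
	return 0;
}
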