Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_dev.c')
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_dev.c | 740
1 file changed, 694 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index b7d100f6bd6f..089016f46f26 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -22,6 +22,7 @@ #include <linux/qed/qed_if.h> #include "qed.h" #include "qed_cxt.h" +#include "qed_dcbx.h" #include "qed_dev_api.h" #include "qed_hsi.h" #include "qed_hw.h" @@ -30,6 +31,11 @@ #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_vf.h" + +static spinlock_t qm_lock; +static bool qm_lock_init = false; /* API common to all protocols */ enum BAR_ID { @@ -40,10 +46,14 @@ enum BAR_ID { static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) { - u32 bar_reg = (bar_id == BAR_ID_0 ? - PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); - u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); + u32 bar_reg = (bar_id == BAR_ID_0 ? + PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); + u32 val; + + if (IS_VF(p_hwfn->cdev)) + return 1 << 17; + val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); if (val) return 1 << (val + 15); @@ -105,12 +115,17 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn) qm_info->qm_vport_params = NULL; kfree(qm_info->qm_port_params); qm_info->qm_port_params = NULL; + kfree(qm_info->wfq_data); + qm_info->wfq_data = NULL; } void qed_resc_free(struct qed_dev *cdev) { int i; + if (IS_VF(cdev)) + return; + kfree(cdev->fw_data); cdev->fw_data = NULL; @@ -134,20 +149,27 @@ void qed_resc_free(struct qed_dev *cdev) qed_eq_free(p_hwfn, p_hwfn->p_eq); qed_consq_free(p_hwfn, p_hwfn->p_consq); qed_int_free(p_hwfn); + qed_iov_free(p_hwfn); qed_dmae_info_free(p_hwfn); + qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info); } } static int qed_init_qm_info(struct qed_hwfn *p_hwfn) { + u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct init_qm_port_params *p_qm_port; - u8 num_vports, i, vport_id, num_ports; u16 num_pqs, multi_cos_tcs = 1; + u16 num_vfs = 0; +#ifdef CONFIG_QED_SRIOV + if (p_hwfn->cdev->p_iov_info) + num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; +#endif memset(qm_info, 0, sizeof(*qm_info)); - num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */ + num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */ num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT); /* Sanity checking that setup requires legal number of resources */ @@ -175,25 +197,50 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) if (!qm_info->qm_port_params) goto alloc_err; + qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data), + GFP_KERNEL); + if (!qm_info->wfq_data) + goto alloc_err; + vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); /* First init per-TC PQs */ for (i = 0; i < multi_cos_tcs; i++) { - struct init_qm_pq_params *params = &qm_info->qm_pq_params[i]; - - params->vport_id = vport_id; - params->tc_id = p_hwfn->hw_info.non_offload_tc; - params->wrr_group = 1; + struct init_qm_pq_params *params = + &qm_info->qm_pq_params[curr_queue++]; + + if (p_hwfn->hw_info.personality == QED_PCI_ETH) { + params->vport_id = vport_id; + params->tc_id = p_hwfn->hw_info.non_offload_tc; + params->wrr_group = 1; + } else { + params->vport_id = vport_id; + params->tc_id = p_hwfn->hw_info.offload_tc; + params->wrr_group = 1; + } } /* Then init pure-LB PQ */ - qm_info->pure_lb_pq = i; - qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); - qm_info->qm_pq_params[i].tc_id = PURE_LB_TC; - 
qm_info->qm_pq_params[i].wrr_group = 1; - i++; + qm_info->pure_lb_pq = curr_queue; + qm_info->qm_pq_params[curr_queue].vport_id = + (u8) RESC_START(p_hwfn, QED_VPORT); + qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC; + qm_info->qm_pq_params[curr_queue].wrr_group = 1; + curr_queue++; qm_info->offload_pq = 0; + /* Then init per-VF PQs */ + vf_offset = curr_queue; + for (i = 0; i < num_vfs; i++) { + /* First vport is used by the PF */ + qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1; + qm_info->qm_pq_params[curr_queue].tc_id = + p_hwfn->hw_info.non_offload_tc; + qm_info->qm_pq_params[curr_queue].wrr_group = 1; + curr_queue++; + } + + qm_info->vf_queues_offset = vf_offset; qm_info->num_pqs = num_pqs; qm_info->num_vports = num_vports; @@ -211,29 +258,91 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); - qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); + qm_info->num_vf_pqs = num_vfs; + qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); + + for (i = 0; i < qm_info->num_vports; i++) + qm_info->qm_vport_params[i].vport_wfq = 1; qm_info->pf_wfq = 0; qm_info->pf_rl = 0; qm_info->vport_rl_en = 1; + qm_info->vport_wfq_en = 1; return 0; alloc_err: DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); - kfree(qm_info->qm_pq_params); - kfree(qm_info->qm_vport_params); - kfree(qm_info->qm_port_params); - + qed_qm_info_free(p_hwfn); return -ENOMEM; } +/* This function reconfigures the QM pf on the fly. + * For this purpose we: + * 1. reconfigure the QM database + * 2. set new values to runtime arrat + * 3. send an sdm_qm_cmd through the rbc interface to stop the QM + * 4. activate init tool in QM_PF stage + * 5. send an sdm_qm_cmd through rbc interface to release the QM + */ +int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + bool b_rc; + int rc; + + /* qm_info is allocated in qed_init_qm_info() which is already called + * from qed_resc_alloc() or previous call of qed_qm_reconf(). + * The allocated size may change each init, so we free it before next + * allocation. 
+ */ + qed_qm_info_free(p_hwfn); + + /* initialize qed's qm data structure */ + rc = qed_init_qm_info(p_hwfn); + if (rc) + return rc; + + /* stop PF's qm queues */ + spin_lock_bh(&qm_lock); + b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, + qm_info->start_pq, qm_info->num_pqs); + spin_unlock_bh(&qm_lock); + if (!b_rc) + return -EINVAL; + + /* clear the QM_PF runtime phase leftovers from previous init */ + qed_init_clear_rt_data(p_hwfn); + + /* prepare QM portion of runtime array */ + qed_qm_init_pf(p_hwfn); + + /* activate init tool on runtime array */ + rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, + p_hwfn->hw_info.hw_mode); + if (rc) + return rc; + + /* start PF's qm queues */ + spin_lock_bh(&qm_lock); + b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, + qm_info->start_pq, qm_info->num_pqs); + spin_unlock_bh(&qm_lock); + if (!b_rc) + return -EINVAL; + + return 0; +} + int qed_resc_alloc(struct qed_dev *cdev) { struct qed_consq *p_consq; struct qed_eq *p_eq; int i, rc = 0; + if (IS_VF(cdev)) + return rc; + cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); if (!cdev->fw_data) return -ENOMEM; @@ -308,6 +417,10 @@ int qed_resc_alloc(struct qed_dev *cdev) if (rc) goto alloc_err; + rc = qed_iov_alloc(p_hwfn); + if (rc) + goto alloc_err; + /* EQ */ p_eq = qed_eq_alloc(p_hwfn, 256); if (!p_eq) { @@ -330,6 +443,14 @@ int qed_resc_alloc(struct qed_dev *cdev) "Failed to allocate memory for dmae_info structure\n"); goto alloc_err; } + + /* DCBX initialization */ + rc = qed_dcbx_info_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to allocate memory for dcbx structure\n"); + goto alloc_err; + } } cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); @@ -350,6 +471,9 @@ void qed_resc_setup(struct qed_dev *cdev) { int i; + if (IS_VF(cdev)) + return; + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -365,14 +489,15 @@ void qed_resc_setup(struct qed_dev *cdev) p_hwfn->mcp_info->mfw_mb_length); qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); + + qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt); } } #define FINAL_CLEANUP_POLL_CNT (100) #define FINAL_CLEANUP_POLL_TIME (10) int qed_final_cleanup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 id) + struct qed_ptt *p_ptt, u16 id, bool is_vf) { u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; @@ -380,6 +505,9 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); + if (is_vf) + id += 0x10; + command |= X_FINAL_CLEANUP_AGG_INT << SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; @@ -492,7 +620,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_common_rt_init_params params; struct qed_dev *cdev = p_hwfn->cdev; + u32 concrete_fid; int rc = 0; + u8 vf_id; qed_init_cau_rt_data(cdev); @@ -542,6 +672,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, 0x20b4, qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10); + for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) { + concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); + qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); + qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); + } + /* pretend to original PF */ + qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + return rc; } @@ -558,6 +696,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn, static int qed_hw_init_pf(struct 
qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_tunn_start_params *p_tunn, int hw_mode, bool b_hw_start, enum qed_int_mode int_mode, @@ -574,7 +713,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; /* Update rate limit once we'll actually have a link */ - p_hwfn->qm_info.pf_rl = 100; + p_hwfn->qm_info.pf_rl = 100000; } qed_cxt_hw_init_pf(p_hwfn); @@ -603,7 +742,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); /* Cleanup chip from previous driver if such remains exist */ - rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id); + rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); if (rc != 0) return rc; @@ -625,7 +764,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, qed_int_igu_enable(p_hwfn, p_ptt, int_mode); /* send function start command */ - rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode); + rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode, + allow_npar_tx_switch); if (rc) DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); } @@ -672,6 +812,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, } int qed_hw_init(struct qed_dev *cdev, + struct qed_tunn_start_params *p_tunn, bool b_hw_start, enum qed_int_mode int_mode, bool allow_npar_tx_switch, @@ -680,13 +821,20 @@ int qed_hw_init(struct qed_dev *cdev, u32 load_code, param; int rc, mfw_rc, i; - rc = qed_init_fw_data(cdev, bin_fw_data); - if (rc != 0) - return rc; + if (IS_PF(cdev)) { + rc = qed_init_fw_data(cdev, bin_fw_data); + if (rc != 0) + return rc; + } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (IS_VF(cdev)) { + p_hwfn->b_int_enabled = 1; + continue; + } + /* Enable DMAE in PXP */ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); @@ -708,6 +856,11 @@ int qed_hw_init(struct qed_dev *cdev, p_hwfn->first_on_engine = (load_code == FW_MSG_CODE_DRV_LOAD_ENGINE); + if (!qm_lock_init) { + spin_lock_init(&qm_lock); + qm_lock_init = true; + } + switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, @@ -724,7 +877,7 @@ int qed_hw_init(struct qed_dev *cdev, /* Fall into */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, - p_hwfn->hw_info.hw_mode, + p_tunn, p_hwfn->hw_info.hw_mode, b_hw_start, int_mode, allow_npar_tx_switch); break; @@ -749,6 +902,20 @@ int qed_hw_init(struct qed_dev *cdev, return mfw_rc; } + /* send DCBX attention request command */ + DP_VERBOSE(p_hwfn, + QED_MSG_DCB, + "sending phony dcbx set command to trigger DCBx attention handling\n"); + mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, + &load_code, ¶m); + if (mfw_rc) { + DP_NOTICE(p_hwfn, + "Failed to send DCBX attention request\n"); + return mfw_rc; + } + p_hwfn->hw_init_done = true; } @@ -811,6 +978,11 @@ int qed_hw_stop(struct qed_dev *cdev) DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); + if (IS_VF(cdev)) { + qed_vf_pf_int_cleanup(p_hwfn); + continue; + } + /* mark the hw as uninitialized... */ p_hwfn->hw_init_done = false; @@ -842,15 +1014,16 @@ int qed_hw_stop(struct qed_dev *cdev) usleep_range(1000, 2000); } - /* Disable DMAE in PXP - in CMT, this should only be done for - * first hw-function, and only after all transactions have - * stopped for all active hw-functions. 
- */ - t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], - cdev->hwfns[0].p_main_ptt, - false); - if (t_rc != 0) - rc = t_rc; + if (IS_PF(cdev)) { + /* Disable DMAE in PXP - in CMT, this should only be done for + * first hw-function, and only after all transactions have + * stopped for all active hw-functions. + */ + t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], + cdev->hwfns[0].p_main_ptt, false); + if (t_rc != 0) + rc = t_rc; + } return rc; } @@ -861,7 +1034,12 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) for_each_hwfn(cdev, j) { struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; - struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + + if (IS_VF(cdev)) { + qed_vf_pf_int_cleanup(p_hwfn); + continue; + } DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, @@ -885,6 +1063,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) { + if (IS_VF(p_hwfn->cdev)) + return; + /* Re-open incoming traffic */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); @@ -914,6 +1095,13 @@ int qed_hw_reset(struct qed_dev *cdev) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (IS_VF(cdev)) { + rc = qed_vf_pf_reset(p_hwfn); + if (rc) + return rc; + continue; + } + DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n"); /* Check for incorrect states */ @@ -1009,13 +1197,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) { u32 *resc_start = p_hwfn->hw_info.resc_start; + u8 num_funcs = p_hwfn->num_funcs_on_engine; u32 *resc_num = p_hwfn->hw_info.resc_num; struct qed_sb_cnt_info sb_cnt_info; - int num_funcs, i; - - num_funcs = MAX_NUM_PFS_BB; + int i, max_vf_vlan_filters; memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + +#ifdef CONFIG_QED_SRIOV + max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS; +#else + max_vf_vlan_filters = 0; +#endif + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); resc_num[QED_SB] = min_t(u32, @@ -1220,6 +1414,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); } +static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 reg_function_hide, tmp, eng_mask; + u8 num_funcs; + + num_funcs = MAX_NUM_PFS_BB; + + /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values + * in the other bits are selected. + * Bits 1-15 are for functions 1-15, respectively, and their value is + * '0' only for enabled functions (function 0 always exists and + * enabled). + * In case of CMT, only the "even" functions are enabled, and thus the + * number of functions for both hwfns is learnt from the same bits. 
+ */ + reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); + + if (reg_function_hide & 0x1) { + if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) { + num_funcs = 0; + eng_mask = 0xaaaa; + } else { + num_funcs = 1; + eng_mask = 0x5554; + } + + /* Get the number of the enabled functions on the engine */ + tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; + while (tmp) { + if (tmp & 0x1) + num_funcs++; + tmp >>= 0x1; + } + } + + p_hwfn->num_funcs_on_engine = num_funcs; + + DP_VERBOSE(p_hwfn, + NETIF_MSG_PROBE, + "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n", + p_hwfn->rel_pf_id, + p_hwfn->abs_pf_id, + p_hwfn->num_funcs_on_engine); +} + static int qed_get_hw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -1228,6 +1467,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, u32 port_mode; int rc; + /* Since all information is common, only first hwfns should do this */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = qed_iov_hw_info(p_hwfn); + if (rc) + return rc; + } + /* Read the port mode */ port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); @@ -1271,6 +1517,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.personality = protocol; } + qed_get_num_funcs(p_hwfn, p_ptt); + qed_hw_get_resc(p_hwfn); return rc; @@ -1336,6 +1584,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, p_hwfn->regview = p_regview; p_hwfn->doorbells = p_doorbells; + if (IS_VF(p_hwfn->cdev)) + return qed_vf_hw_prepare(p_hwfn); + /* Validate that chip access is feasible */ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { DP_ERR(p_hwfn, @@ -1387,6 +1638,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, return rc; err2: + if (IS_LEAD_HWFN(p_hwfn)) + qed_iov_free_hw_info(p_hwfn->cdev); qed_mcp_free(p_hwfn); err1: qed_hw_hwfn_free(p_hwfn); @@ -1401,7 +1654,8 @@ int qed_hw_prepare(struct qed_dev *cdev, int rc; /* Store the precompiled init data ptrs */ - qed_init_iro_array(cdev); + if (IS_PF(cdev)) + qed_init_iro_array(cdev); /* Initialize the first hwfn - will learn number of hwfns */ rc = qed_hw_prepare_single(p_hwfn, @@ -1433,9 +1687,11 @@ int qed_hw_prepare(struct qed_dev *cdev, * initiliazed hwfn 0. */ if (rc) { - qed_init_free(p_hwfn); - qed_mcp_free(p_hwfn); - qed_hw_hwfn_free(p_hwfn); + if (IS_PF(cdev)) { + qed_init_free(p_hwfn); + qed_mcp_free(p_hwfn); + qed_hw_hwfn_free(p_hwfn); + } } } @@ -1449,10 +1705,17 @@ void qed_hw_remove(struct qed_dev *cdev) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (IS_VF(cdev)) { + qed_vf_pf_release(p_hwfn); + continue; + } + qed_init_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); qed_mcp_free(p_hwfn); } + + qed_iov_free_hw_info(cdev); } int qed_chain_alloc(struct qed_dev *cdev, @@ -1593,3 +1856,388 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, return 0; } + +/* Calculate final WFQ values for all vports and configure them. 
+ * After this configuration each vport will have + * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) + */ +static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 min_pf_rate) +{ + struct init_qm_vport_params *vport_params; + int i; + + vport_params = p_hwfn->qm_info.qm_vport_params; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + + vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) / + min_pf_rate; + qed_init_vport_wfq(p_hwfn, p_ptt, + vport_params[i].first_tx_pq_id, + vport_params[i].vport_wfq); + } +} + +static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn, + u32 min_pf_rate) + +{ + int i; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) + p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; +} + +static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 min_pf_rate) +{ + struct init_qm_vport_params *vport_params; + int i; + + vport_params = p_hwfn->qm_info.qm_vport_params; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + qed_init_wfq_default_param(p_hwfn, min_pf_rate); + qed_init_vport_wfq(p_hwfn, p_ptt, + vport_params[i].first_tx_pq_id, + vport_params[i].vport_wfq); + } +} + +/* This function performs several validations for WFQ + * configuration and required min rate for a given vport + * 1. req_rate must be greater than one percent of min_pf_rate. + * 2. req_rate should not cause other vports [not configured for WFQ explicitly] + * rates to get less than one percent of min_pf_rate. + * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. + */ +static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, + u16 vport_id, u32 req_rate, + u32 min_pf_rate) +{ + u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; + int non_requested_count = 0, req_count = 0, i, num_vports; + + num_vports = p_hwfn->qm_info.num_vports; + + /* Accounting for the vports which are configured for WFQ explicitly */ + for (i = 0; i < num_vports; i++) { + u32 tmp_speed; + + if ((i != vport_id) && + p_hwfn->qm_info.wfq_data[i].configured) { + req_count++; + tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + total_req_min_rate += tmp_speed; + } + } + + /* Include current vport data as well */ + req_count++; + total_req_min_rate += req_rate; + non_requested_count = num_vports - req_count; + + if (req_rate < min_pf_rate / QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", + vport_id, req_rate, min_pf_rate); + return -EINVAL; + } + + if (num_vports > QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Number of vports is greater than %d\n", + QED_WFQ_UNIT); + return -EINVAL; + } + + if (total_req_min_rate > min_pf_rate) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", + total_req_min_rate, min_pf_rate); + return -EINVAL; + } + + total_left_rate = min_pf_rate - total_req_min_rate; + + left_rate_per_vp = total_left_rate / non_requested_count; + if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", + left_rate_per_vp, min_pf_rate); + return -EINVAL; + } + + p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; + 
p_hwfn->qm_info.wfq_data[vport_id].configured = true; + + for (i = 0; i < num_vports; i++) { + if (p_hwfn->qm_info.wfq_data[i].configured) + continue; + + p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; + } + + return 0; +} + +static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 vp_id, u32 rate) +{ + struct qed_mcp_link_state *p_link; + int rc = 0; + + p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output; + + if (!p_link->min_pf_rate) { + p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; + p_hwfn->qm_info.wfq_data[vp_id].configured = true; + return rc; + } + + rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); + + if (rc == 0) + qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + else + DP_NOTICE(p_hwfn, + "Validation failed while configuring min rate\n"); + + return rc; +} + +static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 min_pf_rate) +{ + bool use_wfq = false; + int rc = 0; + u16 i; + + /* Validate all pre configured vports for wfq */ + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + u32 rate; + + if (!p_hwfn->qm_info.wfq_data[i].configured) + continue; + + rate = p_hwfn->qm_info.wfq_data[i].min_speed; + use_wfq = true; + + rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate); + if (rc) { + DP_NOTICE(p_hwfn, + "WFQ validation failed while configuring min rate\n"); + break; + } + } + + if (!rc && use_wfq) + qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + else + qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + + return rc; +} + +/* Main API for qed clients to configure vport min rate. + * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] + * rate - Speed in Mbps needs to be assigned to a given vport. + */ +int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) +{ + int i, rc = -EINVAL; + + /* Currently not supported; Might change in future */ + if (cdev->num_hwfns > 1) { + DP_NOTICE(cdev, + "WFQ configuration is not supported for this device\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_ptt *p_ptt; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); + + if (!rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + + qed_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +/* API to configure WFQ from mcp link change */ +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + __qed_configure_vp_wfq_on_link_change(p_hwfn, + p_hwfn->p_dpc_ptt, + min_pf_rate); + } +} + +int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 max_bw) +{ + int rc = 0; + + p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; + + if (!p_link->line_speed && (max_bw != 100)) + return rc; + + p_link->speed = (p_link->line_speed * max_bw) / 100; + p_hwfn->qm_info.pf_rl = p_link->speed; + + /* Since the limiter also affects Tx-switched traffic, we don't want it + * to limit such traffic in case there's no actual limit. + * In that case, set limit to imaginary high boundary. 
+ */ + if (max_bw == 100) + p_hwfn->qm_info.pf_rl = 100000; + + rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_hwfn->qm_info.pf_rl); + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configured MAX bandwidth to be %08x Mb/sec\n", + p_link->speed); + + return rc; +} + +/* Main API to configure PF max bandwidth where bw range is [1 - 100] */ +int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw) +{ + int i, rc = -EINVAL; + + if (max_bw < 1 || max_bw > 100) { + DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); + struct qed_mcp_link_state *p_link; + struct qed_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, + p_link, max_bw); + + qed_ptt_release(p_hwfn, p_ptt); + + if (rc) + break; + } + + return rc; +} + +int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 min_bw) +{ + int rc = 0; + + p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; + p_hwfn->qm_info.pf_wfq = min_bw; + + if (!p_link->line_speed) + return rc; + + p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; + + rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configured MIN bandwidth to be %d Mb/sec\n", + p_link->min_pf_rate); + + return rc; +} + +/* Main API to configure PF min bandwidth where bw range is [1-100] */ +int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw) +{ + int i, rc = -EINVAL; + + if (min_bw < 1 || min_bw > 100) { + DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); + struct qed_mcp_link_state *p_link; + struct qed_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, + p_link, min_bw); + if (rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + + if (p_link->min_pf_rate) { + u32 min_rate = p_link->min_pf_rate; + + rc = __qed_configure_vp_wfq_on_link_change(p_hwfn, + p_ptt, + min_rate); + } + + qed_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_link_state *p_link; + + p_link = &p_hwfn->mcp_info->link_output; + + if (p_link->min_pf_rate) + qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + + memset(p_hwfn->qm_info.wfq_data, 0, + sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); +} |
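
A few standalone sketches of the arithmetic this patch introduces follow; all constants and inputs in them are illustrative, not values taken from the driver headers.

The reworked qed_hw_bar_size() returns a fixed 128 kB BAR for VFs and otherwise decodes the PGLUE register value as 1 << (val + 15). A minimal sketch of that rule (the inputs are illustrative, and the PF fallback for val == 0 is left out):

#include <stdio.h>

/* Sketch of the BAR sizing rule from qed_hw_bar_size(); 'is_vf' and 'val'
 * are illustrative inputs, not values read from hardware.
 */
static unsigned int bar_size_bytes(int is_vf, unsigned int val)
{
	if (is_vf)
		return 1u << 17;		/* fixed 128 kB for VFs */
	if (val)
		return 1u << (val + 15);	/* e.g. val = 1 -> 64 kB */
	return 0;				/* the driver falls back to a default here */
}

int main(void)
{
	printf("VF BAR: %u bytes\n", bar_size_bytes(1, 0));
	printf("PF BAR (val = 2): %u bytes\n", bar_size_bytes(0, 2));
	return 0;
}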
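
qed_init_qm_info() now sizes the PQ array as multi_cos_tcs + num_vfs + 1 and fills it in the order per-TC PQs, pure-LB PQ, per-VF PQs, recording vf_queues_offset for the latter. A small sketch of how those offsets fall out (the VF count is hypothetical):

#include <stdio.h>

/* Illustrative layout of the PQ array built by qed_init_qm_info():
 * [per-TC PQs][pure-LB PQ][per-VF PQs]. The counts are example values.
 */
int main(void)
{
	unsigned int multi_cos_tcs = 1;			/* one traffic class, as in the patch */
	unsigned int num_vfs = 8;			/* hypothetical VF count */
	unsigned int num_pqs = multi_cos_tcs + num_vfs + 1;	/* the '+1' is pure-LB */
	unsigned int pure_lb_pq = multi_cos_tcs;	/* follows the per-TC PQs */
	unsigned int vf_queues_offset = pure_lb_pq + 1;	/* first per-VF PQ */

	printf("num_pqs=%u pure_lb_pq=%u vf_queues_offset=%u\n",
	       num_pqs, pure_lb_pq, vf_queues_offset);
	return 0;
}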
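
qed_get_num_funcs() learns the number of enabled PFs on the engine by inverting MISCS_REG_FUNCTION_HIDE, masking it with the engine's function mask, and counting the set bits. A simplified model of that loop, with a made-up register readout:

#include <stdio.h>

/* Simplified model of the counting loop in qed_get_num_funcs(). The
 * register value is made up; 0x5554 matches the "even functions" mask
 * the patch uses for a single-hwfn device.
 */
int main(void)
{
	unsigned int reg_function_hide = 0x0000aab5;	/* hypothetical readout */
	unsigned int eng_mask = 0x5554;
	unsigned int num_funcs = 1;			/* function 0 always exists */
	unsigned int tmp;

	if (reg_function_hide & 0x1) {
		/* bits that are '0' in the register are enabled functions */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 1;
		}
	}

	printf("functions on engine: %u\n", num_funcs);
	return 0;
}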
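
qed_configure_wfq_for_all_vports() turns each vport's requested minimum speed into a WFQ weight of (min_speed * QED_WFQ_UNIT) / min_pf_rate, which corresponds to an approximate minimum rate of min_pf_rate * (vport_wfq / QED_WFQ_UNIT). A sketch of that arithmetic; QED_WFQ_UNIT is assumed to be 100 here purely for illustration (the driver defines the real value elsewhere):

#include <stdio.h>

#define QED_WFQ_UNIT 100	/* assumed scale for this example only */

/* Weight handed to qed_init_vport_wfq() for a vport that asked for
 * wfq_speed Mbps out of a PF minimum of min_pf_rate Mbps.
 */
static unsigned int vport_wfq_weight(unsigned int wfq_speed, unsigned int min_pf_rate)
{
	return (wfq_speed * QED_WFQ_UNIT) / min_pf_rate;
}

int main(void)
{
	unsigned int min_pf_rate = 10000;	/* 10G PF minimum, in Mbps */
	unsigned int wfq_speed = 2500;		/* vport requested 2.5G */
	unsigned int wfq = vport_wfq_weight(wfq_speed, min_pf_rate);

	/* approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) */
	printf("vport_wfq=%u -> approx min rate=%u Mbps\n",
	       wfq, min_pf_rate * wfq / QED_WFQ_UNIT);
	return 0;
}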
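
qed_init_wfq_param() rejects a request that falls below the minimum per-vport share (min_pf_rate / QED_WFQ_UNIT), that pushes the sum of all explicit requests above min_pf_rate, or that leaves the remaining unconfigured vports below that minimum share. A condensed, purely illustrative version of those three checks, again assuming QED_WFQ_UNIT is 100 and at least one vport without an explicit request:

#include <stdio.h>

#define QED_WFQ_UNIT 100	/* assumed scale for this example only */

/* Condensed form of the three qed_init_wfq_param() checks for one new
 * request; other_req_sum is the sum of already-configured vport rates and
 * req_count includes the current request.
 */
static int wfq_request_ok(unsigned int req_rate, unsigned int other_req_sum,
			  unsigned int num_vports, unsigned int req_count,
			  unsigned int min_pf_rate)
{
	unsigned int min_share = min_pf_rate / QED_WFQ_UNIT;
	unsigned int total_req = other_req_sum + req_rate;
	unsigned int non_requested = num_vports - req_count;

	if (req_rate < min_share)
		return 0;	/* rule 1: below the minimum per-vport share */
	if (total_req > min_pf_rate)
		return 0;	/* rule 3: explicit requests exceed the PF minimum */
	if ((min_pf_rate - total_req) / non_requested < min_share)
		return 0;	/* rule 2: unconfigured vports would be starved */
	return 1;
}

int main(void)
{
	/* 4 vports, PF minimum 10000 Mbps, one vport already configured for 4000 */
	printf("request 2500: %s\n",
	       wfq_request_ok(2500, 4000, 4, 2, 10000) ? "accepted" : "rejected");
	printf("request 5900: %s\n",
	       wfq_request_ok(5900, 4000, 4, 2, 10000) ? "accepted" : "rejected");
	return 0;
}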
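
__qed_configure_pf_max_bandwidth() and __qed_configure_pf_min_bandwidth() both scale the negotiated line speed by a percentage in [1, 100]; a 100% maximum is mapped to the 100000 Mbps "no real limit" boundary that qed_hw_init_pf() now also uses as its default pf_rl. A sketch of both conversions with example link speeds:

#include <stdio.h>

/* Sketch of the bandwidth arithmetic from __qed_configure_pf_max_bandwidth()
 * and __qed_configure_pf_min_bandwidth(); the line speeds are examples.
 */
static unsigned int pf_rate_limit(unsigned int line_speed, unsigned int max_bw)
{
	unsigned int speed = line_speed * max_bw / 100;

	/* 100% means no actual limit, so use an imaginary high boundary */
	return (max_bw == 100) ? 100000 : speed;	/* Mbps */
}

static unsigned int pf_min_rate(unsigned int line_speed, unsigned int min_bw)
{
	return line_speed * min_bw / 100;		/* Mbps, feeds the WFQ setup */
}

int main(void)
{
	printf("25G at 40%% max -> pf_rl = %u Mbps\n", pf_rate_limit(25000, 40));
	printf("25G at 100%% max -> pf_rl = %u Mbps\n", pf_rate_limit(25000, 100));
	printf("25G at 20%% min -> min_pf_rate = %u Mbps\n", pf_min_rate(25000, 20));
	return 0;
}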