From bb87ee0efb7396d79ba5f37ff8e8721d01c87d4a Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Thu, 28 Feb 2019 15:25:48 -0800 Subject: ice: Create framework for VSI queue context This patch introduces a framework to store queue specific information in VSI queue contexts. Currently VSI queue context (represented by struct ice_q_ctx) only has q_handle as a member. In future patches, this structure will be updated to hold queue specific information. Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 62 +++++++++++++-- drivers/net/ethernet/intel/ice/ice_common.h | 11 +-- drivers/net/ethernet/intel/ice/ice_lib.c | 99 ++++++++++++++---------- drivers/net/ethernet/intel/ice/ice_sched.c | 54 +++++++++++-- drivers/net/ethernet/intel/ice/ice_switch.c | 22 ++++++ drivers/net/ethernet/intel/ice/ice_switch.h | 9 +++ drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 4 +- 7 files changed, 205 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2937c6be1aee..dce07882f7e1 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2790,11 +2790,36 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) return 0; } +/** + * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC + * @hw: pointer to the HW struct + * @vsi_handle: software VSI handle + * @tc: TC number + * @q_handle: software queue handle + */ +static struct ice_q_ctx * +ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) +{ + struct ice_vsi_ctx *vsi; + struct ice_q_ctx *q_ctx; + + vsi = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi) + return NULL; + if (q_handle >= vsi->num_lan_q_entries[tc]) + return NULL; + if (!vsi->lan_q_ctx[tc]) + return NULL; + q_ctx = vsi->lan_q_ctx[tc]; + return &q_ctx[q_handle]; +} + /** * ice_ena_vsi_txq * @pi: port information structure * @vsi_handle: software VSI handle * @tc: TC number + * @q_handle: software queue handle * @num_qgrps: Number of added queue groups * @buf: list of queue groups to be added * @buf_size: size of buffer for indirect command @@ -2803,12 +2828,13 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * This function adds one LAN queue */ enum ice_status -ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, - struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, +ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, + u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_sched_node *parent; + struct ice_q_ctx *q_ctx; enum ice_status status; struct ice_hw *hw; @@ -2825,6 +2851,14 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, mutex_lock(&pi->sched_lock); + q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); + if (!q_ctx) { + ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", + q_handle); + status = ICE_ERR_PARAM; + goto ena_txq_exit; + } + /* find a parent node */ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_LAN); @@ -2851,7 +2885,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, /* add the LAN queue */ status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); if (status) { - ice_debug(hw, ICE_DBG_SCHED, "enable Q %d 
failed %d\n", + ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", le16_to_cpu(buf->txqs[0].txq_id), hw->adminq.sq_last_status); goto ena_txq_exit; @@ -2859,6 +2893,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, node.node_teid = buf->txqs[0].q_teid; node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; + q_ctx->q_handle = q_handle; /* add a leaf node into schduler tree queue layer */ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); @@ -2871,7 +2906,10 @@ ena_txq_exit: /** * ice_dis_vsi_txq * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: TC number * @num_queues: number of queues + * @q_handles: pointer to software queue handle array * @q_ids: pointer to the q_id array * @q_teids: pointer to queue node teids * @rst_src: if called due to reset, specifies the reset source @@ -2881,12 +2919,14 @@ ena_txq_exit: * This function removes queues and their corresponding nodes in SW DB */ enum ice_status -ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, - u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, +ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, + u16 *q_handles, u16 *q_ids, u32 *q_teids, + enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_aqc_dis_txq_item qg_list; + struct ice_q_ctx *q_ctx; u16 i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) @@ -2909,6 +2949,17 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); if (!node) continue; + q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]); + if (!q_ctx) { + ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n", + q_handles[i]); + continue; + } + if (q_ctx->q_handle != q_handles[i]) { + ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n", + q_ctx->q_handle, q_handles[i]); + continue; + } qg_list.parent_teid = node->info.parent_teid; qg_list.num_qs = 1; qg_list.q_id[0] = cpu_to_le16(q_ids[i]); @@ -2919,6 +2970,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, if (status) break; ice_free_sched_node(pi, node); + q_ctx->q_handle = ICE_INVAL_Q_HANDLE; } mutex_unlock(&pi->sched_lock); return status; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index faefc45e4a1e..f1ddebf45231 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -99,15 +99,16 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); enum ice_status -ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, - u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, - struct ice_sq_cd *cmd_details); +ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, + u16 *q_handle, u16 *q_ids, u32 *q_teids, + enum ice_disq_rst_src rst_src, u16 vmvf_num, + struct ice_sq_cd *cd); enum ice_status ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs); enum ice_status -ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, - struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, +ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, + u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void 
ice_replay_post(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index f31129e4e9cf..fa8ebd8a10ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1715,8 +1715,8 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) rings[q_idx]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, - num_q_grps, qg_buf, buf_len, - NULL); + i, num_q_grps, qg_buf, + buf_len, NULL); if (status) { dev_err(&vsi->back->pdev->dev, "Failed to set LAN Tx queue context, error: %d\n", @@ -2033,10 +2033,10 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, { struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; + int tc, q_idx = 0, err = 0; + u16 *q_ids, *q_handles, i; enum ice_status status; u32 *q_teids, val; - u16 *q_ids, i; - int err = 0; if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) return -EINVAL; @@ -2053,50 +2053,71 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, goto err_alloc_q_ids; } - /* set up the Tx queue list to be disabled */ - ice_for_each_txq(vsi, i) { - u16 v_idx; + q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, + sizeof(*q_handles), GFP_KERNEL); + if (!q_handles) { + err = -ENOMEM; + goto err_alloc_q_handles; + } - if (!rings || !rings[i] || !rings[i]->q_vector) { - err = -EINVAL; - goto err_out; - } + /* set up the Tx queue list to be disabled for each enabled TC */ + ice_for_each_traffic_class(tc) { + if (!(vsi->tc_cfg.ena_tc & BIT(tc))) + break; + + for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { + u16 v_idx; + + if (!rings || !rings[i] || !rings[i]->q_vector) { + err = -EINVAL; + goto err_out; + } - q_ids[i] = vsi->txq_map[i + offset]; - q_teids[i] = rings[i]->txq_teid; + q_ids[i] = vsi->txq_map[q_idx + offset]; + q_teids[i] = rings[q_idx]->txq_teid; + q_handles[i] = i; - /* clear cause_ena bit for disabled queues */ - val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); + /* clear cause_ena bit for disabled queues */ + val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); + val &= ~QINT_TQCTL_CAUSE_ENA_M; + wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); - /* software is expected to wait for 100 ns */ - ndelay(100); + /* software is expected to wait for 100 ns */ + ndelay(100); - /* trigger a software interrupt for the vector associated to - * the queue to schedule NAPI handler + /* trigger a software interrupt for the vector + * associated to the queue to schedule NAPI handler + */ + v_idx = rings[i]->q_vector->v_idx; + wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), + GLINT_DYN_CTL_SWINT_TRIG_M | + GLINT_DYN_CTL_INTENA_MSK_M); + q_idx++; + } + status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc, + vsi->num_txq, q_handles, q_ids, + q_teids, rst_src, rel_vmvf_num, NULL); + + /* if the disable queue command was exercised during an active + * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not + * an error as the reset operation disables queues at the + * hardware level anyway. */ - v_idx = rings[i]->q_vector->v_idx; - wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), - GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); - } - status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, - rst_src, rel_vmvf_num, NULL); - /* if the disable queue command was exercised during an active reset - * flow, ICE_ERR_RESET_ONGOING is returned. 
This is not an error as - * the reset operation disables queues at the hardware level anyway. - */ - if (status == ICE_ERR_RESET_ONGOING) { - dev_info(&pf->pdev->dev, - "Reset in progress. LAN Tx queues already disabled\n"); - } else if (status) { - dev_err(&pf->pdev->dev, - "Failed to disable LAN Tx queues, error: %d\n", - status); - err = -ENODEV; + if (status == ICE_ERR_RESET_ONGOING) { + dev_dbg(&pf->pdev->dev, + "Reset in progress. LAN Tx queues already disabled\n"); + } else if (status) { + dev_err(&pf->pdev->dev, + "Failed to disable LAN Tx queues, error: %d\n", + status); + err = -ENODEV; + } } err_out: + devm_kfree(&pf->pdev->dev, q_handles); + +err_alloc_q_handles: devm_kfree(&pf->pdev->dev, q_ids); err_alloc_q_ids: diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 124feaf0e730..8d49f83be7a5 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -532,6 +532,50 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, return status; } +/** + * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + * @tc: TC number + * @new_numqs: number of queues + */ +static enum ice_status +ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) +{ + struct ice_vsi_ctx *vsi_ctx; + struct ice_q_ctx *q_ctx; + + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + /* allocate LAN queue contexts */ + if (!vsi_ctx->lan_q_ctx[tc]) { + vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), + new_numqs, + sizeof(*q_ctx), + GFP_KERNEL); + if (!vsi_ctx->lan_q_ctx[tc]) + return ICE_ERR_NO_MEMORY; + vsi_ctx->num_lan_q_entries[tc] = new_numqs; + return 0; + } + /* num queues are increased, update the queue contexts */ + if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) { + u16 prev_num = vsi_ctx->num_lan_q_entries[tc]; + + q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, + sizeof(*q_ctx), GFP_KERNEL); + if (!q_ctx) + return ICE_ERR_NO_MEMORY; + memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], + prev_num * sizeof(*q_ctx)); + devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); + vsi_ctx->lan_q_ctx[tc] = q_ctx; + vsi_ctx->num_lan_q_entries[tc] = new_numqs; + } + return 0; +} + /** * ice_sched_clear_agg - clears the aggregator related information * @hw: pointer to the hardware structure @@ -1403,14 +1447,14 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, if (!vsi_ctx) return ICE_ERR_PARAM; - if (owner == ICE_SCHED_NODE_OWNER_LAN) - prev_numqs = vsi_ctx->sched.max_lanq[tc]; - else - return ICE_ERR_PARAM; - + prev_numqs = vsi_ctx->sched.max_lanq[tc]; /* num queues are not changed or less than the previous number */ if (new_numqs <= prev_numqs) return status; + status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); + if (status) + return status; + if (new_numqs) ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); /* Keep the max number of queue configuration all the time. 
Update the diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index ad6bb0fce5d1..81f44939c859 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -328,6 +328,27 @@ ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi) hw->vsi_ctx[vsi_handle] = vsi; } +/** + * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + */ +static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_vsi_ctx *vsi; + u8 i; + + vsi = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi) + return; + ice_for_each_traffic_class(i) { + if (vsi->lan_q_ctx[i]) { + devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]); + vsi->lan_q_ctx[i] = NULL; + } + } +} + /** * ice_clear_vsi_ctx - clear the VSI context entry * @hw: pointer to the HW struct @@ -341,6 +362,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) vsi = ice_get_vsi_ctx(hw, vsi_handle); if (vsi) { + ice_clear_vsi_q_ctx(hw, vsi_handle); devm_kfree(ice_hw_to_dev(hw), vsi); hw->vsi_ctx[vsi_handle] = NULL; } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 64a2fecfce20..88eb4be4d5a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -9,6 +9,13 @@ #define ICE_SW_CFG_MAX_BUF_LEN 2048 #define ICE_DFLT_VSI_INVAL 0xff #define ICE_VSI_INVAL_ID 0xffff +#define ICE_INVAL_Q_HANDLE 0xFFFF +#define ICE_INVAL_Q_HANDLE 0xFFFF + +/* VSI queue context structure */ +struct ice_q_ctx { + u16 q_handle; +}; /* VSI context structure for add/get/update/free operations */ struct ice_vsi_ctx { @@ -20,6 +27,8 @@ struct ice_vsi_ctx { struct ice_sched_vsi_info sched; u8 alloc_from_pool; u8 vf_num; + u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS]; + struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS]; }; enum ice_sw_fwd_act_type { diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index e562ea15b79b..789b6f10b381 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -996,8 +996,8 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) /* Call Disable LAN Tx queue AQ call even when queues are not * enabled. This is needed for successful completiom of VFR */ - ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET, - vf->vf_id, NULL); + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, + NULL, ICE_VF_RESET, vf->vf_id, NULL); } hw = &pf->hw; -- cgit v1.2.3 From 85796d6e2fce748fbb59cf98c51b5f2e1bc409ca Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 28 Feb 2019 15:25:49 -0800 Subject: ice: Return configuration error without queue to disable If there is no queue to disable, return appropriate configuration error earlier without acquiring the lock. 
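Illustrative aside, not part of the patch: a minimal userspace C sketch of the reordering described above -- validate the request before taking the scheduler lock, and only fall through to the queue-less admin command when a reset is in progress. The names (fake_dis_txq, ERR_CFG) and the pthread mutex standing in for pi->sched_lock are invented for the example.

#include <pthread.h>
#include <stdio.h>

#define ERR_CFG -1

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

static int fake_dis_txq(unsigned int num_queues, int rst_in_progress)
{
        if (!num_queues) {
                /* nothing to disable: only a reset flow still needs the
                 * (queue-less) admin command; otherwise it is a config error
                 */
                if (rst_in_progress)
                        return 0;       /* would send the AQ command here */
                return ERR_CFG;
        }

        pthread_mutex_lock(&sched_lock);
        /* per-queue teardown would happen here */
        pthread_mutex_unlock(&sched_lock);
        return 0;
}

int main(void)
{
        printf("no queues, no reset: %d\n", fake_dis_txq(0, 0));
        printf("no queues, reset:    %d\n", fake_dis_txq(0, 1));
        printf("one queue:           %d\n", fake_dis_txq(1, 0));
        return 0;
}
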
Signed-off-by: Akeem G Abodunrin Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index dce07882f7e1..1d25a4230308 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2932,14 +2932,17 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; - /* if queue is disabled already yet the disable queue command has to be - * sent to complete the VF reset, then call ice_aq_dis_lan_txq without - * any queue information - */ - if (!num_queues && rst_src) - return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num, - NULL); + if (!num_queues) { + /* if queue is disabled already yet the disable queue command + * has to be sent to complete the VF reset, then call + * ice_aq_dis_lan_txq without any queue information + */ + if (rst_src) + return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, + vmvf_num, NULL); + return ICE_ERR_CFG; + } mutex_lock(&pi->sched_lock); -- cgit v1.2.3 From fe7219fa7c79722d75524e5be9d569eef2ead032 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Thu, 28 Feb 2019 15:25:50 -0800 Subject: ice: Resolve static analysis reported issue Static analysis points out the default case in the switch statement in ice_get_itr_intrl_gran() is an infeasible condition causing the default case statement to be unreachable. Remove it and since the function no longer returns anything but success, change it to just return void and update the only call to it accordingly. Signed-off-by: Bruce Allan Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 1d25a4230308..0f1c2267c9d7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -647,7 +647,7 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) * Determines the itr/intrl granularities based on the maximum aggregate * bandwidth according to the device's configuration during power-on. 
*/ -static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw) +static void ice_get_itr_intrl_gran(struct ice_hw *hw) { u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) & GL_PWR_MODE_CTL_CAR_MAX_BW_M) >> @@ -664,13 +664,7 @@ static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw) hw->itr_gran = ICE_ITR_GRAN_MAX_25; hw->intrl_gran = ICE_INTRL_GRAN_MAX_25; break; - default: - ice_debug(hw, ICE_DBG_INIT, - "Failed to determine itr/intrl granularity\n"); - return ICE_ERR_CFG; } - - return 0; } /** @@ -697,9 +691,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) return status; - status = ice_get_itr_intrl_gran(hw); - if (status) - return status; + ice_get_itr_intrl_gran(hw); status = ice_init_all_ctrlq(hw); if (status) -- cgit v1.2.3 From 1553f4f77a495b4e78f7083f1f8341bef6dbe9c7 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 28 Feb 2019 15:25:51 -0800 Subject: ice: Reduce scope of variable in ice_vsi_cfg_rxqs Reduce scope of the variable 'err' to inside the for loop instead of using it as a second looping conditional. Also while here, improve the debug message if we fail to configure a Rx queue. Signed-off-by: Brett Creeley Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index fa8ebd8a10ce..61bb9e92f6ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1641,7 +1641,6 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) */ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) { - int err = 0; u16 i; if (vsi->type == ICE_VSI_VF) @@ -1656,14 +1655,19 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) vsi->rx_buf_len = ICE_RXBUF_2048; setup_rings: /* set up individual rings */ - for (i = 0; i < vsi->num_rxq && !err; i++) - err = ice_setup_rx_ctx(vsi->rx_rings[i]); + for (i = 0; i < vsi->num_rxq; i++) { + int err; - if (err) { - dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); - return -EIO; + err = ice_setup_rx_ctx(vsi->rx_rings[i]); + if (err) { + dev_err(&vsi->back->pdev->dev, + "ice_setup_rx_ctx failed for RxQ %d, err %d\n", + i, err); + return err; + } } - return err; + + return 0; } /** -- cgit v1.2.3 From a92e1bb6ade7526f0c2b7b462516b1941e965504 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 28 Feb 2019 15:25:52 -0800 Subject: ice: Validate ring existence and its q_vector per VSI When stopping Tx rings, we use 'i' as an ring array index for looking up whether the ice_ring exists and have assigned a q_vector. This checks rings only within a given TC and we need to go through every ring in VSI. Use 'q_idx' instead. 
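Illustrative aside, not part of the patch: a small self-contained C sketch of the indexing problem -- the per-TC counter 'i' restarts at zero for every traffic class, so it cannot index the VSI-wide ring array; the running 'q_idx' can. The ring counts and array contents are invented.

#include <stdio.h>

#define NUM_TC 2

int main(void)
{
        int qcount_tx[NUM_TC] = { 4, 4 };       /* rings per TC (assumed) */
        int rings[8];                           /* 8 rings across the VSI */
        int q_idx = 0, tc, i;

        for (i = 0; i < 8; i++)
                rings[i] = i;

        for (tc = 0; tc < NUM_TC; tc++)
                for (i = 0; i < qcount_tx[tc]; i++) {
                        /* rings[i] would revisit rings 0..3 for every TC;
                         * rings[q_idx] walks all 8 rings exactly once.
                         */
                        printf("tc %d: i=%d -> ring %d, q_idx=%d -> ring %d\n",
                               tc, i, rings[i], q_idx, rings[q_idx]);
                        q_idx++;
                }
        return 0;
}
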
Signed-off-by: Maciej Fijalkowski Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 61bb9e92f6ce..57b2873a6123 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2072,7 +2072,8 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { u16 v_idx; - if (!rings || !rings[i] || !rings[i]->q_vector) { + if (!rings || !rings[q_idx] || + !rings[q_idx]->q_vector) { err = -EINVAL; goto err_out; } -- cgit v1.2.3 From 0c2561c81f5d089781f7cb24b8ce9e52ac716f61 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 28 Feb 2019 15:25:53 -0800 Subject: ice: Use ice_for_each_q_vector macro where possible There are many places in the code where we do the following: for (i = 0; i < vsi->num_q_vectors; i++) Instead use the macro mentioned in the commit title: ice_for_each_q_vector(vsi, i) Signed-off-by: Brett Creeley Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 6 +++--- drivers/net/ethernet/intel/ice/ice_main.c | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 57b2873a6123..e75d8c4fadc6 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1054,7 +1054,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi) { int v_idx; - for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + ice_for_each_q_vector(vsi, v_idx) ice_free_q_vector(vsi, v_idx); } @@ -2409,7 +2409,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) return; vsi->irqs_ready = false; - for (i = 0; i < vsi->num_q_vectors; i++) { + ice_for_each_q_vector(vsi, i) { u16 vector = i + base; int irq_num; @@ -2633,7 +2633,7 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) wr32(hw, GLINT_DYN_CTL(i), 0); ice_flush(hw); - for (i = 0; i < vsi->num_q_vectors; i++) + ice_for_each_q_vector(vsi, i) synchronize_irq(pf->msix_entries[i + base].vector); } } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 8bdd311c1b4c..a32782be7f88 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1338,7 +1338,7 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi) if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { int i; - for (i = 0; i < vsi->num_q_vectors; i++) + ice_for_each_q_vector(vsi, i) ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); } @@ -1705,7 +1705,7 @@ void ice_napi_del(struct ice_vsi *vsi) if (!vsi->netdev) return; - for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + ice_for_each_q_vector(vsi, v_idx) netif_napi_del(&vsi->q_vectors[v_idx]->napi); } @@ -1724,7 +1724,7 @@ static void ice_napi_add(struct ice_vsi *vsi) if (!vsi->netdev) return; - for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + ice_for_each_q_vector(vsi, v_idx) netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, ice_napi_poll, NAPI_POLL_WEIGHT); } @@ -2960,7 +2960,7 @@ static void ice_napi_enable_all(struct ice_vsi *vsi) if (!vsi->netdev) return; - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { + ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = 
vsi->q_vectors[q_idx]; if (q_vector->rx.ring || q_vector->tx.ring) @@ -3334,7 +3334,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) if (!vsi->netdev) return; - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { + ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; if (q_vector->rx.ring || q_vector->tx.ring) -- cgit v1.2.3 From b4b418b3ad7e09aa8d3be84c5f096d770797cfad Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Thu, 28 Feb 2019 15:25:54 -0800 Subject: ice: Add 52 byte RSS hash key support Add support to set 52 byte RSS hash key. Signed-off-by: Paul Greenwalt Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 3 +++ drivers/net/ethernet/intel/ice/ice_lib.c | 12 +++++------- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 583f92d4db4c..6ef083002f5b 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1291,6 +1291,9 @@ struct ice_aqc_get_set_rss_key { #define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28 #define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC +#define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \ + (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \ + ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE) struct ice_aqc_get_set_rss_keys { u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index e75d8c4fadc6..982a3a9e9b8d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1394,7 +1394,6 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) */ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) { - u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; struct ice_aqc_get_set_rss_keys *key; struct ice_pf *pf = vsi->back; enum ice_status status; @@ -1429,13 +1428,12 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) } if (vsi->rss_hkey_user) - memcpy(seed, vsi->rss_hkey_user, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + memcpy(key, + (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user, + ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); else - netdev_rss_key_fill((void *)seed, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); - memcpy(&key->standard_rss_key, seed, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + netdev_rss_key_fill((void *)key, + ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key); -- cgit v1.2.3 From b9c8bb06b53d28c83c47988f645b6cf4543c2685 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 28 Feb 2019 15:25:55 -0800 Subject: ice: Add ability to update rx-usecs-high Currently the driver allows rx-usecs-high values to be set, but when querying the device for rx-usecs-high the value does not stick. This is because it was not yet implemented. Add code to allow the user to change rx-usecs-high and use this to set the q_vector's intrl value. 
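Illustrative aside, not part of the patch: a userspace C sketch of the rx-usecs-high handling described above -- range-check the value (0 disables rate limiting, otherwise it must lie between the hardware granularity and the 236 usec maximum from the patch) and convert it to a register value by dividing by the granularity, as ice_intrl_usec_to_reg does. The helper names and the granularity value are invented.

#include <stdio.h>

#define MAX_INTRL 236

static int check_rx_usecs_high(unsigned int usecs, unsigned int gran)
{
        /* 0 disables rate limiting; otherwise must be within [gran, MAX] */
        if (usecs > MAX_INTRL || (usecs && usecs < gran))
                return -1;
        return 0;
}

static unsigned int intrl_usec_to_reg(unsigned int usecs, unsigned int gran)
{
        return usecs / gran;    /* same divide as in ice_intrl_usec_to_reg */
}

int main(void)
{
        unsigned int gran = 4;  /* assumed granularity in usecs */
        unsigned int vals[] = { 0, 2, 8, 236, 300 };
        unsigned int i;

        for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
                printf("%3u usecs: valid=%d reg=%u\n", vals[i],
                       check_rx_usecs_high(vals[i], gran) == 0,
                       intrl_usec_to_reg(vals[i], gran));  /* reg only meaningful when valid */
        return 0;
}
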
Signed-off-by: Brett Creeley Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_ethtool.c | 31 +++++++++++++++++++++++++++- drivers/net/ethernet/intel/ice/ice_lib.c | 2 +- drivers/net/ethernet/intel/ice/ice_lib.h | 1 + drivers/net/ethernet/intel/ice/ice_txrx.h | 1 + 4 files changed, 33 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 64a4c4456ba0..f995ed599cd9 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2228,12 +2228,18 @@ static int ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type, struct ice_ring_container *rc) { - struct ice_pf *pf = rc->ring->vsi->back; + struct ice_pf *pf; + + if (!rc->ring) + return -EINVAL; + + pf = rc->ring->vsi->back; switch (c_type) { case ICE_RX_CONTAINER: ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting); ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC; + ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl; break; case ICE_TX_CONTAINER: ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting); @@ -2342,6 +2348,23 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, switch (c_type) { case ICE_RX_CONTAINER: + if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL || + (ec->rx_coalesce_usecs_high && + ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) { + netdev_info(vsi->netdev, + "Invalid value, rx-usecs-high valid values are 0 (disabled), %d-%d\n", + pf->hw.intrl_gran, ICE_MAX_INTRL); + return -EINVAL; + } + + if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) { + rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high; + wr32(&pf->hw, GLINT_RATE(vsi->hw_base_vector + + rc->ring->q_vector->v_idx), + ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high, + pf->hw.intrl_gran)); + } + if (ec->rx_coalesce_usecs != itr_setting && ec->use_adaptive_rx_coalesce) { netdev_info(vsi->netdev, @@ -2364,6 +2387,12 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, } break; case ICE_TX_CONTAINER: + if (ec->tx_coalesce_usecs_high) { + netdev_info(vsi->netdev, + "setting tx-usecs-high is not supported\n"); + return -EINVAL; + } + if (ec->tx_coalesce_usecs != itr_setting && ec->use_adaptive_tx_coalesce) { netdev_info(vsi->netdev, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 982a3a9e9b8d..4c6ecc25aaa0 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1764,7 +1764,7 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) * This function converts a decimal interrupt rate limit in usecs to the format * expected by firmware. 
*/ -static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) +u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) { u32 val = intrl / gran; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 714ace077796..a91d3553cc89 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -80,4 +80,5 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi); int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); +u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran); #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index c75d9fd12a68..66e05032ee56 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -142,6 +142,7 @@ enum ice_rx_dtype { #define ICE_ITR_ADAPTIVE_BULK 0x0000 #define ICE_DFLT_INTRL 0 +#define ICE_MAX_INTRL 236 /* Legacy or Advanced Mode Queue */ #define ICE_TX_ADVANCED 0 -- cgit v1.2.3 From acd1751a3988e45e3464c9405dc5b95deb55865d Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 28 Feb 2019 15:25:56 -0800 Subject: ice: Remove unnecessary wait when disabling/enabling Rx queues In ice_vsi_ctrl_rx_rings() we are unnecessarily waiting for QRX_CTRL_QENA_REQ and QRX_CTRL_QENA_STAT to be the same value prior to disabling each Rx queue. There is no reason to do this so remove this wait loop as we already have a wait loop after disabling/enabling the Rx queue through the QRX_CTRL register to make sure it gets successfully disabled/enabled. Signed-off-by: Brett Creeley Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 4c6ecc25aaa0..8e0a23e6b563 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -197,19 +197,13 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) { struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - int i, j, ret = 0; + int i, ret = 0; for (i = 0; i < vsi->num_rxq; i++) { int pf_q = vsi->rxq_map[i]; u32 rx_reg; - for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { - rx_reg = rd32(hw, QRX_CTRL(pf_q)); - if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == - ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) - break; - usleep_range(1000, 2000); - } + rx_reg = rd32(hw, QRX_CTRL(pf_q)); /* Skip if the queue is already in the requested state */ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) -- cgit v1.2.3 From 5079b853b221005ac06192265c917ea79c11c0e2 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 28 Feb 2019 15:25:57 -0800 Subject: ice: Fix issue when adding more than allowed VLANs This patch fixes issue with non trusted VFs being able to add more than permitted number of VLANs by adding a check in ice_vc_process_vlan_msg. Also don't return an error in this case as the VF does not need to know that it is not trusted. Also rework ice_vsi_kill_vlan to use the right types. 
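Illustrative aside, not part of the patch: a tiny C sketch of the "silently cap" policy described above -- an untrusted requester that is already at its VLAN limit gets a success-looking result without the filter being added, so it cannot infer its trust level. The names and the limit value are invented.

#include <stdio.h>

#define MAX_VLAN_PER_VF 8

static int add_vlan(int trusted, int *num_vlan, int vid)
{
        if (!trusted && *num_vlan >= MAX_VLAN_PER_VF) {
                printf("vid %d dropped (limit reached), reporting success\n",
                       vid);
                return 0;       /* do not reveal the restriction */
        }
        (*num_vlan)++;
        printf("vid %d added, count now %d\n", vid, *num_vlan);
        return 0;
}

int main(void)
{
        int num_vlan = 0, vid;

        for (vid = 100; vid < 110; vid++)
                add_vlan(0 /* untrusted */, &num_vlan, vid);
        return 0;
}
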
Signed-off-by: Akeem G Abodunrin Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 15 +++++++++------ drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 13 ++++++++++++- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 8e0a23e6b563..6d9571c8826d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1598,7 +1598,8 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) struct ice_fltr_list_entry *list; struct ice_pf *pf = vsi->back; LIST_HEAD(tmp_add_list); - int status = 0; + enum ice_status status; + int err = 0; list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); if (!list) @@ -1614,14 +1615,16 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) INIT_LIST_HEAD(&list->list_entry); list_add(&list->list_entry, &tmp_add_list); - if (ice_remove_vlan(&pf->hw, &tmp_add_list)) { - dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", - vid, vsi->vsi_num); - status = -EIO; + status = ice_remove_vlan(&pf->hw, &tmp_add_list); + if (status) { + dev_err(&pf->pdev->dev, + "Error removing VLAN %d on vsi %i error: %d\n", + vid, vsi->vsi_num, status); + err = -EIO; } ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); - return status; + return err; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 789b6f10b381..f52f0fc52f46 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -2329,7 +2329,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) /* There is no need to let VF know about being not trusted, * so we can just return success message here */ - v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2370,6 +2369,18 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) for (i = 0; i < vfl->num_elements; i++) { u16 vid = vfl->vlan_id[i]; + if (!ice_is_vf_trusted(vf) && + vf->num_vlan >= ICE_MAX_VLAN_PER_VF) { + dev_info(&pf->pdev->dev, + "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", + vf->vf_id); + /* There is no need to let VF know about being + * not trusted, so we can just return success + * message here as well. + */ + goto error_param; + } + if (ice_vsi_add_vlan(vsi, vid)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; -- cgit v1.2.3 From 8d7189d266ccec6dce1a4c2dd2bde6e0d632a24c Mon Sep 17 00:00:00 2001 From: Md Fahad Iqbal Polash Date: Thu, 28 Feb 2019 15:25:58 -0800 Subject: ice: Remove runtime change of PFINT_OICR_ENA register Runtime change of PFINT_OICR_ENA register is unnecessary. The handlers should always clear the atomic bit for each task as they start, because it will make sure that any late interrupt will either 1) re-set the bit, or 2) be handled directly in the "already running" task handler. 
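Illustrative aside, not part of the patch: a self-contained C sketch of the "claim the pending bit on entry" pattern (test_and_clear_bit in the kernel), with C11 atomics standing in for the kernel bitops. An event that arrives while the handler is running simply re-sets the bit and is serviced on the next pass, so nothing is lost even though the interrupt cause is never masked and re-enabled.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int event_pending;

static void irq(void)                   /* producer: marks work pending */
{
        atomic_store(&event_pending, 1);
}

static void handler(void)               /* consumer: service task */
{
        /* claim the event first, so a new irq() during handling re-arms us */
        if (!atomic_exchange(&event_pending, 0))
                return;
        printf("handling event\n");
        irq();                          /* a "late" event while running ... */
}

int main(void)
{
        irq();
        handler();                      /* handles the first event */
        handler();                      /* ... and the late one is not lost */
        handler();                      /* nothing pending: returns early */
        return 0;
}
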
Signed-off-by: Md Fahad Iqbal Polash Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 13 ++----------- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 13 +------------ 2 files changed, 3 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a32782be7f88..8f6f2a1e67ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1096,7 +1096,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) u32 reg; int i; - if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state)) + if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) return; /* find what triggered the MDD event */ @@ -1229,12 +1229,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf) } } - /* re-enable MDD interrupt cause */ - clear_bit(__ICE_MDD_EVENT_PENDING, pf->state); - reg = rd32(hw, PFINT_OICR_ENA); - reg |= PFINT_OICR_MAL_DETECT_M; - wr32(hw, PFINT_OICR_ENA, reg); - ice_flush(hw); } /** @@ -1523,7 +1517,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) rd32(hw, PFHMC_ERRORDATA)); } - /* Report and mask off any remaining unexpected interrupts */ + /* Report any remaining unexpected interrupts */ oicr &= ena_mask; if (oicr) { dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n", @@ -1537,12 +1531,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) set_bit(__ICE_PFR_REQ, pf->state); ice_service_task_schedule(pf); } - ena_mask &= ~oicr; } ret = IRQ_HANDLED; - /* re-enable interrupt causes that are not handled during this pass */ - wr32(hw, PFINT_OICR_ENA, ena_mask); if (!test_bit(__ICE_DOWN, pf->state)) { ice_service_task_schedule(pf); ice_irq_dynamic_ena(hw, NULL, NULL); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index f52f0fc52f46..abc958788267 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1273,21 +1273,10 @@ void ice_process_vflr_event(struct ice_pf *pf) int vf_id; u32 reg; - if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || + if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || !pf->num_alloc_vfs) return; - /* Re-enable the VFLR interrupt cause here, before looking for which - * VF got reset. Otherwise, if another VF gets a reset while the - * first one is being processed, that interrupt will be lost, and - * that VF will be stuck in reset forever. - */ - reg = rd32(hw, PFINT_OICR_ENA); - reg |= PFINT_OICR_VFLR_M; - wr32(hw, PFINT_OICR_ENA, reg); - ice_flush(hw); - - clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state); for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { struct ice_vf *vf = &pf->vf[vf_id]; u32 reg_idx, bit_idx; -- cgit v1.2.3 From b07833a00d70fb731bb3aba8876a56e37b549f3e Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 28 Feb 2019 15:25:59 -0800 Subject: ice: Add reg_idx variable in ice_q_vector structure Every time we want to re-enable interrupts and/or write to a register that requires an interrupt vector's hardware index we do the following: vsi->hw_base_vector + q_vector->v_idx This is a wasteful operation, especially in the hot path. Fix this by adding a u16 reg_idx member to the ice_q_vector structure and make the necessary changes to make this work. 
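Illustrative aside, not part of the patch: a minimal C sketch of caching the hardware index at setup instead of recomputing base + v_idx on every hot-path register access, mirroring the new reg_idx member. The struct layout and values are invented for the example.

#include <stdint.h>
#include <stdio.h>

struct q_vector {
        uint16_t v_idx;         /* index within the VSI's q_vector array */
        uint16_t reg_idx;       /* cached: hw_base_vector + v_idx */
};

static void set_reg_idx(struct q_vector *qv, int n, uint16_t hw_base_vector)
{
        int i;

        for (i = 0; i < n; i++) {
                qv[i].v_idx = (uint16_t)i;
                qv[i].reg_idx = hw_base_vector + (uint16_t)i;   /* done once */
        }
}

int main(void)
{
        struct q_vector qv[4];
        int i;

        set_reg_idx(qv, 4, 64 /* assumed hw_base_vector */);
        for (i = 0; i < 4; i++) /* hot path would just read reg_idx */
                printf("v_idx %u -> reg_idx %u\n",
                       (unsigned)qv[i].v_idx, (unsigned)qv[i].reg_idx);
        return 0;
}
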
Signed-off-by: Brett Creeley Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 3 +- drivers/net/ethernet/intel/ice/ice_lib.c | 84 ++++++++++++++++++++++++------- drivers/net/ethernet/intel/ice/ice_main.c | 13 +++-- drivers/net/ethernet/intel/ice/ice_txrx.c | 2 +- 4 files changed, 76 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 878a75182d6d..d66aad49bfd4 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -297,6 +297,7 @@ struct ice_q_vector { struct ice_vsi *vsi; u16 v_idx; /* index in the vsi->q_vector array. */ + u16 reg_idx; u8 num_ring_rx; /* total number of Rx rings in vector */ u8 num_ring_tx; /* total number of Tx rings in vector */ u8 itr_countdown; /* when 0 should adjust adaptive ITR */ @@ -403,7 +404,7 @@ static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, struct ice_q_vector *q_vector) { - u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx : + u32 vector = (vsi && q_vector) ? q_vector->reg_idx : ((struct ice_pf *)hw->back)->hw_oicr_idx; int itr = ICE_ITR_NONE; u32 val; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 6d9571c8826d..399905396134 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1805,13 +1805,12 @@ static void ice_cfg_itr_gran(struct ice_hw *hw) * ice_cfg_itr - configure the initial interrupt throttle values * @hw: pointer to the HW structure * @q_vector: interrupt vector that's being configured - * @vector: HW vector index to apply the interrupt throttling to * * Configure interrupt throttling values for the ring containers that are * associated with the interrupt vector passed in. 
*/ static void -ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) +ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) { ice_cfg_itr_gran(hw); @@ -1825,7 +1824,7 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) rc->target_itr = ITR_TO_REG(rc->itr_setting); rc->next_update = jiffies + 1; rc->current_itr = rc->target_itr; - wr32(hw, GLINT_ITR(rc->itr_idx, vector), + wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); } @@ -1839,7 +1838,7 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) rc->target_itr = ITR_TO_REG(rc->itr_setting); rc->next_update = jiffies + 1; rc->current_itr = rc->target_itr; - wr32(hw, GLINT_ITR(rc->itr_idx, vector), + wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); } } @@ -1851,17 +1850,17 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) void ice_vsi_cfg_msix(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - u16 vector = vsi->hw_base_vector; struct ice_hw *hw = &pf->hw; u32 txq = 0, rxq = 0; int i, q; - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + for (i = 0; i < vsi->num_q_vectors; i++) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; + u16 reg_idx = q_vector->reg_idx; - ice_cfg_itr(hw, q_vector, vector); + ice_cfg_itr(hw, q_vector); - wr32(hw, GLINT_RATE(vector), + wr32(hw, GLINT_RATE(reg_idx), ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); /* Both Transmit Queue Interrupt Cause Control register @@ -1886,7 +1885,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) else val = QINT_TQCTL_CAUSE_ENA_M | (itr_idx << QINT_TQCTL_ITR_INDX_S) | - (vector << QINT_TQCTL_MSIX_INDX_S); + (reg_idx << QINT_TQCTL_MSIX_INDX_S); wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); txq++; } @@ -1902,7 +1901,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) else val = QINT_RQCTL_CAUSE_ENA_M | (itr_idx << QINT_RQCTL_ITR_INDX_S) | - (vector << QINT_RQCTL_MSIX_INDX_S); + (reg_idx << QINT_RQCTL_MSIX_INDX_S); wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); rxq++; } @@ -2065,8 +2064,6 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, break; for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - u16 v_idx; - if (!rings || !rings[q_idx] || !rings[q_idx]->q_vector) { err = -EINVAL; @@ -2088,8 +2085,7 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, /* trigger a software interrupt for the vector * associated to the queue to schedule NAPI handler */ - v_idx = rings[i]->q_vector->v_idx; - wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), + wr32(hw, GLINT_DYN_CTL(rings[i]->q_vector->reg_idx), GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); q_idx++; @@ -2208,6 +2204,44 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); } +/** + * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors + * @vsi: VSI to set the q_vectors register index on + */ +static int +ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi) +{ + u16 i; + + if (!vsi || !vsi->q_vectors) + return -EINVAL; + + ice_for_each_q_vector(vsi, i) { + struct ice_q_vector *q_vector = vsi->q_vectors[i]; + + if (!q_vector) { + dev_err(&vsi->back->pdev->dev, + "Failed to set reg_idx on q_vector %d VSI %d\n", + i, vsi->vsi_num); + goto clear_reg_idx; + } + + q_vector->reg_idx = q_vector->v_idx + vsi->hw_base_vector; + } + + return 0; + +clear_reg_idx: + ice_for_each_q_vector(vsi, i) 
{ + struct ice_q_vector *q_vector = vsi->q_vectors[i]; + + if (q_vector) + q_vector->reg_idx = 0; + } + + return -EINVAL; +} + /** * ice_vsi_setup - Set up a VSI by a given type * @pf: board private structure @@ -2273,6 +2307,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (ret) goto unroll_alloc_q_vector; + ret = ice_vsi_set_q_vectors_reg_idx(vsi); + if (ret) + goto unroll_vector_base; + ret = ice_vsi_alloc_rings(vsi); if (ret) goto unroll_vector_base; @@ -2311,6 +2349,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, } else { vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx; } + ret = ice_vsi_set_q_vectors_reg_idx(vsi); + if (ret) + goto unroll_vector_base; + pf->q_left_tx -= vsi->alloc_txq; pf->q_left_rx -= vsi->alloc_rxq; break; @@ -2623,11 +2665,11 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) /* disable each interrupt */ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - for (i = vsi->hw_base_vector; - i < (vsi->num_q_vectors + vsi->hw_base_vector); i++) - wr32(hw, GLINT_DYN_CTL(i), 0); + ice_for_each_q_vector(vsi, i) + wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); ice_flush(hw); + ice_for_each_q_vector(vsi, i) synchronize_irq(pf->msix_entries[i + base].vector); } @@ -2780,6 +2822,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_vectors; + ret = ice_vsi_set_q_vectors_reg_idx(vsi); + if (ret) + goto err_vectors; + ret = ice_vsi_alloc_rings(vsi); if (ret) goto err_vectors; @@ -2801,6 +2847,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_vectors; + ret = ice_vsi_set_q_vectors_reg_idx(vsi); + if (ret) + goto err_vectors; + ret = ice_vsi_alloc_rings(vsi); if (ret) goto err_vectors; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 8f6f2a1e67ed..51af6b9a7ea2 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1592,23 +1592,23 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) /** * ice_ena_ctrlq_interrupts - enable control queue interrupts * @hw: pointer to HW structure - * @v_idx: HW vector index to associate the control queue interrupts with + * @reg_idx: HW vector index to associate the control queue interrupts with */ -static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 v_idx) +static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) { u32 val; - val = ((v_idx & PFINT_OICR_CTL_MSIX_INDX_M) | + val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | PFINT_OICR_CTL_CAUSE_ENA_M); wr32(hw, PFINT_OICR_CTL, val); /* enable Admin queue Interrupt causes */ - val = ((v_idx & PFINT_FW_CTL_MSIX_INDX_M) | + val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | PFINT_FW_CTL_CAUSE_ENA_M); wr32(hw, PFINT_FW_CTL, val); /* enable Mailbox queue Interrupt causes */ - val = ((v_idx & PFINT_MBX_CTL_MSIX_INDX_M) | + val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | PFINT_MBX_CTL_CAUSE_ENA_M); wr32(hw, PFINT_MBX_CTL, val); @@ -4214,8 +4214,7 @@ static void ice_tx_timeout(struct net_device *netdev) /* Read interrupt register */ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) val = rd32(hw, - GLINT_DYN_CTL(tx_ring->q_vector->v_idx + - tx_ring->vsi->hw_base_vector)); + GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", vsi->vsi_num, hung_queue, tx_ring->next_to_clean, diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 259f118c7d8b..e5af775a3fd9 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1391,7 +1391,7 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) if (!test_bit(__ICE_DOWN, vsi->state)) wr32(&vsi->back->hw, - GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx), + GLINT_DYN_CTL(q_vector->reg_idx), itr_val); } -- cgit v1.2.3 From 49a6a5d7ebfbf99ea5ba8fa4d55a29b7a446cbae Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Thu, 28 Feb 2019 15:26:00 -0800 Subject: ice: Add missing PHY type to link settings The PHY type ICE_PHY_TYPE_LOW_25G_AUI_C2C is missing from ice_get_settings_link_up() which is causing a warning message for unrecognized PHY. Add the PHY type to correctly set the settings and avoid the warning message. Signed-off-by: Tony Nguyen Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_ethtool.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index f995ed599cd9..0bfe696d8077 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1034,6 +1034,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, 25000baseCR_Full); break; case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_25G_AUI_C2C: ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); break; -- cgit v1.2.3 From c2a23e00613bde4a6d5f88c2b4facd5c7be6be87 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 28 Feb 2019 15:26:01 -0800 Subject: ice: Refactor link event flow Currently the link event flow works, but can be much better. Refactor the link event flow to make it cleaner and more clear on what is going on. 
Signed-off-by: Brett Creeley Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 20 +++++++ drivers/net/ethernet/intel/ice/ice_main.c | 93 +++++++++++++++---------------- 2 files changed, 65 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index d66aad49bfd4..804d12c2f1df 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -420,6 +420,26 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, wr32(hw, GLINT_DYN_CTL(vector), val); } +/** + * ice_find_vsi_by_type - Find and return VSI of a given type + * @pf: PF to search for VSI + * @type: Value indicating type of VSI we are looking for + */ +static inline struct ice_vsi * +ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) { + struct ice_vsi *vsi = pf->vsi[i]; + + if (vsi && vsi->type == type) + return vsi; + } + + return NULL; +} + void ice_set_ethtool_ops(struct net_device *netdev); int ice_up(struct ice_vsi *vsi); int ice_down(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 51af6b9a7ea2..6b27be93bdf5 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -590,6 +590,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) const char *speed; const char *fc; + if (!vsi) + return; + if (vsi->current_isup == isup) return; @@ -659,15 +662,16 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) */ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) { - if (!vsi || test_bit(__ICE_DOWN, vsi->state)) + if (!vsi) + return; + + if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev) return; if (vsi->type == ICE_VSI_PF) { - if (!vsi->netdev) { - dev_dbg(&vsi->back->pdev->dev, - "vsi->netdev is not initialized!\n"); + if (link_up == netif_carrier_ok(vsi->netdev)) return; - } + if (link_up) { netif_carrier_on(vsi->netdev); netif_tx_wake_all_queues(vsi->netdev); @@ -682,61 +686,51 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) * ice_link_event - process the link event * @pf: pf that the link event is associated with * @pi: port_info for the port that the link event is associated with + * @link_up: true if the physical link is up and false if it is down + * @link_speed: current link speed received from the link event * - * Returns -EIO if ice_get_link_status() fails - * Returns 0 on success + * Returns 0 on success and negative on failure */ static int -ice_link_event(struct ice_pf *pf, struct ice_port_info *pi) +ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, + u16 link_speed) { - u8 new_link_speed, old_link_speed; struct ice_phy_info *phy_info; - bool new_link_same_as_old; - bool new_link, old_link; - u8 lport; - u16 v; + struct ice_vsi *vsi; + u16 old_link_speed; + bool old_link; + int result; phy_info = &pi->phy; phy_info->link_info_old = phy_info->link_info; - /* Force ice_get_link_status() to update link info */ - phy_info->get_link_info = true; - old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); + old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); old_link_speed = phy_info->link_info_old.link_speed; - lport = pi->lport; - if (ice_get_link_status(pi, &new_link)) { + /* update the link info structures and re-enable link events, + * don't bail on failure 
due to other book keeping needed + */ + result = ice_update_link_info(pi); + if (result) dev_dbg(&pf->pdev->dev, - "Could not get link status for port %d\n", lport); - return -EIO; - } - - new_link_speed = phy_info->link_info.link_speed; - - new_link_same_as_old = (new_link == old_link && - new_link_speed == old_link_speed); + "Failed to update link status and re-enable link events for port %d\n", + pi->lport); - ice_for_each_vsi(pf, v) { - struct ice_vsi *vsi = pf->vsi[v]; + /* if the old link up/down and speed is the same as the new */ + if (link_up == old_link && link_speed == old_link_speed) + return result; - if (!vsi || !vsi->port_info) - continue; + vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + if (!vsi || !vsi->port_info) + return -EINVAL; - if (new_link_same_as_old && - (test_bit(__ICE_DOWN, vsi->state) || - new_link == netif_carrier_ok(vsi->netdev))) - continue; + ice_vsi_link_event(vsi, link_up); + ice_print_link_msg(vsi, link_up); - if (vsi->port_info->lport == lport) { - ice_print_link_msg(vsi, new_link); - ice_vsi_link_event(vsi, new_link); - } - } - - if (!new_link_same_as_old && pf->num_alloc_vfs) + if (pf->num_alloc_vfs) ice_vc_notify_link_state(pf); - return 0; + return result; } /** @@ -801,20 +795,23 @@ static int ice_init_link_events(struct ice_port_info *pi) /** * ice_handle_link_event - handle link event via ARQ * @pf: pf that the link event is associated with - * - * Return -EINVAL if port_info is null - * Return status on success + * @event: event structure containing link status info */ -static int ice_handle_link_event(struct ice_pf *pf) +static int +ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) { + struct ice_aqc_get_link_status_data *link_data; struct ice_port_info *port_info; int status; + link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf; port_info = pf->hw.port_info; if (!port_info) return -EINVAL; - status = ice_link_event(pf, port_info); + status = ice_link_event(pf, port_info, + !!(link_data->link_info & ICE_AQ_LINK_UP), + le16_to_cpu(link_data->link_speed)); if (status) dev_dbg(&pf->pdev->dev, "Could not process link event, error %d\n", status); @@ -926,7 +923,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) switch (opcode) { case ice_aqc_opc_get_link_status: - if (ice_handle_link_event(pf)) + if (ice_handle_link_event(pf, &event)) dev_err(&pf->pdev->dev, "Could not handle link event\n"); break; -- cgit v1.2.3 From 20ce2a1a2e4d9e431c5573eb944db71d0d5f3e29 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 28 Feb 2019 15:26:02 -0800 Subject: ice: Use dev_err when ice_cfg_vsi_lan fails dev_err makes more sense than dev_info when this call fails. 
Signed-off-by: Brett Creeley Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 399905396134..49c75371af08 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2368,7 +2368,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (ret) { - dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n"); + dev_err(&pf->pdev->dev, + "VSI %d failed lan queue config, error %d\n", + vsi->vsi_num, ret); goto unroll_vector_base; } @@ -2869,8 +2871,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (ret) { - dev_info(&vsi->back->pdev->dev, - "Failed VSI lan queue config\n"); + dev_err(&pf->pdev->dev, + "VSI %d failed lan queue config, error %d\n", + vsi->vsi_num, ret); goto err_vectors; } return 0; -- cgit v1.2.3