Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_common.c')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_common.c	| 465 +-
1 file changed, 464 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index e93b1e40f627..2fb81e359cdf 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
+#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
@@ -58,6 +59,17 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_is_e810 - check if the device is E810 based
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if the device is E810 based, false if not.
+ */
+bool ice_is_e810(struct ice_hw *hw)
+{
+ return hw->mac_type == ICE_MAC_E810;
+}
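
For context, the helper lets callers branch on MAC family without open-coding the enum compare; a minimal hypothetical use (the helper names here are illustrative, not from the driver):

	/* Hypothetical caller: select a device-family specific path */
	if (ice_is_e810(hw))
		err = setup_e810_path(hw);	/* illustrative helper */
	else
		err = setup_generic_path(hw);	/* illustrative helper */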
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -424,6 +436,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
li->an_info = link_data.an_info;
li->ext_info = link_data.ext_info;
li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
@@ -454,6 +467,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
(unsigned long long)li->phy_type_high);
ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
@@ -1062,7 +1076,8 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
GLNVM_ULD_POR_DONE_1_M |\
GLNVM_ULD_PCIER_DONE_2_M)
- uld_mask = ICE_RESET_DONE_MASK;
+ uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
+ GLNVM_ULD_PE_DONE_M : 0);
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
@@ -1289,6 +1304,64 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
{ 0 }
};
+/* Sideband Queue command wrappers */
+
+/**
+ * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ */
+static int
+ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
+ void *buf, u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
+ (struct ice_aq_desc *)desc,
+ buf, buf_size, cd));
+}
+
+/**
+ * ice_sbq_rw_reg - Fill Sideband Queue command
+ * @hw: pointer to the HW struct
+ * @in: message info to be filled in descriptor
+ */
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
+{
+ struct ice_sbq_cmd_desc desc = {0};
+ struct ice_sbq_msg_req msg = {0};
+ u16 msg_len;
+ int status;
+
+ msg_len = sizeof(msg);
+
+ msg.dest_dev = in->dest_dev;
+ msg.opcode = in->opcode;
+ msg.flags = ICE_SBQ_MSG_FLAGS;
+ msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
+ msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
+ msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
+
+ if (in->opcode)
+ msg.data = cpu_to_le32(in->data);
+ else
+ /* data read comes back in completion, so shorten the struct by
+ * sizeof(msg.data)
+ */
+ msg_len -= sizeof(msg.data);
+
+ desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
+ desc.param0.cmd_len = cpu_to_le16(msg_len);
+ status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
+ if (!status && !in->opcode)
+ in->data =
+ le32_to_cpu(((struct ice_sbq_msg_cmpl *)&msg)->data);
+ return status;
+}
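
For context, a register read through this wrapper might look like the sketch below. The ice_sbq_msg_input fields follow the hunk above (an opcode of 0 selects a read, so the reply lands in msg.data); the destination device and register address are purely illustrative.

	/* Hypothetical caller: read one 32-bit register over the sideband
	 * queue.  With opcode == 0 the wrapper shortens the request by
	 * sizeof(msg.data) and copies the completion data back for us.
	 */
	static int example_sbq_read(struct ice_hw *hw, u32 addr, u32 *val)
	{
		struct ice_sbq_msg_input msg = {0};
		int err;

		msg.dest_dev = rmn_0;		/* illustrative target */
		msg.opcode = 0;			/* 0 == read */
		msg.msg_addr_low = lower_16_bits(addr);
		msg.msg_addr_high = upper_16_bits(addr);

		err = ice_sbq_rw_reg(hw, &msg);
		if (err)
			return err;

		*val = msg.data;	/* filled in from the completion */
		return 0;
	}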
+
/* FW Admin Queue command wrappers */
/* Software lock/mutex that is meant to be held while the Global Config Lock
@@ -1938,6 +2011,10 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
caps->nvm_unified_update);
break;
+ case ICE_AQC_CAPS_RDMA:
+ caps->rdma = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
+ break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
@@ -1971,6 +2048,16 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
caps->maxtc = 4;
ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
caps->maxtc);
+ if (caps->rdma) {
+ ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
+ caps->rdma = 0;
+ }
+
+ /* print message only when processing device capabilities
+ * during initialization.
+ */
+ if (caps == &hw->dev_caps.common_cap)
+ dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
}
}
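
The upshot: on port configurations that also force maxtc down to 4, RDMA is recalculated to off, so consumers are expected to test the surviving capability rather than the raw one. A hedged sketch of such a gate (the flag name is illustrative):

	/* Hypothetical consumer: only bring RDMA up if the capability
	 * survived ice_recalc_port_limited_caps().
	 */
	if (pf->hw.func_caps.common_cap.rdma)
		set_bit(PF_FLAG_RDMA_ENA, pf->flags);	/* flag illustrative */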
@@ -2017,6 +2104,48 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
}
/**
+ * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_1588.
+ */
+static void
+ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ struct ice_ts_func_info *info = &func_p->ts_func_info;
+ u32 number = le32_to_cpu(cap->number);
+
+ info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
+ func_p->common_cap.ieee_1588 = info->ena;
+
+ info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
+ info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
+ info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
+ info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
+
+ info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
+ info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+
+ ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
+ func_p->common_cap.ieee_1588);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
+ info->src_tmr_owned);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
+ info->tmr_ena);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
+ info->tmr_index_owned);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
+ info->tmr_index_assoc);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
+ info->clk_freq);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
+ info->clk_src);
+}
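
Every field above is a mask-and-shift decode of the single le32 "number" word: single-bit flags use the ((number & M) != 0) idiom, and multi-bit fields shift down as with clk_freq. A self-contained illustration of the arithmetic (the mask values below are made up; the real ones are the ICE_TS_* defines):

	/* Illustrative only: decode a 3-bit field at bits 27:25 and a
	 * flag at bit 0, the same pattern ice_parse_1588_func_caps uses.
	 */
	#define EX_CLK_FREQ_S	25
	#define EX_CLK_FREQ_M	(0x7 << EX_CLK_FREQ_S)
	#define EX_ENA_M	BIT(0)

	u32 number = 0x06000001;
	u8 clk_freq = (number & EX_CLK_FREQ_M) >> EX_CLK_FREQ_S; /* == 3 */
	bool ena = (number & EX_ENA_M) != 0;			  /* true */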
+
+/**
* ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
* @hw: pointer to the HW struct
* @func_p: pointer to function capabilities structure
@@ -2082,6 +2211,9 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
case ICE_AQC_CAPS_VSI:
ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_1588:
+ ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
+ break;
case ICE_AQC_CAPS_FD:
ice_parse_fdir_func_caps(hw, func_p);
break;
@@ -2155,6 +2287,57 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_1588 for device capabilities.
+ */
+static void
+ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
+ u32 logical_id = le32_to_cpu(cap->logical_id);
+ u32 phys_id = le32_to_cpu(cap->phys_id);
+ u32 number = le32_to_cpu(cap->number);
+
+ info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
+ dev_p->common_cap.ieee_1588 = info->ena;
+
+ info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
+ info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
+ info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
+
+ info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
+ info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
+ info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
+
+ info->ena_ports = logical_id;
+ info->tmr_own_map = phys_id;
+
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
+ dev_p->common_cap.ieee_1588);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
+ info->tmr0_owner);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
+ info->tmr0_owned);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
+ info->tmr0_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
+ info->tmr1_owner);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
+ info->tmr1_owned);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
+ info->tmr1_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
+ info->ena_ports);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
+ info->tmr_own_map);
+}
+
+/**
* ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2215,6 +2398,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_VSI:
ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_1588:
+ ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
case ICE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
@@ -3635,6 +3821,52 @@ do_aq:
return status;
}
+/**
+ * ice_aq_add_rdma_qsets - add Tx RDMA Qsets
+ * @hw: pointer to the hardware structure
+ * @num_qset_grps: Number of RDMA Qset groups
+ * @qset_list: list of Qset groups to be added
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add Tx RDMA Qsets (0x0C33)
+ */
+static int
+ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
+ struct ice_aqc_add_rdma_qset_data *qset_list,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_rdma_qset_data *list;
+ struct ice_aqc_add_rdma_qset *cmd;
+ struct ice_aq_desc desc;
+ u16 i, sum_size = 0;
+
+ cmd = &desc.params.add_rdma_qset;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
+
+ if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
+ return -EINVAL;
+
+ for (i = 0, list = qset_list; i < num_qset_grps; i++) {
+ u16 num_qsets = le16_to_cpu(list->num_qsets);
+
+ sum_size += struct_size(list, rdma_qsets, num_qsets);
+ list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
+ num_qsets);
+ }
+
+ if (buf_size != sum_size)
+ return -EINVAL;
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->num_qset_grps = num_qset_grps;
+
+ return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
+ buf_size, cd));
+}
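
The buf_size check mirrors the wire layout the firmware expects: each group is a variable-length ice_aqc_add_rdma_qset_data placed immediately after the previous group's qset array, hence the pointer walk above. Spelled out for two groups:

	/* Layout being validated (num_qset_grps == 2):
	 *
	 *   qset_list ->  group 0 header
	 *                 group 0 rdma_qsets[0..n0-1]
	 *                 group 1 header              <- g0->rdma_qsets + n0
	 *                 group 1 rdma_qsets[0..n1-1]
	 *
	 * so buf_size must equal
	 *   struct_size(g0, rdma_qsets, n0) + struct_size(g1, rdma_qsets, n1)
	 */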
+
/* End of FW Admin Queue command wrappers */
/**
@@ -4133,6 +4365,162 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
}
/**
+ * ice_cfg_vsi_rdma - configure the VSI RDMA queues
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap
+ * @max_rdmaqs: max RDMA queues array per TC
+ *
+ * This function adds/updates the VSI RDMA queues per TC.
+ */
+int
+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *max_rdmaqs)
+{
+ return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
+ max_rdmaqs,
+ ICE_SCHED_NODE_OWNER_RDMA));
+}
+
+/**
+ * ice_ena_vsi_rdma_qset - enable RDMA Qsets
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @rdma_qset: pointer to RDMA Qset
+ * @num_qsets: number of RDMA Qsets
+ * @qset_teid: pointer to Qset node TEIDs
+ *
+ * This function adds RDMA Qsets and returns the new Qset node TEIDs.
+ */
+int
+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
+{
+ struct ice_aqc_txsched_elem_data node = { 0 };
+ struct ice_aqc_add_rdma_qset_data *buf;
+ struct ice_sched_node *parent;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u16 i, buf_size;
+ int ret;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return -EIO;
+ hw = pi->hw;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return -EINVAL;
+
+ buf_size = struct_size(buf, rdma_qsets, num_qsets);
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ mutex_lock(&pi->sched_lock);
+
+ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
+ ICE_SCHED_NODE_OWNER_RDMA);
+ if (!parent) {
+ ret = -EINVAL;
+ goto rdma_error_exit;
+ }
+ buf->parent_teid = parent->info.node_teid;
+ node.parent_teid = parent->info.node_teid;
+
+ buf->num_qsets = cpu_to_le16(num_qsets);
+ for (i = 0; i < num_qsets; i++) {
+ buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
+ buf->rdma_qsets[i].info.valid_sections =
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+ ICE_AQC_ELEM_VALID_EIR;
+ buf->rdma_qsets[i].info.generic = 0;
+ buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->rdma_qsets[i].info.cir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
+ buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->rdma_qsets[i].info.eir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
+ }
+ ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
+ if (ret) {
+ ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
+ goto rdma_error_exit;
+ }
+ node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+ for (i = 0; i < num_qsets; i++) {
+ node.node_teid = buf->rdma_qsets[i].qset_teid;
+ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
+ &node);
+ if (status) {
+ ret = ice_status_to_errno(status);
+ break;
+ }
+ qset_teid[i] = le32_to_cpu(node.node_teid);
+ }
+rdma_error_exit:
+ mutex_unlock(&pi->sched_lock);
+ kfree(buf);
+ return ret;
+}
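
Taken together with ice_cfg_vsi_rdma() above, an RDMA-side caller first sizes the per-TC allocation and then enables individual Qsets. A minimal hypothetical sequence for one Qset on TC 0 (details such as where the qset ID comes from are illustrative):

	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { 1 };	/* one queue, TC 0 */
	u16 qset_id = 0;	/* would come from the RDMA driver */
	u32 qset_teid;
	int err;

	err = ice_cfg_vsi_rdma(pi, vsi_handle, BIT(0), max_rdmaqs);
	if (err)
		return err;

	err = ice_ena_vsi_rdma_qset(pi, vsi_handle, 0 /* tc */,
				    &qset_id, 1, &qset_teid);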
+
+/**
+ * ice_dis_vsi_rdma_qset - free RDMA resources
+ * @pi: port_info struct
+ * @count: number of RDMA Qsets to free
+ * @qset_teid: TEID of Qset node
+ * @q_id: list of queue IDs being disabled
+ */
+int
+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
+ u16 *q_id)
+{
+ struct ice_aqc_dis_txq_item *qg_list;
+ enum ice_status status = 0;
+ struct ice_hw *hw;
+ u16 qg_size;
+ int i;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return -EIO;
+
+ hw = pi->hw;
+
+ qg_size = struct_size(qg_list, q_id, 1);
+ qg_list = kzalloc(qg_size, GFP_KERNEL);
+ if (!qg_list)
+ return -ENOMEM;
+
+ mutex_lock(&pi->sched_lock);
+
+ for (i = 0; i < count; i++) {
+ struct ice_sched_node *node;
+
+ node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
+ if (!node)
+ continue;
+
+ qg_list->parent_teid = node->info.parent_teid;
+ qg_list->num_qs = 1;
+ qg_list->q_id[0] =
+ cpu_to_le16(q_id[i] |
+ ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
+
+ status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
+ ICE_NO_RESET, 0, NULL);
+ if (status)
+ break;
+
+ ice_free_sched_node(pi, node);
+ }
+
+ mutex_unlock(&pi->sched_lock);
+ kfree(qg_list);
+ return ice_status_to_errno(status);
+}
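
Teardown mirrors the enable path: the TEIDs handed back by ice_ena_vsi_rdma_qset() name the scheduler nodes to free, paired with the queue IDs used at enable time. Continuing the sketch above:

	/* Hypothetical continuation: disable and free the single Qset */
	err = ice_dis_vsi_rdma_qset(pi, 1, &qset_teid, &qset_id);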
+
+/**
* ice_replay_pre_init - replay pre initialization
* @hw: pointer to the HW struct
*
@@ -4304,6 +4692,81 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
}
/**
+ * ice_aq_set_driver_param - Set driver parameter to share via firmware
+ * @hw: pointer to the HW struct
+ * @idx: parameter index to set
+ * @value: the value to set the parameter to
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the value of one of the software defined parameters. All PFs connected
+ * to this device can read the value using ice_aq_get_driver_param.
+ *
+ * Note that firmware provides no synchronization or locking, and will not
+ * save the parameter value during a device reset. It is expected that
+ * a single PF will write the parameter value, while all other PFs will only
+ * read it.
+ */
+int
+ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
+ u32 value, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_driver_shared_params *cmd;
+ struct ice_aq_desc desc;
+
+ if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
+ return -EIO;
+
+ cmd = &desc.params.drv_shared_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
+
+ cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
+ cmd->param_indx = idx;
+ cmd->param_val = cpu_to_le32(value);
+
+ return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+}
+
+/**
+ * ice_aq_get_driver_param - Get driver parameter shared via firmware
+ * @hw: pointer to the HW struct
+ * @idx: parameter index to set
+ * @value: storage to return the shared parameter
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get the value of one of the software defined parameters.
+ *
+ * Note that firmware provides no synchronization or locking. It is expected
+ * that only a single PF will write a given parameter.
+ */
+int
+ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
+ u32 *value, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_driver_shared_params *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
+ return -EIO;
+
+ cmd = &desc.params.drv_shared_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
+
+ cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
+ cmd->param_indx = idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ *value = le32_to_cpu(cmd->param_val);
+
+ return 0;
+}
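
As the comments note, these parameters behave like a soft mailbox: a single writer PF publishes, everyone else polls, and nothing persists across reset. A hedged sketch of the intended pairing (the parameter index name is illustrative; the real indices live in enum ice_aqc_driver_params):

	/* Writer PF publishes a value for its sibling functions... */
	err = ice_aq_set_driver_param(hw, EX_DRIVER_PARAM_IDX, value, NULL);

	/* ...and any other PF on the same device reads it back.  With no
	 * locking in firmware, readers may see stale or post-reset
	 * default data until the writer runs again.
	 */
	err = ice_aq_get_driver_param(hw, EX_DRIVER_PARAM_IDX, &value, NULL);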
+
+/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
*