From 7425d8220f8d0c2127aec75677f652a26f86bc95 Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Thu, 19 Apr 2018 05:50:11 -0700 Subject: qed* : use trust mode to allow VF to override forced MAC As per the existing behavior, when the PF sets a MAC address for a VF (also called a forced MAC), the VF is not allowed to change its MAC address afterwards. This places a limitation on a few use cases, such as bonding of VFs, where the bonding driver asks the VF to change its MAC address. This patch uses the VF trust mode to allow a VF to change its MAC address even though the PF has set a forced MAC for that VF.
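An illustrative usage, with placeholder device names and VF index (not taken from the patch): the host administrator marks the VF as trusted via "ip link set <pf-dev> vf 0 trust on"; a subsequent "ip link set <vf-dev> address <mac>" issued in the guest, for example by the bonding driver, then succeeds even though the PF had earlier forced a MAC for that VF.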
Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 210 +++++++++++++++++++++++++--- 1 file changed, 194 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 5acb91b3564c..77376fda8eae 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -48,7 +48,7 @@ static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data, u8 fw_return_code); - +static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) { @@ -1790,7 +1790,8 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, if (!p_vf->vport_instance) return -EINVAL; - if (events & BIT(MAC_ADDR_FORCED)) { + if ((events & BIT(MAC_ADDR_FORCED)) || + p_vf->p_vf_info.is_trusted_configured) { /* Since there's no way [currently] of removing the MAC, * we can always assume this means we need to force it. */ @@ -1809,8 +1810,12 @@ "PF failed to configure MAC for VF\n"); return rc; } - - p_vf->configured_features |= 1 << MAC_ADDR_FORCED; + if (p_vf->p_vf_info.is_trusted_configured) + p_vf->configured_features |= + BIT(VFPF_BULLETIN_MAC_ADDR); + else + p_vf->configured_features |= + BIT(MAC_ADDR_FORCED); } if (events & BIT(VLAN_ADDR_FORCED)) { @@ -3170,6 +3175,10 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) return 0; + /* Don't keep track of shadow copy since we don't intend to restore. */ + if (p_vf->p_vf_info.is_trusted_configured) + return 0; + /* First remove entries and then add new ones */ if (p_params->opcode == QED_FILTER_REMOVE) { for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { @@ -3244,9 +3253,17 @@ static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, /* No real decision to make; Store the configured MAC */ if (params->type == QED_FILTER_MAC || - params->type == QED_FILTER_MAC_VLAN) + params->type == QED_FILTER_MAC_VLAN) { ether_addr_copy(vf->mac, params->mac); + if (vf->is_trusted_configured) { + qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid); + + /* Update and post bulletin again */ + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } + } + return 0; } @@ -4081,16 +4098,60 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, return; } - feature = 1 << MAC_ADDR_FORCED; + if (vf_info->p_vf_info.is_trusted_configured) { + feature = BIT(VFPF_BULLETIN_MAC_ADDR); + /* Trust mode will disable Forced MAC */ + vf_info->bulletin.p_virt->valid_bitmap &= + ~BIT(MAC_ADDR_FORCED); + } else { + feature = BIT(MAC_ADDR_FORCED); + /* Forced MAC will disable MAC_ADDR */ + vf_info->bulletin.p_virt->valid_bitmap &= + ~BIT(VFPF_BULLETIN_MAC_ADDR); + } + memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); vf_info->bulletin.p_virt->valid_bitmap |= feature; - /* Forced MAC will disable MAC_ADDR */ - vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR); qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); } +static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid) +{ + struct qed_vf_info *vf_info; + u64 feature; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n", + vfid); + return -EINVAL; + } + + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n", + vfid); + return -EINVAL; + } + + if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Can not set MAC, Forced MAC is configured\n"); + return -EINVAL; + } + + feature = BIT(VFPF_BULLETIN_MAC_ADDR); + ether_addr_copy(vf_info->bulletin.p_virt->mac, mac); + + vf_info->bulletin.p_virt->valid_bitmap |= feature; + + if (vf_info->p_vf_info.is_trusted_configured) + qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); + + return 0; +} + static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, u16 pvid, int vfid) { @@ -4204,6 +4265,21 @@ out: return rc; } +static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & + BIT(VFPF_BULLETIN_MAC_ADDR))) + return NULL; + + return p_vf->bulletin.p_virt->mac; +} + static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { @@ -4493,8 +4569,12 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) if (!vf_info) continue; - /* Set the forced MAC, and schedule the IOV task */ - ether_addr_copy(vf_info->forced_mac, mac); + /* Set the MAC, and schedule the IOV task */ + if (vf_info->is_trusted_configured) + ether_addr_copy(vf_info->mac, mac); + else + ether_addr_copy(vf_info->forced_mac, mac); + qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); } @@ -4802,6 +4882,33 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static bool 
qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn, + u8 *mac, + struct qed_public_vf_info *info) +{ + if (info->is_trusted_configured) { + if (is_valid_ether_addr(info->mac) && + (!mac || !ether_addr_equal(mac, info->mac))) + return true; + } else { + if (is_valid_ether_addr(info->forced_mac) && + (!mac || !ether_addr_equal(mac, info->forced_mac))) + return true; + } + + return false; +} + +static void qed_set_bulletin_mac(struct qed_hwfn *hwfn, + struct qed_public_vf_info *info, + int vfid) +{ + if (info->is_trusted_configured) + qed_iov_bulletin_set_mac(hwfn, info->mac, vfid); + else + qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid); +} + static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) { int i; @@ -4816,18 +4923,20 @@ static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) continue; /* Update data on bulletin board */ - mac = qed_iov_bulletin_get_forced_mac(hwfn, i); - if (is_valid_ether_addr(info->forced_mac) && - (!mac || !ether_addr_equal(mac, info->forced_mac))) { + if (info->is_trusted_configured) + mac = qed_iov_bulletin_get_mac(hwfn, i); + else + mac = qed_iov_bulletin_get_forced_mac(hwfn, i); + + if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) { DP_VERBOSE(hwfn, QED_MSG_IOV, "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); - /* Update bulletin board with forced MAC */ - qed_iov_bulletin_set_forced_mac(hwfn, - info->forced_mac, i); + /* Update bulletin board with MAC */ + qed_set_bulletin_mac(hwfn, info, i); update = true; } @@ -4867,6 +4976,72 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) +{ + struct qed_public_vf_info *vf_info; + struct qed_vf_info *vf; + u8 *force_mac; + int i; + + vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); + vf = qed_iov_get_vf_info(hwfn, vf_id, true); + + if (!vf_info || !vf) + return; + + /* Force MAC converted to generic MAC in case of VF trust on */ + if (vf_info->is_trusted_configured && + (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) { + force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id); + + if (force_mac) { + /* Clear existing shadow copy of MAC to have a clean + * slate. + */ + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(vf->shadow_config.macs[i], + vf_info->mac)) { + memset(vf->shadow_config.macs[i], 0, + ETH_ALEN); + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n", + vf_info->mac, vf_id); + break; + } + } + + ether_addr_copy(vf_info->mac, force_mac); + memset(vf_info->forced_mac, 0, ETH_ALEN); + vf->bulletin.p_virt->valid_bitmap &= + ~BIT(MAC_ADDR_FORCED); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } + } + + /* Update shadow copy with VF MAC when trust mode is turned off */ + if (!vf_info->is_trusted_configured) { + u8 empty_mac[ETH_ALEN]; + + memset(empty_mac, 0, ETH_ALEN); + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(vf->shadow_config.macs[i], + empty_mac)) { + ether_addr_copy(vf->shadow_config.macs[i], + vf_info->mac); + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n", + vf_info->mac, vf_id); + break; + } + } + /* Clear bulletin when trust mode is turned off, + * to have a clean slate for next (normal) operations. 
+ */ + qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } +} + static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) { struct qed_sp_vport_update_params params; @@ -4890,6 +5065,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) continue; vf_info->is_trusted_configured = vf_info->is_trusted_request; + /* Handle forced MAC mode */ + qed_update_mac_for_vf_trust_change(hwfn, i); + /* Validate that the VF has a configured vport */ vf = qed_iov_get_vf_info(hwfn, i, true); if (!vf->vport_instance) -- cgit v1.2.3 From 809c45a091d93e05c6e9b5d53bb3f1185273286b Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Thu, 19 Apr 2018 05:50:12 -0700 Subject: qed* : Add new TLV to request PF to update MAC in bulletin board There may be a need for the VF driver to request the PF to explicitly update its bulletin with a MAC address, e.g. when the user assigns a MAC address to the VF while the VF is still down and the PF's bulletin board contains a different MAC address. In this case, when the VF's interface is brought up, it gets loaded with the MAC address from the bulletin board, which is not desirable. To handle this corner case, we need a new TLV to request the PF to update its bulletin board with the suggested MAC. This request will be honored only for trusted VFs. Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 19 +++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.c | 37 ++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.c | 29 ++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 21 +++++++++++++++ drivers/net/ethernet/qlogic/qede/qede_filter.c | 4 +++ include/linux/qed/qed_eth_if.h | 1 + 6 files changed, 111 insertions(+) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index e874504e8b28..8b1b7e8ca56c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2850,6 +2850,24 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, cqe); } +static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac) +{ + int i, ret; + + if (IS_PF(cdev)) + return 0; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac); + if (ret) + return ret; + } + + return 0; +} + #ifdef CONFIG_QED_SRIOV extern const struct qed_iov_hv_ops qed_iov_ops_pass; #endif @@ -2887,6 +2905,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .ntuple_filter_config = &qed_ntuple_arfs_filter_config, .configure_arfs_searcher = &qed_configure_arfs_searcher, .get_coalesce = &qed_get_coalesce, + .req_bulletin_update_mac = &qed_req_bulletin_update_mac, }; const struct qed_eth_ops *qed_get_eth_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 77376fda8eae..f01bf52bc381 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3820,6 +3820,40 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); } +static int +qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct vfpf_bulletin_update_mac_tlv *p_req; + u8 status = PFVF_STATUS_SUCCESS; + int rc = 
0; + + if (!p_vf->p_vf_info.is_trusted_configured) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Blocking bulletin update request from untrusted VF[%d]\n", + p_vf->abs_vf_id); + status = PFVF_STATUS_NOT_SUPPORTED; + rc = -EINVAL; + goto send_status; + } + + p_req = &mbx->req_virt->bulletin_update_mac; + ether_addr_copy(p_bulletin->mac, p_req->mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Updated bulletin of VF[%d] with requested MAC[%pM]\n", + p_vf->abs_vf_id, p_req->mac); + +send_status: + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, + sizeof(struct pfvf_def_resp_tlv), status); + return rc; +} + static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid) { @@ -3899,6 +3933,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_COALESCE_READ: qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_BULLETIN_UPDATE_MAC: + qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); + break; } } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 91b5e9f02a62..2d7fcd6a0777 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1374,6 +1374,35 @@ exit: return rc; } +int +qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + u8 *p_mac) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_bulletin_update_mac_tlv *p_req; + struct pfvf_def_resp_tlv *p_resp; + int rc; + + if (!p_mac) + return -EINVAL; + + /* clear mailbox and prep header tlv */ + p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC, + sizeof(*p_req)); + ether_addr_copy(p_req->mac, p_mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Requesting bulletin update for MAC[%pM]\n", p_mac); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + p_resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); + qed_vf_pf_req_end(p_hwfn, rc); + return rc; +} + int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid) diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 97d44dfb38ca..4f05d5eb3cf5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -518,6 +518,12 @@ struct pfvf_read_coal_resp_tlv { u8 padding[6]; }; +struct vfpf_bulletin_update_mac_tlv { + struct vfpf_first_tlv first_tlv; + u8 mac[ETH_ALEN]; + u8 padding[2]; +}; + union vfpf_tlvs { struct vfpf_first_tlv first_tlv; struct vfpf_acquire_tlv acquire; @@ -532,6 +538,7 @@ union vfpf_tlvs { struct vfpf_update_tunn_param_tlv tunn_param_update; struct vfpf_update_coalesce update_coalesce; struct vfpf_read_coal_req_tlv read_coal_req; + struct vfpf_bulletin_update_mac_tlv bulletin_update_mac; struct tlv_buffer_size tlv_buf_size; }; @@ -650,6 +657,7 @@ enum { CHANNEL_TLV_COALESCE_UPDATE, CHANNEL_TLV_QID, CHANNEL_TLV_COALESCE_READ, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. 
@@ -1042,6 +1050,13 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, struct qed_tunnel_info *p_tunn); u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id); +/** + * @brief - Ask PF to update the MAC address in its bulletin board + * + * @param p_mac - mac address to be updated in bulletin board + */ +int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac); + #else static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *params) @@ -1228,6 +1243,12 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, return -EINVAL; } +static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + u8 *p_mac) +{ + return -EINVAL; +} + static inline u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 8094f035b705..43569b1839be 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1160,6 +1160,10 @@ int qede_set_mac_addr(struct net_device *ndev, void *p) if (edev->state != QEDE_STATE_OPEN) { DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "The device is currently down\n"); + /* Ask PF to explicitly update a copy in bulletin board */ + if (IS_VF(edev) && edev->ops->req_bulletin_update_mac) + edev->ops->req_bulletin_update_mac(edev->cdev, + ndev->dev_addr); goto out; } diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 147d08ccf813..7f9756fe9180 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -352,6 +352,7 @@ struct qed_eth_ops { int (*configure_arfs_searcher)(struct qed_dev *cdev, enum qed_filter_config_mode mode); int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); + int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac); }; const struct qed_eth_ops *qed_get_eth_ops(void) -- cgit v1.2.3 From b60bfdfec5b8ec88552e75c8bd99f1ebfa66a6e0 Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Mon, 23 Apr 2018 14:56:04 +0300 Subject: qed: Delete unused parameter p_ptt from mcp APIs Since nvm image attributes are cached during driver load, acquiring the ptt is not needed when calling qed_mcp_get_nvm_image(). Signed-off-by: Denis Bolotin Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 9 +-------- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 4 +--- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 2 -- 3 files changed, 2 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 9854aa9139af..d1d3787affe8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1894,15 +1894,8 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, u8 *buf, u16 len) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_ptt *ptt = qed_ptt_acquire(hwfn); - int rc; - - if (!ptt) - return -EAGAIN; - rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len); - qed_ptt_release(hwfn, ptt); - return rc; + return qed_mcp_get_nvm_image(hwfn, type, buf, len); } static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index ec0d425766a7..1377ad172fb3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2531,7 +2531,6 @@ err0: static int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, enum qed_nvm_images image_id, struct qed_nvm_image_att *p_image_att) { @@ -2569,7 +2568,6 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, } int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len) { @@ -2578,7 +2576,7 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, memset(p_buffer, 0, buffer_len); - rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att); + rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); if (rc) return rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 8a5c988d0c3c..dd62c38b3bd3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -486,7 +486,6 @@ struct qed_nvm_image_att { * @brief Allows reading a whole nvram image * * @param p_hwfn - * @param p_ptt * @param image_id - image requested for reading * @param p_buffer - allocated buffer into which to fill data * @param buffer_len - length of the allocated buffer. @@ -494,7 +493,6 @@ struct qed_nvm_image_att { * @return 0 iff p_buffer now contains the nvram image. */ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len); -- cgit v1.2.3 From 1ac4329a1cff2e0bb12b71c13ad53a0e05bc87a6 Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Mon, 23 Apr 2018 14:56:05 +0300 Subject: qed: Add configuration information to register dump and debug data Configuration information is added to the debug data collection, in addition to register dump. Added qed_dbg_nvm_image() that receives an image type, allocates a buffer and reads the image. The images are saved in the buffers and the dump size is updated. Signed-off-by: Denis Bolotin Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_debug.c | 113 +++++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 14 +++- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 14 ++++ include/linux/qed/qed_if.h | 3 + 4 files changed, 139 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 4926c5532fba..b3211c7d38c2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -7778,6 +7778,57 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev) return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO); } +int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, u32 *length) +{ + struct qed_nvm_image_att image_att; + int rc; + + *length = 0; + rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); + if (rc) + return rc; + + *length = image_att.length; + + return rc; +} + +int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes, enum qed_nvm_images image_id) +{ + struct qed_hwfn *p_hwfn = + &cdev->hwfns[cdev->dbg_params.engine_for_debug]; + u32 len_rounded, i; + __be32 val; + int rc; + + *num_dumped_bytes = 0; + rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded); + if (rc) + return rc; + + DP_NOTICE(p_hwfn->cdev, + "Collecting a debug feature [\"nvram image %d\"]\n", + image_id); + + len_rounded = roundup(len_rounded, sizeof(u32)); + rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded); + if (rc) + return rc; + + /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */ + if (image_id != QED_NVM_IMAGE_NVM_META) + for (i = 0; i < len_rounded; i += 4) { + val = cpu_to_be32(*(u32 *)(buffer + i)); + *(u32 *)(buffer + i) = val; + } + + *num_dumped_bytes = len_rounded; + + return rc; +} + int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { @@ -7831,6 +7882,9 @@ enum debug_print_features { IGU_FIFO = 6, PHY = 7, FW_ASSERTS = 8, + NVM_CFG1 = 9, + DEFAULT_CFG = 10, + NVM_META = 11, }; static u32 qed_calc_regdump_header(enum debug_print_features feature, @@ -7965,13 +8019,61 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) DP_ERR(cdev, "qed_dbg_mcp_trace failed. 
rc = %d\n", rc); } + /* nvm cfg1 */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, + &feature_size, QED_NVM_IMAGE_NVM_CFG1); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(NVM_CFG1, cur_engine, + feature_size, omit_engine); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc); + } + + /* nvm default */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, + &feature_size, QED_NVM_IMAGE_DEFAULT_CFG); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(DEFAULT_CFG, cur_engine, + feature_size, omit_engine); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG", + rc); + } + + /* nvm meta */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, + &feature_size, QED_NVM_IMAGE_NVM_META); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(NVM_META, cur_engine, + feature_size, omit_engine); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc); + } + return 0; } int qed_dbg_all_data_size(struct qed_dev *cdev) { + struct qed_hwfn *p_hwfn = + &cdev->hwfns[cdev->dbg_params.engine_for_debug]; + u32 regs_len = 0, image_len = 0; u8 cur_engine, org_engine; - u32 regs_len = 0; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { @@ -7993,6 +8095,15 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) /* Engine common */ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev); + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; return regs_len; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 1377ad172fb3..0550f0ee11b3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2529,7 +2529,7 @@ err0: return rc; } -static int +int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, struct qed_nvm_image_att *p_image_att) @@ -2545,6 +2545,15 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, case QED_NVM_IMAGE_FCOE_CFG: type = NVM_TYPE_FCOE_CFG; break; + case QED_NVM_IMAGE_NVM_CFG1: + type = NVM_TYPE_NVM_CFG1; + break; + case QED_NVM_IMAGE_DEFAULT_CFG: + type = NVM_TYPE_DEFAULT_CFG; + break; + case QED_NVM_IMAGE_NVM_META: + type = NVM_TYPE_META; + break; default: DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", image_id); @@ -2588,9 +2597,6 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, return -EINVAL; } - /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */ - image_att.length -= 4; - if (image_att.length > buffer_len) { DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h 
b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index dd62c38b3bd3..3af3896420b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -482,6 +482,20 @@ struct qed_nvm_image_att { u32 length; }; +/** + * @brief Allows reading nvram image attributes + * + * @param p_hwfn + * @param image_id - image to get attributes for + * @param p_image_att - image attributes structure into which to fill data + * + * @return int - 0 - operation was successful. + */ +int +qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, + struct qed_nvm_image_att *p_image_att); + /** * @brief Allows reading a whole nvram image * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index b5b2bc9eacca..e53f9c7c2809 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -159,6 +159,9 @@ struct qed_dcbx_get { enum qed_nvm_images { QED_NVM_IMAGE_ISCSI_CFG, QED_NVM_IMAGE_FCOE_CFG, + QED_NVM_IMAGE_NVM_CFG1, + QED_NVM_IMAGE_DEFAULT_CFG, + QED_NVM_IMAGE_NVM_META, }; struct qed_link_eee_params { -- cgit v1.2.3 From c7d852e301d834949e570920db808201bdc51a22 Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Tue, 24 Apr 2018 15:32:53 +0300 Subject: qed: Fix copying 2 strings The strscpy() conversion was a recent fix ("net: qed: use correct strncpy() size") to prevent passing the length of the source buffer to strncpy() and to guarantee null termination. It misses the goal of overwriting only the first 3 characters in "???_BIG_RAM" and "???_RAM" while keeping the rest of the string. Use strncpy() with a length of 3, without null termination. Signed-off-by: Denis Bolotin Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_debug.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index b3211c7d38c2..39124b594a36 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -419,6 +419,7 @@ struct phy_defs { #define NUM_RSS_MEM_TYPES 5 #define NUM_BIG_RAM_TYPES 3 +#define BIG_RAM_NAME_LEN 3 #define NUM_PHY_TBUS_ADDRESSES 2048 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2) @@ -3650,8 +3651,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256 : 128; - strscpy(type_name, big_ram->instance_name, sizeof(type_name)); - strscpy(mem_name, big_ram->instance_name, sizeof(mem_name)); + strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN); + strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN); /* Dump memory header */ offset += qed_grc_dump_mem_hdr(p_hwfn, -- cgit v1.2.3 From 0bc5fe857274133ca028ebb15ff2e8549a369916 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 5 May 2018 18:42:59 -0700 Subject: qed*: Refactor mf_mode to consist of bits. The `mf_mode' field indicates the multi-partitioning mode the device is configured to. This method doesn't scale very well: adding a new MF mode requires going over all the existing conditions and deciding whether those are needed for the new mode or not. The patch defines a set of bit-fields for modes which are derived according to the mode info shared by the MFW, and all the configuration is made according to those. To add a new mode, there is a single place where we need to go and choose which bits apply and which don't.
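A minimal standalone sketch of this pattern, with shortened stand-ins for the QED_MF_* bits (illustrative code, not driver code):

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Stand-ins for the QED_MF_* capability bits defined in the diff below */
enum mf_bit { MF_OVLAN_CLSS, MF_LLH_MAC_CLSS, MF_INTER_PF_SWITCH };

int main(void)
{
	/* An NPAR-like mode simply ORs in the capabilities that apply */
	unsigned long mf_bits = BIT(MF_LLH_MAC_CLSS) | BIT(MF_INTER_PF_SWITCH);

	/* Call sites test the one capability they care about ... */
	if (mf_bits & BIT(MF_LLH_MAC_CLSS))
		printf("MAC-based classification enabled\n");

	/* ... instead of enumerating every mode that implies it */
	if (!(mf_bits & BIT(MF_OVLAN_CLSS)))
		printf("no outer-VLAN classification\n");

	return 0;
}

The design point: each call site tests the single capability it needs, so a new MF mode only has to OR in the applicable bits in one place.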
Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 41 ++++++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_dev.c | 39 +++++++++++---------- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 6 ++-- drivers/net/ethernet/qlogic/qed/qed_main.c | 6 ++-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 3 +- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 16 +++------ drivers/net/ethernet/qlogic/qede/qede_main.c | 4 +-- include/linux/qed/qed_if.h | 2 +- 8 files changed, 71 insertions(+), 46 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index e07460a68d30..c8f3507d1151 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -439,6 +439,41 @@ struct qed_fw_data { u32 init_ops_size; }; +enum qed_mf_mode_bit { + /* Supports PF-classification based on tag */ + QED_MF_OVLAN_CLSS, + + /* Supports PF-classification based on MAC */ + QED_MF_LLH_MAC_CLSS, + + /* Supports PF-classification based on protocol type */ + QED_MF_LLH_PROTO_CLSS, + + /* Requires a default PF to be set */ + QED_MF_NEED_DEF_PF, + + /* Allow LL2 to multicast/broadcast */ + QED_MF_LL2_NON_UNICAST, + + /* Allow Cross-PF [& child VFs] Tx-switching */ + QED_MF_INTER_PF_SWITCH, + + /* Unified Fabric Port support enabled */ + QED_MF_UFP_SPECIFIC, + + /* Disable Accelerated Receive Flow Steering (aRFS) */ + QED_MF_DISABLE_ARFS, + + /* Use vlan for steering */ + QED_MF_8021Q_TAGGING, + + /* Use stag for steering */ + QED_MF_8021AD_TAGGING, + + /* Allow DSCP to TC mapping */ + QED_MF_DSCP_TO_TC_MAP, +}; + enum BAR_ID { BAR_ID_0, /* used for GRC */ BAR_ID_1 /* Used for doorbells */ @@ -669,10 +704,8 @@ struct qed_dev { u8 num_funcs_in_port; u8 path_id; - enum qed_mf_mode mf_mode; -#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT) -#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR) -#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN) + + unsigned long mf_bits; int pcie_width; int pcie_speed; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d2ad5e92c74f..9b07d7f25042 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1149,18 +1149,10 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) return -EINVAL; } - switch (p_hwfn->cdev->mf_mode) { - case QED_MF_DEFAULT: - case QED_MF_NPAR: - hw_mode |= 1 << MODE_MF_SI; - break; - case QED_MF_OVLAN: + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) hw_mode |= 1 << MODE_MF_SD; - break; - default: - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + else hw_mode |= 1 << MODE_MF_SI; - } hw_mode |= 1 << MODE_ASIC; @@ -1557,7 +1549,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* send function start command */ rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, - p_hwfn->cdev->mf_mode, allow_npar_tx_switch); if (rc) { DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); @@ -2651,17 +2642,25 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) switch (mf_mode) { case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: - p_hwfn->cdev->mf_mode = QED_MF_OVLAN; + p_hwfn->cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); break; case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: - p_hwfn->cdev->mf_mode = QED_MF_NPAR; + p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | 
+ BIT(QED_MF_LL2_NON_UNICAST) | + BIT(QED_MF_INTER_PF_SWITCH); break; case NVM_CFG1_GLOB_MF_MODE_DEFAULT: - p_hwfn->cdev->mf_mode = QED_MF_DEFAULT; + p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_LL2_NON_UNICAST); + if (QED_IS_BB(p_hwfn->cdev)) + p_hwfn->cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); break; } - DP_INFO(p_hwfn, "Multi function mode is %08x\n", - p_hwfn->cdev->mf_mode); + + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", + p_hwfn->cdev->mf_bits); /* Read Multi-function information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + @@ -3462,7 +3461,7 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0, en; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits)) return 0; qed_llh_mac_to_filter(&high, &low, p_filter); @@ -3507,7 +3506,7 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits)) return; qed_llh_mac_to_filter(&high, &low, p_filter); @@ -3549,7 +3548,7 @@ qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0, en; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits)) return 0; switch (type) { @@ -3647,7 +3646,7 @@ qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits)) return; switch (type) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 38502815d681..6c942c133b2c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -922,9 +922,9 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 
1 : 0; - if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && - p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) && - (conn_type != QED_LL2_TYPE_IWARP)) { + if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) && + p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE && + conn_type != QED_LL2_TYPE_IWARP) { p_ramrod->mf_si_bcast_accept_all = 1; p_ramrod->mf_si_mcast_accept_all = 1; } else { diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d1d3787affe8..307fe33d75c8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -264,7 +264,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); - dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); + dev_info->is_mf_default = !test_bit(QED_MF_LLH_MAC_CLSS, + &cdev->mf_bits); dev_info->dev_type = cdev->type; ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); @@ -273,7 +274,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->fw_minor = FW_MINOR_VERSION; dev_info->fw_rev = FW_REVISION_VERSION; dev_info->fw_eng = FW_ENGINEERING_VERSION; - dev_info->mf_mode = cdev->mf_mode; + dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, + &cdev->mf_bits); dev_info->tx_switching = true; if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index ab4ad8a1e2a5..768022235451 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -416,7 +416,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * @param p_hwfn * @param p_ptt * @param p_tunn - * @param mode * @param allow_npar_tx_switch * * @return int @@ -425,7 +424,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, - enum qed_mf_mode mode, bool allow_npar_tx_switch); + bool allow_npar_tx_switch); /** * @brief qed_sp_pf_update - PF Function Update Ramrod diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 5e927b6cac22..fbb317257629 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -306,7 +306,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, - enum qed_mf_mode mode, bool allow_npar_tx_switch) + bool allow_npar_tx_switch) { struct pf_start_ramrod_data *p_ramrod = NULL; u16 sb = qed_int_get_sp_sb_id(p_hwfn); @@ -339,18 +339,10 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->dont_log_ramrods = 0; p_ramrod->log_type_mask = cpu_to_le16(0xf); - switch (mode) { - case QED_MF_DEFAULT: - case QED_MF_NPAR: - p_ramrod->mf_mode = MF_NPAR; - break; - case QED_MF_OVLAN: + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) p_ramrod->mf_mode = MF_OVLAN; - break; - default: - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + else p_ramrod->mf_mode = MF_NPAR; - } p_ramrod->outer_tag_config.outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan); @@ -365,7 +357,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); - if (IS_MF_SI(p_hwfn)) + if 
(test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits)) p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; switch (p_hwfn->hw_info.personality) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a01e7d6e5442..89c581c3c21a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -199,7 +199,7 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) /* Enable/Disable Tx switching for PF */ if ((rc == num_vfs_param) && netif_running(edev->ndev) && - qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { + !qed_info->b_inter_pf_switch && qed_info->tx_switching) { vport_params->vport_id = 0; vport_params->update_tx_switching_flg = 1; vport_params->tx_switching_flg = num_vfs_param ? 1 : 0; @@ -1928,7 +1928,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) vport_update_params->update_vport_active_flg = 1; vport_update_params->vport_active_flg = 1; - if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && + if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) && qed_info->tx_switching) { vport_update_params->update_tx_switching_flg = 1; vport_update_params->tx_switching_flg = 1; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index e53f9c7c2809..5dac56147a07 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -359,7 +359,7 @@ struct qed_dev_info { #define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; - u8 mf_mode; + bool b_inter_pf_switch; bool tx_switching; bool rdma_supported; u16 mtu; -- cgit v1.2.3 From 27bf96e32c92599dc7523b36d6c761fc8312c8c0 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 5 May 2018 18:43:00 -0700 Subject: qed: Remove unused data member 'is_mf_default'. The data member 'is_mf_default' is not used by the qed/qede drivers; remove it. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 -- include/linux/qed/qed_if.h | 1 - 2 files changed, 3 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 307fe33d75c8..70bc5634675c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -264,8 +264,6 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); - dev_info->is_mf_default = !test_bit(QED_MF_LLH_MAC_CLSS, - &cdev->mf_bits); dev_info->dev_type = cdev->type; ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 5dac56147a07..907976fd56f7 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -339,7 +339,6 @@ struct qed_dev_info { u8 num_hwfns; u8 hw_mac[ETH_ALEN]; - bool is_mf_default; /* FW version */ u16 fw_major; -- cgit v1.2.3 From b51bdfb9cbe2ecf99a4c45c48c6286963344786c Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 5 May 2018 18:43:01 -0700 Subject: qed: Add support for multi function mode with 802.1ad tagging. The patch adds support for a new multi-function mode wherein traffic classification is done based on 802.1ad tagging and the outer vlan tag provided by the management firmware.
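For reference, a standalone sketch of the two tag protocol identifiers involved; the values mirror ETH_P_8021Q and ETH_P_8021AD from <linux/if_ether.h> (illustrative code, not driver code):

#include <stdio.h>

/* TPID of the 802.1Q C-tag and of the 802.1ad (QinQ) outer S-tag */
#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8

int main(void)
{
	printf("C-tag TPID: 0x%04x, S-tag TPID: 0x%04x\n",
	       ETH_P_8021Q, ETH_P_8021AD);
	return 0;
}

Programming the device's tag-ethertype registers with ETH_P_8021AD, as the diff below does, makes classification key on the outer 802.1ad S-tag rather than an inner 802.1Q C-tag.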
Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 64 ++++++++++++++++------- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 5 ++ 2 files changed, 49 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 9b07d7f25042..95d00cbe1a7f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1668,6 +1668,18 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) if (rc) return rc; + if (IS_PF(cdev) && test_bit(QED_MF_8021AD_TAGGING, + &cdev->mf_bits)) { + STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ETH_P_8021AD); + STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ETH_P_8021AD); + STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ETH_P_8021AD); + STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, + ETH_P_8021AD); + } + qed_fill_load_req_params(&load_req_params, p_params->p_drv_load_params); rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, @@ -2630,39 +2642,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) link->pause.autoneg, p_caps->default_eee, p_caps->eee_lpi_timer); - /* Read Multi-function information from shmem */ - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + - offsetof(struct nvm_cfg1, glob) + - offsetof(struct nvm_cfg1_glob, generic_cont0); + if (IS_LEAD_HWFN(p_hwfn)) { + struct qed_dev *cdev = p_hwfn->cdev; - generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, generic_cont0); - mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> - NVM_CFG1_GLOB_MF_MODE_OFFSET; + generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); - switch (mf_mode) { - case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: - p_hwfn->cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); - break; - case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: - p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> + NVM_CFG1_GLOB_MF_MODE_OFFSET; + + switch (mf_mode) { + case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); + break; + case NVM_CFG1_GLOB_MF_MODE_BD: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_8021AD_TAGGING); + break; + case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_LL2_NON_UNICAST) | BIT(QED_MF_INTER_PF_SWITCH); - break; - case NVM_CFG1_GLOB_MF_MODE_DEFAULT: - p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + break; + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_LL2_NON_UNICAST); - if (QED_IS_BB(p_hwfn->cdev)) - p_hwfn->cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); - break; + if (QED_IS_BB(p_hwfn->cdev)) + cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); + break; + } + + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", + cdev->mf_bits); } DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", p_hwfn->cdev->mf_bits); - /* Read Multi-function information from shmem */ + /* Read device capabilities information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, device_capabilities); diff --git 
a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index fbb317257629..26bed26b9071 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -346,6 +346,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->outer_tag_config.outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD; + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + } + /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, -- cgit v1.2.3 From cac6f691546b9efd50c31c0db97fe50d0357104a Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 5 May 2018 18:43:02 -0700 Subject: qed: Add support for Unified Fabric Port. This patch adds driver changes for supporting the Unified Fabric Port (UFP). This is a new partitioning mode wherein the MFW provides the set of parameters to be used by the device, such as traffic class, outer-vlan tag value, priority type etc. The driver receives this info via notifications from the MFW and configures the hardware accordingly. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 20 ++++++ drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 14 +++- drivers/net/ethernet/qlogic/qed/qed_dev.c | 32 ++++++++-- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 3 + drivers/net/ethernet/qlogic/qed/qed_hsi.h | 28 ++++++++ drivers/net/ethernet/qlogic/qed/qed_ll2.c | 40 ++++++++---- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 78 +++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 8 +++ drivers/net/ethernet/qlogic/qed/qed_sp.h | 9 +++ drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 57 ++++++++++++++++- drivers/scsi/qedf/qedf_fip.c | 7 +- drivers/scsi/qedf/qedf_main.c | 2 +- drivers/scsi/qedi/qedi_iscsi.c | 2 +- include/linux/qed/qed_ll2_if.h | 10 ++- 14 files changed, 283 insertions(+), 27 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index c8f3507d1151..adcff495466e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -474,6 +474,24 @@ enum qed_mf_mode_bit { QED_MF_DSCP_TO_TC_MAP, }; +enum qed_ufp_mode { + QED_UFP_MODE_ETS, + QED_UFP_MODE_VNIC_BW, + QED_UFP_MODE_UNKNOWN +}; + +enum qed_ufp_pri_type { + QED_UFP_PRI_OS, + QED_UFP_PRI_VNIC, + QED_UFP_PRI_UNKNOWN +}; + +struct qed_ufp_info { + enum qed_ufp_pri_type pri_type; + enum qed_ufp_mode mode; + u8 tc; +}; + enum BAR_ID { BAR_ID_0, /* used for GRC */ BAR_ID_1 /* Used for doorbells */ @@ -582,6 +600,8 @@ struct qed_hwfn { struct qed_dcbx_info *p_dcbx_info; + struct qed_ufp_info ufp_info; + struct qed_dmae_info dmae_info; /* QM init */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 449777f21237..8f31406ec894 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -274,8 +274,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, u32 pri_tc_tbl, int count, u8 dcbx_version) { enum dcbx_protocol_type type; + bool enable, ieee, eth_tlv; u8 tc, priority_map; - bool enable, ieee; u16 protocol_id; int priority; int i; @@ -283,6 +283,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); 
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); + eth_tlv = false; /* Parse APP TLV */ for (i = 0; i < count; i++) { protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, @@ -304,13 +305,22 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, * indication, but we only got here if there was an * app tlv for the protocol, so dcbx must be enabled. */ - enable = !(type == DCBX_PROTOCOL_ETH); + if (type == DCBX_PROTOCOL_ETH) { + enable = false; + eth_tlv = true; + } else { + enable = true; + } qed_dcbx_update_app_info(p_data, p_hwfn, enable, priority, tc, type); } } + /* If Eth TLV is not detected, use UFP TC as default TC */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv) + p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc; + /* Update ramrod protocol data and hw_info fields * with default info when corresponding APP TLV's are not detected. * The enabled field has a different logic for ethernet as only for diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 95d00cbe1a7f..560528962658 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1499,6 +1499,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, p_hwfn->hw_info.ovlan); + + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, + 1); } /* Enable classification by MAC if needed */ @@ -1635,6 +1640,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) bool b_default_mtu = true; struct qed_hwfn *p_hwfn; int rc = 0, mfw_rc, i; + u16 ether_type; if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); @@ -1668,16 +1674,22 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) if (rc) return rc; - if (IS_PF(cdev) && test_bit(QED_MF_8021AD_TAGGING, - &cdev->mf_bits)) { + if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING, + &cdev->mf_bits) || + test_bit(QED_MF_8021AD_TAGGING, + &cdev->mf_bits))) { + if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits)) + ether_type = ETH_P_8021Q; + else + ether_type = ETH_P_8021AD; STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, - ETH_P_8021AD); + ether_type); STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, - ETH_P_8021AD); + ether_type); STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, - ETH_P_8021AD); + ether_type); STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, - ETH_P_8021AD); + ether_type); } qed_fill_load_req_params(&load_req_params, @@ -2659,6 +2671,12 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); break; + case NVM_CFG1_GLOB_MF_MODE_UFP: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_UFP_SPECIFIC) | + BIT(QED_MF_8021Q_TAGGING); + break; case NVM_CFG1_GLOB_MF_MODE_BD: cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | @@ -2879,6 +2897,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_mcp_cmd_port_init(p_hwfn, p_ptt); qed_get_eee_caps(p_hwfn, p_ptt); + + qed_mcp_read_ufp_config(p_hwfn, p_ptt); } if (qed_mcp_is_init(p_hwfn)) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 
2dc9b312a795..cc1b373c0ace 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -313,6 +313,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, p_data->d_id.addr_mid = p_conn->d_id.addr_mid; p_data->d_id.addr_lo = p_conn->d_id.addr_lo; p_data->flags = p_conn->flags; + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + SET_FIELD(p_data->flags, + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1); p_data->def_q_idx = p_conn->def_q_idx; return qed_spq_post(p_hwfn, p_ent, NULL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 7f5ec42dde48..b5f70eff0182 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11993,6 +11993,16 @@ struct public_port { #define EEE_REMOTE_TW_TX_OFFSET 0 #define EEE_REMOTE_TW_RX_MASK 0xffff0000 #define EEE_REMOTE_TW_RX_OFFSET 16 + + u32 oem_cfg_port; +#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 +#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 +#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 +#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 +#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C +#define OEM_CFG_SCHED_TYPE_OFFSET 2 +#define OEM_CFG_SCHED_TYPE_ETS 0x1 +#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 }; struct public_func { @@ -12069,6 +12079,23 @@ struct public_func { #define DRV_ID_DRV_INIT_HW_MASK 0x80000000 #define DRV_ID_DRV_INIT_HW_SHIFT 31 #define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) + + u32 oem_cfg_func; +#define OEM_CFG_FUNC_TC_MASK 0x0000000F +#define OEM_CFG_FUNC_TC_OFFSET 0 +#define OEM_CFG_FUNC_TC_0 0x0 +#define OEM_CFG_FUNC_TC_1 0x1 +#define OEM_CFG_FUNC_TC_2 0x2 +#define OEM_CFG_FUNC_TC_3 0x3 +#define OEM_CFG_FUNC_TC_4 0x4 +#define OEM_CFG_FUNC_TC_5 0x5 +#define OEM_CFG_FUNC_TC_6 0x6 +#define OEM_CFG_FUNC_TC_7 0x7 + +#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 }; struct mcp_mac { @@ -12495,6 +12522,7 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_BW_UPDATE10, MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_BW_UPDATE11, + MFW_DRV_MSG_OEM_CFG_UPDATE, MFW_DRV_MSG_MAX }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 6c942c133b2c..c3c1a99a0252 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -919,6 +919,10 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; p_ramrod->inner_vlan_stripping_en = p_ll2_conn->input.rx_vlan_removal_en; + + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) + p_ramrod->report_outer_vlan = 1; p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 
1 : 0; @@ -1493,11 +1497,12 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + qed_llh_add_protocol_filter(p_hwfn, p_ptt, + ETH_P_FCOE, 0, + QED_LLH_FILTER_ETHERTYPE); qed_llh_add_protocol_filter(p_hwfn, p_ptt, - 0x8906, 0, - QED_LLH_FILTER_ETHERTYPE); - qed_llh_add_protocol_filter(p_hwfn, p_ptt, - 0x8914, 0, + ETH_P_FIP, 0, QED_LLH_FILTER_ETHERTYPE); } @@ -1653,11 +1658,16 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); if (QED_IS_IWARP_PERSONALITY(p_hwfn) && - p_ll2->input.conn_type == QED_LL2_TYPE_OOO) + p_ll2->input.conn_type == QED_LL2_TYPE_OOO) { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); - else + } else { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + p_ll2->input.conn_type == QED_LL2_TYPE_FCOE) + pkt->remove_stag = true; + } + SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, cpu_to_le16(pkt->l4_hdr_offset_w)); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); @@ -1668,6 +1678,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); + SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION, + !!(pkt->remove_stag)); + start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); @@ -1884,11 +1897,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + qed_llh_remove_protocol_filter(p_hwfn, p_ptt, + ETH_P_FCOE, 0, + QED_LLH_FILTER_ETHERTYPE); qed_llh_remove_protocol_filter(p_hwfn, p_ptt, - 0x8906, 0, - QED_LLH_FILTER_ETHERTYPE); - qed_llh_remove_protocol_filter(p_hwfn, p_ptt, - 0x8914, 0, + ETH_P_FIP, 0, QED_LLH_FILTER_ETHERTYPE); } @@ -2360,7 +2374,8 @@ fail: return -EINVAL; } -static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) +static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, + unsigned long xmit_flags) { struct qed_ll2_tx_pkt_info pkt; const skb_frag_t *frag; @@ -2405,6 +2420,9 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) pkt.first_frag = mapping; pkt.first_frag_len = skb->len; pkt.cookie = skb; + if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) && + test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) + pkt.remove_stag = true; rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, &pkt, 1); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 0550f0ee11b3..e80f5e7c7992 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -40,6 +40,7 @@ #include #include #include "qed.h" +#include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_hw.h" @@ -1486,6 +1487,80 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) &resp, &param); } +void qed_mcp_read_ufp_config(struct 
qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct public_func shmem_info; + u32 port_cfg, val; + + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + return; + + memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); + port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, oem_cfg_port)); + val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >> + OEM_CFG_CHANNEL_TYPE_OFFSET; + if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) + DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val); + + val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET; + if (val == OEM_CFG_SCHED_TYPE_ETS) { + p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS; + } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) { + p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW; + } else { + p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN; + DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val); + } + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET; + p_hwfn->ufp_info.tc = (u8)val; + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> + OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET; + if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC; + } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS; + } else { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN; + DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val); + } + + DP_NOTICE(p_hwfn, + "UFP shmem config: mode = %d tc = %d pri_type = %d\n", + p_hwfn->ufp_info.mode, + p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type); +} + +static int +qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + qed_mcp_read_ufp_config(p_hwfn, p_ptt); + + if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) { + p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; + p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; + + qed_qm_reconf(p_hwfn, p_ptt); + } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) { + /* Merge UFP TC with the dcbx TC data */ + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_OPERATIONAL_MIB); + } else { + DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n"); + return -EINVAL; + } + + /* update storm FW with negotiation results */ + qed_sp_pf_update_ufp(p_hwfn); + + /* update stag pcp value */ + qed_sp_pf_update_stag(p_hwfn); + + return 0; +} + int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -1529,6 +1604,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, qed_dcbx_mib_update_event(p_hwfn, p_ptt, QED_DCBX_OPERATIONAL_MIB); break; + case MFW_DRV_MSG_OEM_CFG_UPDATE: + qed_mcp_handle_ufp_event(p_hwfn, p_ptt); + break; case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 3af3896420b9..250579ba632b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1004,6 +1004,14 @@ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); */ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Read ufp config from the shared memory. 
+ * + * @param p_hwfn + * @param p_ptt + */ +void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + /** * @brief Populate the nvm info shadow in the given hardware function * diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 768022235451..e95431f6acd4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -462,6 +462,15 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); * @return int */ +/** + * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod + * + * @param p_hwfn + * + * @return int + */ +int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn); + int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 26bed26b9071..8de644b4721e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -314,7 +314,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; - u8 page_cnt; + u8 page_cnt, i; /* update initial eq producer */ qed_eq_prod_update(p_hwfn, @@ -345,12 +345,30 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->mf_mode = MF_NPAR; p_ramrod->outer_tag_config.outer_tag.tci = - cpu_to_le16(p_hwfn->hw_info.ovlan); - if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { + cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q; + } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD; p_ramrod->outer_tag_config.enable_stag_pri_change = 1; } + p_ramrod->outer_tag_config.pri_map_valid = 1; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i; + + /* enable_stag_pri_change should be set if port is in BD mode or * UFP with Host Control mode. 
+ */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) { + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + else + p_ramrod->outer_tag_config.enable_stag_pri_change = 0; + + p_ramrod->outer_tag_config.outer_tag.tci |= + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); + } /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, @@ -431,6 +449,39 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn) return qed_spq_post(p_hwfn, p_ent, NULL); } +int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EOPNOTSUPP; + + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) { + DP_INFO(p_hwfn, "Invalid priority type %d\n", + p_hwfn->ufp_info.pri_type); + return -EINVAL; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + p_ent->ramrod.pf_update.update_enable_stag_pri_change = true; + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) + p_ent->ramrod.pf_update.enable_stag_pri_change = 1; + else + p_ent->ramrod.pf_update.enable_stag_pri_change = 0; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + /* Set pf update ramrod command params */ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c index 773558fc0697..16d1a21cdff9 100644 --- a/drivers/scsi/qedf/qedf_fip.c +++ b/drivers/scsi/qedf/qedf_fip.c @@ -23,6 +23,7 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) struct fip_vlan *vlan; #define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS; + unsigned long flags = 0; skb = dev_alloc_skb(sizeof(struct fip_vlan)); if (!skb) @@ -65,7 +66,9 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) kfree_skb(skb); return; } - qed_ops->ll2->start_xmit(qedf->cdev, skb); + + set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags); + qed_ops->ll2->start_xmit(qedf->cdev, skb, flags); } static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, @@ -139,7 +142,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, false); - qed_ops->ll2->start_xmit(qedf->cdev, skb); + qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); } /* Process incoming FIP frames. 
*/ diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 284ccb566b19..6c19015975a8 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -994,7 +994,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) if (qedf_dump_frames) print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, false); - qed_ops->ll2->start_xmit(qedf->cdev, skb); + qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); return 0; } diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 7ec7f6e00fb8..ff21aa1fd939 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -1150,7 +1150,7 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) if (vlanid) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); - rc = qedi_ops->ll2->start_xmit(cdev, skb); + rc = qedi_ops->ll2->start_xmit(cdev, skb, 0); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n", rc); diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 266c1fb45387..5eb022953aca 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -202,6 +202,7 @@ struct qed_ll2_tx_pkt_info { bool enable_ip_cksum; bool enable_l4_cksum; bool calc_ip_len; + bool remove_stag; }; #define QED_LL2_UNUSED_HANDLE (0xff) @@ -220,6 +221,11 @@ struct qed_ll2_params { u8 ll2_mac_address[ETH_ALEN]; }; +enum qed_ll2_xmit_flags { + /* FIP discovery packet */ + QED_LL2_XMIT_FLAGS_FIP_DISCOVERY +}; + struct qed_ll2_ops { /** * @brief start - initializes ll2 @@ -245,10 +251,12 @@ struct qed_ll2_ops { * * @param cdev * @param skb + * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags. * * @return 0 on success, otherwise error value. */ - int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb); + int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb, + unsigned long xmit_flags); /** * @brief register_cb_ops - protocol driver register the callback for Rx/Tx -- cgit v1.2.3 From 974f6c046ad90d071f96c76ff8ab29355a798c41 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 16 May 2018 14:44:38 +0300 Subject: qed: LL2 flush isles when connection is closed The driver should free all pending isles once it gets a FLUSH CQE from the FW; this is part of the iSCSI out-of-order flow. Fixes: 1d6cff4fca4366 ("qed: Add iSCSI out of order packet handling") Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller
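Conceptually, the change below adds an early-out in the load-balanced Rx completion path: a slow-path CQE carrying a RX_QUEUE_FLUSH ramrod is consumed by releasing the connection's pending isles instead of being fed to the regular handler. A minimal, self-contained C sketch of that dispatch; the demo_* names are illustrative stand-ins, not the driver's types:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the FW CQE fields the patch inspects. */
enum demo_cqe_type { DEMO_CQE_REGULAR, DEMO_CQE_SLOW_PATH };
enum demo_ramrod { DEMO_RAMROD_NONE, DEMO_RAMROD_RX_QUEUE_FLUSH };

struct demo_cqe {
	enum demo_cqe_type type;
	enum demo_ramrod ramrod_cmd_id;
	unsigned int cid;		/* connection id from the opaque data */
};

/* Returns true when the CQE was consumed as a flush request. */
static bool demo_handle_slowpath(const struct demo_cqe *cqe)
{
	if (cqe->type != DEMO_CQE_SLOW_PATH)
		return false;
	if (cqe->ramrod_cmd_id != DEMO_RAMROD_RX_QUEUE_FLUSH)
		return false;
	/* Here the driver drops every isle still queued for this cid. */
	printf("flushing pending isles for cid %u\n", cqe->cid);
	return true;
}

int main(void)
{
	struct demo_cqe flush = {
		.type = DEMO_CQE_SLOW_PATH,
		.ramrod_cmd_id = DEMO_RAMROD_RX_QUEUE_FLUSH,
		.cid = 7,
	};
	struct demo_cqe regular = { .type = DEMO_CQE_REGULAR };

	if (!demo_handle_slowpath(&flush))
		printf("regular processing for cid %u\n", flush.cid);
	if (!demo_handle_slowpath(&regular))
		printf("regular processing path\n");
	return 0;
}

In the patch itself the equivalent check sits at the top of the CQE loop, so a FLUSH completion never reaches the type check that would otherwise log it as a non-regular completion.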
--- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c3c1a99a0252..be63f0a2705d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -591,6 +591,27 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) } } +static bool +qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn, + struct core_rx_slow_path_cqe *p_cqe) +{ + struct ooo_opaque *iscsi_ooo; + u32 cid; + + if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) + return false; + + iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data; + if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES) + return false; + + /* Need to make a flush */ + cid = le32_to_cpu(iscsi_ooo->cid); + qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid); + + return true; +} + static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { @@ -617,6 +638,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); cqe_type = cqe->rx_cqe_sp.type; + if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH) + if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn, + &cqe->rx_cqe_sp)) + continue; + if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { DP_NOTICE(p_hwfn, "Got a non-regular LB LL2 completion [type 0x%02x]\n", -- cgit v1.2.3 From 6291c608a908a9d62be650373bce7f734311d07c Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 16 May 2018 14:44:39 +0300 Subject: qed: Fix possibility of list corruption during rmmod flows The ll2 flows of flushing the txq/rxq need to be synchronized with the regular fp processing; the missing synchronization caused list corruption during load/unload stress tests. Fixes: 0a7fb11c23c0f ("qed: Add Light L2 support") Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller
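The fix below serializes the flush flows with fast-path processing by holding the queue spinlock across all manipulation of the active list, dropping it only while the completion callback runs (that callback can reach back into upper layers). A minimal userspace sketch of the same drain pattern, with a pthread mutex standing in for spin_lock_irqsave() and hypothetical demo_* names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Tiny singly linked descriptor list; names are illustrative only. */
struct demo_pkt {
	int id;
	struct demo_pkt *next;
};

static struct demo_pkt *active_head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pop one entry under the lock, complete it outside the lock, then
 * retake the lock before testing the list again. */
static void demo_flush(void)
{
	pthread_mutex_lock(&list_lock);
	while (active_head) {
		struct demo_pkt *pkt = active_head;

		active_head = pkt->next;
		pthread_mutex_unlock(&list_lock);

		printf("completing pkt %d outside the lock\n", pkt->id);
		free(pkt);

		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct demo_pkt *pkt = malloc(sizeof(*pkt));

		if (!pkt)
			return 1;
		pkt->id = i;
		pkt->next = active_head;
		active_head = pkt;
	}
	demo_flush();
	return 0;
}

The same lock/unlock shape appears in both qed_ll2_txq_flush() and qed_ll2_rxq_flush() in the diff that follows.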
--- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index be63f0a2705d..4a78294d6481 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) struct qed_ll2_tx_packet *p_pkt = NULL; struct qed_ll2_info *p_ll2_conn; struct qed_ll2_tx_queue *p_tx; + unsigned long flags = 0; dma_addr_t tx_frag; p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); @@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) p_tx = &p_ll2_conn->tx_queue; + spin_lock_irqsave(&p_tx->lock, flags); while (!list_empty(&p_tx->active_descq)) { p_pkt = list_first_entry(&p_tx->active_descq, struct qed_ll2_tx_packet, list_entry); @@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_del(&p_pkt->list_entry); b_last_packet = list_empty(&p_tx->active_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); + spin_unlock_irqrestore(&p_tx->lock, flags); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { struct qed_ooo_buffer *p_buffer; @@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) b_last_frag, b_last_packet); } + spin_lock_irqsave(&p_tx->lock, flags); } + spin_unlock_irqrestore(&p_tx->lock, flags); } static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) @@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_rx_packet *p_pkt = NULL; struct qed_ll2_rx_queue *p_rx; + unsigned long flags = 0; p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); if (!p_ll2_conn) @@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) p_rx = &p_ll2_conn->rx_queue; + spin_lock_irqsave(&p_rx->lock, flags); while (!list_empty(&p_rx->active_descq)) { p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); if (!p_pkt) break; - list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); + spin_unlock_irqrestore(&p_rx->lock, flags); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { struct qed_ooo_buffer *p_buffer; @@ -588,7 +595,9 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) cookie, rx_buf_addr, b_last); } + spin_lock_irqsave(&p_rx->lock, flags); } + spin_unlock_irqrestore(&p_rx->lock, flags); } static bool -- cgit v1.2.3 From fc16f56b7bfd5bfe812ba2cd3a4c8943977e8306 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 16 May 2018 14:44:40 +0300 Subject: qed: Fix LL2 race during connection terminate Stress on qedi/qedr load/unload led to list_del corruption. This is due to ll2 connection terminate freeing resources without verifying that no more ll2 processing will occur. This patch unregisters the ll2 status block before terminating the connection to ensure this race does not occur. Fixes: 1d6cff4fca4366 ("qed: Add iSCSI out of order packet handling") Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller
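The fix below clears the queue's b_cb_registred flag and publishes the store with smp_wmb() before stopping and flushing the queue, while the completion handlers bail out as soon as they observe the cleared flag, so they can never race with the resource teardown. A minimal userspace sketch of that ordering, using C11 release/acquire atomics in place of the kernel barrier and hypothetical demo_* names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_queue {
	atomic_bool cb_registered;	/* mirrors b_cb_registred */
	int pending;			/* stand-in for queue state */
};

/* Completion path: bail out early once the callback is unregistered. */
static void demo_completion(struct demo_queue *q)
{
	if (!atomic_load_explicit(&q->cb_registered, memory_order_acquire))
		return;			/* teardown already started */
	q->pending--;			/* safe: state not yet freed */
}

/* Teardown path: unpublish the callback first, then tear down. */
static void demo_terminate(struct demo_queue *q)
{
	atomic_store_explicit(&q->cb_registered, false, memory_order_release);
	/* ... stop the queue, flush descriptors, unregister the cb ... */
	q->pending = 0;
}

int main(void)
{
	struct demo_queue q = { .pending = 3 };

	atomic_store(&q.cb_registered, true);
	demo_completion(&q);
	demo_terminate(&q);
	demo_completion(&q);		/* no-op after teardown started */
	printf("pending=%d\n", q.pending);
	return 0;
}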
--- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 4a78294d6481..c97ebd681c47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -829,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; int rc; + if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) + return 0; + rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); if (rc) return rc; @@ -849,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) u16 new_idx = 0, num_bds = 0; int rc; + if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) + return 0; + new_idx = le16_to_cpu(*p_tx->p_fw_cons); num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); @@ -1915,17 +1921,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) /* Stop Tx & Rx of connection, if needed */ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { + p_ll2_conn->tx_queue.b_cb_registred = false; + smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); if (rc) goto out; + qed_ll2_txq_flush(p_hwfn, connection_handle); + qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); } if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { + p_ll2_conn->rx_queue.b_cb_registred = false; + smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); if (rc) goto out; + qed_ll2_rxq_flush(p_hwfn, connection_handle); + qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); } if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) @@ -1974,16 +1988,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle) if (!p_ll2_conn) return; - if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { - p_ll2_conn->rx_queue.b_cb_registred = false; - qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); - } - - if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { - p_ll2_conn->tx_queue.b_cb_registred = false; - qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); - } - kfree(p_ll2_conn->tx_queue.descq_mem); qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); -- cgit v1.2.3 From dd006921d67f4a96f3d1fa763aad4d5dcd86959b Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:37 -0700 Subject: qed: Add MFW interfaces for TLV request support. The patch adds required management firmware (MFW) interfaces such as mailbox commands, TLV types etc. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 231 ++++++++++++++++++++++++++++++ 1 file changed, 231 insertions(+) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b5f70eff0182..8e1e6e1eb40e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11863,6 +11863,8 @@ struct public_global { u32 running_bundle_id; s32 external_temperature; u32 mdump_reason; + u32 data_ptr; + u32 data_size; }; struct fw_flr_mb { @@ -12322,6 +12324,7 @@ struct public_drv_mb { #define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define DRV_MSG_CODE_SET_LED_MODE 0x00200000 #define DRV_MSG_CODE_RESOURCE_CMD 0x00230000 +#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000 #define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F #define RESOURCE_CMD_REQ_RESC_SHIFT 0 @@ -12523,6 +12526,7 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_BW_UPDATE11, MFW_DRV_MSG_OEM_CFG_UPDATE, + MFW_DRV_MSG_GET_TLV_REQ, MFW_DRV_MSG_MAX }; @@ -12558,6 +12562,233 @@ struct mcp_public_data { struct public_func func[MCP_GLOB_FUNC_MAX]; }; +/* OCBB definitions */ +enum tlvs { + /* Category 1: Device Properties */ + DRV_TLV_CLP_STR, + DRV_TLV_CLP_STR_CTD, + /* Category 6: Device Configuration */ + DRV_TLV_SCSI_TO, + DRV_TLV_R_T_TOV, + DRV_TLV_R_A_TOV, + DRV_TLV_E_D_TOV, + DRV_TLV_CR_TOV, + DRV_TLV_BOOT_TYPE, + /* Category 8: Port Configuration */ + DRV_TLV_NPIV_ENABLED, + /* Category 10: Function Configuration */ + DRV_TLV_FEATURE_FLAGS, + DRV_TLV_LOCAL_ADMIN_ADDR, + DRV_TLV_ADDITIONAL_MAC_ADDR_1, + DRV_TLV_ADDITIONAL_MAC_ADDR_2, + DRV_TLV_LSO_MAX_OFFLOAD_SIZE, + DRV_TLV_LSO_MIN_SEGMENT_COUNT, + DRV_TLV_PROMISCUOUS_MODE, + DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG, + DRV_TLV_FLEX_NIC_OUTER_VLAN_ID, + DRV_TLV_OS_DRIVER_STATES, + DRV_TLV_PXE_BOOT_PROGRESS, + /* Category 12: FC/FCoE Configuration */ + DRV_TLV_NPIV_STATE, + DRV_TLV_NUM_OF_NPIV_IDS, + DRV_TLV_SWITCH_NAME, + DRV_TLV_SWITCH_PORT_NUM, + DRV_TLV_SWITCH_PORT_ID, + DRV_TLV_VENDOR_NAME, + DRV_TLV_SWITCH_MODEL, + DRV_TLV_SWITCH_FW_VER, + DRV_TLV_QOS_PRIORITY_PER_802_1P, + DRV_TLV_PORT_ALIAS, + DRV_TLV_PORT_STATE, + DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_LINK_FAILURE_COUNT, + DRV_TLV_FCOE_BOOT_PROGRESS, + /* Category 13: iSCSI Configuration */ + DRV_TLV_TARGET_LLMNR_ENABLED, + DRV_TLV_HEADER_DIGEST_FLAG_ENABLED, + DRV_TLV_DATA_DIGEST_FLAG_ENABLED, + DRV_TLV_AUTHENTICATION_METHOD, + DRV_TLV_ISCSI_BOOT_TARGET_PORTAL, + DRV_TLV_MAX_FRAME_SIZE, + DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_ISCSI_BOOT_PROGRESS, + /* Category 20: Device Data */ + DRV_TLV_PCIE_BUS_RX_UTILIZATION, + DRV_TLV_PCIE_BUS_TX_UTILIZATION, + DRV_TLV_DEVICE_CPU_CORES_UTILIZATION, + DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED, + DRV_TLV_NCSI_RX_BYTES_RECEIVED, + DRV_TLV_NCSI_TX_BYTES_SENT, + /* Category 22: Base Port Data */ + DRV_TLV_RX_DISCARDS, + DRV_TLV_RX_ERRORS, + DRV_TLV_TX_ERRORS, + DRV_TLV_TX_DISCARDS, + DRV_TLV_RX_FRAMES_RECEIVED, + DRV_TLV_TX_FRAMES_SENT, + /* Category 23: FC/FCoE Port Data */ + DRV_TLV_RX_BROADCAST_PACKETS, + DRV_TLV_TX_BROADCAST_PACKETS, + /* Category 28: Base Function Data */ + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4, + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6, + DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_PF_RX_FRAMES_RECEIVED, 
+ DRV_TLV_RX_BYTES_RECEIVED, + DRV_TLV_PF_TX_FRAMES_SENT, + DRV_TLV_TX_BYTES_SENT, + DRV_TLV_IOV_OFFLOAD, + DRV_TLV_PCI_ERRORS_CAP_ID, + DRV_TLV_UNCORRECTABLE_ERROR_STATUS, + DRV_TLV_UNCORRECTABLE_ERROR_MASK, + DRV_TLV_CORRECTABLE_ERROR_STATUS, + DRV_TLV_CORRECTABLE_ERROR_MASK, + DRV_TLV_PCI_ERRORS_AECC_REGISTER, + DRV_TLV_TX_QUEUES_EMPTY, + DRV_TLV_RX_QUEUES_EMPTY, + DRV_TLV_TX_QUEUES_FULL, + DRV_TLV_RX_QUEUES_FULL, + /* Category 29: FC/FCoE Function Data */ + DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_FRAMES_RECEIVED, + DRV_TLV_FCOE_RX_BYTES_RECEIVED, + DRV_TLV_FCOE_TX_FRAMES_SENT, + DRV_TLV_FCOE_TX_BYTES_SENT, + DRV_TLV_CRC_ERROR_COUNT, + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_1_TIMESTAMP, + DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_2_TIMESTAMP, + DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_3_TIMESTAMP, + DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_4_TIMESTAMP, + DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_5_TIMESTAMP, + DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT, + DRV_TLV_LOSS_OF_SIGNAL_ERRORS, + DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT, + DRV_TLV_DISPARITY_ERROR_COUNT, + DRV_TLV_CODE_VIOLATION_ERROR_COUNT, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_TIMESTAMP, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP, + DRV_TLV_LAST_FLOGI_RJT, + DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP, + DRV_TLV_FDISCS_SENT_COUNT, + DRV_TLV_FDISC_ACCS_RECEIVED, + DRV_TLV_FDISC_RJTS_RECEIVED, + DRV_TLV_PLOGI_SENT_COUNT, + DRV_TLV_PLOGI_ACCS_RECEIVED, + DRV_TLV_PLOGI_RJTS_RECEIVED, + DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_1_TIMESTAMP, + DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_2_TIMESTAMP, + DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_3_TIMESTAMP, + DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_4_TIMESTAMP, + DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_5_TIMESTAMP, + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_1_ACC_TIMESTAMP, + DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_2_ACC_TIMESTAMP, + DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_3_ACC_TIMESTAMP, + DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_4_ACC_TIMESTAMP, + DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_5_ACC_TIMESTAMP, + DRV_TLV_LOGOS_ISSUED, + DRV_TLV_LOGO_ACCS_RECEIVED, + DRV_TLV_LOGO_RJTS_RECEIVED, + DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_1_TIMESTAMP, + DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_2_TIMESTAMP, + DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_3_TIMESTAMP, + DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_4_TIMESTAMP, + DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_5_TIMESTAMP, + DRV_TLV_LOGOS_RECEIVED, + DRV_TLV_ACCS_ISSUED, + DRV_TLV_PRLIS_ISSUED, + DRV_TLV_ACCS_RECEIVED, + DRV_TLV_ABTS_SENT_COUNT, + DRV_TLV_ABTS_ACCS_RECEIVED, + DRV_TLV_ABTS_RJTS_RECEIVED, + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_1_TIMESTAMP, + DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_2_TIMESTAMP, + 
DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_3_TIMESTAMP, + DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_4_TIMESTAMP, + DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_5_TIMESTAMP, + DRV_TLV_RSCNS_RECEIVED, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4, + DRV_TLV_LUN_RESETS_ISSUED, + DRV_TLV_ABORT_TASK_SETS_ISSUED, + DRV_TLV_TPRLOS_SENT, + DRV_TLV_NOS_SENT_COUNT, + DRV_TLV_NOS_RECEIVED_COUNT, + DRV_TLV_OLS_COUNT, + DRV_TLV_LR_COUNT, + DRV_TLV_LRR_COUNT, + DRV_TLV_LIP_SENT_COUNT, + DRV_TLV_LIP_RECEIVED_COUNT, + DRV_TLV_EOFA_COUNT, + DRV_TLV_EOFNI_COUNT, + DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT, + DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_BUSY_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT, + DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT, + DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT, + DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT, + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_1_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_2_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_3_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_4_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_5_TIMESTAMP, + /* Category 30: iSCSI Function Data */ + DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED, + DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED, + DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT, + DRV_TLV_ISCSI_PDU_TX_BYTES_SENT +}; + struct nvm_cfg_mac_address { u32 mac_addr_hi; #define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF -- cgit v1.2.3 From 2528c389936efbbece25088426fe7c3c91ff355f Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:38 -0700 Subject: qed: Add support for tlv request processing. The patch adds driver support for processing TLV requests/responses from the MFW and upper driver layers respectively. The implementation reads the requested TLVs from the shared memory, requests the values from upper layer drivers, populates this info (TLVs) in shared memory, and notifies the MFW about the TLV values. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller
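The processing added below walks a shared-memory request buffer in which every TLV starts with a 4-byte header: the type in byte 0, the value length in dwords (excluding the header) in byte 1, and flags in byte 3, matching qed_drv_tlv_hdr and the TLV_TYPE()/TLV_LENGTH()/TLV_FLAGS() accessors. A minimal, self-contained sketch of that walk over made-up sample data:

#include <stdint.h>
#include <stdio.h>

/* Walk a buffer of (4-byte header + dword-sized value) TLV entries. */
static void demo_walk_tlvs(const uint8_t *buf, size_t size)
{
	size_t offset = 0;

	while (offset + 4 <= size) {
		unsigned int type = buf[offset];
		unsigned int len_dw = buf[offset + 1];	/* in dwords */
		unsigned int flags = buf[offset + 3];

		printf("type %u, length %u dwords, flags 0x%x\n",
		       type, len_dw, flags);

		/* Advance past the header plus the dword-sized value. */
		offset += 4 + 4 * len_dw;
	}
}

int main(void)
{
	/* Two sample TLVs: type 1 with one dword of value, type 2 empty. */
	uint8_t buf[12] = { 1, 1, 0, 0, 0xaa, 0xbb, 0xcc, 0xdd, 2, 0, 0, 0 };

	demo_walk_tlvs(buf, sizeof(buf));
	return 0;
}

The driver performs this walk twice: once to enumerate which TLV groups the MFW asked for, and again after the upper layers have filled in values, marking each updated entry with the CHANGED flag before writing the buffer back.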
--- drivers/net/ethernet/qlogic/qed/Makefile | 2 +- drivers/net/ethernet/qlogic/qed/qed.h | 5 + drivers/net/ethernet/qlogic/qed/qed_main.c | 6 + drivers/net/ethernet/qlogic/qed/qed_mcp.h | 53 ++++ drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 378 ++++++++++++++++++++++++++ include/linux/qed/qed_if.h | 37 +++ 6 files changed, 480 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index c70cf2ad81c0..a0acb94d65f0 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ - qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o + qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index adcff495466e..dfdbe529e2bf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -92,6 +92,8 @@ struct qed_eth_cb_ops; struct qed_dev_info; union qed_mcp_protocol_stats; enum qed_mcp_protocol_type; +enum qed_mfw_tlv_type; +union qed_mfw_tlv_data; /* helpers */ #define QED_MFW_GET_FIELD(name, field) \ @@ -907,4 +909,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev, int qed_slowpath_irq_req(struct qed_hwfn *hwfn); void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn); +int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, + enum qed_mfw_tlv_type type, + union qed_mfw_tlv_data *tlv_data); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 9feed3b79cd4..cbf0ea97fc8e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -2088,3 +2088,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev, return; } } + +int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, + union qed_mfw_tlv_data *tlv_buf) +{ + return -EINVAL; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 250579ba632b..591877fca67a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -213,6 +213,40 @@ enum qed_ov_wol { QED_OV_WOL_ENABLED }; +enum qed_mfw_tlv_type { + QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */ + QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */ + QED_MFW_TLV_MAX = 0x4, +}; + +struct qed_mfw_tlv_generic { +#define QED_MFW_TLV_FLAGS_SIZE 2 + struct { + u8 ipv4_csum_offload; + u8 lso_supported; + bool b_set; + } flags; + +#define QED_MFW_TLV_MAC_COUNT 3 + /* First entry for primary MAC, 2 secondary MACs possible */ + u8 mac[QED_MFW_TLV_MAC_COUNT][6]; + bool mac_set[QED_MFW_TLV_MAC_COUNT]; + + u64 rx_frames; + bool rx_frames_set; + u64 rx_bytes; + bool rx_bytes_set; + u64 tx_frames; + bool tx_frames_set; + u64 tx_bytes; + bool tx_bytes_set; +}; + +union qed_mfw_tlv_data { + struct qed_mfw_tlv_generic generic; + struct qed_mfw_tlv_eth eth; +}; + /** * @brief - returns the link params of the hw function * @@ -561,6 +595,17 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, struct 
bist_nvm_image_att *p_image_att, u32 image_index); +/** + * @brief - Processes the TLV request from MFW i.e., get the required TLV info + * from the qed client and send it to the MFW. + * + * @param p_hwfn + * @param p_ptt + * + * @return 0 upon success. + */ +int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn * TODO - this shouldn't really be in .h file, but until all fields @@ -621,6 +666,14 @@ struct qed_mcp_mb_params { u32 mcp_param; }; +struct qed_drv_tlv_hdr { + u8 tlv_type; + u8 tlv_length; /* In dwords - not including this header */ + u8 tlv_reserved; +#define QED_DRV_TLV_FLAGS_CHANGED 0x01 + u8 tlv_flags; +}; + /** * @brief Initialize the interface with the MCP * diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c new file mode 100644 index 000000000000..d58a7146cce5 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include "qed.h" +#include "qed_hw.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" + +#define TLV_TYPE(p) (p[0]) +#define TLV_LENGTH(p) (p[1]) +#define TLV_FLAGS(p) (p[3]) + +#define QED_TLV_DATA_MAX (14) +struct qed_tlv_parsed_buf { + /* To be filled with the address to set in Value field */ + void *p_val; + + /* To be used internally in case the value has to be modified */ + u8 data[QED_TLV_DATA_MAX]; +}; + +static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group) +{ + switch (tlv_type) { + case DRV_TLV_FEATURE_FLAGS: + case DRV_TLV_LOCAL_ADMIN_ADDR: + case DRV_TLV_ADDITIONAL_MAC_ADDR_1: + case DRV_TLV_ADDITIONAL_MAC_ADDR_2: + case DRV_TLV_OS_DRIVER_STATES: + case DRV_TLV_PXE_BOOT_PROGRESS: + case DRV_TLV_RX_FRAMES_RECEIVED: + case DRV_TLV_RX_BYTES_RECEIVED: + case DRV_TLV_TX_FRAMES_SENT: + case DRV_TLV_TX_BYTES_SENT: + case DRV_TLV_NPIV_ENABLED: + case DRV_TLV_PCIE_BUS_RX_UTILIZATION: + case DRV_TLV_PCIE_BUS_TX_UTILIZATION: + case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION: + case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED: + case DRV_TLV_NCSI_RX_BYTES_RECEIVED: + case DRV_TLV_NCSI_TX_BYTES_SENT: + *tlv_group |= QED_MFW_TLV_GENERIC; + break; + case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: + case DRV_TLV_LSO_MIN_SEGMENT_COUNT: + case DRV_TLV_PROMISCUOUS_MODE: + case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: + case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_IOV_OFFLOAD: + case DRV_TLV_TX_QUEUES_EMPTY: + case DRV_TLV_RX_QUEUES_EMPTY: + case DRV_TLV_TX_QUEUES_FULL: + case DRV_TLV_RX_QUEUES_FULL: + *tlv_group |= QED_MFW_TLV_ETH; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* Returns size of the data buffer or -1 in case TLV data is not available. */ +static int +qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_generic *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_FEATURE_FLAGS: + if (p_drv_buf->flags.b_set) { + memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX); + p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ? + 1 : 0; + p_buf->data[0] |= (p_drv_buf->flags.lso_supported ? 
+ 1 : 0) << 1; + p_buf->p_val = p_buf->data; + return QED_MFW_TLV_FLAGS_SIZE; + } + break; + + case DRV_TLV_LOCAL_ADMIN_ADDR: + case DRV_TLV_ADDITIONAL_MAC_ADDR_1: + case DRV_TLV_ADDITIONAL_MAC_ADDR_2: + { + int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR; + + if (p_drv_buf->mac_set[idx]) { + p_buf->p_val = p_drv_buf->mac[idx]; + return ETH_ALEN; + } + break; + } + + case DRV_TLV_RX_FRAMES_RECEIVED: + if (p_drv_buf->rx_frames_set) { + p_buf->p_val = &p_drv_buf->rx_frames; + return sizeof(p_drv_buf->rx_frames); + } + break; + case DRV_TLV_RX_BYTES_RECEIVED: + if (p_drv_buf->rx_bytes_set) { + p_buf->p_val = &p_drv_buf->rx_bytes; + return sizeof(p_drv_buf->rx_bytes); + } + break; + case DRV_TLV_TX_FRAMES_SENT: + if (p_drv_buf->tx_frames_set) { + p_buf->p_val = &p_drv_buf->tx_frames; + return sizeof(p_drv_buf->tx_frames); + } + break; + case DRV_TLV_TX_BYTES_SENT: + if (p_drv_buf->tx_bytes_set) { + p_buf->p_val = &p_drv_buf->tx_bytes; + return sizeof(p_drv_buf->tx_bytes); + } + break; + default: + break; + } + + return -1; +} + +static int +qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_eth *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: + if (p_drv_buf->lso_maxoff_size_set) { + p_buf->p_val = &p_drv_buf->lso_maxoff_size; + return sizeof(p_drv_buf->lso_maxoff_size); + } + break; + case DRV_TLV_LSO_MIN_SEGMENT_COUNT: + if (p_drv_buf->lso_minseg_size_set) { + p_buf->p_val = &p_drv_buf->lso_minseg_size; + return sizeof(p_drv_buf->lso_minseg_size); + } + break; + case DRV_TLV_PROMISCUOUS_MODE: + if (p_drv_buf->prom_mode_set) { + p_buf->p_val = &p_drv_buf->prom_mode; + return sizeof(p_drv_buf->prom_mode); + } + break; + case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->tx_descr_size_set) { + p_buf->p_val = &p_drv_buf->tx_descr_size; + return sizeof(p_drv_buf->tx_descr_size); + } + break; + case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->rx_descr_size_set) { + p_buf->p_val = &p_drv_buf->rx_descr_size; + return sizeof(p_drv_buf->rx_descr_size); + } + break; + case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: + if (p_drv_buf->netq_count_set) { + p_buf->p_val = &p_drv_buf->netq_count; + return sizeof(p_drv_buf->netq_count); + } + break; + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: + if (p_drv_buf->tcp4_offloads_set) { + p_buf->p_val = &p_drv_buf->tcp4_offloads; + return sizeof(p_drv_buf->tcp4_offloads); + } + break; + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: + if (p_drv_buf->tcp6_offloads_set) { + p_buf->p_val = &p_drv_buf->tcp6_offloads; + return sizeof(p_drv_buf->tcp6_offloads); + } + break; + case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->tx_descr_qdepth_set) { + p_buf->p_val = &p_drv_buf->tx_descr_qdepth; + return sizeof(p_drv_buf->tx_descr_qdepth); + } + break; + case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->rx_descr_qdepth_set) { + p_buf->p_val = &p_drv_buf->rx_descr_qdepth; + return sizeof(p_drv_buf->rx_descr_qdepth); + } + break; + case DRV_TLV_IOV_OFFLOAD: + if (p_drv_buf->iov_offload_set) { + p_buf->p_val = &p_drv_buf->iov_offload; + return sizeof(p_drv_buf->iov_offload); + } + break; + case DRV_TLV_TX_QUEUES_EMPTY: + if (p_drv_buf->txqs_empty_set) { + p_buf->p_val = &p_drv_buf->txqs_empty; + return sizeof(p_drv_buf->txqs_empty); + } + break; + case DRV_TLV_RX_QUEUES_EMPTY: + if (p_drv_buf->rxqs_empty_set) { + p_buf->p_val = &p_drv_buf->rxqs_empty; + return sizeof(p_drv_buf->rxqs_empty); + } + break; + case DRV_TLV_TX_QUEUES_FULL: + if 
(p_drv_buf->num_txqs_full_set) { + p_buf->p_val = &p_drv_buf->num_txqs_full; + return sizeof(p_drv_buf->num_txqs_full); + } + break; + case DRV_TLV_RX_QUEUES_FULL: + if (p_drv_buf->num_rxqs_full_set) { + p_buf->p_val = &p_drv_buf->num_rxqs_full; + return sizeof(p_drv_buf->num_rxqs_full); + } + break; + default: + break; + } + + return -1; +} + +static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, + u8 tlv_group, u8 *p_mfw_buf, u32 size) +{ + union qed_mfw_tlv_data *p_tlv_data; + struct qed_tlv_parsed_buf buffer; + struct qed_drv_tlv_hdr tlv; + int len = 0; + u32 offset; + u8 *p_tlv; + + p_tlv_data = vzalloc(sizeof(*p_tlv_data)); + if (!p_tlv_data) + return -ENOMEM; + + if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) { + vfree(p_tlv_data); + return -EINVAL; + } + + memset(&tlv, 0, sizeof(tlv)); + for (offset = 0; offset < size; + offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { + p_tlv = &p_mfw_buf[offset]; + tlv.tlv_type = TLV_TYPE(p_tlv); + tlv.tlv_length = TLV_LENGTH(p_tlv); + tlv.tlv_flags = TLV_FLAGS(p_tlv); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Type %d length = %d flags = 0x%x\n", tlv.tlv_type, + tlv.tlv_length, tlv.tlv_flags); + + if (tlv_group == QED_MFW_TLV_GENERIC) + len = qed_mfw_get_gen_tlv_value(&tlv, + &p_tlv_data->generic, + &buffer); + else if (tlv_group == QED_MFW_TLV_ETH) + len = qed_mfw_get_eth_tlv_value(&tlv, + &p_tlv_data->eth, + &buffer); + + if (len > 0) { + WARN(len > 4 * tlv.tlv_length, + "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n", + len, 4 * tlv.tlv_length); + len = min_t(int, len, 4 * tlv.tlv_length); + tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED; + TLV_FLAGS(p_tlv) = tlv.tlv_flags; + memcpy(p_mfw_buf + offset + sizeof(tlv), + buffer.p_val, len); + } + } + + vfree(p_tlv_data); + + return 0; +} + +int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 addr, size, offset, resp, param, val, global_offsize, global_addr; + u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp; + struct qed_drv_tlv_hdr tlv; + int rc; + + addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = qed_rd(p_hwfn, p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + addr = global_addr + offsetof(struct public_global, data_ptr); + addr = qed_rd(p_hwfn, p_ptt, addr); + size = qed_rd(p_hwfn, p_ptt, global_addr + + offsetof(struct public_global, data_size)); + + if (!size) { + DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size); + goto drv_done; + } + + p_mfw_buf = vzalloc(size); + if (!p_mfw_buf) { + DP_NOTICE(p_hwfn, "Failed to allocate memory for p_mfw_buf\n"); + goto drv_done; + } + + /* Read the TLV request to a local buffer. The MFW represents the TLV in + * little endian format and the mcp read returns it in big endian format, + * so the driver needs to convert each dword to little endian first and + * then memcpy it, preserving the MFW TLV format in the driver buffer. + */ + for (offset = 0; offset < size; offset += sizeof(u32)) { + val = qed_rd(p_hwfn, p_ptt, addr + offset); + val = be32_to_cpu(val); + memcpy(&p_mfw_buf[offset], &val, sizeof(u32)); + } + + /* Parse the headers to enumerate the requested TLV groups */ + for (offset = 0; offset < size; + offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { + p_temp = &p_mfw_buf[offset]; + tlv.tlv_type = TLV_TYPE(p_temp); + tlv.tlv_length = TLV_LENGTH(p_temp); + if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group)) + DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, + "Unrecognized TLV %d\n", tlv.tlv_type); + } + + /* Sanitize the TLV groups according to personality */ + if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping L2 TLVs for non-L2 function\n"); + tlv_group &= ~QED_MFW_TLV_ETH; + } + + /* Update the TLV values in the local buffer */ + for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) { + if (tlv_group & id) + if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size)) + goto drv_done; + } + + /* Write the TLV data back to shared memory. Each 4-byte chunk first needs + * to be copied into a u32 element to restore LSB ordering, and then + * converted to big endian as required by the mcp write. + */ + for (offset = 0; offset < size; offset += sizeof(u32)) { + memcpy(&val, &p_mfw_buf[offset], sizeof(u32)); + val = cpu_to_be32(val); + qed_wr(p_hwfn, p_ptt, addr + offset, val); + } + +drv_done: + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp, + &param); + + vfree(p_mfw_buf); + + return rc; +} diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 907976fd56f7..8e4fad427ac2 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -182,6 +182,43 @@ enum qed_led_mode { QED_LED_MODE_RESTORE }; +struct qed_mfw_tlv_eth { + u16 lso_maxoff_size; + bool lso_maxoff_size_set; + u16 lso_minseg_size; + bool lso_minseg_size_set; + u8 prom_mode; + bool prom_mode_set; + u16 tx_descr_size; + bool tx_descr_size_set; + u16 rx_descr_size; + bool rx_descr_size_set; + u16 netq_count; + bool netq_count_set; + u32 tcp4_offloads; + bool tcp4_offloads_set; + u32 tcp6_offloads; + bool tcp6_offloads_set; + u16 tx_descr_qdepth; + bool tx_descr_qdepth_set; + u16 rx_descr_qdepth; + bool rx_descr_qdepth_set; + u8 iov_offload; +#define QED_MFW_TLV_IOV_OFFLOAD_NONE (0) +#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1) +#define QED_MFW_TLV_IOV_OFFLOAD_VEB (2) +#define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3) + bool iov_offload_set; + u8 txqs_empty; + bool txqs_empty_set; + u8 rxqs_empty; + bool rxqs_empty_set; + u8 num_txqs_full; + bool num_txqs_full_set; + u8 num_rxqs_full; + bool num_rxqs_full_set; +}; + #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) -- cgit v1.2.3 From f240b6882211aae7155a9839dff1426e2853fe30 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:39 -0700 Subject: qed: Add support for processing fcoe tlv request. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 4 +- drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 828 ++++++++++++++++++++++++++ include/linux/qed/qed_if.h | 193 ++++++ 3 files changed, 1024 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 591877fca67a..b31f5d8bd27c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -216,7 +216,8 @@ enum qed_ov_wol { enum qed_mfw_tlv_type { QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */ QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */ - QED_MFW_TLV_MAX = 0x4, + QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */ + QED_MFW_TLV_MAX = 0x8, }; struct qed_mfw_tlv_generic { @@ -245,6 +246,7 @@ struct qed_mfw_tlv_generic { union qed_mfw_tlv_data { struct qed_mfw_tlv_generic generic; struct qed_mfw_tlv_eth eth; + struct qed_mfw_tlv_fcoe fcoe; }; /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index d58a7146cce5..1873cfc08715 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -64,6 +64,157 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group) case DRV_TLV_RX_QUEUES_FULL: *tlv_group |= QED_MFW_TLV_ETH; break; + case DRV_TLV_SCSI_TO: + case DRV_TLV_R_T_TOV: + case DRV_TLV_R_A_TOV: + case DRV_TLV_E_D_TOV: + case DRV_TLV_CR_TOV: + case DRV_TLV_BOOT_TYPE: + case DRV_TLV_NPIV_STATE: + case DRV_TLV_NUM_OF_NPIV_IDS: + case DRV_TLV_SWITCH_NAME: + case DRV_TLV_SWITCH_PORT_NUM: + case DRV_TLV_SWITCH_PORT_ID: + case DRV_TLV_VENDOR_NAME: + case DRV_TLV_SWITCH_MODEL: + case DRV_TLV_SWITCH_FW_VER: + case DRV_TLV_QOS_PRIORITY_PER_802_1P: + case DRV_TLV_PORT_ALIAS: + case DRV_TLV_PORT_STATE: + case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_LINK_FAILURE_COUNT: + case DRV_TLV_FCOE_BOOT_PROGRESS: + case DRV_TLV_RX_BROADCAST_PACKETS: + case DRV_TLV_TX_BROADCAST_PACKETS: + case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: + case DRV_TLV_FCOE_RX_BYTES_RECEIVED: + case DRV_TLV_FCOE_TX_FRAMES_SENT: + case DRV_TLV_FCOE_TX_BYTES_SENT: + case DRV_TLV_CRC_ERROR_COUNT: + case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_1_TIMESTAMP: + case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_2_TIMESTAMP: + case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_3_TIMESTAMP: + case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_4_TIMESTAMP: + case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_5_TIMESTAMP: + case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: + case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: + case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: + case DRV_TLV_DISPARITY_ERROR_COUNT: + case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: + case DRV_TLV_LAST_FLOGI_TIMESTAMP: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: + case 
DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: + case DRV_TLV_LAST_FLOGI_RJT: + case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: + case DRV_TLV_FDISCS_SENT_COUNT: + case DRV_TLV_FDISC_ACCS_RECEIVED: + case DRV_TLV_FDISC_RJTS_RECEIVED: + case DRV_TLV_PLOGI_SENT_COUNT: + case DRV_TLV_PLOGI_ACCS_RECEIVED: + case DRV_TLV_PLOGI_RJTS_RECEIVED: + case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_1_TIMESTAMP: + case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_2_TIMESTAMP: + case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_3_TIMESTAMP: + case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_4_TIMESTAMP: + case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_5_TIMESTAMP: + case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: + case DRV_TLV_LOGOS_ISSUED: + case DRV_TLV_LOGO_ACCS_RECEIVED: + case DRV_TLV_LOGO_RJTS_RECEIVED: + case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_1_TIMESTAMP: + case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_2_TIMESTAMP: + case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_3_TIMESTAMP: + case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_4_TIMESTAMP: + case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_5_TIMESTAMP: + case DRV_TLV_LOGOS_RECEIVED: + case DRV_TLV_ACCS_ISSUED: + case DRV_TLV_PRLIS_ISSUED: + case DRV_TLV_ACCS_RECEIVED: + case DRV_TLV_ABTS_SENT_COUNT: + case DRV_TLV_ABTS_ACCS_RECEIVED: + case DRV_TLV_ABTS_RJTS_RECEIVED: + case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_1_TIMESTAMP: + case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_2_TIMESTAMP: + case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_3_TIMESTAMP: + case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_4_TIMESTAMP: + case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_5_TIMESTAMP: + case DRV_TLV_RSCNS_RECEIVED: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: + case DRV_TLV_LUN_RESETS_ISSUED: + case DRV_TLV_ABORT_TASK_SETS_ISSUED: + case DRV_TLV_TPRLOS_SENT: + case DRV_TLV_NOS_SENT_COUNT: + case DRV_TLV_NOS_RECEIVED_COUNT: + case DRV_TLV_OLS_COUNT: + case DRV_TLV_LR_COUNT: + case DRV_TLV_LRR_COUNT: + case DRV_TLV_LIP_SENT_COUNT: + case DRV_TLV_LIP_RECEIVED_COUNT: + case DRV_TLV_EOFA_COUNT: + case DRV_TLV_EOFNI_COUNT: + case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: + case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: + case DRV_TLV_SCSI_STATUS_BUSY_COUNT: + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: + case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: + case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: + case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: + case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: + case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: + case 
DRV_TLV_SCSI_CHECK_3_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: + *tlv_group = QED_MFW_TLV_FCOE; + break; default: return -EINVAL; } @@ -237,6 +388,672 @@ qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv, return -1; } +static int +qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time, + struct qed_tlv_parsed_buf *p_buf) +{ + if (!p_time->b_set) + return -1; + + /* Validate numbers */ + if (p_time->month > 12) + p_time->month = 0; + if (p_time->day > 31) + p_time->day = 0; + if (p_time->hour > 23) + p_time->hour = 0; + if (p_time->min > 59) + p_time->hour = 0; + if (p_time->msec > 999) + p_time->msec = 0; + if (p_time->usec > 999) + p_time->usec = 0; + + memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX); + snprintf(p_buf->data, 14, "%d%d%d%d%d%d", + p_time->month, p_time->day, + p_time->hour, p_time->min, p_time->msec, p_time->usec); + + p_buf->p_val = p_buf->data; + + return QED_MFW_TLV_TIME_SIZE; +} + +static int +qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_fcoe *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + struct qed_mfw_tlv_time *p_time; + u8 idx; + + switch (p_tlv->tlv_type) { + case DRV_TLV_SCSI_TO: + if (p_drv_buf->scsi_timeout_set) { + p_buf->p_val = &p_drv_buf->scsi_timeout; + return sizeof(p_drv_buf->scsi_timeout); + } + break; + case DRV_TLV_R_T_TOV: + if (p_drv_buf->rt_tov_set) { + p_buf->p_val = &p_drv_buf->rt_tov; + return sizeof(p_drv_buf->rt_tov); + } + break; + case DRV_TLV_R_A_TOV: + if (p_drv_buf->ra_tov_set) { + p_buf->p_val = &p_drv_buf->ra_tov; + return sizeof(p_drv_buf->ra_tov); + } + break; + case DRV_TLV_E_D_TOV: + if (p_drv_buf->ed_tov_set) { + p_buf->p_val = &p_drv_buf->ed_tov; + return sizeof(p_drv_buf->ed_tov); + } + break; + case DRV_TLV_CR_TOV: + if (p_drv_buf->cr_tov_set) { + p_buf->p_val = &p_drv_buf->cr_tov; + return sizeof(p_drv_buf->cr_tov); + } + break; + case DRV_TLV_BOOT_TYPE: + if (p_drv_buf->boot_type_set) { + p_buf->p_val = &p_drv_buf->boot_type; + return sizeof(p_drv_buf->boot_type); + } + break; + case DRV_TLV_NPIV_STATE: + if (p_drv_buf->npiv_state_set) { + p_buf->p_val = &p_drv_buf->npiv_state; + return sizeof(p_drv_buf->npiv_state); + } + break; + case DRV_TLV_NUM_OF_NPIV_IDS: + if (p_drv_buf->num_npiv_ids_set) { + p_buf->p_val = &p_drv_buf->num_npiv_ids; + return sizeof(p_drv_buf->num_npiv_ids); + } + break; + case DRV_TLV_SWITCH_NAME: + if (p_drv_buf->switch_name_set) { + p_buf->p_val = &p_drv_buf->switch_name; + return sizeof(p_drv_buf->switch_name); + } + break; + case DRV_TLV_SWITCH_PORT_NUM: + if (p_drv_buf->switch_portnum_set) { + p_buf->p_val = &p_drv_buf->switch_portnum; + return sizeof(p_drv_buf->switch_portnum); + } + break; + case DRV_TLV_SWITCH_PORT_ID: + if (p_drv_buf->switch_portid_set) { + p_buf->p_val = &p_drv_buf->switch_portid; + return sizeof(p_drv_buf->switch_portid); + } + break; + case DRV_TLV_VENDOR_NAME: + if (p_drv_buf->vendor_name_set) { + p_buf->p_val = &p_drv_buf->vendor_name; + return sizeof(p_drv_buf->vendor_name); + } + break; + case DRV_TLV_SWITCH_MODEL: + if (p_drv_buf->switch_model_set) { + p_buf->p_val = &p_drv_buf->switch_model; + return sizeof(p_drv_buf->switch_model); + } + break; + case DRV_TLV_SWITCH_FW_VER: + if (p_drv_buf->switch_fw_version_set) { + p_buf->p_val = &p_drv_buf->switch_fw_version; + return sizeof(p_drv_buf->switch_fw_version); + } + break; + case 
DRV_TLV_QOS_PRIORITY_PER_802_1P: + if (p_drv_buf->qos_pri_set) { + p_buf->p_val = &p_drv_buf->qos_pri; + return sizeof(p_drv_buf->qos_pri); + } + break; + case DRV_TLV_PORT_ALIAS: + if (p_drv_buf->port_alias_set) { + p_buf->p_val = &p_drv_buf->port_alias; + return sizeof(p_drv_buf->port_alias); + } + break; + case DRV_TLV_PORT_STATE: + if (p_drv_buf->port_state_set) { + p_buf->p_val = &p_drv_buf->port_state; + return sizeof(p_drv_buf->port_state); + } + break; + case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->fip_tx_descr_size_set) { + p_buf->p_val = &p_drv_buf->fip_tx_descr_size; + return sizeof(p_drv_buf->fip_tx_descr_size); + } + break; + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->fip_rx_descr_size_set) { + p_buf->p_val = &p_drv_buf->fip_rx_descr_size; + return sizeof(p_drv_buf->fip_rx_descr_size); + } + break; + case DRV_TLV_LINK_FAILURE_COUNT: + if (p_drv_buf->link_failures_set) { + p_buf->p_val = &p_drv_buf->link_failures; + return sizeof(p_drv_buf->link_failures); + } + break; + case DRV_TLV_FCOE_BOOT_PROGRESS: + if (p_drv_buf->fcoe_boot_progress_set) { + p_buf->p_val = &p_drv_buf->fcoe_boot_progress; + return sizeof(p_drv_buf->fcoe_boot_progress); + } + break; + case DRV_TLV_RX_BROADCAST_PACKETS: + if (p_drv_buf->rx_bcast_set) { + p_buf->p_val = &p_drv_buf->rx_bcast; + return sizeof(p_drv_buf->rx_bcast); + } + break; + case DRV_TLV_TX_BROADCAST_PACKETS: + if (p_drv_buf->tx_bcast_set) { + p_buf->p_val = &p_drv_buf->tx_bcast; + return sizeof(p_drv_buf->tx_bcast); + } + break; + case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->fcoe_txq_depth_set) { + p_buf->p_val = &p_drv_buf->fcoe_txq_depth; + return sizeof(p_drv_buf->fcoe_txq_depth); + } + break; + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->fcoe_rxq_depth_set) { + p_buf->p_val = &p_drv_buf->fcoe_rxq_depth; + return sizeof(p_drv_buf->fcoe_rxq_depth); + } + break; + case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: + if (p_drv_buf->fcoe_rx_frames_set) { + p_buf->p_val = &p_drv_buf->fcoe_rx_frames; + return sizeof(p_drv_buf->fcoe_rx_frames); + } + break; + case DRV_TLV_FCOE_RX_BYTES_RECEIVED: + if (p_drv_buf->fcoe_rx_bytes_set) { + p_buf->p_val = &p_drv_buf->fcoe_rx_bytes; + return sizeof(p_drv_buf->fcoe_rx_bytes); + } + break; + case DRV_TLV_FCOE_TX_FRAMES_SENT: + if (p_drv_buf->fcoe_tx_frames_set) { + p_buf->p_val = &p_drv_buf->fcoe_tx_frames; + return sizeof(p_drv_buf->fcoe_tx_frames); + } + break; + case DRV_TLV_FCOE_TX_BYTES_SENT: + if (p_drv_buf->fcoe_tx_bytes_set) { + p_buf->p_val = &p_drv_buf->fcoe_tx_bytes; + return sizeof(p_drv_buf->fcoe_tx_bytes); + } + break; + case DRV_TLV_CRC_ERROR_COUNT: + if (p_drv_buf->crc_count_set) { + p_buf->p_val = &p_drv_buf->crc_count; + return sizeof(p_drv_buf->crc_count); + } + break; + case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID) / 2; + + if (p_drv_buf->crc_err_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->crc_err_src_fcid[idx]; + return sizeof(p_drv_buf->crc_err_src_fcid[idx]); + } + break; + case DRV_TLV_CRC_ERROR_1_TIMESTAMP: + case DRV_TLV_CRC_ERROR_2_TIMESTAMP: + case DRV_TLV_CRC_ERROR_3_TIMESTAMP: + case DRV_TLV_CRC_ERROR_4_TIMESTAMP: + case DRV_TLV_CRC_ERROR_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_TIMESTAMP) / 2; + + return 
qed_mfw_get_tlv_time_value(&p_drv_buf->crc_err[idx], + p_buf); + case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: + if (p_drv_buf->losync_err_set) { + p_buf->p_val = &p_drv_buf->losync_err; + return sizeof(p_drv_buf->losync_err); + } + break; + case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: + if (p_drv_buf->losig_err_set) { + p_buf->p_val = &p_drv_buf->losig_err; + return sizeof(p_drv_buf->losig_err); + } + break; + case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: + if (p_drv_buf->primtive_err_set) { + p_buf->p_val = &p_drv_buf->primtive_err; + return sizeof(p_drv_buf->primtive_err); + } + break; + case DRV_TLV_DISPARITY_ERROR_COUNT: + if (p_drv_buf->disparity_err_set) { + p_buf->p_val = &p_drv_buf->disparity_err; + return sizeof(p_drv_buf->disparity_err); + } + break; + case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: + if (p_drv_buf->code_violation_err_set) { + p_buf->p_val = &p_drv_buf->code_violation_err; + return sizeof(p_drv_buf->code_violation_err); + } + break; + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: + idx = p_tlv->tlv_type - + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1; + if (p_drv_buf->flogi_param_set[idx]) { + p_buf->p_val = &p_drv_buf->flogi_param[idx]; + return sizeof(p_drv_buf->flogi_param[idx]); + } + break; + case DRV_TLV_LAST_FLOGI_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_tstamp, + p_buf); + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: + idx = p_tlv->tlv_type - + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1; + + if (p_drv_buf->flogi_acc_param_set[idx]) { + p_buf->p_val = &p_drv_buf->flogi_acc_param[idx]; + return sizeof(p_drv_buf->flogi_acc_param[idx]); + } + break; + case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_acc_tstamp, + p_buf); + case DRV_TLV_LAST_FLOGI_RJT: + if (p_drv_buf->flogi_rjt_set) { + p_buf->p_val = &p_drv_buf->flogi_rjt; + return sizeof(p_drv_buf->flogi_rjt); + } + break; + case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_rjt_tstamp, + p_buf); + case DRV_TLV_FDISCS_SENT_COUNT: + if (p_drv_buf->fdiscs_set) { + p_buf->p_val = &p_drv_buf->fdiscs; + return sizeof(p_drv_buf->fdiscs); + } + break; + case DRV_TLV_FDISC_ACCS_RECEIVED: + if (p_drv_buf->fdisc_acc_set) { + p_buf->p_val = &p_drv_buf->fdisc_acc; + return sizeof(p_drv_buf->fdisc_acc); + } + break; + case DRV_TLV_FDISC_RJTS_RECEIVED: + if (p_drv_buf->fdisc_rjt_set) { + p_buf->p_val = &p_drv_buf->fdisc_rjt; + return sizeof(p_drv_buf->fdisc_rjt); + } + break; + case DRV_TLV_PLOGI_SENT_COUNT: + if (p_drv_buf->plogi_set) { + p_buf->p_val = &p_drv_buf->plogi; + return sizeof(p_drv_buf->plogi); + } + break; + case DRV_TLV_PLOGI_ACCS_RECEIVED: + if (p_drv_buf->plogi_acc_set) { + p_buf->p_val = &p_drv_buf->plogi_acc; + return sizeof(p_drv_buf->plogi_acc); + } + break; + case DRV_TLV_PLOGI_RJTS_RECEIVED: + if (p_drv_buf->plogi_rjt_set) { + p_buf->p_val = &p_drv_buf->plogi_rjt; + return sizeof(p_drv_buf->plogi_rjt); + } + break; + case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: + case 
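/*
 * [Editor's aside, not part of the patch] The per-instance TLVs handled
 * here are numbered in interleaved pairs (FC_ID #1, TIMESTAMP #1,
 * FC_ID #2, TIMESTAMP #2, ...), which is why the handler recovers the
 * array index as (tlv_type - FIRST_OF_KIND) / 2. A self-contained
 * illustration with hypothetical enum values:
 */
#include <stdio.h>

enum sample_tlv {
	SAMPLE_PLOGI_1_FC_ID = 100,   /* hypothetical numbering */
	SAMPLE_PLOGI_1_TSTAMP,
	SAMPLE_PLOGI_2_FC_ID,
	SAMPLE_PLOGI_2_TSTAMP,
	SAMPLE_PLOGI_3_FC_ID,
	SAMPLE_PLOGI_3_TSTAMP,
};

int main(void)
{
	int t;

	for (t = SAMPLE_PLOGI_1_FC_ID; t <= SAMPLE_PLOGI_3_TSTAMP; t++)
		printf("tlv %d -> array index %d\n", t,
		       (t - SAMPLE_PLOGI_1_FC_ID) / 2);  /* 0,0,1,1,2,2 */
	return 0;
}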
DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID) / 2; + + if (p_drv_buf->plogi_dst_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogi_dst_fcid[idx]; + return sizeof(p_drv_buf->plogi_dst_fcid[idx]); + } + break; + case DRV_TLV_PLOGI_1_TIMESTAMP: + case DRV_TLV_PLOGI_2_TIMESTAMP: + case DRV_TLV_PLOGI_3_TIMESTAMP: + case DRV_TLV_PLOGI_4_TIMESTAMP: + case DRV_TLV_PLOGI_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->plogi_tstamp[idx], + p_buf); + case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID) / 2; + + if (p_drv_buf->plogi_acc_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogi_acc_src_fcid[idx]; + return sizeof(p_drv_buf->plogi_acc_src_fcid[idx]); + } + break; + case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_TIMESTAMP) / 2; + p_time = &p_drv_buf->plogi_acc_tstamp[idx]; + + return qed_mfw_get_tlv_time_value(p_time, p_buf); + case DRV_TLV_LOGOS_ISSUED: + if (p_drv_buf->tx_plogos_set) { + p_buf->p_val = &p_drv_buf->tx_plogos; + return sizeof(p_drv_buf->tx_plogos); + } + break; + case DRV_TLV_LOGO_ACCS_RECEIVED: + if (p_drv_buf->plogo_acc_set) { + p_buf->p_val = &p_drv_buf->plogo_acc; + return sizeof(p_drv_buf->plogo_acc); + } + break; + case DRV_TLV_LOGO_RJTS_RECEIVED: + if (p_drv_buf->plogo_rjt_set) { + p_buf->p_val = &p_drv_buf->plogo_rjt; + return sizeof(p_drv_buf->plogo_rjt); + } + break; + case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID) / + 2; + + if (p_drv_buf->plogo_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogo_src_fcid[idx]; + return sizeof(p_drv_buf->plogo_src_fcid[idx]); + } + break; + case DRV_TLV_LOGO_1_TIMESTAMP: + case DRV_TLV_LOGO_2_TIMESTAMP: + case DRV_TLV_LOGO_3_TIMESTAMP: + case DRV_TLV_LOGO_4_TIMESTAMP: + case DRV_TLV_LOGO_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->plogo_tstamp[idx], + p_buf); + case DRV_TLV_LOGOS_RECEIVED: + if (p_drv_buf->rx_logos_set) { + p_buf->p_val = &p_drv_buf->rx_logos; + return sizeof(p_drv_buf->rx_logos); + } + break; + case DRV_TLV_ACCS_ISSUED: + if (p_drv_buf->tx_accs_set) { + p_buf->p_val = &p_drv_buf->tx_accs; + return sizeof(p_drv_buf->tx_accs); + } + break; + case DRV_TLV_PRLIS_ISSUED: + if (p_drv_buf->tx_prlis_set) { + p_buf->p_val = &p_drv_buf->tx_prlis; + return sizeof(p_drv_buf->tx_prlis); + } + break; + case DRV_TLV_ACCS_RECEIVED: + if (p_drv_buf->rx_accs_set) { + p_buf->p_val = &p_drv_buf->rx_accs; + return sizeof(p_drv_buf->rx_accs); + } + break; + case DRV_TLV_ABTS_SENT_COUNT: + if (p_drv_buf->tx_abts_set) { + p_buf->p_val = &p_drv_buf->tx_abts; + return sizeof(p_drv_buf->tx_abts); + } + break; + case DRV_TLV_ABTS_ACCS_RECEIVED: + if (p_drv_buf->rx_abts_acc_set) { + p_buf->p_val = &p_drv_buf->rx_abts_acc; + return 
sizeof(p_drv_buf->rx_abts_acc); + } + break; + case DRV_TLV_ABTS_RJTS_RECEIVED: + if (p_drv_buf->rx_abts_rjt_set) { + p_buf->p_val = &p_drv_buf->rx_abts_rjt; + return sizeof(p_drv_buf->rx_abts_rjt); + } + break; + case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID) / 2; + + if (p_drv_buf->abts_dst_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->abts_dst_fcid[idx]; + return sizeof(p_drv_buf->abts_dst_fcid[idx]); + } + break; + case DRV_TLV_ABTS_1_TIMESTAMP: + case DRV_TLV_ABTS_2_TIMESTAMP: + case DRV_TLV_ABTS_3_TIMESTAMP: + case DRV_TLV_ABTS_4_TIMESTAMP: + case DRV_TLV_ABTS_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->abts_tstamp[idx], + p_buf); + case DRV_TLV_RSCNS_RECEIVED: + if (p_drv_buf->rx_rscn_set) { + p_buf->p_val = &p_drv_buf->rx_rscn; + return sizeof(p_drv_buf->rx_rscn); + } + break; + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: + idx = p_tlv->tlv_type - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1; + + if (p_drv_buf->rx_rscn_nport_set[idx]) { + p_buf->p_val = &p_drv_buf->rx_rscn_nport[idx]; + return sizeof(p_drv_buf->rx_rscn_nport[idx]); + } + break; + case DRV_TLV_LUN_RESETS_ISSUED: + if (p_drv_buf->tx_lun_rst_set) { + p_buf->p_val = &p_drv_buf->tx_lun_rst; + return sizeof(p_drv_buf->tx_lun_rst); + } + break; + case DRV_TLV_ABORT_TASK_SETS_ISSUED: + if (p_drv_buf->abort_task_sets_set) { + p_buf->p_val = &p_drv_buf->abort_task_sets; + return sizeof(p_drv_buf->abort_task_sets); + } + break; + case DRV_TLV_TPRLOS_SENT: + if (p_drv_buf->tx_tprlos_set) { + p_buf->p_val = &p_drv_buf->tx_tprlos; + return sizeof(p_drv_buf->tx_tprlos); + } + break; + case DRV_TLV_NOS_SENT_COUNT: + if (p_drv_buf->tx_nos_set) { + p_buf->p_val = &p_drv_buf->tx_nos; + return sizeof(p_drv_buf->tx_nos); + } + break; + case DRV_TLV_NOS_RECEIVED_COUNT: + if (p_drv_buf->rx_nos_set) { + p_buf->p_val = &p_drv_buf->rx_nos; + return sizeof(p_drv_buf->rx_nos); + } + break; + case DRV_TLV_OLS_COUNT: + if (p_drv_buf->ols_set) { + p_buf->p_val = &p_drv_buf->ols; + return sizeof(p_drv_buf->ols); + } + break; + case DRV_TLV_LR_COUNT: + if (p_drv_buf->lr_set) { + p_buf->p_val = &p_drv_buf->lr; + return sizeof(p_drv_buf->lr); + } + break; + case DRV_TLV_LRR_COUNT: + if (p_drv_buf->lrr_set) { + p_buf->p_val = &p_drv_buf->lrr; + return sizeof(p_drv_buf->lrr); + } + break; + case DRV_TLV_LIP_SENT_COUNT: + if (p_drv_buf->tx_lip_set) { + p_buf->p_val = &p_drv_buf->tx_lip; + return sizeof(p_drv_buf->tx_lip); + } + break; + case DRV_TLV_LIP_RECEIVED_COUNT: + if (p_drv_buf->rx_lip_set) { + p_buf->p_val = &p_drv_buf->rx_lip; + return sizeof(p_drv_buf->rx_lip); + } + break; + case DRV_TLV_EOFA_COUNT: + if (p_drv_buf->eofa_set) { + p_buf->p_val = &p_drv_buf->eofa; + return sizeof(p_drv_buf->eofa); + } + break; + case DRV_TLV_EOFNI_COUNT: + if (p_drv_buf->eofni_set) { + p_buf->p_val = &p_drv_buf->eofni; + return sizeof(p_drv_buf->eofni); + } + break; + case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: + if (p_drv_buf->scsi_chks_set) { + p_buf->p_val = &p_drv_buf->scsi_chks; + return sizeof(p_drv_buf->scsi_chks); + } + break; + case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: + if (p_drv_buf->scsi_cond_met_set) 
{ + p_buf->p_val = &p_drv_buf->scsi_cond_met; + return sizeof(p_drv_buf->scsi_cond_met); + } + break; + case DRV_TLV_SCSI_STATUS_BUSY_COUNT: + if (p_drv_buf->scsi_busy_set) { + p_buf->p_val = &p_drv_buf->scsi_busy; + return sizeof(p_drv_buf->scsi_busy); + } + break; + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: + if (p_drv_buf->scsi_inter_set) { + p_buf->p_val = &p_drv_buf->scsi_inter; + return sizeof(p_drv_buf->scsi_inter); + } + break; + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: + if (p_drv_buf->scsi_inter_cond_met_set) { + p_buf->p_val = &p_drv_buf->scsi_inter_cond_met; + return sizeof(p_drv_buf->scsi_inter_cond_met); + } + break; + case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: + if (p_drv_buf->scsi_rsv_conflicts_set) { + p_buf->p_val = &p_drv_buf->scsi_rsv_conflicts; + return sizeof(p_drv_buf->scsi_rsv_conflicts); + } + break; + case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: + if (p_drv_buf->scsi_tsk_full_set) { + p_buf->p_val = &p_drv_buf->scsi_tsk_full; + return sizeof(p_drv_buf->scsi_tsk_full); + } + break; + case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: + if (p_drv_buf->scsi_aca_active_set) { + p_buf->p_val = &p_drv_buf->scsi_aca_active; + return sizeof(p_drv_buf->scsi_aca_active); + } + break; + case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: + if (p_drv_buf->scsi_tsk_abort_set) { + p_buf->p_val = &p_drv_buf->scsi_tsk_abort; + return sizeof(p_drv_buf->scsi_tsk_abort); + } + break; + case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: + idx = (p_tlv->tlv_type - + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ) / 2; + + if (p_drv_buf->scsi_rx_chk_set[idx]) { + p_buf->p_val = &p_drv_buf->scsi_rx_chk[idx]; + return sizeof(p_drv_buf->scsi_rx_chk[idx]); + } + break; + case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_3_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_1_TIMESTAMP) / 2; + p_time = &p_drv_buf->scsi_chk_tstamp[idx]; + + return qed_mfw_get_tlv_time_value(p_time, p_buf); + default: + break; + } + + return -1; +} + static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, u8 tlv_group, u8 *p_mfw_buf, u32 size) { @@ -276,6 +1093,10 @@ static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, len = qed_mfw_get_eth_tlv_value(&tlv, &p_tlv_data->eth, &buffer); + else if (tlv_group == QED_MFW_TLV_FCOE) + len = qed_mfw_get_fcoe_tlv_value(&tlv, + &p_tlv_data->fcoe, + &buffer); if (len > 0) { WARN(len > 4 * tlv.tlv_length, @@ -351,6 +1172,13 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) tlv_group &= ~QED_MFW_TLV_ETH; } + if ((tlv_group & QED_MFW_TLV_FCOE) && + p_hwfn->hw_info.personality != QED_PCI_FCOE) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping FCoE TLVs for non-FCoE function\n"); + tlv_group &= ~QED_MFW_TLV_FCOE; + } + /* Update the TLV values in the local buffer */ for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) { if (tlv_group & id) diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8e4fad427ac2..74c2b9ad8afa 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -219,6 +219,199 @@ struct qed_mfw_tlv_eth { bool num_rxqs_full_set; }; +#define QED_MFW_TLV_TIME_SIZE 14 +struct qed_mfw_tlv_time 
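/*
 * [Editor's aside, not part of the patch] Every statistic in the
 * qed_if.h structures that follow is paired with a *_set flag, and the
 * lookup functions above return a length only when that flag is true.
 * Minimal sketch of the producer/consumer convention; the sample_*
 * names are hypothetical:
 */
#include <stdbool.h>
#include <stdint.h>

struct sample_stats {
	uint16_t crc_count;
	bool crc_count_set;
};

/* Producer (protocol driver): publish a value and mark it valid. */
static void sample_set_crc(struct sample_stats *s, uint16_t v)
{
	s->crc_count = v;
	s->crc_count_set = true;
}

/* Consumer (TLV handler): report a length only for valid fields. */
static int sample_get_crc(struct sample_stats *s, void **p_val)
{
	if (!s->crc_count_set)
		return -1;              /* TLV is skipped entirely */
	*p_val = &s->crc_count;
	return sizeof(s->crc_count);
}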
{ + bool b_set; + u8 month; + u8 day; + u8 hour; + u8 min; + u16 msec; + u16 usec; +}; + +struct qed_mfw_tlv_fcoe { + u8 scsi_timeout; + bool scsi_timeout_set; + u32 rt_tov; + bool rt_tov_set; + u32 ra_tov; + bool ra_tov_set; + u32 ed_tov; + bool ed_tov_set; + u32 cr_tov; + bool cr_tov_set; + u8 boot_type; + bool boot_type_set; + u8 npiv_state; + bool npiv_state_set; + u32 num_npiv_ids; + bool num_npiv_ids_set; + u8 switch_name[8]; + bool switch_name_set; + u16 switch_portnum; + bool switch_portnum_set; + u8 switch_portid[3]; + bool switch_portid_set; + u8 vendor_name[8]; + bool vendor_name_set; + u8 switch_model[8]; + bool switch_model_set; + u8 switch_fw_version[8]; + bool switch_fw_version_set; + u8 qos_pri; + bool qos_pri_set; + u8 port_alias[3]; + bool port_alias_set; + u8 port_state; +#define QED_MFW_TLV_PORT_STATE_OFFLINE (0) +#define QED_MFW_TLV_PORT_STATE_LOOP (1) +#define QED_MFW_TLV_PORT_STATE_P2P (2) +#define QED_MFW_TLV_PORT_STATE_FABRIC (3) + bool port_state_set; + u16 fip_tx_descr_size; + bool fip_tx_descr_size_set; + u16 fip_rx_descr_size; + bool fip_rx_descr_size_set; + u16 link_failures; + bool link_failures_set; + u8 fcoe_boot_progress; + bool fcoe_boot_progress_set; + u64 rx_bcast; + bool rx_bcast_set; + u64 tx_bcast; + bool tx_bcast_set; + u16 fcoe_txq_depth; + bool fcoe_txq_depth_set; + u16 fcoe_rxq_depth; + bool fcoe_rxq_depth_set; + u64 fcoe_rx_frames; + bool fcoe_rx_frames_set; + u64 fcoe_rx_bytes; + bool fcoe_rx_bytes_set; + u64 fcoe_tx_frames; + bool fcoe_tx_frames_set; + u64 fcoe_tx_bytes; + bool fcoe_tx_bytes_set; + u16 crc_count; + bool crc_count_set; + u32 crc_err_src_fcid[5]; + bool crc_err_src_fcid_set[5]; + struct qed_mfw_tlv_time crc_err[5]; + u16 losync_err; + bool losync_err_set; + u16 losig_err; + bool losig_err_set; + u16 primtive_err; + bool primtive_err_set; + u16 disparity_err; + bool disparity_err_set; + u16 code_violation_err; + bool code_violation_err_set; + u32 flogi_param[4]; + bool flogi_param_set[4]; + struct qed_mfw_tlv_time flogi_tstamp; + u32 flogi_acc_param[4]; + bool flogi_acc_param_set[4]; + struct qed_mfw_tlv_time flogi_acc_tstamp; + u32 flogi_rjt; + bool flogi_rjt_set; + struct qed_mfw_tlv_time flogi_rjt_tstamp; + u32 fdiscs; + bool fdiscs_set; + u8 fdisc_acc; + bool fdisc_acc_set; + u8 fdisc_rjt; + bool fdisc_rjt_set; + u8 plogi; + bool plogi_set; + u8 plogi_acc; + bool plogi_acc_set; + u8 plogi_rjt; + bool plogi_rjt_set; + u32 plogi_dst_fcid[5]; + bool plogi_dst_fcid_set[5]; + struct qed_mfw_tlv_time plogi_tstamp[5]; + u32 plogi_acc_src_fcid[5]; + bool plogi_acc_src_fcid_set[5]; + struct qed_mfw_tlv_time plogi_acc_tstamp[5]; + u8 tx_plogos; + bool tx_plogos_set; + u8 plogo_acc; + bool plogo_acc_set; + u8 plogo_rjt; + bool plogo_rjt_set; + u32 plogo_src_fcid[5]; + bool plogo_src_fcid_set[5]; + struct qed_mfw_tlv_time plogo_tstamp[5]; + u8 rx_logos; + bool rx_logos_set; + u8 tx_accs; + bool tx_accs_set; + u8 tx_prlis; + bool tx_prlis_set; + u8 rx_accs; + bool rx_accs_set; + u8 tx_abts; + bool tx_abts_set; + u8 rx_abts_acc; + bool rx_abts_acc_set; + u8 rx_abts_rjt; + bool rx_abts_rjt_set; + u32 abts_dst_fcid[5]; + bool abts_dst_fcid_set[5]; + struct qed_mfw_tlv_time abts_tstamp[5]; + u8 rx_rscn; + bool rx_rscn_set; + u32 rx_rscn_nport[4]; + bool rx_rscn_nport_set[4]; + u8 tx_lun_rst; + bool tx_lun_rst_set; + u8 abort_task_sets; + bool abort_task_sets_set; + u8 tx_tprlos; + bool tx_tprlos_set; + u8 tx_nos; + bool tx_nos_set; + u8 rx_nos; + bool rx_nos_set; + u8 ols; + bool ols_set; + u8 lr; + bool lr_set; + u8 lrr; + bool lrr_set; + u8 
tx_lip; + bool tx_lip_set; + u8 rx_lip; + bool rx_lip_set; + u8 eofa; + bool eofa_set; + u8 eofni; + bool eofni_set; + u8 scsi_chks; + bool scsi_chks_set; + u8 scsi_cond_met; + bool scsi_cond_met_set; + u8 scsi_busy; + bool scsi_busy_set; + u8 scsi_inter; + bool scsi_inter_set; + u8 scsi_inter_cond_met; + bool scsi_inter_cond_met_set; + u8 scsi_rsv_conflicts; + bool scsi_rsv_conflicts_set; + u8 scsi_tsk_full; + bool scsi_tsk_full_set; + u8 scsi_aca_active; + bool scsi_aca_active_set; + u8 scsi_tsk_abort; + bool scsi_tsk_abort_set; + u32 scsi_rx_chk[5]; + bool scsi_rx_chk_set[5]; + struct qed_mfw_tlv_time scsi_chk_tstamp[5]; +}; + #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) -- cgit v1.2.3 From 77a509e4f6d14c2e09acbab5a89a769740bda62c Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:40 -0700 Subject: qed: Add support for processing iscsi tlv request. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 4 +- drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 131 ++++++++++++++++++++++++++ include/linux/qed/qed_if.h | 36 +++++++ 3 files changed, 170 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index b31f5d8bd27c..632a838f1fe3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -217,7 +217,8 @@ enum qed_mfw_tlv_type { QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */ QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */ QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */ - QED_MFW_TLV_MAX = 0x8, + QED_MFW_TLV_ISCSI = 0x8, /* SCSI protocol TLVs */ + QED_MFW_TLV_MAX = 0x16, }; struct qed_mfw_tlv_generic { @@ -247,6 +248,7 @@ union qed_mfw_tlv_data { struct qed_mfw_tlv_generic generic; struct qed_mfw_tlv_eth eth; struct qed_mfw_tlv_fcoe fcoe; + struct qed_mfw_tlv_iscsi iscsi; }; /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index 1873cfc08715..6c16158d8090 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -215,6 +215,23 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group) case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: *tlv_group = QED_MFW_TLV_FCOE; break; + case DRV_TLV_TARGET_LLMNR_ENABLED: + case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: + case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: + case DRV_TLV_AUTHENTICATION_METHOD: + case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: + case DRV_TLV_MAX_FRAME_SIZE: + case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_ISCSI_BOOT_PROGRESS: + case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: + case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: + case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: + case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: + *tlv_group |= QED_MFW_TLV_ISCSI; + break; default: return -EINVAL; } @@ -1054,6 +1071,109 @@ qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv, return -1; } +static int +qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_iscsi *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_TARGET_LLMNR_ENABLED: + if (p_drv_buf->target_llmnr_set) { + p_buf->p_val = &p_drv_buf->target_llmnr; + return 
sizeof(p_drv_buf->target_llmnr); + } + break; + case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: + if (p_drv_buf->header_digest_set) { + p_buf->p_val = &p_drv_buf->header_digest; + return sizeof(p_drv_buf->header_digest); + } + break; + case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: + if (p_drv_buf->data_digest_set) { + p_buf->p_val = &p_drv_buf->data_digest; + return sizeof(p_drv_buf->data_digest); + } + break; + case DRV_TLV_AUTHENTICATION_METHOD: + if (p_drv_buf->auth_method_set) { + p_buf->p_val = &p_drv_buf->auth_method; + return sizeof(p_drv_buf->auth_method); + } + break; + case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: + if (p_drv_buf->boot_taget_portal_set) { + p_buf->p_val = &p_drv_buf->boot_taget_portal; + return sizeof(p_drv_buf->boot_taget_portal); + } + break; + case DRV_TLV_MAX_FRAME_SIZE: + if (p_drv_buf->frame_size_set) { + p_buf->p_val = &p_drv_buf->frame_size; + return sizeof(p_drv_buf->frame_size); + } + break; + case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->tx_desc_size_set) { + p_buf->p_val = &p_drv_buf->tx_desc_size; + return sizeof(p_drv_buf->tx_desc_size); + } + break; + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->rx_desc_size_set) { + p_buf->p_val = &p_drv_buf->rx_desc_size; + return sizeof(p_drv_buf->rx_desc_size); + } + break; + case DRV_TLV_ISCSI_BOOT_PROGRESS: + if (p_drv_buf->boot_progress_set) { + p_buf->p_val = &p_drv_buf->boot_progress; + return sizeof(p_drv_buf->boot_progress); + } + break; + case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->tx_desc_qdepth_set) { + p_buf->p_val = &p_drv_buf->tx_desc_qdepth; + return sizeof(p_drv_buf->tx_desc_qdepth); + } + break; + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->rx_desc_qdepth_set) { + p_buf->p_val = &p_drv_buf->rx_desc_qdepth; + return sizeof(p_drv_buf->rx_desc_qdepth); + } + break; + case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: + if (p_drv_buf->rx_frames_set) { + p_buf->p_val = &p_drv_buf->rx_frames; + return sizeof(p_drv_buf->rx_frames); + } + break; + case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: + if (p_drv_buf->rx_bytes_set) { + p_buf->p_val = &p_drv_buf->rx_bytes; + return sizeof(p_drv_buf->rx_bytes); + } + break; + case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: + if (p_drv_buf->tx_frames_set) { + p_buf->p_val = &p_drv_buf->tx_frames; + return sizeof(p_drv_buf->tx_frames); + } + break; + case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: + if (p_drv_buf->tx_bytes_set) { + p_buf->p_val = &p_drv_buf->tx_bytes; + return sizeof(p_drv_buf->tx_bytes); + } + break; + default: + break; + } + + return -1; +} + static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, u8 tlv_group, u8 *p_mfw_buf, u32 size) { @@ -1097,6 +1217,10 @@ static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, len = qed_mfw_get_fcoe_tlv_value(&tlv, &p_tlv_data->fcoe, &buffer); + else + len = qed_mfw_get_iscsi_tlv_value(&tlv, + &p_tlv_data->iscsi, + &buffer); if (len > 0) { WARN(len > 4 * tlv.tlv_length, @@ -1179,6 +1303,13 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) tlv_group &= ~QED_MFW_TLV_FCOE; } + if ((tlv_group & QED_MFW_TLV_ISCSI) && + p_hwfn->hw_info.personality != QED_PCI_ISCSI) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping iSCSI TLVs for non-iSCSI function\n"); + tlv_group &= ~QED_MFW_TLV_ISCSI; + } + /* Update the TLV values in the local buffer */ for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) { if (tlv_group & id) diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 74c2b9ad8afa..92b5352287df 100644 --- a/include/linux/qed/qed_if.h +++ 
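/*
 * [Editor's aside, not part of the patch] Two ideas from the hunks
 * above, condensed: a function only serves TLV groups that match its
 * personality (the FCoE/iSCSI groups are masked off otherwise), and the
 * remaining group bits are then walked one power of two at a time. A
 * runnable user-space sketch with hypothetical sample_* names:
 */
#include <stdint.h>
#include <stdio.h>

enum sample_group {
	SAMPLE_TLV_GENERIC = 0x1,
	SAMPLE_TLV_ETH = 0x2,
	SAMPLE_TLV_FCOE = 0x4,
	SAMPLE_TLV_ISCSI = 0x8,
	SAMPLE_TLV_MAX = 0x10,
};

enum sample_personality { SAMPLE_PCI_ETH, SAMPLE_PCI_FCOE, SAMPLE_PCI_ISCSI };

static uint8_t sample_filter_groups(uint8_t groups, enum sample_personality p)
{
	if ((groups & SAMPLE_TLV_FCOE) && p != SAMPLE_PCI_FCOE)
		groups &= ~SAMPLE_TLV_FCOE;
	if ((groups & SAMPLE_TLV_ISCSI) && p != SAMPLE_PCI_ISCSI)
		groups &= ~SAMPLE_TLV_ISCSI;
	return groups;
}

int main(void)
{
	uint8_t groups = sample_filter_groups(0xf, SAMPLE_PCI_ISCSI);
	int id;

	/* Walk group bits the way qed_mfw_process_tlv_req() does. */
	for (id = SAMPLE_TLV_GENERIC; id < SAMPLE_TLV_MAX; id <<= 1)
		if (groups & id)
			printf("updating TLV group 0x%x\n", id);
	return 0;
}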
b/include/linux/qed/qed_if.h @@ -412,6 +412,42 @@ struct qed_mfw_tlv_fcoe { struct qed_mfw_tlv_time scsi_chk_tstamp[5]; }; +struct qed_mfw_tlv_iscsi { + u8 target_llmnr; + bool target_llmnr_set; + u8 header_digest; + bool header_digest_set; + u8 data_digest; + bool data_digest_set; + u8 auth_method; +#define QED_MFW_TLV_AUTH_METHOD_NONE (1) +#define QED_MFW_TLV_AUTH_METHOD_CHAP (2) +#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3) + bool auth_method_set; + u16 boot_taget_portal; + bool boot_taget_portal_set; + u16 frame_size; + bool frame_size_set; + u16 tx_desc_size; + bool tx_desc_size_set; + u16 rx_desc_size; + bool rx_desc_size_set; + u8 boot_progress; + bool boot_progress_set; + u16 tx_desc_qdepth; + bool tx_desc_qdepth_set; + u16 rx_desc_qdepth; + bool rx_desc_qdepth_set; + u64 rx_frames; + bool rx_frames_set; + u64 rx_bytes; + bool rx_bytes_set; + u64 tx_frames; + bool tx_frames_set; + u64 tx_bytes; + bool tx_bytes_set; +}; + #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) -- cgit v1.2.3 From 59ccf86fe69a6a77afebe706913d6b551d84d5bc Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:41 -0700 Subject: qed: Add driver infrastructure for handling mfw requests. MFW requests the TLVs in interrupt context. Extracting the required data from the upper layers and populating the TLVs requires process context. The patch adds work-queues for processing the TLV requests. It also adds the implementation for requesting the TLV values from the appropriate protocol driver. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 8 ++ drivers/net/ethernet/qlogic/qed/qed_main.c | 151 ++++++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 2 + include/linux/qed/qed_if.h | 10 ++ 4 files changed, 170 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index dfdbe529e2bf..00db3401b898 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -515,6 +515,10 @@ struct qed_simd_fp_handler { void (*func)(void *); }; +enum qed_slowpath_wq_flag { + QED_SLOWPATH_MFW_TLV_REQ, +}; + struct qed_hwfn { struct qed_dev *cdev; u8 my_id; /* ID inside the PF */ @@ -644,6 +648,9 @@ struct qed_hwfn { #endif struct z_stream_s *stream; + struct workqueue_struct *slowpath_wq; + struct delayed_work slowpath_task; + unsigned long slowpath_task_flags; }; struct pci_params { @@ -908,6 +915,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev, union qed_mcp_protocol_stats *stats); int qed_slowpath_irq_req(struct qed_hwfn *hwfn); void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn); +int qed_mfw_tlv_req(struct qed_hwfn *hwfn); int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index cbf0ea97fc8e..68c4399ffd50 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -946,6 +946,68 @@ static void qed_update_pf_params(struct qed_dev *cdev, } } +static void qed_slowpath_wq_stop(struct qed_dev *cdev) +{ + int i; + + if (IS_VF(cdev)) + return; + + for_each_hwfn(cdev, i) { + if (!cdev->hwfns[i].slowpath_wq) + continue; + + flush_workqueue(cdev->hwfns[i].slowpath_wq); + destroy_workqueue(cdev->hwfns[i].slowpath_wq); + } +} + +static void 
qed_slowpath_task(struct work_struct *work) +{ + struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, + slowpath_task.work); + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + + if (!ptt) { + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); + return; + } + + if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, + &hwfn->slowpath_task_flags)) + qed_mfw_process_tlv_req(hwfn, ptt); + + qed_ptt_release(hwfn, ptt); +} + +static int qed_slowpath_wq_start(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn; + char name[NAME_SIZE]; + int i; + + if (IS_VF(cdev)) + return 0; + + for_each_hwfn(cdev, i) { + hwfn = &cdev->hwfns[i]; + + snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", + cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); + + hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); + if (!hwfn->slowpath_wq) { + DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); + } + + return 0; +} + static int qed_slowpath_start(struct qed_dev *cdev, struct qed_slowpath_params *params) { @@ -961,6 +1023,9 @@ static int qed_slowpath_start(struct qed_dev *cdev, if (qed_iov_wq_start(cdev)) goto err; + if (qed_slowpath_wq_start(cdev)) + goto err; + if (IS_PF(cdev)) { rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, &cdev->pdev->dev); @@ -1095,6 +1160,8 @@ err: qed_iov_wq_stop(cdev, false); + qed_slowpath_wq_stop(cdev); + return rc; } @@ -1103,6 +1170,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev) if (!cdev) return -ENODEV; + qed_slowpath_wq_stop(cdev); + qed_ll2_dealloc_if(cdev); if (IS_PF(cdev)) { @@ -2089,8 +2158,88 @@ void qed_get_protocol_stats(struct qed_dev *cdev, } } +int qed_mfw_tlv_req(struct qed_hwfn *hwfn) +{ + DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, + "Scheduling slowpath task [Flag: %d]\n", + QED_SLOWPATH_MFW_TLV_REQ); + smp_mb__before_atomic(); + set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); + smp_mb__after_atomic(); + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); + + return 0; +} + +static void +qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) +{ + struct qed_common_cb_ops *op = cdev->protocol_ops.common; + struct qed_eth_stats_common *p_common; + struct qed_generic_tlvs gen_tlvs; + struct qed_eth_stats stats; + int i; + + memset(&gen_tlvs, 0, sizeof(gen_tlvs)); + op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); + + if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) + tlv->flags.ipv4_csum_offload = true; + if (gen_tlvs.feat_flags & QED_TLV_LSO) + tlv->flags.lso_supported = true; + tlv->flags.b_set = true; + + for (i = 0; i < QED_TLV_MAC_COUNT; i++) { + if (is_valid_ether_addr(gen_tlvs.mac[i])) { + ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); + tlv->mac_set[i] = true; + } + } + + qed_get_vport_stats(cdev, &stats); + p_common = &stats.common; + tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + + p_common->rx_bcast_pkts; + tlv->rx_frames_set = true; + tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + + p_common->rx_bcast_bytes; + tlv->rx_bytes_set = true; + tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + + p_common->tx_bcast_pkts; + tlv->tx_frames_set = true; + tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + + p_common->tx_bcast_bytes; + tlv->tx_bytes_set = true; +} + int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, union qed_mfw_tlv_data *tlv_buf) { - return -EINVAL; + struct qed_dev 
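/*
 * [Editor's aside, not part of the patch] The code above is the classic
 * "defer from atomic context" pattern: the MFW notification arrives in
 * interrupt context, so the handler only sets a flag bit and queues
 * work; the work item later runs in process context, where it may sleep
 * while acquiring a PTT window. A condensed kernel-style sketch of the
 * pattern (sample_* names are hypothetical):
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#define SAMPLE_FLAG_TLV_REQ 0

struct sample_ctx {
	struct workqueue_struct *wq;
	struct delayed_work task;
	unsigned long flags;
};

/* Interrupt context: record the request and kick the worker. */
static void sample_notify(struct sample_ctx *c)
{
	set_bit(SAMPLE_FLAG_TLV_REQ, &c->flags);
	queue_delayed_work(c->wq, &c->task, 0);
}

/* Process context: safe to sleep and talk to the management FW. */
static void sample_task(struct work_struct *work)
{
	struct sample_ctx *c = container_of(work, struct sample_ctx,
					    task.work);

	if (test_and_clear_bit(SAMPLE_FLAG_TLV_REQ, &c->flags))
		;	/* collect and post the TLV data here */
}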
*cdev = hwfn->cdev; + struct qed_common_cb_ops *ops; + + ops = cdev->protocol_ops.common; + if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { + DP_NOTICE(hwfn, "Can't collect TLV management info\n"); + return -EINVAL; + } + + switch (type) { + case QED_MFW_TLV_GENERIC: + qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); + break; + case QED_MFW_TLV_ETH: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); + break; + case QED_MFW_TLV_FCOE: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); + break; + case QED_MFW_TLV_ISCSI: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); + break; + default: + break; + } + + return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index e80f5e7c7992..2612e3e458d9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1622,6 +1622,8 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_S_TAG_UPDATE: qed_mcp_update_stag(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_GET_TLV_REQ: + qed_mfw_tlv_req(p_hwfn); break; default: DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 92b5352287df..44af652e7407 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -751,6 +751,14 @@ struct qed_int_info { u8 used_cnt; }; +struct qed_generic_tlvs { +#define QED_TLV_IP_CSUM BIT(0) +#define QED_TLV_LSO BIT(1) + u16 feat_flags; +#define QED_TLV_MAC_COUNT 3 + u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN]; +}; + #define QED_NVM_SIGNATURE 0x12435687 enum qed_nvm_flash_cmd { @@ -765,6 +773,8 @@ struct qed_common_cb_ops { void (*link_update)(void *dev, struct qed_link_output *link); void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); + void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); + void (*get_protocol_tlv_data)(void *dev, void *data); }; struct qed_selftest_ops { -- cgit v1.2.3 From 3893fc62b1769db3ef160f7f1e36d3db754497ee Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 24 May 2018 09:54:51 -0700 Subject: qed*: Support other classification modes. Currently, the driver supports flow classification to PF receive queues based only on TCP/UDP 4-tuples [src_ip, dst_ip, src_port, dst_port]. This patch enables configuring different flow profiles on the adapter [for example - only UDP dest port or only src_ip based] so that classification can be done according to just those fields as well. However, only one type of flow configuration is supported at a time, due to the limited number of flow profiles available on the device. For example - ethtool -N enp7s0f0 flow-type udp4 dst-port 45762 action 2 ethtool -N enp7s0f0 flow-type tcp4 src-ip 192.16.4.10 action 1 ethtool -N enp7s0f0 flow-type udp6 dst-port 45762 action 3 Signed-off-by: Manish Chopra Signed-off-by: Shahed Shaikh Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 2 ++ drivers/net/ethernet/qlogic/qede/qede_filter.c | 31 ++++++++++++++++++++++++-- include/linux/qed/qed_eth_if.h | 1 + 3 files changed, 32 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 5e655c3601cf..3cb8a8088467 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1973,6 +1973,8 @@ qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode) return GFT_PROFILE_TYPE_4_TUPLE; if (mode == QED_FILTER_CONFIG_MODE_IP_DEST) return GFT_PROFILE_TYPE_IP_DST_ADDR; + if (mode == QED_FILTER_CONFIG_MODE_IP_SRC) + return GFT_PROFILE_TYPE_IP_SRC_ADDR; return GFT_PROFILE_TYPE_L4_DST_PORT; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 43ed420900c1..9b84f0cb12e7 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1623,9 +1623,17 @@ static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev, t->src_port = fs->h_u.tcp_ip4_spec.psrc; t->dst_port = fs->h_u.tcp_ip4_spec.pdst; - /* We must have a valid 4-tuple */ + /* We must either have a valid 4-tuple or only dst port + * or only src ip as an input + */ if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) { t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + } else if (!t->src_port && t->dst_port && + !t->src_ipv4 && !t->dst_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; + } else if (!t->src_port && !t->dst_port && + !t->dst_ipv4 && t->src_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; } else { DP_INFO(edev, "Invalid N-tuple\n"); return -EOPNOTSUPP; @@ -1697,11 +1705,21 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev, t->src_port = fs->h_u.tcp_ip6_spec.psrc; t->dst_port = fs->h_u.tcp_ip6_spec.pdst; - /* We must make sure we have a valid 4-tuple */ + /* We must make sure we have a valid 4-tuple or only dest port + * or only src ip as an input + */ if (t->src_port && t->dst_port && memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) && memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) { t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + } else if (!t->src_port && t->dst_port && + !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) && + !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; + } else if (!t->src_port && !t->dst_port && + !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) && + memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; } else { DP_INFO(edev, "Invalid N-tuple\n"); return -EOPNOTSUPP; @@ -1779,6 +1797,15 @@ static int qede_flow_spec_validate(struct qede_dev *edev, return -EINVAL; } + /* Check if the filtering-mode could support the filter */ + if (edev->arfs->filter_count && + edev->arfs->mode != t->mode) { + DP_INFO(edev, + "flow_spec would require filtering mode %08x, but %08x is configured\n", + t->mode, edev->arfs->mode); + return -EINVAL; + } + if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) { DP_INFO(edev, "Queue out-of-bounds\n"); return -EINVAL; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 7f9756fe9180..557e86e1c7d9 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -66,6 +66,7 @@ enum qed_filter_config_mode { QED_FILTER_CONFIG_MODE_5_TUPLE, QED_FILTER_CONFIG_MODE_L4_PORT, 
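/*
 * [Editor's aside, not part of the patch] The mode selection above
 * boils down to which tuple fields the user left as wildcards: all four
 * populated selects the 5-tuple profile, only the destination port the
 * L4-port profile, and only the source IP the IP-source profile; any
 * other mix is rejected. User-space sketch of that decision, with
 * hypothetical sample_* names:
 */
#include <stdint.h>

enum sample_mode {
	SAMPLE_MODE_5_TUPLE,
	SAMPLE_MODE_L4_PORT,
	SAMPLE_MODE_IP_SRC,
	SAMPLE_MODE_INVALID,
};

struct sample_tuple {
	uint32_t src_ip, dst_ip;
	uint16_t src_port, dst_port;
};

static enum sample_mode sample_pick_mode(const struct sample_tuple *t)
{
	if (t->src_port && t->dst_port && t->src_ip && t->dst_ip)
		return SAMPLE_MODE_5_TUPLE;
	if (!t->src_port && t->dst_port && !t->src_ip && !t->dst_ip)
		return SAMPLE_MODE_L4_PORT;
	if (!t->src_port && !t->dst_port && !t->dst_ip && t->src_ip)
		return SAMPLE_MODE_IP_SRC;
	return SAMPLE_MODE_INVALID;   /* the driver returns -EOPNOTSUPP */
}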
QED_FILTER_CONFIG_MODE_IP_DEST, + QED_FILTER_CONFIG_MODE_IP_SRC, }; struct qed_ntuple_filter_params { -- cgit v1.2.3 From 608e00d0a2eb53079c55dc9c14d8711bbb3a4390 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 24 May 2018 09:54:53 -0700 Subject: qed*: Support drop action classification With this patch, the user can configure supported flows to be dropped. A "gft_filter_drop" stat is also added and populated in ethtool for the dropped flows. For example - ethtool -N p5p1 flow-type udp4 dst-port 8000 action -1 ethtool -N p5p1 flow-type tcp4 src-ip 192.168.8.1 action -1 Signed-off-by: Manish Chopra Signed-off-by: Shahed Shaikh Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 35 ++++++++++++++----------- drivers/net/ethernet/qlogic/qede/qede.h | 1 + drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 1 + drivers/net/ethernet/qlogic/qede/qede_filter.c | 14 ++++++++++ drivers/net/ethernet/qlogic/qede/qede_main.c | 1 + include/linux/qed/qed_eth_if.h | 3 +++ include/linux/qed/qed_if.h | 1 + 7 files changed, 41 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 3cb8a8088467..1c0d0c217936 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1677,6 +1677,8 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, HILO_64_REGPAIR(tstats.mftag_filter_discard); p_stats->common.mac_filter_discards += HILO_64_REGPAIR(tstats.eth_mac_filter_discard); + p_stats->common.gft_filter_drop += + HILO_64_REGPAIR(tstats.eth_gft_drop_pkt); } static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, @@ -2015,16 +2017,6 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, u8 abs_vport_id = 0; int rc = -EINVAL; - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc) - return rc; - - if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { - rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); - if (rc) - return rc; - } - /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); @@ -2049,15 +2041,28 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length); - if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { - p_ramrod->rx_qid_valid = 1; - p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); + if (p_params->b_is_drop) { + p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT); + } else { + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc) + return rc; + + if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { + rc = qed_fw_l2_queue(p_hwfn, p_params->qid, + &abs_rx_q_id); + if (rc) + return rc; + + p_ramrod->rx_qid_valid = 1; + p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); + } + + p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); } p_ramrod->flow_id_valid = 0; p_ramrod->flow_id = 0; - - p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); p_ramrod->filter_action = p_params->b_is_add ? 
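/*
 * [Editor's aside, not part of the patch] A drop rule is expressed by
 * steering the flow to a "trashcan" vport instead of a real receive
 * queue, which is why the vport/queue translation above is skipped for
 * drops. Condensed sketch of that branch; the sample_* names and the
 * placeholder constant are hypothetical (the driver uses the firmware
 * constant ETH_GFT_TRASHCAN_VPORT):
 */
#include <stdbool.h>
#include <stdint.h>

#define SAMPLE_TRASHCAN_VPORT 0xff   /* placeholder value */

struct sample_ramrod {
	uint16_t vport_id;
	uint16_t rx_qid;
	uint8_t rx_qid_valid;
};

static void sample_fill_destination(struct sample_ramrod *r, bool drop,
				    uint16_t abs_vport, uint16_t abs_qid,
				    bool qid_valid)
{
	if (drop) {
		/* Packets matching the flow are silently discarded. */
		r->vport_id = SAMPLE_TRASHCAN_VPORT;
		return;
	}
	if (qid_valid) {
		r->rx_qid_valid = 1;
		r->rx_qid = abs_qid;
	}
	r->vport_id = abs_vport;
}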
GFT_ADD_FILTER : GFT_DELETE_FILTER; diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 2d3f09ed413b..81c5c8dfa2ef 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -75,6 +75,7 @@ struct qede_stats_common { u64 rx_bcast_pkts; u64 mftag_filter_discards; u64 mac_filter_discards; + u64 gft_filter_drop; u64 tx_ucast_bytes; u64 tx_mcast_bytes; u64 tx_bcast_bytes; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 8c6fdad91986..6906e04b609e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -161,6 +161,7 @@ static const struct { QEDE_STAT(no_buff_discards), QEDE_PF_STAT(mftag_filter_discards), QEDE_PF_STAT(mac_filter_discards), + QEDE_PF_STAT(gft_filter_drop), QEDE_STAT(tx_err_drop_pkts), QEDE_STAT(ttl0_discard), QEDE_STAT(packet_too_big_discard), diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 6c02c21d996d..e9e088d9c815 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -90,6 +90,7 @@ struct qede_arfs_fltr_node { bool filter_op; bool used; u8 fw_rc; + bool b_is_drop; struct hlist_node node; }; @@ -125,6 +126,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev, params.length = n->buf_len; params.qid = rxq_id; params.b_is_add = add_fltr; + params.b_is_drop = n->b_is_drop; if (n->vfid) { params.b_is_vf = true; @@ -1445,6 +1447,9 @@ int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd) fsp->ring_cookie |= ((u64)fltr->vfid) << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; } + + if (fltr->b_is_drop) + fsp->ring_cookie = RX_CLS_FLOW_DISC; unlock: __qede_unlock(edev); return rc; @@ -1816,6 +1821,10 @@ static int qede_flow_spec_validate(struct qede_dev *edev, return -EINVAL; } + /* If drop requested then no need to validate other data */ + if (fs->ring_cookie == RX_CLS_FLOW_DISC) + return 0; + if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie)) return 0; @@ -1852,6 +1861,11 @@ static void qede_flow_set_destination(struct qede_dev *edev, struct qede_arfs_fltr_node *n, struct ethtool_rx_flow_spec *fs) { + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + n->b_is_drop = true; + return; + } + n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie); n->next_rxq_id = n->rxq_id; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 9e70f713c3c5..d118771e1a7b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -347,6 +347,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts; p_common->mftag_filter_discards = stats.common.mftag_filter_discards; p_common->mac_filter_discards = stats.common.mac_filter_discards; + p_common->gft_filter_drop = stats.common.gft_filter_drop; p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes; p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 557e86e1c7d9..2978fa4add42 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -89,6 +89,9 @@ struct qed_ntuple_filter_params { /* true iff this filter is to be added. 
Else to be removed */ bool b_is_add; + + /* If flow needs to be dropped */ + bool b_is_drop; }; struct qed_dev_eth_info { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 44af652e7407..ac991a3b8f03 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1129,6 +1129,7 @@ struct qed_eth_stats_common { u64 rx_bcast_pkts; u64 mftag_filter_discards; u64 mac_filter_discards; + u64 gft_filter_drop; u64 tx_ucast_bytes; u64 tx_mcast_bytes; u64 tx_bcast_bytes; -- cgit v1.2.3 From d602de8e7e7fc25fb3a2112ce4285962f15aa549 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 28 May 2018 19:51:57 -0700 Subject: drivers/net: Fix various unnecessary characters after logging newlines Remove and coalesce formats when there is an unnecessary character after a logging newline. These extra characters cause logging defects. Miscellanea: o Coalesce formats Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 6 ++---- drivers/net/ethernet/qlogic/qed/qed_dev.c | 2 +- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4 ++-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 2 +- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 3 +-- drivers/net/wireless/intersil/prism54/islpci_eth.c | 6 +++--- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 4 ++-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 4 ++-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 2 +- 11 files changed, 18 insertions(+), 21 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index e500528ad751..8a815bb57177 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1766,7 +1766,7 @@ static int load_firmware(struct octeon_device *oct) ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); if (ret) { - dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.", + dev_err(&oct->pci_dev->dev, "Request firmware failed. 
Could not find file %s.\n", fw_name); release_firmware(fw); return ret; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index 6cec2a6a3dcc..7503aa222392 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -146,8 +146,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter) if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) { memcpy(adapter->mdump.md_template, addr, size); } else { - dev_err(&adapter->pdev->dev, "Failed to get minidump template, " - "err_code : %d, requested_size : %d, actual_size : %d\n ", + dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n", cmd.rsp.cmd, size, cmd.rsp.arg2); } pci_free_consistent(adapter->pdev, size, addr, md_template_addr); @@ -180,8 +179,7 @@ netxen_setup_minidump(struct netxen_adapter *adapter) if ((err == NX_RCODE_CMD_INVALID) || (err == NX_RCODE_CMD_NOT_IMPL)) { dev_info(&adapter->pdev->dev, - "Flashed firmware version does not support minidump, " - "minimum version required is [ %u.%u.%u ].\n ", + "Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n", NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR, NX_MD_SUPPORT_SUBVERSION); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 560528962658..fde20fd9942c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1098,7 +1098,7 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, } DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Sending final cleanup for PFVF[%d] [Command %08x\n]", + "Sending final cleanup for PFVF[%d] [Command %08x]\n", id, command); qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 8293c2028002..70de062b72a1 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2211,7 +2211,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) while (prod != rx_ring->cnsmr_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", + "cq_id = %d, prod = %d, cnsmr = %d\n", rx_ring->cq_id, prod, rx_ring->cnsmr_idx); net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; @@ -2258,7 +2258,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) while (prod != rx_ring->cnsmr_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", + "cq_id = %d, prod = %d, cnsmr = %d\n", rx_ring->cq_id, prod, rx_ring->cnsmr_idx); net_rsp = rx_ring->curr_entry; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index f5b405c98047..b6122aad639e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1264,7 +1264,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) brcmf_dbg(TRACE, "Enter\n"); if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) { - brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n "); + brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n"); err = brcmf_fil_cmd_data_set(vif->ifp, BRCMF_C_DISASSOC, NULL, 0); if (err) { diff --git 
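/*
 * [Editor's aside, not part of the patch] The defect class fixed by
 * this commit is a stray character placed after the '\n' in a format
 * string (e.g. "...\n." or "...\n "): the byte after the newline opens
 * a new, unterminated log line and breaks line-oriented log parsing.
 * A tiny illustration:
 */
#include <stdio.h>

int main(void)
{
	printf("cq_id = %d, prod = %d, cnsmr = %d.\n.", 1, 2, 3); /* bad */
	printf("cq_id = %d, prod = %d, cnsmr = %d\n", 1, 2, 3);   /* good */
	return 0;
}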
a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index ba3fb1d2ddb4..f26beeb6c5ff 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -7557,8 +7557,7 @@ static int ipw_associate(void *data) } if (priv->status & STATUS_DISASSOCIATING) { - IPW_DEBUG_ASSOC("Not attempting association (in " - "disassociating)\n "); + IPW_DEBUG_ASSOC("Not attempting association (in disassociating)\n"); schedule_work(&priv->associate); return 0; } diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.c b/drivers/net/wireless/intersil/prism54/islpci_eth.c index 9b0ded733294..b277113b33d3 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_eth.c +++ b/drivers/net/wireless/intersil/prism54/islpci_eth.c @@ -57,7 +57,7 @@ islpci_eth_cleanup_transmit(islpci_private *priv, #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, - "cleanup skb %p skb->data %p skb->len %u truesize %u\n ", + "cleanup skb %p skb->data %p skb->len %u truesize %u\n", skb, skb->data, skb->len, skb->truesize); #endif @@ -328,7 +328,7 @@ islpci_eth_receive(islpci_private *priv) #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, - "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ", + "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n", control_block->rx_data_low[priv->free_data_rx].address, skb->data, skb->len, offset, skb->truesize); #endif @@ -436,7 +436,7 @@ islpci_eth_receive(islpci_private *priv) #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, - "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ", + "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n", skb, skb->data, skb->len, index, skb->truesize); #endif diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index 38b2ba1ac6f8..380e86f9e00b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1267,8 +1267,8 @@ static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, - "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " - "ecc=%x\n ", __func__, reg_e94, reg_e9c, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", + __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index c4b86a84a721..26b674aca125 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -1175,8 +1175,8 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, - "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " - "ecc=%x\n ", __func__, reg_e94, reg_e9c, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", + __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 718a73c623a7..505ab1b055ff 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ 
b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -3406,8 +3406,8 @@ void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv) reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, - "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " - "ecc=%x\n ", __func__, reg_e94, reg_e9c, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", + __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index 9111ba7ff0a1..3be8c88971e2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -2642,7 +2642,7 @@ static void rtl8821ae_dm_edca_choose_traffic_idx( if (cur_tx_bytes > (cur_rx_bytes*4)) { *pb_is_cur_rdl_state = false; RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, - "Uplink Traffic\n "); + "Uplink Traffic\n"); } else { *pb_is_cur_rdl_state = true; RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, -- cgit v1.2.3 From 32d26a685c1802a0e485bd674e7dd038e88019f7 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 29 May 2018 02:31:24 -0700 Subject: qed*: Add link change count value to ethtool statistics display. This patch adds driver changes for capturing the link change count in ethtool statistics display. Please consider applying this to "net-next". Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 12 ++++++++++-- drivers/net/ethernet/qlogic/qede/qede.h | 1 + drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 2 ++ drivers/net/ethernet/qlogic/qede/qede_main.c | 1 + include/linux/qed/qed_if.h | 1 + 5 files changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 1c0d0c217936..eed4725761f5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1854,6 +1854,11 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, p_ah->tx_1519_to_max_byte_packets = port_stats.eth.u1.ah1.t1519_to_max; } + + p_common->link_change_count = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + link_change_count)); } static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, @@ -1961,11 +1966,14 @@ void qed_reset_vport_stats(struct qed_dev *cdev) /* PORT statistics are not necessarily reset, so we need to * read and create a baseline for future statistics. + * Link change stat is maintained by MFW, return its value as is. 
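* A minimal sketch of where that value comes from (names exactly as in
* the qed_l2.c hunk above): the MFW keeps the counter in the port shmem
* and the driver mirrors it on every stats read, e.g.
*   p_common->link_change_count =
*       qed_rd(p_hwfn, p_ptt,
*              p_hwfn->mcp_info->port_addr +
*              offsetof(struct public_port, link_change_count));
* so the reset path below only zeroes the driver-side baseline copy.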
*/ - if (!cdev->reset_stats) + if (!cdev->reset_stats) { DP_INFO(cdev, "Reset stats not allocated\n"); - else + } else { _qed_get_vport_stats(cdev, cdev->reset_stats); + cdev->reset_stats->common.link_change_count = 0; + } } static enum gft_profile_type diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 81c5c8dfa2ef..d7ed0d3dbf71 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -88,6 +88,7 @@ struct qede_stats_common { u64 coalesced_aborts_num; u64 non_coalesced_pkts; u64 coalesced_bytes; + u64 link_change_count; /* port */ u64 rx_64_byte_packets; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 6906e04b609e..f4a0f8ff8261 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -171,6 +171,8 @@ static const struct { QEDE_STAT(coalesced_aborts_num), QEDE_STAT(non_coalesced_pkts), QEDE_STAT(coalesced_bytes), + + QEDE_STAT(link_change_count), }; #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index d118771e1a7b..6a796040a32c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -399,6 +399,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) p_common->brb_truncates = stats.common.brb_truncates; p_common->brb_discards = stats.common.brb_discards; p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames; + p_common->link_change_count = stats.common.link_change_count; if (QEDE_IS_BB(edev)) { struct qede_stats_bb *p_bb = &edev->stats.bb; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index ac991a3b8f03..b4040023cbfb 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1180,6 +1180,7 @@ struct qed_eth_stats_common { u64 tx_mac_mc_packets; u64 tx_mac_bc_packets; u64 tx_mac_ctrl_frames; + u64 link_change_count; }; struct qed_eth_stats_bb { -- cgit v1.2.3 From 5e9f20359a166de934b5bd02cb54c0be0e8c2890 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Thu, 31 May 2018 18:47:36 -0700 Subject: qed: Fix shared memory inconsistency between driver and the MFW. The structure shared between the driver and the management firmware (MFW) differs in size. The additional field defined by the MFW is not relevant to the current driver. Add a dummy field to the structure. Fixes: cac6f691 ("qed: Add support for Unified Fabric Port") Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 8e1e6e1eb40e..beba9308011b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11996,6 +11996,7 @@ struct public_port { #define EEE_REMOTE_TW_RX_MASK 0xffff0000 #define EEE_REMOTE_TW_RX_OFFSET 16 + u32 reserved1; u32 oem_cfg_port; #define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 #define OEM_CFG_CHANNEL_TYPE_OFFSET 0 -- cgit v1.2.3 From b5fabb080062e7685b898e9c0ec4d95f4d526ed2 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Thu, 31 May 2018 18:47:37 -0700 Subject: qed: Fix use of incorrect shmem address.
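Both this fix and the previous one police the same contract: the driver's copy of an MFW shared-memory structure must match the MFW layout field for field, since members are located purely by byte offset. A minimal sketch of the failure mode, using hypothetical mini-structs rather than the real definitions:

struct shmem_drv { u32 cfg; u32 oem_cfg_port; };                /* stale driver view */
struct shmem_mfw { u32 cfg; u32 reserved1; u32 oem_cfg_port; }; /* actual MFW layout */
/* offsetof(struct shmem_drv, oem_cfg_port) is 4, but the MFW writes the
 * field at offset 8, so the driver keeps reading the wrong dword until a
 * matching dummy reserved1 member is added on its side too.
 */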
Incorrect shared memory address is used while deriving the values for tc and pri_type. Use the shmem address corresponding to 'oem_cfg_func', where the management firmware saves the tc/pri_type values. Fixes: cac6f691 ("qed: Add support for Unified Fabric Port") Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 2612e3e458d9..6f9927d1a501 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1514,9 +1514,10 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); - val = (port_cfg & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET; + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >> + OEM_CFG_FUNC_TC_OFFSET; p_hwfn->ufp_info.tc = (u8)val; - val = (port_cfg & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET; if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) { p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC; -- cgit v1.2.3 From 39dbc646fd2c67ee9b71450ce172cbd714d4e7fb Mon Sep 17 00:00:00 2001 From: Yuval Bason Date: Sun, 3 Jun 2018 19:13:07 +0300 Subject: qed: Add srq core support for RoCE and iWARP This patch adds support for configuring SRQs and provides the necessary APIs for the RDMA upper-layer driver (qedr) to enable the SRQ feature. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Yuval Bason Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5 +- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 1 + drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 + drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 23 ++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 2 + drivers/net/ethernet/qlogic/qed/qed_rdma.c | 178 +++++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 + drivers/net/ethernet/qlogic/qed/qed_roce.c | 17 ++- include/linux/qed/qed_rdma_if.h | 12 +- 9 files changed, 234 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 820b226d6ff8..7ed6aa0521e1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -47,6 +47,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" +#include "qed_rdma.h" #include "qed_reg_addr.h" #include "qed_sriov.h" @@ -426,7 +427,7 @@ static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) p_mgr->srq_count = num_srqs; } -static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) +u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; @@ -2071,7 +2072,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, u32 num_cons, num_qps, num_srqs; enum protocol_type proto; - num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs); + num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs); if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { DP_NOTICE(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index a4e95869889f..758a8b4c0de8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -235,6 +235,7 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, enum protocol_type type); u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, enum protocol_type type); +u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn); int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto); #define QED_CTX_WORKING_MEM 0 diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index beba9308011b..b9704be53537 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -9725,6 +9725,8 @@ enum iwarp_eqe_async_opcode { IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, + IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, + IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT, MAX_IWARP_EQE_ASYNC_OPCODE }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 2a2b1018ed1d..474e6cff5b97 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -271,6 +271,8 @@ int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, p_ramrod->sq_num_pages = qp->sq_num_pages; p_ramrod->rq_num_pages = qp->rq_num_pages; + p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); @@ -3004,8 +3006,11 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, union event_ring_data *data, u8 fw_return_code) { + struct qed_rdma_events events = p_hwfn->p_rdma_info->events; struct regpair 
*fw_handle = &data->rdma_data.async_handle; struct qed_iwarp_ep *ep = NULL; + u16 srq_offset; + u16 srq_id; u16 cid; ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, @@ -3067,6 +3072,24 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, qed_iwarp_cid_cleaned(p_hwfn, cid); break; + case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n"); + srq_offset = p_hwfn->p_rdma_info->srq_id_offset; + /* FW assigns value that is no greater than u16 */ + srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; + events.affiliated_event(events.context, + QED_IWARP_EVENT_SRQ_EMPTY, + &srq_id); + break; + case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n"); + srq_offset = p_hwfn->p_rdma_info->srq_id_offset; + /* FW assigns value that is no greater than u16 */ + srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; + events.affiliated_event(events.context, + QED_IWARP_EVENT_SRQ_LIMIT, + &srq_id); + break; case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 68c4399ffd50..b04d57ca5176 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -64,6 +64,7 @@ #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) +#define QED_RDMA_SRQS QED_ROCE_QPS static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -922,6 +923,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, if (IS_ENABLED(CONFIG_QED_RDMA)) { params->rdma_pf_params.num_qps = QED_ROCE_QPS; params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; + params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; /* divide by 3 the MRs to avoid MF ILT overflow */ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index a411f9c702a1..b8705107a93a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -259,15 +259,29 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, goto free_cid_map; } + /* Allocate bitmap for srqs */ + p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn); + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map, + p_rdma_info->num_srqs, "SRQ"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate srq bitmap, rc = %d\n", rc); + goto free_real_cid_map; + } + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) rc = qed_iwarp_alloc(p_hwfn); if (rc) - goto free_cid_map; + goto free_srq_map; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); return 0; +free_srq_map: + kfree(p_rdma_info->srq_map.bitmap); +free_real_cid_map: + kfree(p_rdma_info->real_cid_map.bitmap); free_cid_map: kfree(p_rdma_info->cid_map.bitmap); free_tid_map: @@ -351,6 +365,8 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1); kfree(p_rdma_info->port); kfree(p_rdma_info->dev); @@ -431,6 +447,12 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, if (cdev->rdma_max_sge) dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); + 
dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE; + if (p_hwfn->cdev->rdma_max_srq_sge) { + dev->max_srq_sge = min_t(u32, + p_hwfn->cdev->rdma_max_srq_sge, + dev->max_srq_sge); + } dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; dev->max_inline = (cdev->rdma_max_inline) ? @@ -474,6 +496,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; dev->max_pkey = QED_RDMA_MAX_P_KEY; + dev->max_srq = p_hwfn->p_rdma_info->num_srqs; + dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM; dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / @@ -1628,6 +1652,155 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) return QED_LEADING_HWFN(cdev); } +static int qed_rdma_modify_srq(void *rdma_cxt, + struct qed_rdma_modify_srq_in_params *in_params) +{ + struct rdma_srq_modify_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_spq_entry *p_ent; + u16 opaque_fid; + int rc; + + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_MODIFY_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rdma_modify_srq; + p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); + opaque_fid = p_hwfn->hw_info.opaque_fid; + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x", + in_params->srq_id); + + return rc; +} + +static int +qed_rdma_destroy_srq(void *rdma_cxt, + struct qed_rdma_destroy_srq_in_params *in_params) +{ + struct rdma_srq_destroy_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_spq_entry *p_ent; + struct qed_bmap *bmap; + u16 opaque_fid; + int rc; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_DESTROY_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rdma_destroy_srq; + p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + bmap = &p_hwfn->p_rdma_info->srq_map; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x", + in_params->srq_id); + + return rc; +} + +static int +qed_rdma_create_srq(void *rdma_cxt, + struct qed_rdma_create_srq_in_params *in_params, + struct qed_rdma_create_srq_out_params *out_params) +{ + struct rdma_srq_create_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + enum qed_cxt_elem_type elem_type; + struct qed_spq_entry *p_ent; + u16 opaque_fid, srq_id; + struct qed_bmap *bmap; + u32 returned_id; + int rc; + + bmap = &p_hwfn->p_rdma_info->srq_map; + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + if (rc) { + DP_NOTICE(p_hwfn, "failed to 
allocate srq id\n"); + return rc; + } + + elem_type = QED_ELEM_SRQ; + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id); + if (rc) + goto err; + /* returned id is no greater than u16 */ + srq_id = (u16)returned_id; + opaque_fid = p_hwfn->hw_info.opaque_fid; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_CREATE_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + goto err; + + p_ramrod = &p_ent->ramrod.rdma_create_srq; + DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); + p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); + p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); + p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + p_ramrod->page_size = cpu_to_le16(in_params->page_size); + DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + out_params->srq_id = srq_id; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "SRQ created Id = %x\n", out_params->srq_id); + + return rc; + +err: + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, bmap, returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + return rc; +} + bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) { bool result; @@ -1773,6 +1946,9 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = { .rdma_free_tid = &qed_rdma_free_tid, .rdma_register_tid = &qed_rdma_register_tid, .rdma_deregister_tid = &qed_rdma_deregister_tid, + .rdma_create_srq = &qed_rdma_create_srq, + .rdma_modify_srq = &qed_rdma_modify_srq, + .rdma_destroy_srq = &qed_rdma_destroy_srq, .ll2_acquire_connection = &qed_ll2_acquire_connection, .ll2_establish_connection = &qed_ll2_establish_connection, .ll2_terminate_connection = &qed_ll2_terminate_connection, diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 18ec9cbd84f5..6f722ee8ee94 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -96,6 +96,8 @@ struct qed_rdma_info { u8 num_cnqs; u32 num_qps; u32 num_mrs; + u32 num_srqs; + u16 srq_id_offset; u16 queue_zone_base; u16 max_queue_zones; enum protocol_type proto; diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 6acfd43c1a4f..ee57fcdc698d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -65,6 +65,8 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, u16 echo, union event_ring_data *data, u8 fw_return_code) { + struct qed_rdma_events events = p_hwfn->p_rdma_info->events; + if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { u16 icid = (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid); @@ -75,11 +77,18 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn, */ qed_roce_free_real_icid(p_hwfn, icid); } else { - struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events; + if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || + fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { + u16 srq_id = (u16)data->rdma_data.async_handle.lo; + + events.affiliated_event(events.context, fw_event_code, + &srq_id); + } else { + union rdma_eqe_data rdata = data->rdma_data; - events->affiliated_event(p_hwfn->p_rdma_info->events.context, - fw_event_code, - (void *)&data->rdma_data.async_handle); + 
events.affiliated_event(events.context, fw_event_code, + (void *)&rdata.async_handle); + } } return 0; diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 4dd72ba210f5..e05e320d28b7 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -485,7 +485,9 @@ enum qed_iwarp_event_type { QED_IWARP_EVENT_ACTIVE_MPA_REPLY, QED_IWARP_EVENT_LOCAL_ACCESS_ERROR, QED_IWARP_EVENT_REMOTE_OPERATION_ERROR, - QED_IWARP_EVENT_TERMINATE_RECEIVED + QED_IWARP_EVENT_TERMINATE_RECEIVED, + QED_IWARP_EVENT_SRQ_LIMIT, + QED_IWARP_EVENT_SRQ_EMPTY, }; enum qed_tcp_ip_version { @@ -646,6 +648,14 @@ struct qed_rdma_ops { int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid); void (*rdma_free_tid)(void *rdma_cxt, u32 itid); + int (*rdma_create_srq)(void *rdma_cxt, + struct qed_rdma_create_srq_in_params *iparams, + struct qed_rdma_create_srq_out_params *oparams); + int (*rdma_destroy_srq)(void *rdma_cxt, + struct qed_rdma_destroy_srq_in_params *iparams); + int (*rdma_modify_srq)(void *rdma_cxt, + struct qed_rdma_modify_srq_in_params *iparams); + int (*ll2_acquire_connection)(void *rdma_cxt, struct qed_ll2_acquire_data *data); -- cgit v1.2.3 From ff2e351e1928b5b81a23d78e3e4effc24db007b9 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 4 Jun 2018 21:10:31 +0800 Subject: qed: use dma_zalloc_coherent instead of allocator/memset Use dma_zalloc_coherent instead of dma_alloc_coherent followed by memset 0. Signed-off-by: YueHaibing Acked-by: Tomer Tayar Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 7ed6aa0521e1..b5b5ff725426 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -937,14 +937,13 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) u32 size = min_t(u32, total_size, psz); void **p_virt = &p_mngr->t2[i].p_virt; - *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - size, - &p_mngr->t2[i].p_phys, GFP_KERNEL); + *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, + size, &p_mngr->t2[i].p_phys, + GFP_KERNEL); if (!p_mngr->t2[i].p_virt) { rc = -ENOMEM; goto t2_fail; } - memset(*p_virt, 0, size); p_mngr->t2[i].size = size; total_size -= size; } -- cgit v1.2.3 From d52c89f120de849575f6b2e5948038f2be12ce6f Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Tue, 5 Jun 2018 13:11:16 +0300 Subject: qed*: Utilize FW 8.37.2.0

This FW contains several fixes and features.

RDMA
- Several modifications and fixes for Memory Windows
- Drop vlan and tcp timestamp from mss calculation in driver for this FW
- Fix SQ completion flow when local ack timeout is infinite
- Modifications in t10dif support

ETH
- Fix aRFS for tunneled traffic without inner IP.
- Fix chip configuration which may fail under heavy traffic conditions.
- Support receiving any-VNI in VXLAN and GENEVE RX classification.

iSCSI / FCoE
- Fix iSCSI recovery flow
- Drop vlan and tcp timestamp from mss calc for fw 8.37.2.0

Misc
- Several registers (split registers) won't read correctly with ethtool -d

Signed-off-by: Ariel Elior Signed-off-by: Manish Rangankar Signed-off-by: Michal Kalderon Signed-off-by: David S.
Miller --- drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 139 +++--- drivers/infiniband/hw/qedr/verbs.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_debug.c | 492 ++++++++++++--------- drivers/net/ethernet/qlogic/qed/qed_dev.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 462 ++++++++++++------- drivers/net/ethernet/qlogic/qed/qed_hw.c | 20 + drivers/net/ethernet/qlogic/qed/qed_hw.h | 12 + .../net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 50 ++- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 13 +- drivers/net/ethernet/qlogic/qed/qed_l2.c | 3 + drivers/net/ethernet/qlogic/qed/qed_l2.h | 1 + drivers/net/ethernet/qlogic/qed/qed_rdma.c | 8 +- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 3 +- drivers/net/ethernet/qlogic/qed/qed_roce.c | 31 +- drivers/scsi/qedi/qedi_iscsi.c | 6 - include/linux/qed/common_hsi.h | 4 +- include/linux/qed/iscsi_common.h | 8 +- include/linux/qed/qed_rdma_if.h | 4 +- include/linux/qed/roce_common.h | 1 + 19 files changed, 727 insertions(+), 536 deletions(-) (limited to 'drivers/net/ethernet/qlogic/qed') diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h index b816c80df50b..7e1f7021396a 100644 --- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h +++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h @@ -116,6 +116,7 @@ enum rdma_cqe_requester_status_enum { RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR, RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR, RDMA_CQE_REQ_STS_XRC_VOILATION_ERR, + RDMA_CQE_REQ_STS_SIG_ERR, MAX_RDMA_CQE_REQUESTER_STATUS_ENUM }; @@ -152,12 +153,12 @@ struct rdma_rq_sge { struct regpair addr; __le32 length; __le32 flags; -#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF -#define RDMA_RQ_SGE_L_KEY_SHIFT 0 +#define RDMA_RQ_SGE_L_KEY_LO_MASK 0x3FFFFFF +#define RDMA_RQ_SGE_L_KEY_LO_SHIFT 0 #define RDMA_RQ_SGE_NUM_SGES_MASK 0x7 #define RDMA_RQ_SGE_NUM_SGES_SHIFT 26 -#define RDMA_RQ_SGE_RESERVED0_MASK 0x7 -#define RDMA_RQ_SGE_RESERVED0_SHIFT 29 +#define RDMA_RQ_SGE_L_KEY_HI_MASK 0x7 +#define RDMA_RQ_SGE_L_KEY_HI_SHIFT 29 }; struct rdma_srq_sge { @@ -241,18 +242,39 @@ enum rdma_dif_io_direction_flg { MAX_RDMA_DIF_IO_DIRECTION_FLG }; -/* RDMA DIF Runt Result Structure */ -struct rdma_dif_runt_result { - __le16 guard_tag; - __le16 reserved[3]; +struct rdma_dif_params { + __le32 base_ref_tag; + __le16 app_tag; + __le16 app_tag_mask; + __le16 runt_crc_value; + __le16 flags; +#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_MASK 0x1 +#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_SHIFT 0 +#define RDMA_DIF_PARAMS_BLOCK_SIZE_MASK 0x1 +#define RDMA_DIF_PARAMS_BLOCK_SIZE_SHIFT 1 +#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_MASK 0x1 +#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_SHIFT 2 +#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_MASK 0x1 +#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_SHIFT 3 +#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_MASK 0x1 +#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_SHIFT 4 +#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_MASK 0x1 +#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_SHIFT 5 +#define RDMA_DIF_PARAMS_CRC_SEED_MASK 0x1 +#define RDMA_DIF_PARAMS_CRC_SEED_SHIFT 6 +#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_MASK 0x1 +#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_SHIFT 7 +#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_MASK 0x1 +#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_SHIFT 8 +#define RDMA_DIF_PARAMS_APP_ESCAPE_MASK 0x1 +#define RDMA_DIF_PARAMS_APP_ESCAPE_SHIFT 9 +#define RDMA_DIF_PARAMS_REF_ESCAPE_MASK 0x1 +#define RDMA_DIF_PARAMS_REF_ESCAPE_SHIFT 10 +#define RDMA_DIF_PARAMS_RESERVED4_MASK 0x1F +#define RDMA_DIF_PARAMS_RESERVED4_SHIFT 11 + __le32 reserved5; }; -/* Memory 
window type enumeration */ -enum rdma_mw_type { - RDMA_MW_TYPE_1, - RDMA_MW_TYPE_2A, - MAX_RDMA_MW_TYPE -}; struct rdma_sq_atomic_wqe { __le32 reserved1; @@ -334,17 +356,17 @@ struct rdma_sq_bind_wqe { #define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3 #define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1 #define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4 -#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7 -#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5 +#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 6 u8 wqe_size; u8 prev_wqe_size; u8 bind_ctrl; #define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1 #define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0 -#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1 -#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1 -#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F -#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2 +#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x7F +#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 1 u8 access_ctrl; #define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1 #define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0 @@ -363,6 +385,7 @@ struct rdma_sq_bind_wqe { __le32 length_lo; __le32 parent_l_key; __le32 reserved4; + struct rdma_dif_params dif_params; }; /* First element (16 bytes) of bind wqe */ @@ -392,10 +415,8 @@ struct rdma_sq_bind_wqe_2nd { u8 bind_ctrl; #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1 #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0 -#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1 -#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1 -#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F -#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2 +#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x7F +#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 1 u8 access_ctrl; #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0 @@ -416,6 +437,11 @@ struct rdma_sq_bind_wqe_2nd { __le32 reserved4; }; +/* Third element (16 bytes) of bind wqe */ +struct rdma_sq_bind_wqe_3rd { + struct rdma_dif_params dif_params; +}; + /* Structure with only the SQ WQE common * fields. 
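* (A hedged aside: these _MASK/_SHIFT pairs are consumed through the
* SET_FIELD()/GET_FIELD() helpers rather than open-coded shifts, e.g.
*   SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, wr->num_sge);
* packs num_sge into bits 26..28 of the flags dword; the verbs.c hunk
* further down uses exactly this pattern for RDMA_RQ_SGE_L_KEY_LO.)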
Size is of one SQ element (16B) */ @@ -486,30 +512,6 @@ struct rdma_sq_fmr_wqe { u8 length_hi; __le32 length_lo; struct regpair pbl_addr; - __le32 dif_base_ref_tag; - __le16 dif_app_tag; - __le16 dif_app_tag_mask; - __le16 dif_runt_crc_value; - __le16 dif_flags; -#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0 -#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1 -#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5 -#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6 -#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7 -#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF -#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8 - __le32 reserved5; }; /* First element (16 bytes) of fmr wqe */ @@ -566,33 +568,6 @@ struct rdma_sq_fmr_wqe_2nd { struct regpair pbl_addr; }; -/* Third element (16 bytes) of fmr wqe */ -struct rdma_sq_fmr_wqe_3rd { - __le32 dif_base_ref_tag; - __le16 dif_app_tag; - __le16 dif_app_tag_mask; - __le16 dif_runt_crc_value; - __le16 dif_flags; -#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0 -#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5 -#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7 -#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF -#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8 - __le32 reserved5; -}; struct rdma_sq_local_inv_wqe { struct regpair reserved; @@ -637,8 +612,8 @@ struct rdma_sq_rdma_wqe { #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6 -#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7 +#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 7 u8 wqe_size; u8 prev_wqe_size; struct regpair remote_va; @@ -646,13 +621,9 @@ struct rdma_sq_rdma_wqe { u8 dif_flags; #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1 #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0 -#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1 -#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2 -#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F -#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3 - u8 reserved2[3]; +#define 
RDMA_SQ_RDMA_WQE_RESERVED2_MASK 0x7F +#define RDMA_SQ_RDMA_WQE_RESERVED2_SHIFT 1 + u8 reserved3[3]; }; /* First element (16 bytes) of rdma wqe */ diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 3f9afc02d166..e2caabb8a926 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3276,7 +3276,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, wr->num_sge); - SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, + SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, wr->sg_list[i].lkey); RQ_SGE_SET(rqe, wr->sg_list[i].addr, @@ -3295,7 +3295,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, /* First one must include the number * of SGE in the list */ - SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0); + SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0); SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1); RQ_SGE_SET(rqe, 0, 0, flags); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 39124b594a36..b9ec460dd996 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -183,16 +183,9 @@ enum platform_ids { MAX_PLATFORM_IDS }; -struct chip_platform_defs { - u8 num_ports; - u8 num_pfs; - u8 num_vfs; -}; - /* Chip constant definitions */ struct chip_defs { const char *name; - struct chip_platform_defs per_platform[MAX_PLATFORM_IDS]; }; /* Platform constant definitions */ @@ -317,6 +310,11 @@ struct phy_defs { u32 tbus_data_hi_addr; }; +/* Split type definitions */ +struct split_type_defs { + const char *name; +}; + /******************************** Constants **********************************/ #define MAX_LCIDS 320 @@ -469,21 +467,9 @@ static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { - { "bb", - {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } }, - { "ah", - {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } }, - { "reserved", - {{0, 0, 0}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } } + {"bb"}, + {"ah"}, + {"reserved"}, }; /* Storm constant definitions array */ @@ -1588,7 +1574,7 @@ static struct grc_param_defs s_grc_param_defs[] = { {{0, 0, 0}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ - {{0, 0, 0}, 0, 1, false, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, false, 0, 0}, /* DBG_GRC_PARAM_DUMP_NIG */ {{1, 1, 1}, 0, 1, false, false, 0, 1}, @@ -1745,6 +1731,23 @@ static struct phy_defs s_phy_defs[] = { PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, }; +static struct split_type_defs s_split_type_defs[] = { + /* SPLIT_TYPE_NONE */ + {"eng"}, + + /* SPLIT_TYPE_PORT */ + {"port"}, + + /* SPLIT_TYPE_PF */ + {"pf"}, + + /* SPLIT_TYPE_PORT_PF */ + {"port"}, + + /* SPLIT_TYPE_VF */ + {"vf"} +}; + /**************************** Private Functions ******************************/ /* Reads and returns a single dword from the specified unaligned buffer */ @@ -1781,28 +1784,68 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 num_pfs = 0, max_pfs_per_port = 0; if (dev_data->initialized) return DBG_STATUS_OK; + /* Set chip */ if (QED_IS_K2(p_hwfn->cdev)) { dev_data->chip_id = CHIP_K2; dev_data->mode_enable[MODE_K2] = 1; + dev_data->num_vfs = MAX_NUM_VFS_K2; + num_pfs = MAX_NUM_PFS_K2; + max_pfs_per_port = 
MAX_NUM_PFS_K2 / 2; } else if (QED_IS_BB_B0(p_hwfn->cdev)) { dev_data->chip_id = CHIP_BB; dev_data->mode_enable[MODE_BB] = 1; + dev_data->num_vfs = MAX_NUM_VFS_BB; + num_pfs = MAX_NUM_PFS_BB; + max_pfs_per_port = MAX_NUM_PFS_BB; } else { return DBG_STATUS_UNKNOWN_CHIP; } + /* Set platform */ dev_data->platform_id = PLATFORM_ASIC; dev_data->mode_enable[MODE_ASIC] = 1; + /* Set port mode */ + switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { + case 0: + dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1; + break; + case 1: + dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1; + break; + case 2: + dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1; + break; + } + + /* Set 100G mode */ + if (dev_data->chip_id == CHIP_BB && + qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2) + dev_data->mode_enable[MODE_100G] = 1; + + /* Set number of ports */ + if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] || + dev_data->mode_enable[MODE_100G]) + dev_data->num_ports = 1; + else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2]) + dev_data->num_ports = 2; + else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4]) + dev_data->num_ports = 4; + + /* Set number of PFs per port */ + dev_data->num_pfs_per_port = min_t(u32, + num_pfs / dev_data->num_ports, + max_pfs_per_port); + /* Initializes the GRC parameters */ qed_dbg_grc_init_params(p_hwfn); dev_data->use_dmae = true; - dev_data->num_regs_read = 0; dev_data->initialized = 1; return DBG_STATUS_OK; @@ -1821,9 +1864,9 @@ static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn, /* Reads the FW info structure for the specified Storm from the chip, * and writes it to the specified fw_info pointer. */ -static void qed_read_fw_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 storm_id, struct fw_info *fw_info) +static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 storm_id, struct fw_info *fw_info) { struct storm_defs *storm = &s_storm_defs[storm_id]; struct fw_info_location fw_info_location; @@ -1945,45 +1988,29 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; char fw_ver_str[16] = EMPTY_FW_VERSION_STR; char fw_img_str[16] = EMPTY_FW_IMAGE_STR; struct fw_info fw_info = { {0}, {0} }; u32 offset = 0; if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { - /* Read FW image/version from PRAM in a non-reset SEMI */ - bool found = false; - u8 storm_id; - - for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; - storm_id++) { - struct storm_defs *storm = &s_storm_defs[storm_id]; - - /* Read FW version/image */ - if (dev_data->block_in_reset[storm->block_id]) - continue; - - /* Read FW info for the current Storm */ - qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); - - /* Create FW version/image strings */ - if (snprintf(fw_ver_str, sizeof(fw_ver_str), - "%d_%d_%d_%d", fw_info.ver.num.major, - fw_info.ver.num.minor, fw_info.ver.num.rev, - fw_info.ver.num.eng) < 0) - DP_NOTICE(p_hwfn, - "Unexpected debug error: invalid FW version string\n"); - switch (fw_info.ver.image_id) { - case FW_IMG_MAIN: - strcpy(fw_img_str, "main"); - break; - default: - strcpy(fw_img_str, "unknown"); - break; - } - - found = true; + /* Read FW info from chip */ + qed_read_fw_info(p_hwfn, p_ptt, &fw_info); + + /* Create FW version/image strings */ + if (snprintf(fw_ver_str, sizeof(fw_ver_str), + "%d_%d_%d_%d", fw_info.ver.num.major, + fw_info.ver.num.minor, fw_info.ver.num.rev, + fw_info.ver.num.eng) < 0) +
DP_NOTICE(p_hwfn, + "Unexpected debug error: invalid FW version string\n"); + switch (fw_info.ver.image_id) { + case FW_IMG_MAIN: + strcpy(fw_img_str, "main"); + break; + default: + strcpy(fw_img_str, "unknown"); + break; } } @@ -2412,20 +2439,21 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, /* Dumps GRC registers section header. Returns the dumped size in dwords. * The following parameters are dumped: - * - count: no. of dumped entries - * - split: split type - * - id: split ID (dumped only if split_id >= 0) + * - count: no. of dumped entries + * - split_type: split type + * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE) * - param_name: user parameter value (dumped only if param_name != NULL * and param_val != NULL). */ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, bool dump, u32 num_reg_entries, - const char *split_type, - int split_id, + enum init_split_types split_type, + u8 split_id, const char *param_name, const char *param_val) { - u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0); + u8 num_params = 2 + + (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0); u32 offset = 0; offset += qed_dump_section_hdr(dump_buf + offset, @@ -2433,8 +2461,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, offset += qed_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries); offset += qed_dump_str_param(dump_buf + offset, - dump, "split", split_type); - if (split_id >= 0) + dump, "split", + s_split_type_defs[split_type].name); + if (split_type != SPLIT_TYPE_NONE) offset += qed_dump_num_param(dump_buf + offset, dump, "id", split_id); if (param_name && param_val) @@ -2463,9 +2492,12 @@ void qed_read_regs(struct qed_hwfn *p_hwfn, static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len, bool wide_bus) + bool dump, u32 addr, u32 len, bool wide_bus, + enum init_split_types split_type, + u8 split_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0; if (!dump) return len; @@ -2481,8 +2513,27 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, dev_data->num_regs_read = 0; } + switch (split_type) { + case SPLIT_TYPE_PORT: + port_id = split_id; + break; + case SPLIT_TYPE_PF: + pf_id = split_id; + break; + case SPLIT_TYPE_PORT_PF: + port_id = split_id / dev_data->num_pfs_per_port; + pf_id = port_id + dev_data->num_ports * + (split_id % dev_data->num_pfs_per_port); + break; + case SPLIT_TYPE_VF: + vf_id = split_id; + break; + default: + break; + } + /* Try reading using DMAE */ - if (dev_data->use_dmae && + if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE && (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || wide_bus)) { if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), @@ -2494,7 +2545,37 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, "Failed reading from chip using DMAE, using GRC instead\n"); } - /* Read registers */ + /* If not read using DMAE, read using GRC */ + + /* Set pretend */ + if (split_type != dev_data->pretend.split_type || split_id != + dev_data->pretend.split_id) { + switch (split_type) { + case SPLIT_TYPE_PORT: + qed_port_pretend(p_hwfn, p_ptt, port_id); + break; + case SPLIT_TYPE_PF: + fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + qed_fid_pretend(p_hwfn, p_ptt, fid); + break; + case SPLIT_TYPE_PORT_PF: + fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid); + break; + case 
SPLIT_TYPE_VF: + fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | + (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT); + qed_fid_pretend(p_hwfn, p_ptt, fid); + break; + default: + break; + } + + dev_data->pretend.split_type = (u8)split_type; + dev_data->pretend.split_id = split_id; + } + + /* Read registers using GRC */ qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len); return len; @@ -2518,7 +2599,8 @@ static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len, bool wide_bus) + bool dump, u32 addr, u32 len, bool wide_bus, + enum init_split_types split_type, u8 split_id) { u32 offset = 0; @@ -2526,7 +2608,8 @@ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, len, wide_bus); + dump, addr, len, wide_bus, + split_type, split_id); return offset; } @@ -2559,7 +2642,8 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, curr_len, false); + dump, addr, curr_len, false, + SPLIT_TYPE_NONE, 0); reg_offset += curr_len; addr += curr_len; @@ -2581,6 +2665,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, struct dbg_array input_regs_arr, u32 *dump_buf, bool dump, + enum init_split_types split_type, + u8 split_id, bool block_enable[MAX_BLOCK_ID], u32 *num_dumped_reg_entries) { @@ -2628,7 +2714,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, dump, addr, len, - wide_bus); + wide_bus, + split_type, split_id); (*num_dumped_reg_entries)++; } } @@ -2643,19 +2730,28 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, bool block_enable[MAX_BLOCK_ID], - const char *split_type_name, - u32 split_id, + enum init_split_types split_type, + u8 split_id, const char *param_name, const char *param_val) { + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + enum init_split_types hdr_split_type = split_type; u32 num_dumped_reg_entries, offset; + u8 hdr_split_id = split_id; + + /* In PORT_PF split type, print a port split header */ + if (split_type == SPLIT_TYPE_PORT_PF) { + hdr_split_type = SPLIT_TYPE_PORT; + hdr_split_id = split_id / dev_data->num_pfs_per_port; + } /* Calculate register dump header size (and skip it for now) */ offset = qed_grc_dump_regs_hdr(dump_buf, false, 0, - split_type_name, - split_id, param_name, param_val); + hdr_split_type, + hdr_split_id, param_name, param_val); /* Dump registers */ offset += qed_grc_dump_regs_entries(p_hwfn, @@ -2663,6 +2759,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, input_regs_arr, dump_buf + offset, dump, + split_type, + split_id, block_enable, &num_dumped_reg_entries); @@ -2671,8 +2769,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, qed_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, - split_type_name, - split_id, param_name, param_val); + hdr_split_type, + hdr_split_id, param_name, param_val); return num_dumped_reg_entries > 0 ? 
offset : 0; } @@ -2688,26 +2786,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, const char *param_name, const char *param_val) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - struct chip_platform_defs *chip_platform; u32 offset = 0, input_offset = 0; - struct chip_defs *chip; - u8 port_id, pf_id, vf_id; u16 fid; - - chip = &s_chip_defs[dev_data->chip_id]; - chip_platform = &chip->per_platform[dev_data->platform_id]; - while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) { const struct dbg_dump_split_hdr *split_hdr; struct dbg_array curr_input_regs_arr; + enum init_split_types split_type; + u16 split_count = 0; u32 split_data_size; - u8 split_type_id; + u8 split_id; split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++]; - split_type_id = + split_type = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); split_data_size = @@ -2717,99 +2810,44 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset]; curr_input_regs_arr.size_in_dwords = split_data_size; - switch (split_type_id) { + switch (split_type) { case SPLIT_TYPE_NONE: - offset += qed_grc_dump_split_data(p_hwfn, - p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, - block_enable, - "eng", - (u32)(-1), - param_name, - param_val); + split_count = 1; break; - case SPLIT_TYPE_PORT: - for (port_id = 0; port_id < chip_platform->num_ports; - port_id++) { - if (dump) - qed_port_pretend(p_hwfn, p_ptt, - port_id); - offset += - qed_grc_dump_split_data(p_hwfn, p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, block_enable, - "port", port_id, - param_name, - param_val); - } + split_count = dev_data->num_ports; break; - case SPLIT_TYPE_PF: case SPLIT_TYPE_PORT_PF: - for (pf_id = 0; pf_id < chip_platform->num_pfs; - pf_id++) { - u8 pfid_shift = - PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; - - if (dump) { - fid = pf_id << pfid_shift; - qed_fid_pretend(p_hwfn, p_ptt, fid); - } - - offset += - qed_grc_dump_split_data(p_hwfn, - p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, - block_enable, - "pf", - pf_id, - param_name, - param_val); - } + split_count = dev_data->num_ports * + dev_data->num_pfs_per_port; break; - case SPLIT_TYPE_VF: - for (vf_id = 0; vf_id < chip_platform->num_vfs; - vf_id++) { - u8 vfvalid_shift = - PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT; - u8 vfid_shift = - PXP_PRETEND_CONCRETE_FID_VFID_SHIFT; - - if (dump) { - fid = BIT(vfvalid_shift) | - (vf_id << vfid_shift); - qed_fid_pretend(p_hwfn, p_ptt, fid); - } - - offset += - qed_grc_dump_split_data(p_hwfn, p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, block_enable, - "vf", vf_id, - param_name, - param_val); - } + split_count = dev_data->num_vfs; break; - default: - break; + return 0; } + for (split_id = 0; split_id < split_count; split_id++) + offset += qed_grc_dump_split_data(p_hwfn, p_ptt, + curr_input_regs_arr, + dump_buf + offset, + dump, block_enable, + split_type, + split_id, + param_name, + param_val); + input_offset += split_data_size; } - /* Pretend to original PF */ + /* Cancel pretends (pretend to original PF) */ if (dump) { fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; qed_fid_pretend(p_hwfn, p_ptt, fid); + dev_data->pretend.split_type = SPLIT_TYPE_NONE; + dev_data->pretend.split_id = 0; } return offset; @@ -2825,7 +2863,8 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, /* Calculate header size */ offset += qed_grc_dump_regs_hdr(dump_buf, - false, 0, 
"eng", -1, NULL, NULL); + false, 0, + SPLIT_TYPE_NONE, 0, NULL, NULL); /* Write reset registers */ for (i = 0; i < MAX_DBG_RESET_REGS; i++) { @@ -2838,14 +2877,15 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, dump, BYTES_TO_DWORDS (s_reset_regs_defs[i].addr), 1, - false); + false, SPLIT_TYPE_NONE, 0); num_regs++; } /* Write header */ if (dump) qed_grc_dump_regs_hdr(dump_buf, - true, num_regs, "eng", -1, NULL, NULL); + true, num_regs, SPLIT_TYPE_NONE, + 0, NULL, NULL); return offset; } @@ -2864,7 +2904,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, /* Calculate header size */ offset += qed_grc_dump_regs_hdr(dump_buf, - false, 0, "eng", -1, NULL, NULL); + false, 0, SPLIT_TYPE_NONE, + 0, NULL, NULL); /* Write parity registers */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { @@ -2899,7 +2940,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1, false); + 1, false, + SPLIT_TYPE_NONE, 0); addr = GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS); offset += qed_grc_dump_reg_entry(p_hwfn, @@ -2907,7 +2949,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1, false); + 1, false, + SPLIT_TYPE_NONE, 0); num_reg_entries += 2; } } @@ -2929,7 +2972,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump, addr, 1, - false); + false, SPLIT_TYPE_NONE, 0); num_reg_entries++; } @@ -2937,7 +2980,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, if (dump) qed_grc_dump_regs_hdr(dump_buf, true, - num_reg_entries, "eng", -1, NULL, NULL); + num_reg_entries, SPLIT_TYPE_NONE, + 0, NULL, NULL); return offset; } @@ -2950,7 +2994,8 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn, u32 offset = 0, addr; offset += qed_grc_dump_regs_hdr(dump_buf, - dump, 2, "eng", -1, NULL, NULL); + dump, 2, SPLIT_TYPE_NONE, 0, + NULL, NULL); /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be * skipped). 
@@ -3096,7 +3141,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, len, wide_bus); + dump, addr, len, wide_bus, + SPLIT_TYPE_NONE, 0); return offset; } @@ -3235,12 +3281,12 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) { const struct dbg_dump_split_hdr *split_hdr; struct dbg_array curr_input_mems_arr; + enum init_split_types split_type; u32 split_data_size; - u8 split_type_id; split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++]; - split_type_id = + split_type = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); split_data_size = @@ -3250,20 +3296,15 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset]; curr_input_mems_arr.size_in_dwords = split_data_size; - switch (split_type_id) { - case SPLIT_TYPE_NONE: + if (split_type == SPLIT_TYPE_NONE) offset += qed_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump); - break; - - default: + else DP_NOTICE(p_hwfn, "Dumping split memories is currently not supported\n"); - break; - } input_offset += split_data_size; } @@ -3623,7 +3664,8 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, dump, addr, num_dwords_to_read, - false); + false, + SPLIT_TYPE_NONE, 0); total_dwords -= num_dwords_to_read; rss_addr++; } @@ -3682,7 +3724,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, dump, addr, len, - false); + false, SPLIT_TYPE_NONE, 0); } return offset; @@ -3731,7 +3773,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, /* Dump required non-MCP registers */ offset += qed_grc_dump_regs_hdr(dump_buf + offset, - dump, 1, "eng", -1, "block", "MCP"); + dump, 1, SPLIT_TYPE_NONE, 0, + "block", "MCP"); addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, @@ -3739,7 +3782,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump, addr, 1, - false); + false, SPLIT_TYPE_NONE, 0); /* Release MCP */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) @@ -3923,7 +3966,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, dump, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); } /* Disable block's client and debug output */ @@ -3949,28 +3993,15 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; bool parities_masked = false; - u8 i, port_mode = 0; u32 offset = 0; + u8 i; *num_dumped_dwords = 0; + dev_data->num_regs_read = 0; - if (dump) { - /* Find port mode */ - switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { - case 0: - port_mode = 1; - break; - case 1: - port_mode = 2; - break; - case 2: - port_mode = 4; - break; - } - - /* Update reset state */ + /* Update reset state */ + if (dump) qed_update_blocks_reset_state(p_hwfn, p_ptt); - } /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, @@ -3989,7 +4020,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS)); offset += qed_dump_num_param(dump_buf + offset, - dump, "num-ports", port_mode); + dump, "num-ports", dev_data->num_ports); /* Dump reset registers (dumped before taking blocks out of reset ) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) @@ -4093,10 +4124,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_phy(p_hwfn, p_ptt, 
dump_buf + offset, dump); - /* Dump static debug data */ + /* Dump static debug data (only if not during debug bus recording) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && - dev_data->bus.state == DBG_BUS_STATE_IDLE) + (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE)) offset += qed_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump); @@ -4250,7 +4281,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - reg->size, wide_bus); + reg->size, wide_bus, + SPLIT_TYPE_NONE, 0); } } @@ -4373,7 +4405,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, next_reg_offset, dump, addr, reg->entry_size, - wide_bus); + wide_bus, + SPLIT_TYPE_NONE, 0); } /* Call rule condition function. @@ -4723,7 +4756,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), - trace_data_size_dwords, false); + trace_data_size_dwords, false, + SPLIT_TYPE_NONE, 0); /* Resume MCP (only if halt succeeded) */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) @@ -4829,7 +4863,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, true, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); fifo_has_data = qed_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0; } @@ -4898,7 +4933,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, true, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); fifo_has_data = qed_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; } @@ -4956,7 +4992,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, true, addr, override_window_dwords, - true); + true, SPLIT_TYPE_NONE, 0); qed_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords); out: @@ -4998,7 +5034,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, continue; /* Read FW info for the current Storm */ - qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); + qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); asserts = &fw_info.fw_asserts_section; @@ -5036,7 +5072,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, asserts->list_element_dword_size, - false); + false, SPLIT_TYPE_NONE, 0); } /* Dump last section */ @@ -5063,6 +5099,28 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) return DBG_STATUS_OK; } +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 storm_id; + + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + struct storm_defs *storm = &s_storm_defs[storm_id]; + + /* Skip Storm if it's in reset */ + if (dev_data->block_in_reset[storm->block_id]) + continue; + + /* Read FW info for the current Storm */ + qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info); + + return true; + } + + return false; +} + /* Assign default GRC param values */ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index fde20fd9942c..b285edc8d6a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -2792,7 +2792,7 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, { u32 port_mode; - port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); + port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); if (port_mode < 3) { 
p_hwfn->cdev->num_ports_in_engine = 1; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b9704be53537..bee10c1781fb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1095,14 +1095,16 @@ enum personality_type { struct pf_start_tunnel_config { u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; + u8 set_no_inner_l2_vxlan_udp_port_flg; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; u8 tunnel_clss_l2gre; u8 tunnel_clss_ipgre; - u8 reserved; __le16 vxlan_udp_port; __le16 geneve_udp_port; + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved[3]; }; /* Ramrod data for PF start ramrod */ @@ -1145,14 +1147,17 @@ struct pf_update_tunnel_config { u8 update_rx_def_non_ucast_clss; u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; + u8 set_no_inner_l2_vxlan_udp_port_flg; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; u8 tunnel_clss_l2gre; u8 tunnel_clss_ipgre; + u8 reserved; __le16 vxlan_udp_port; __le16 geneve_udp_port; - __le16 reserved; + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved1[3]; }; /* Data for port update ramrod */ @@ -2535,7 +2540,14 @@ struct idle_chk_data { u16 reserved2; }; -/* Debug Tools data (per HW function) */ +struct pretend_params { + u8 split_type; + u8 reserved; + u16 split_id; +}; + +/* Debug Tools data (per HW function) + */ struct dbg_tools_data { struct dbg_grc_data grc; struct dbg_bus_data bus; @@ -2544,8 +2556,13 @@ struct dbg_tools_data { u8 block_in_reset[88]; u8 chip_id; u8 platform_id; + u8 num_ports; + u8 num_pfs_per_port; + u8 num_vfs; u8 initialized; u8 use_dmae; + u8 reserved; + struct pretend_params pretend; u32 num_regs_read; }; @@ -2974,6 +2991,24 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr); void qed_read_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); +/** + * @brief qed_read_fw_info - Reads FW info from the chip. + * + * The FW info contains FW-related information, such as the FW version, + * FW image (main/L2B/kuku), FW timestamp, etc. + * The FW info is read from the internal RAM of the first Storm that is not in + * reset. + * + * @param p_hwfn - HW device data + * @param p_ptt - Ptt window used for writing the registers. + * @param fw_info - Out: a pointer to write the FW info into. + * + * @return true if the FW info was read successfully from one of the Storms, + * or false if all Storms are in reset. + */ +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info); + /** * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their * default value. @@ -4110,6 +4145,21 @@ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); */ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); +#define NUM_STORMS 6 + +/** + * @brief qed_set_rdma_error_level - Sets the RDMA assert level. + * If the severity of the error will be + * above the level, the FW will assert. + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers + * @param assert_level - An array of assert levels for each storm. + * + */ +void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 assert_level[NUM_STORMS]); + /* Ystorm flow control mode. 
Use enum fw_flow_ctrl_mode */ #define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) #define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) @@ -4340,27 +4390,67 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) +/* Xstorm error level for assert */ +#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[47].base + ((pf_id) * IRO[47].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) + +/* Ystorm error level for assert */ +#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[48].base + ((pf_id) * IRO[48].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + +/* Pstorm error level for assert */ +#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[49].base + ((pf_id) * IRO[49].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + +/* Tstorm error level for assert */ +#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[50].base + ((pf_id) * IRO[50].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + +/* Mstorm error level for assert */ +#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[51].base + ((pf_id) * IRO[51].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + +/* Ustorm error level for assert */ +#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[52].base + ((pf_id) * IRO[52].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + /* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[47].base + ((pf_id) * IRO[47].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size) + (IRO[53].base + ((pf_id) * IRO[53].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) /* Tstorm RoCE Event Statistics */ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[48].base + ((roce_pf_id) * IRO[48].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size) + (IRO[54].base + ((roce_pf_id) * IRO[54].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) /* DCQCN Received Statistics */ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[49].base + ((roce_pf_id) * IRO[49].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size) + (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) + +/* RoCE Error Statistics */ +#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ + (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) /* DCQCN Sent Statistics */ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[50].base + ((roce_pf_id) * IRO[50].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size) + (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) -static const struct iro iro_arr[51] = { +/* RoCE CQEs Statistics */ +#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ + (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) + +static const struct iro iro_arr[59] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x6530, 0x20, 0x0, 0x0, 0x20}, @@ -4408,10 +4498,18 @@ static const struct iro iro_arr[51] = { {0x10768, 0x20, 0x0, 0x0, 0x20}, {0x2d48, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, + {0xc748, 0x8, 0x0, 0x0, 0x1}, + {0xa128, 0x8, 0x0, 0x0, 0x1}, + {0x10f00, 0x8, 0x0, 0x0, 0x1}, + {0xf030, 0x8, 0x0, 0x0, 0x1}, + {0x13028, 0x8, 0x0, 0x0, 0x1}, + {0x12c58, 0x8, 0x0, 0x0, 0x1}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, - {0xed90, 
0x10, 0x0, 0x0, 0x10}, - {0xa3a0, 0x10, 0x0, 0x0, 0x10}, + {0xed90, 0x28, 0x0, 0x0, 0x28}, + {0xa520, 0x18, 0x0, 0x0, 0x18}, + {0xa6a0, 0x8, 0x0, 0x0, 0x8}, {0x13108, 0x8, 0x0, 0x0, 0x8}, + {0x13c50, 0x18, 0x0, 0x0, 0x18}, }; /* Runtime array offsets */ @@ -4797,147 +4895,147 @@ static const struct iro iro_arr[51] = { #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 #define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938 -#define 
PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974 -#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977 -#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980 -#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983 -#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986 -#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989 -#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992 -#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995 -#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998 -#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000 -#define 
PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001 -#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004 -#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007 -#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010 -#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013 -#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016 -#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019 -#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022 - -#define RUNTIME_ARRAY_SIZE 43023 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 
42924 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973 +#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976 +#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979 +#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982 +#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985 +#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986 +#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988 +#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991 +#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994 +#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997 +#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000 +#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003 +#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006 +#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009 +#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012 +#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015 +#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018 +#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021 + +#define RUNTIME_ARRAY_SIZE 43022 + /* Init Callbacks */ #define DMAE_READY_CB 0 @@ -5694,8 +5792,10 @@ struct eth_vport_rx_mode { #define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 -#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF -#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6 +#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF +#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7 }; /* Command for setting tpa parameters */ @@ -6756,7 +6856,7 @@ struct e4_ystorm_rdma_task_ag_ctx { #define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; - __le32 mw_cnt; + __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; u8 ctx_upd_seq; __le16 dif_flags; @@ -6812,7 +6912,7 @@ struct e4_mstorm_rdma_task_ag_ctx { #define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; - __le32 mw_cnt; + __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; u8 ctx_upd_seq; __le16 dif_flags; @@ -7075,8 +7175,7 @@ struct rdma_register_tid_ramrod_data { struct regpair va; struct regpair pbl_base; struct regpair dif_error_addr; - struct regpair dif_runt_addr; - __le32 reserved4[2]; + __le32 reserved4[4]; }; /* rdma resize cq output params */ @@ -7144,8 +7243,7 @@ struct rdma_srq_modify_ramrod_data { enum rdma_tid_type { RDMA_TID_REGISTERED_MR, RDMA_TID_FMR, - RDMA_TID_MW_TYPE1, - RDMA_TID_MW_TYPE2A, + 
RDMA_TID_MW, MAX_RDMA_TID_TYPE }; @@ -7681,6 +7779,16 @@ struct e4_roce_conn_context { struct ustorm_roce_conn_st_ctx ustorm_st_context; }; +/* roce cqes statistics */ +struct roce_cqe_stats { + __le32 req_cqe_error; + __le32 req_remote_access_errors; + __le32 req_remote_invalid_request; + __le32 resp_cqe_error; + __le32 resp_local_length_error; + __le32 reserved; +}; + /* roce create qp requester ramrod data */ struct roce_create_qp_req_ramrod_data { __le16 flags; @@ -7798,8 +7906,8 @@ struct roce_dcqcn_sent_stats { /* RoCE destroy qp requester output params */ struct roce_destroy_qp_req_output_params { - __le32 num_bound_mw; __le32 cq_prod; + __le32 reserved; }; /* RoCE destroy qp requester ramrod data */ @@ -7809,8 +7917,8 @@ struct roce_destroy_qp_req_ramrod_data { /* RoCE destroy qp responder output params */ struct roce_destroy_qp_resp_output_params { - __le32 num_invalidated_mw; __le32 cq_prod; + __le32 reserved; }; /* RoCE destroy qp responder ramrod data */ @@ -7818,16 +7926,27 @@ struct roce_destroy_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* roce error statistics */ +struct roce_error_stats { + __le32 resp_remote_access_errors; + __le32 reserved; +}; + /* roce special events statistics */ struct roce_events_stats { - __le16 silent_drops; - __le16 rnr_naks_sent; + __le32 silent_drops; + __le32 rnr_naks_sent; __le32 retransmit_count; __le32 icrc_error_count; - __le32 reserved; + __le32 implied_nak_seq_err; + __le32 duplicate_request; + __le32 local_ack_timeout_err; + __le32 out_of_sequence; + __le32 packet_seq_err; + __le32 rnr_nak_retry_err; }; -/* ROCE slow path EQ cmd IDs */ +/* roce slow path EQ cmd IDs */ enum roce_event_opcode { ROCE_EVENT_CREATE_QP = 11, ROCE_EVENT_MODIFY_QP, @@ -7845,6 +7964,9 @@ struct roce_init_func_params { u8 cnp_dscp; u8 reserved; __le32 cnp_send_timeout; + __le16 rl_offset; + u8 rl_count_log; + u8 reserved1[5]; }; /* roce func init ramrod data */ @@ -8532,7 +8654,7 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { __le16 rq_prod; __le16 conn_dpi; __le16 irq_cons; - __le32 num_invlidated_mw; + __le32 reg9; __le32 reg10; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index fca2dbd93ad9..70504dcf4087 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -360,6 +360,26 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) *(u32 *)&p_ptt->pxp.pretend); } +void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id, u16 fid) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); + if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) + fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); + p_ptt->pxp.pretend.control = cpu_to_le16(control); + p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid); + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid) { u32 concrete_fid = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 8db2839a8ec8..505e94db939d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -244,6 +244,18 @@ void 
qed_port_pretend(struct qed_hwfn *p_hwfn, void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief qed_port_fid_pretend - pretend to another port and another function + * when accessing the ptt window + * + * @param p_hwfn + * @param p_ptt + * @param port_id - the port to pretend to + * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf. + */ +void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id, u16 fid); + /** * @brief qed_vfid_to_concrete - build a concrete FID for a * given VF ID diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 1365da7c8900..d845badf9b90 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1245,7 +1245,7 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, bool udp, bool ipv4, bool ipv6, enum gft_profile_type profile_type) { - u32 reg_val, cam_line, ram_line_lo, ram_line_hi; + u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft; if (!ipv6 && !ipv4) DP_NOTICE(p_hwfn, @@ -1314,6 +1314,9 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, ram_line_lo = 0; ram_line_hi = 0; + /* Search no IP as GFT */ + search_non_ip_as_gft = 0; + /* Tunnel type */ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); @@ -1337,8 +1340,13 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); + + /* Allow tunneled traffic without inner IP */ + search_non_ip_as_gft = 1; } + qed_wr(p_hwfn, + p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, @@ -1509,3 +1517,43 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } + +static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) +{ + switch (storm_id) { + case 0: + return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 1: + return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 2: + return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 3: + return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 4: + return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 5: + return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + + default: + return 0; + } +} + +void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 assert_level[NUM_STORMS]) +{ + u8 storm_id; + + for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { + u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id); + + qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]); + } +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 474e6cff5b97..90a2b53096e2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1159,7 
+1159,6 @@ int qed_iwarp_connect(void *rdma_cxt, struct qed_iwarp_info *iwarp_info; struct qed_iwarp_ep *ep; u8 mpa_data_size = 0; - u8 ts_hdr_size = 0; u32 cid; int rc; @@ -1218,10 +1217,7 @@ int qed_iwarp_connect(void *rdma_cxt, iparams->cm_info.private_data, iparams->cm_info.private_data_len); - if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) - ts_hdr_size = TIMESTAMP_HEADER_SIZE; - - ep->mss = iparams->mss - ts_hdr_size; + ep->mss = iparams->mss; ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); ep->event_cb = iparams->event_cb; @@ -2337,7 +2333,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) u8 local_mac_addr[ETH_ALEN]; struct qed_iwarp_ep *ep; int tcp_start_offset; - u8 ts_hdr_size = 0; u8 ll2_syn_handle; int payload_len; u32 hdr_size; @@ -2415,11 +2410,7 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); - if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) - ts_hdr_size = TIMESTAMP_HEADER_SIZE; - - hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) + - ts_hdr_size; + hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60); ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index eed4725761f5..1f6ac848109d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -586,6 +586,9 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, !!(accept_filter & QED_ACCEPT_BCAST)); + SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, + !!(accept_filter & QED_ACCEPT_ANY_VNI)); + p_ramrod->rx_mode.state = cpu_to_le16(state); DP_VERBOSE(p_hwfn, QED_MSG_SP, "p_ramrod->rx_mode.state = 0x%x\n", state); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index c4030e949cce..806a8da257e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -183,6 +183,7 @@ struct qed_filter_accept_flags { #define QED_ACCEPT_MCAST_MATCHED 0x08 #define QED_ACCEPT_MCAST_UNMATCHED 0x10 #define QED_ACCEPT_BCAST 0x20 +#define QED_ACCEPT_ANY_VNI 0x40 }; struct qed_arfs_config_params { diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index b8705107a93a..101d677114f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1508,11 +1508,8 @@ qed_rdma_register_tid(void *rdma_cxt, case QED_RDMA_TID_FMR: tid_type = RDMA_TID_FMR; break; - case QED_RDMA_TID_MW_TYPE1: - tid_type = RDMA_TID_MW_TYPE1; - break; - case QED_RDMA_TID_MW_TYPE2A: - tid_type = RDMA_TID_MW_TYPE2A; + case QED_RDMA_TID_MW: + tid_type = RDMA_TID_MW; break; default: rc = -EINVAL; @@ -1544,7 +1541,6 @@ qed_rdma_register_tid(void *rdma_cxt, RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); DMA_REGPAIR_LE(p_ramrod->dif_error_addr, params->dif_error_addr); - DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr); } rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index f7122059b6b5..d8ad2dcad8d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -178,7 +178,7 @@ 0x008c80UL #define MCP_REG_SCRATCH \ 
0xe20000UL -#define CNIG_REG_NW_PORT_MODE_BB_B0 \ +#define CNIG_REG_NW_PORT_MODE_BB \ 0x218200UL #define MISCS_REG_CHIP_NUM \ 0x00976cUL @@ -1621,6 +1621,7 @@ #define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1 #define PRS_REG_SEARCH_GFT 0x1f11bcUL +#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL #define PRS_REG_CM_HDR_GFT 0x1f11c8UL #define PRS_REG_GFT_CAM 0x1f1100UL #define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index ee57fcdc698d..b5ce1581645f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -681,7 +681,6 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, - u32 *num_invalidated_mw, u32 *cq_prod) { struct roce_destroy_qp_resp_output_params *p_ramrod_res; @@ -692,8 +691,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); - - *num_invalidated_mw = 0; *cq_prod = qp->cq_prod; if (!qp->resp_offloaded) { @@ -742,7 +739,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, if (rc) goto err; - *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw); *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod); qp->cq_prod = *cq_prod; @@ -764,8 +760,7 @@ err: } static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, - struct qed_rdma_qp *qp, - u32 *num_bound_mw) + struct qed_rdma_qp *qp) { struct roce_destroy_qp_req_output_params *p_ramrod_res; struct roce_destroy_qp_req_ramrod_data *p_ramrod; @@ -807,7 +802,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, if (rc) goto err; - *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw); /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ dma_free_coherent(&p_hwfn->cdev->pdev->dev, @@ -968,8 +962,6 @@ err_resp: int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { - u32 num_invalidated_mw = 0; - u32 num_bound_mw = 0; u32 cq_prod; int rc; @@ -984,22 +976,14 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, - &num_invalidated_mw, &cq_prod); if (rc) return rc; /* Send destroy requester ramrod */ - rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, - &num_bound_mw); + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); if (rc) return rc; - - if (num_invalidated_mw != num_bound_mw) { - DP_NOTICE(p_hwfn, - "number of invalidate memory windows is different from bounded ones\n"); - return -EINVAL; - } } return 0; @@ -1010,7 +994,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, enum qed_roce_qp_state prev_state, struct qed_rdma_modify_qp_in_params *params) { - u32 num_invalidated_mw = 0, num_bound_mw = 0; int rc = 0; /* Perform additional operations according to the current state and the @@ -1090,7 +1073,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, /* Send destroy responder ramrod */ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, - &num_invalidated_mw, &cq_prod); if (rc) @@ -1098,14 +1080,7 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, qp->cq_prod = cq_prod; - rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, - &num_bound_mw); - - if (num_invalidated_mw != num_bound_mw) { - DP_NOTICE(p_hwfn, - "number of invalidate memory windows is different from bounded ones\n"); - return 
-EINVAL; - } + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); } else { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); } diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index ff21aa1fd939..2f0a4f2c5ff8 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -471,14 +471,8 @@ static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en) else hdrs += IPV4_HDR_LEN; - if (vlan_en) - hdrs += VLAN_LEN; - mss = pmtu - hdrs; - if (tcp_ts_en) - mss -= TCP_OPTION_LEN; - if (!mss) mss = DEF_MSS; diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 13c8ab171437..0081fa6d1268 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -109,8 +109,8 @@ #define MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 33 -#define FW_REVISION_VERSION 11 +#define FW_MINOR_VERSION 37 +#define FW_REVISION_VERSION 2 #define FW_ENGINEERING_VERSION 0 /***********************/ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 938df614cb6a..b34c573f2b30 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -799,8 +799,8 @@ struct e4_mstorm_iscsi_task_ag_ctx { #define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 @@ -849,8 +849,8 @@ struct e4_ustorm_iscsi_task_ag_ctx { #define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 u8 flags1; diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index e05e320d28b7..df4d13f7e191 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -65,8 +65,7 @@ enum qed_roce_qp_state { enum qed_rdma_tid_type { QED_RDMA_TID_REGISTERED_MR, QED_RDMA_TID_FMR, - QED_RDMA_TID_MW_TYPE1, - QED_RDMA_TID_MW_TYPE2A + QED_RDMA_TID_MW }; struct qed_rdma_events { @@ -280,7 +279,6 @@ struct qed_rdma_register_tid_in_params { bool dif_enabled; u64 dif_error_addr; - u64 dif_runt_addr; }; struct qed_rdma_create_cq_in_params { diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 193bcef302e1..473fba76aa77 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -43,6 +43,7 @@ #define ROCE_MAX_QPS (32 * 1024) #define ROCE_DCQCN_NP_MAX_QPS (64) #define ROCE_DCQCN_RP_MAX_QPS (64) +#define ROCE_LKEY_MW_DIF_EN_BIT (28) /* Affiliated asynchronous events / errors enumeration */ enum roce_async_events_type { -- cgit v1.2.3
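
A note on the qed_debug.c churn above: qed_grc_dump_regs_hdr() and
qed_grc_dump_addr_range() stop taking a free-form name/id pair
("eng", -1) and instead take an enum init_split_types plus a split id,
so the split name can be derived from the type in one place. The
standalone sketch below models that mapping; the enum values and name
strings follow the qed convention but are reconstructed here for
illustration, not copied from the tree.

#include <stdio.h>

enum init_split_types {
	SPLIT_TYPE_NONE,
	SPLIT_TYPE_PORT,
	SPLIT_TYPE_PF,
	SPLIT_TYPE_PORT_PF,
	SPLIT_TYPE_VF,
	MAX_INIT_SPLIT_TYPES
};

/* Derive the dump-section name from the split type, replacing the
 * string/id pair the callers used to pass explicitly.
 */
static const char *split_type_name(enum init_split_types t)
{
	switch (t) {
	case SPLIT_TYPE_NONE:    return "eng";
	case SPLIT_TYPE_PORT:    return "port";
	case SPLIT_TYPE_PF:      return "pf";
	case SPLIT_TYPE_PORT_PF: return "port-pf";
	case SPLIT_TYPE_VF:      return "vf";
	default:                 return "unknown";
	}
}

int main(void)
{
	/* SPLIT_TYPE_NONE reproduces the old ("eng", -1) behavior */
	printf("SPLIT_TYPE_NONE dumps as \"%s\"\n",
	       split_type_name(SPLIT_TYPE_NONE));
	return 0;
}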
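
The new qed_read_fw_info() wrapper picks the first Storm whose block is
out of reset and reads the FW info from that Storm's internal RAM,
reporting failure only when every Storm is in reset. A standalone model
of that selection loop (the real qed_read_storm_fw_info() call is
replaced by recording which Storm was chosen):

#include <stdbool.h>
#include <stdio.h>

#define MAX_DBG_STORMS 6

static bool read_fw_info_sketch(const bool block_in_reset[MAX_DBG_STORMS],
				int *chosen_storm)
{
	for (int storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		/* Skip Storm if it's in reset */
		if (block_in_reset[storm_id])
			continue;

		/* real code: qed_read_storm_fw_info(...) goes here */
		*chosen_storm = storm_id;
		return true;
	}

	/* All Storms in reset: no FW info available */
	return false;
}

int main(void)
{
	const bool in_reset[MAX_DBG_STORMS] =
		{ true, false, true, true, true, true };
	int storm;

	if (read_fw_info_sketch(in_reset, &storm))
		printf("FW info read from storm %d\n", storm);
	else
		printf("all storms in reset\n");
	return 0;
}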
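
The renumbered IRO entries (47..52) carry the per-Storm RDMA assert
levels; each *_RDMA_ASSERT_LEVEL_OFFSET(pf_id) macro resolves to
base + pf_id * m1 using the corresponding iro_arr row. A standalone
sketch, assuming the five initializer fields are {base, m1, m2, m3,
size} as laid out in struct iro:

#include <stdint.h>
#include <stdio.h>

struct iro {
	uint32_t base;
	uint16_t m1, m2, m3;
	uint16_t size;
};

/* IRO[47] from the updated iro_arr: Xstorm RDMA assert level */
static const struct iro xstorm_assert_iro = { 0xc748, 0x8, 0x0, 0x0, 0x1 };

static uint32_t xstorm_rdma_assert_level_offset(uint8_t pf_id)
{
	/* base + pf_id * m1, exactly as the macro expands */
	return xstorm_assert_iro.base + pf_id * xstorm_assert_iro.m1;
}

int main(void)
{
	for (uint8_t pf_id = 0; pf_id < 4; pf_id++)
		printf("pf %u -> RAM offset 0x%x\n", (unsigned)pf_id,
		       xstorm_rdma_assert_level_offset(pf_id));
	return 0;
}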
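
ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI claims bit 6 of rx_mode.state, which
is why RESERVED1 shrinks from a 10-bit field at shift 6 to a 9-bit
field at shift 7. A minimal standalone model of the MASK/SHIFT
convention; the macro below is a simplified stand-in for the kernel's
SET_FIELD, not a copy of it:

#include <stdint.h>
#include <stdio.h>

#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK  0x1
#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6

/* Clear the field, then OR in the masked value at its shift */
#define SET_FIELD_SKETCH(var, name, val) \
	((var) = ((var) & ~((uint16_t)name##_MASK << name##_SHIFT)) | \
		 (((uint16_t)(val) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint16_t state = 0;

	SET_FIELD_SKETCH(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, 1);
	printf("rx_mode.state = 0x%04x\n", state); /* prints 0x0040 */
	return 0;
}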
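
With the timestamp and VLAN adjustments dropped from qedi_calc_mss()
and the iWARP MSS paths, the MSS is now simply PMTU minus the plain
TCP/IP header lengths; the removed driver-side corrections are
evidently absorbed by the 8.37.2.0 firmware itself. A standalone model
of the post-series calculation, with the header-length constants
declared locally at their conventional wire sizes for illustration (the
driver takes them from its own headers):

#include <stdint.h>
#include <stdio.h>

#define TCP_HDR_LEN  20
#define IPV4_HDR_LEN 20
#define IPV6_HDR_LEN 40
#define DEF_MSS      1460

static uint16_t calc_mss_sketch(uint16_t pmtu, int is_ipv6)
{
	uint16_t hdrs = TCP_HDR_LEN +
			(is_ipv6 ? IPV6_HDR_LEN : IPV4_HDR_LEN);
	uint16_t mss = pmtu - hdrs;

	/* Fall back to a default MSS, as the driver does */
	return mss ? mss : DEF_MSS;
}

int main(void)
{
	printf("ipv4 mtu 1500 -> mss %u\n",
	       (unsigned)calc_mss_sketch(1500, 0));
	printf("ipv6 mtu 1500 -> mss %u\n",
	       (unsigned)calc_mss_sketch(1500, 1));
	return 0;
}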