Diffstat (limited to 'drivers/net/ethernet/hisilicon')
19 files changed, 422 insertions, 211 deletions
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index a6c18b6527f9..93846bace028 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -852,7 +852,8 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
 	ndev->priv_flags |= IFF_UNICAST_FLT;
 	ndev->netdev_ops = &hisi_femac_netdev_ops;
 	ndev->ethtool_ops = &hisi_femac_ethtools_ops;
-	netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
+	netif_napi_add_weight(ndev, &priv->napi, hisi_femac_poll,
+			      FEMAC_POLL_WEIGHT);
 
 	hisi_femac_port_init(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 22a463e15678..2f0bd21a9082 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1782,7 +1782,7 @@ static int hns_nic_set_features(struct net_device *netdev,
 			priv->ops.fill_desc = fill_tso_desc;
 			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
 			/* The chip only support 7*4096 */
-			netif_set_gso_max_size(netdev, 7 * 4096);
+			netif_set_tso_max_size(netdev, 7 * 4096);
 		} else {
 			priv->ops.fill_desc = fill_v2_desc;
 			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
@@ -2168,7 +2168,7 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
 			priv->ops.fill_desc = fill_tso_desc;
 			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
 			/* This chip only support 7*4096 */
-			netif_set_gso_max_size(netdev, 7 * 4096);
+			netif_set_tso_max_size(netdev, 7 * 4096);
 		} else {
 			priv->ops.fill_desc = fill_v2_desc;
 			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
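The two hunks above track core-API renames rather than behavioral changes: netif_napi_add() no longer takes a weight argument, so drivers that want a non-default weight call netif_napi_add_weight(), and the driver-facing TSO payload ceiling moved from the GSO setter to netif_set_tso_max_size(). A minimal kernel-style sketch of the new calls; my_poll, my_netdev_setup and the weight value are placeholders, not code from this driver:

    #include <linux/netdevice.h>

    /* Sketch only: my_poll is a stand-in for the driver's NAPI handler. */
    static int my_poll(struct napi_struct *napi, int budget);

    static void my_netdev_setup(struct net_device *ndev, struct napi_struct *napi)
    {
    	/* a non-default NAPI weight must now be passed explicitly */
    	netif_napi_add_weight(ndev, napi, my_poll, 16);

    	/* cap TSO payload at what the DMA engine can chain (cf. 7 * 4096) */
    	netif_set_tso_max_size(ndev, 7 * 4096);
    }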
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index b668df6193be..7d4ae467f3ad 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_PUSH_PROMISC_INFO,	/* (PF -> VF) push vf promisc info */
 	HCLGE_MBX_VF_UNINIT,            /* (VF -> PF) vf is unintializing */
 	HCLGE_MBX_HANDLE_VF_TBL,	/* (VF -> PF) store/clear hw table */
+	HCLGE_MBX_GET_RING_VECTOR_MAP,	/* (VF -> PF) get ring-to-vector map */
 
 	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
 	HCLGE_MBX_PUSH_LINK_STATUS,	/* (M7 -> PF) get port link status */
@@ -92,8 +93,8 @@ struct hclge_ring_chain_param {
 struct hclge_basic_info {
 	u8 hw_tc_map;
 	u8 rsv;
-	u16 mbx_api_version;
-	u32 pf_caps;
+	__le16 mbx_api_version;
+	__le32 pf_caps;
 };
 
 struct hclgevf_mbx_resp_status {
@@ -134,11 +135,20 @@ struct hclge_vf_to_pf_msg {
 };
 
 struct hclge_pf_to_vf_msg {
-	u16 code;
-	u16 vf_mbx_msg_code;
-	u16 vf_mbx_msg_subcode;
-	u16 resp_status;
-	u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+	__le16 code;
+	union {
+		/* used for mbx response */
+		struct {
+			__le16 vf_mbx_msg_code;
+			__le16 vf_mbx_msg_subcode;
+			__le16 resp_status;
+			u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+		};
+		/* used for general mbx */
+		struct {
+			u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE];
+		};
+	};
 };
 
 struct hclge_mbx_vf_to_pf_cmd {
@@ -148,7 +158,7 @@ struct hclge_mbx_vf_to_pf_cmd {
 	u8 rsv1[1];
 	u8 msg_len;
 	u8 rsv2;
-	u16 match_id;
+	__le16 match_id;
 	struct hclge_vf_to_pf_msg msg;
 };
 
@@ -159,7 +169,7 @@ struct hclge_mbx_pf_to_vf_cmd {
 	u8 rsv[3];
 	u8 msg_len;
 	u8 rsv1;
-	u16 match_id;
+	__le16 match_id;
 	struct hclge_pf_to_vf_msg msg;
 };
 
@@ -169,6 +179,49 @@ struct hclge_vf_rst_cmd {
 	u8 rsv[22];
 };
 
+#pragma pack(1)
+struct hclge_mbx_link_status {
+	__le16 link_status;
+	__le32 speed;
+	__le16 duplex;
+	u8 flag;
+};
+
+struct hclge_mbx_link_mode {
+	__le16 idx;
+	__le64 link_mode;
+};
+
+struct hclge_mbx_port_base_vlan {
+	__le16 state;
+	__le16 vlan_proto;
+	__le16 qos;
+	__le16 vlan_tag;
+};
+
+struct hclge_mbx_vf_queue_info {
+	__le16 num_tqps;
+	__le16 rss_size;
+	__le16 rx_buf_len;
+};
+
+struct hclge_mbx_vf_queue_depth {
+	__le16 num_tx_desc;
+	__le16 num_rx_desc;
+};
+
+struct hclge_mbx_vlan_filter {
+	u8 is_kill;
+	__le16 vlan_id;
+	__le16 proto;
+};
+
+struct hclge_mbx_mtu_info {
+	__le32 mtu;
+};
+
+#pragma pack()
+
 /* used by VF to store the received Async responses from PF */
 struct hclgevf_mbx_arq_ring {
 #define HCLGE_MBX_MAX_ARQ_MSG_SIZE	8
@@ -177,7 +230,7 @@ struct hclgevf_mbx_arq_ring {
 	u32 head;
 	u32 tail;
 	atomic_t count;
-	u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+	__le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
 };
 
 #define hclge_mbx_ring_ptr_move_crq(crq) \
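The hclge_mbx.h rework is the core of this series: every field that crosses the PF/VF mailbox is now declared with a fixed-endian type (__le16/__le32/__le64), and the on-wire message payloads get packed struct definitions, so sparse can flag any access that forgets a cpu_to_le*()/le*_to_cpu() conversion. A runnable userspace reduction of the idiom, using glibc's <endian.h> in place of the kernel helpers; the struct name and values are illustrative, only the shape mirrors the mailbox struct:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Userspace stand-ins for the kernel's __le16: integers documented to
     * hold little-endian byte order regardless of host CPU. In the kernel,
     * sparse enforces that such fields only meet cpu_to_le16()/le16_to_cpu().
     */
    #pragma pack(1)
    struct demo_port_base_vlan {	/* mirrors hclge_mbx_port_base_vlan */
    	uint16_t state;
    	uint16_t vlan_proto;
    	uint16_t qos;
    	uint16_t vlan_tag;
    };
    #pragma pack()

    int main(void)
    {
    	struct demo_port_base_vlan msg;
    	uint8_t wire[sizeof(msg)];	/* 8 bytes, no padding */

    	/* sender: convert once per field, then ship the struct as bytes */
    	msg.state = htole16(1);
    	msg.vlan_proto = htole16(0x8100);
    	msg.qos = htole16(0);
    	msg.vlan_tag = htole16(100);
    	memcpy(wire, &msg, sizeof(msg));

    	/* receiver: overlay and convert back; same result on LE and BE */
    	struct demo_port_base_vlan in;
    	memcpy(&in, wire, sizeof(in));
    	printf("proto=0x%x tag=%u\n", le16toh(in.vlan_proto),
    	       le16toh(in.vlan_tag));
    	return 0;
    }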
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 79c64f4e67d2..8a3a446219f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -96,6 +96,7 @@ enum HNAE3_DEV_CAP_BITS {
 	HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
 	HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
 	HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
+	HNAE3_DEV_SUPPORT_CQ_B,
 };
 
 #define hnae3_dev_fd_supported(hdev) \
@@ -155,6 +156,9 @@ enum HNAE3_DEV_CAP_BITS {
 #define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
 	test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
 
+#define hnae3_ae_dev_cq_supported(ae_dev) \
+	test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+
 enum HNAE3_PF_CAP_BITS {
 	HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
 };
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index c15ca710dabb..c8b151d29f53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
 	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
 	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
 };
 
 static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
 	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
 	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
 	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
 };
 
 static void
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 876650eddac4..7a7d4cf9bf35 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS {
 	HCLGE_COMM_CAP_PAUSE_B = 14,
 	HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
 	HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
+	HCLGE_COMM_CAP_CQ_B = 18,
 };
 
 enum HCLGE_COMM_API_CAP_BITS {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
index aa1d7a6ff4ca..946d166a452d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -106,7 +106,7 @@ int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
 void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
 				  u8 *hfunc);
 void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
-				  u32 *indir, __le16 rss_ind_tbl_size);
+				  u32 *indir, u16 rss_ind_tbl_size);
 int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
 				const u8 *key);
 int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a3ee7875d6a7..ae56306400b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5159,10 +5159,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
 			priv->tqp_vector[i].rx_group.dim.mode = mode;
 	}
 
-	/* only device version above V3(include V3), GL can switch CQ/EQ
-	 * period mode.
-	 */
-	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+	if (hnae3_ae_dev_cq_supported(ae_dev)) {
 		u32 new_mode;
 		u64 reg;
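The CQ plumbing above follows the driver's standard capability pattern: firmware advertises a command-queue capability bit, the hclge_comm_caps_bit_map tables translate it into the shared HNAE3 bitmap, and feature code asks test_bit() via hnae3_ae_dev_cq_supported() instead of comparing device versions, so the check keeps working if a later revision gains or drops the feature. A runnable userspace reduction of that translate-then-test flow; all names and bit values are invented:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative firmware/driver capability IDs (not the real values). */
    enum fw_caps { FW_CAP_CQ = 18 };
    enum drv_caps { DRV_CAP_CQ = 0 };

    struct caps_map { int fw_bit; int drv_bit; };

    static const struct caps_map map[] = {
    	{ FW_CAP_CQ, DRV_CAP_CQ },
    };

    static bool test_bit32(const uint32_t *bm, int bit)
    {
    	return (bm[bit / 32] >> (bit % 32)) & 1;
    }

    static void set_bit32(uint32_t *bm, int bit)
    {
    	bm[bit / 32] |= 1u << (bit % 32);
    }

    int main(void)
    {
    	uint32_t fw_words[2] = { 1u << 18, 0 };	/* parsed from a query-capability reply */
    	uint32_t drv_caps[1] = { 0 };

    	/* translate firmware bits into the driver-visible capability bitmap */
    	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
    		if (test_bit32(fw_words, map[i].fw_bit))
    			set_bit32(drv_caps, map[i].drv_bit);

    	/* feature code then keys off the capability, not a device version */
    	if (test_bit32(drv_caps, DRV_CAP_CQ))
    		puts("CQ period mode switchable");
    	return 0;
    }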
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index f4da77452126..6d20974519fe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -664,6 +664,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
 	param->tx_pending = priv->ring[0].desc_num;
 	param->rx_pending = priv->ring[rx_queue_index].desc_num;
 	kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
+	kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE,
+					 &priv->state);
 }
 
 static void hns3_get_pauseparam(struct net_device *netdev,
@@ -1104,6 +1106,36 @@ static int hns3_check_ringparam(struct net_device *ndev,
 	return 0;
 }
 
+static bool
+hns3_is_ringparam_changed(struct net_device *ndev,
+			  struct ethtool_ringparam *param,
+			  struct kernel_ethtool_ringparam *kernel_param,
+			  struct hns3_ring_param *old_ringparam,
+			  struct hns3_ring_param *new_ringparam)
+{
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hnae3_handle *h = priv->ae_handle;
+	u16 queue_num = h->kinfo.num_tqps;
+
+	new_ringparam->tx_desc_num = ALIGN(param->tx_pending,
+					   HNS3_RING_BD_MULTIPLE);
+	new_ringparam->rx_desc_num = ALIGN(param->rx_pending,
+					   HNS3_RING_BD_MULTIPLE);
+	old_ringparam->tx_desc_num = priv->ring[0].desc_num;
+	old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num;
+	old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size;
+	new_ringparam->rx_buf_len = kernel_param->rx_buf_len;
+
+	if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
+	    old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
+	    old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
+		netdev_info(ndev, "ringparam not changed\n");
+		return false;
+	}
+
+	return true;
+}
+
 static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
 {
 	struct hns3_nic_priv *priv = netdev_priv(ndev);
@@ -1120,62 +1152,80 @@ static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
 	return 0;
 }
 
+static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+	u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+	if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
+		return -EOPNOTSUPP;
+
+	if (tx_push == old_state)
+		return 0;
+
+	netdev_dbg(netdev, "Changing tx push from %s to %s\n",
+		   old_state ? "on" : "off", tx_push ? "on" : "off");
+
+	if (tx_push)
+		set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+	else
+		clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+	return 0;
+}
+
 static int hns3_set_ringparam(struct net_device *ndev,
 			      struct ethtool_ringparam *param,
 			      struct kernel_ethtool_ringparam *kernel_param,
 			      struct netlink_ext_ack *extack)
 {
+	struct hns3_ring_param old_ringparam, new_ringparam;
 	struct hns3_nic_priv *priv = netdev_priv(ndev);
 	struct hnae3_handle *h = priv->ae_handle;
 	struct hns3_enet_ring *tmp_rings;
 	bool if_running = netif_running(ndev);
-	u32 old_tx_desc_num, new_tx_desc_num;
-	u32 old_rx_desc_num, new_rx_desc_num;
-	u16 queue_num = h->kinfo.num_tqps;
-	u32 old_rx_buf_len;
 	int ret, i;
 
 	ret = hns3_check_ringparam(ndev, param, kernel_param);
 	if (ret)
 		return ret;
 
-	/* Hardware requires that its descriptors must be multiple of eight */
-	new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
-	new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
-	old_tx_desc_num = priv->ring[0].desc_num;
-	old_rx_desc_num = priv->ring[queue_num].desc_num;
-	old_rx_buf_len = priv->ring[queue_num].buf_size;
-	if (old_tx_desc_num == new_tx_desc_num &&
-	    old_rx_desc_num == new_rx_desc_num &&
-	    kernel_param->rx_buf_len == old_rx_buf_len)
+	ret = hns3_set_tx_push(ndev, kernel_param->tx_push);
+	if (ret)
+		return ret;
+
+	if (!hns3_is_ringparam_changed(ndev, param, kernel_param,
+				       &old_ringparam, &new_ringparam))
 		return 0;
 
 	tmp_rings = hns3_backup_ringparam(priv);
 	if (!tmp_rings) {
-		netdev_err(ndev,
-			   "backup ring param failed by allocating memory fail\n");
+		netdev_err(ndev, "backup ring param failed by allocating memory fail\n");
 		return -ENOMEM;
 	}
 
 	netdev_info(ndev,
-		    "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n",
-		    old_tx_desc_num, old_rx_desc_num,
-		    new_tx_desc_num, new_rx_desc_num,
-		    old_rx_buf_len, kernel_param->rx_buf_len);
+		    "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n",
+		    old_ringparam.tx_desc_num, old_ringparam.rx_desc_num,
+		    new_ringparam.tx_desc_num, new_ringparam.rx_desc_num,
+		    old_ringparam.rx_buf_len, new_ringparam.rx_buf_len);
 
 	if (if_running)
 		ndev->netdev_ops->ndo_stop(ndev);
 
-	hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
-	hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len);
+	hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num,
+				    new_ringparam.rx_desc_num);
+	hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len);
 	ret = hns3_init_all_ring(priv);
 	if (ret) {
 		netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n",
 			   ret);
 
-		hns3_change_rx_buf_len(ndev, old_rx_buf_len);
-		hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
-					    old_rx_desc_num);
+		hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len);
+		hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num,
					    old_ringparam.rx_desc_num);
 		for (i = 0; i < h->kinfo.num_tqps * 2; i++)
 			memcpy(&priv->ring[i], &tmp_rings[i],
 			       sizeof(struct hns3_enet_ring));
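TX push is negotiated through the HNAE3_DEV_SUPPORT_TX_PUSH_B capability checked above and exposed over the existing ringparam netlink API (see the ETHTOOL_RING_USE_TX_PUSH addition further down), so a sufficiently recent ethtool should be able to toggle it with something like `ethtool -G eth0 tx-push on`. The ethtool handler only records a state bit; the transmit path is what consults it. A hedged kernel-style sketch of how a doorbell routine might branch on such a bit; all names here are invented and hns3's real push code differs:

    /* Sketch: push the descriptor bytes through a write-combining window
     * instead of ringing the doorbell, when TX push is enabled and only a
     * single BD is pending. */
    static void my_ring_doorbell(struct my_ring *ring, struct my_desc *desc,
    			     int pending)
    {
    	if (test_bit(MY_STATE_TX_PUSH_ENABLE, &ring->state) && pending == 1)
    		/* the device reads the BD from the push window, skipping
    		 * the DMA fetch it would do after a plain doorbell write */
    		__iowrite64_copy(ring->tx_push_mem, desc,
    				 sizeof(*desc) / sizeof(u64));
    	else
    		writel(pending, ring->doorbell_reg);
    }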
@@ -1385,11 +1435,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
 	return 0;
 }
 
-static int hns3_check_coalesce_para(struct net_device *netdev,
-				    struct ethtool_coalesce *cmd)
+static int
+hns3_check_cqe_coalesce_param(struct net_device *netdev,
+			      struct kernel_ethtool_coalesce *kernel_coal)
+{
+	struct hnae3_handle *handle = hns3_get_handle(netdev);
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+
+	if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
+	    !hnae3_ae_dev_cq_supported(ae_dev)) {
+		netdev_err(netdev, "coalesced cqe mode is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int
+hns3_check_coalesce_para(struct net_device *netdev,
+			 struct ethtool_coalesce *cmd,
+			 struct kernel_ethtool_coalesce *kernel_coal)
 {
 	int ret;
 
+	ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal);
+	if (ret)
+		return ret;
+
 	ret = hns3_check_gl_coalesce_para(netdev, cmd);
 	if (ret) {
 		netdev_err(netdev,
@@ -1464,7 +1536,7 @@ static int hns3_set_coalesce(struct net_device *netdev,
 	if (hns3_nic_resetting(netdev))
 		return -EBUSY;
 
-	ret = hns3_check_coalesce_para(netdev, cmd);
+	ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal);
 	if (ret)
 		return ret;
 
@@ -1825,23 +1897,30 @@ static int hns3_set_tunable(struct net_device *netdev,
 	case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
 		old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
 		new_tx_spare_buf_size = *(u32 *)data;
+		netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
+			    old_tx_spare_buf_size, new_tx_spare_buf_size);
 		ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
 		if (ret ||
 		    (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
 			int ret1;
 
-			netdev_warn(netdev,
-				    "change tx spare buf size fail, revert to old value\n");
+			netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n");
 			ret1 = hns3_set_tx_spare_buf_size(netdev,
 							  old_tx_spare_buf_size);
 			if (ret1) {
-				netdev_err(netdev,
-					   "revert to old tx spare buf size fail\n");
+				netdev_err(netdev, "revert to old tx spare buf size fail\n");
 				return ret1;
 			}
 
 			return ret;
 		}
+
+		if (!priv->ring->tx_spare)
+			netdev_info(netdev, "the active tx spare buf size is 0, disable tx spare buffer\n");
+		else
+			netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n",
+				    priv->ring->tx_spare->len);
+
 		break;
 	default:
 		ret = -EOPNOTSUPP;
@@ -1858,7 +1937,8 @@ static int hns3_set_tunable(struct net_device *netdev,
 				 ETHTOOL_COALESCE_MAX_FRAMES |		\
 				 ETHTOOL_COALESCE_USE_CQE)
 
-#define HNS3_ETHTOOL_RING	ETHTOOL_RING_USE_RX_BUF_LEN
+#define HNS3_ETHTOOL_RING	(ETHTOOL_RING_USE_RX_BUF_LEN |		\
+				 ETHTOOL_RING_USE_TX_PUSH)
 
 static int hns3_get_ts_info(struct net_device *netdev,
 			    struct ethtool_ts_info *info)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
index 822d6fcbc73b..da207d1d9aa9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
@@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping {
 	u8 link_ext_substate;
 };
 
+struct hns3_ring_param {
+	u32 tx_desc_num;
+	u32 rx_desc_num;
+	u32 rx_buf_len;
+};
+
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 42a9e73d8588..6efd768cc07c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
  * @num:  number of extended command structures
  *
  * This function handles all the PF RAS errors in the
- * hw register/s using command.
+ * hw registers using command.
  */
 static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
 				     struct hclge_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 8cebb180c812..1ebad0e50e6a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1546,9 +1546,8 @@ static void hclge_init_tc_config(struct hclge_dev *hdev)
 static int hclge_configure(struct hclge_dev *hdev)
 {
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-	const struct cpumask *cpumask = cpu_online_mask;
 	struct hclge_cfg cfg;
-	int node, ret;
+	int ret;
 
 	ret = hclge_get_cfg(hdev, &cfg);
 	if (ret)
@@ -1594,13 +1593,6 @@ static int hclge_configure(struct hclge_dev *hdev)
 	hclge_init_tc_config(hdev);
 	hclge_init_kdump_kernel_config(hdev);
 
-	/* Set the affinity based on numa node */
-	node = dev_to_node(&hdev->pdev->dev);
-	if (node != NUMA_NO_NODE)
-		cpumask = cpumask_of_node(node);
-
-	cpumask_copy(&hdev->affinity_mask, cpumask);
-
 	return ret;
 }
 
@@ -3564,17 +3556,6 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
 	hdev->num_msi_used += 1;
 }
 
-static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
-{
-	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
-			      &hdev->affinity_mask);
-}
-
-static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
-{
-	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
-}
-
 static int hclge_misc_irq_init(struct hclge_dev *hdev)
 {
 	int ret;
@@ -10449,6 +10430,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
 	/* PF's mps must be greater then VF's mps */
 	for (i = 1; i < hdev->num_alloc_vport; i++)
 		if (max_frm_size < hdev->vport[i].mps) {
+			dev_err(&hdev->pdev->dev,
+				"failed to set pf mtu for less than vport %d, mps = %u.\n",
+				i, hdev->vport[i].mps);
 			mutex_unlock(&hdev->vport_lock);
 			return -EINVAL;
 		}
@@ -11454,11 +11438,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
 
-	/* Setup affinity after service timer setup because add_timer_on
-	 * is called in affinity notify.
-	 */
-	hclge_misc_affinity_setup(hdev);
-
 	hclge_clear_all_event_cause(hdev);
 	hclge_clear_resetting_state(hdev);
 
@@ -11876,7 +11855,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_reset_vf_rate(hdev);
 	hclge_clear_vf_vlan(hdev);
 
-	hclge_misc_affinity_teardown(hdev);
 	hclge_state_uninit(hdev);
 	hclge_ptp_uninit(hdev);
 	hclge_uninit_rxd_adv_layout(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index c70239758bb2..18caddd541f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -780,8 +780,8 @@ struct hclge_vf_vlan_cfg {
 	union {
 		struct {
 			u8 is_kill;
-			u16 vlan;
-			u16 proto;
+			__le16 vlan;
+			__le16 proto;
 		};
 		u8 enable;
 	};
@@ -938,8 +938,6 @@ struct hclge_dev {
 	DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
 		      HCLGE_MAC_TNL_LOG_SIZE);
 
-	/* affinity mask and notify for misc interrupt */
-	cpumask_t affinity_mask;
 	struct hclge_ptp *ptp;
 	struct devlink *devlink;
 	struct hclge_comm_rss_cfg rss_cfg;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 7998ca617a92..e1012f7f9b73 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -57,17 +57,19 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
 	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
 	resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
 
-	resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
-	resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
-	resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
+	resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP);
+	resp_pf_to_vf->msg.vf_mbx_msg_code =
+		cpu_to_le16(vf_to_pf_req->msg.code);
+	resp_pf_to_vf->msg.vf_mbx_msg_subcode =
+		cpu_to_le16(vf_to_pf_req->msg.subcode);
 	resp = hclge_errno_to_resp(resp_msg->status);
 	if (resp < SHRT_MAX) {
-		resp_pf_to_vf->msg.resp_status = resp;
+		resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp);
 	} else {
 		dev_warn(&hdev->pdev->dev,
 			 "failed to send response to VF, response status %u is out-of-bound\n",
 			 resp);
-		resp_pf_to_vf->msg.resp_status = EIO;
+		resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO);
 	}
 
 	if (resp_msg->len > 0)
@@ -107,9 +109,9 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
 	resp_pf_to_vf->dest_vfid = dest_vfid;
 	resp_pf_to_vf->msg_len = msg_len;
 
-	resp_pf_to_vf->msg.code = mbx_opcode;
+	resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode);
 
-	memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
+	memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);
 
 	trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
 
@@ -125,8 +127,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
 int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
 {
 	struct hclge_dev *hdev = vport->back;
+	__le16 msg_data;
 	u16 reset_type;
-	u8 msg_data[2];
 	u8 dest_vfid;
 
 	BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
@@ -140,10 +142,10 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
 	else
 		reset_type = HNAE3_VF_FUNC_RESET;
 
-	memcpy(&msg_data[0], &reset_type, sizeof(u16));
+	msg_data = cpu_to_le16(reset_type);
 
 	/* send this requested info to VF */
-	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+	return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
 				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
 }
 
@@ -249,6 +251,81 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
 	return ret;
 }
 
+static int hclge_query_ring_vector_map(struct hclge_vport *vport,
+				       struct hnae3_ring_chain_node *ring_chain,
+				       struct hclge_desc *desc)
+{
+	struct hclge_ctrl_vector_chain_cmd *req =
+		(struct hclge_ctrl_vector_chain_cmd *)desc->data;
+	struct hclge_dev *hdev = vport->back;
+	u16 tqp_type_and_id;
+	int status;
+
+	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true);
+
+	tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]);
+	hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+			hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B));
+	hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
+			ring_chain->tqp_index);
+	req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id);
+	req->vfid = vport->vport_id;
+
+	status = hclge_cmd_send(&hdev->hw, desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Get VF ring vector map info fail, status is %d.\n",
+			status);
+
+	return status;
+}
+
+static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport,
+					struct hclge_mbx_vf_to_pf_cmd *req,
+					struct hclge_respond_to_vf_msg *resp)
+{
+#define HCLGE_LIMIT_RING_NUM			1
+#define HCLGE_RING_TYPE_OFFSET			0
+#define HCLGE_TQP_INDEX_OFFSET			1
+#define HCLGE_INT_GL_INDEX_OFFSET		2
+#define HCLGE_VECTOR_ID_OFFSET			3
+#define HCLGE_RING_VECTOR_MAP_INFO_LEN		4
+	struct hnae3_ring_chain_node ring_chain;
+	struct hclge_desc desc;
+	struct hclge_ctrl_vector_chain_cmd *data =
+		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
+	u16 tqp_type_and_id;
+	u8 int_gl_index;
+	int ret;
+
+	req->msg.ring_num = HCLGE_LIMIT_RING_NUM;
+
+	memset(&ring_chain, 0, sizeof(ring_chain));
+	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+	if (ret)
+		return ret;
+
+	ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc);
+	if (ret) {
+		hclge_free_vector_ring_chain(&ring_chain);
+		return ret;
+	}
+
+	tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]);
+	int_gl_index = hnae3_get_field(tqp_type_and_id,
+				       HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S);
+
+	resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type;
+	resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index;
+	resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index;
+	resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l;
+	resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN;
+
+	hclge_free_vector_ring_chain(&ring_chain);
+
+	return ret;
+}
+
 static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
 				      struct hclge_mbx_vf_to_pf_cmd *req)
 {
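hclge_query_ring_vector_map() above shows the access discipline the __le16 descriptor fields impose: convert the little-endian word to CPU order, edit it with the mask/shift helpers, and convert it back before the descriptor goes to firmware. A runnable userspace reduction of that round-trip; the 16-bit field layout is invented for the demo:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented layout: bits 0..9 = queue id, bit 10 = ring type. */
    #define QID_MASK	0x03ffu
    #define TYPE_BIT	(1u << 10)

    int main(void)
    {
    	uint16_t wire = htole16(0);	/* descriptor word, device byte order */
    	uint16_t tmp;

    	tmp = le16toh(wire);		/* 1. to CPU order */
    	tmp = (tmp & ~QID_MASK) | 5;	/* 2. edit the fields */
    	tmp |= TYPE_BIT;
    	wire = htole16(tmp);		/* 3. back to little endian */

    	printf("wire bytes: %02x %02x\n",
    	       ((uint8_t *)&wire)[0], ((uint8_t *)&wire)[1]);
    	return 0;
    }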
@@ -339,16 +416,14 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
 				      u16 state,
 				      struct hclge_vlan_info *vlan_info)
 {
-#define MSG_DATA_SIZE	8
+	struct hclge_mbx_port_base_vlan base_vlan;
 
-	u8 msg_data[MSG_DATA_SIZE];
+	base_vlan.state = cpu_to_le16(state);
+	base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto);
+	base_vlan.qos = cpu_to_le16(vlan_info->qos);
+	base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag);
 
-	memcpy(&msg_data[0], &state, sizeof(u16));
-	memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16));
-	memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16));
-	memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16));
-
-	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+	return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan),
 				  HCLGE_MBX_PUSH_VLAN_INFO, vfid);
 }
 
@@ -362,13 +437,16 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
 	struct hnae3_handle *handle = &vport->nic;
 	struct hclge_dev *hdev = vport->back;
 	struct hclge_vf_vlan_cfg *msg_cmd;
+	__be16 proto;
+	u16 vlan_id;
 
 	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
 	switch (msg_cmd->subcode) {
 	case HCLGE_MBX_VLAN_FILTER:
-		return hclge_set_vlan_filter(handle,
-					     cpu_to_be16(msg_cmd->proto),
-					     msg_cmd->vlan, msg_cmd->is_kill);
+		proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto));
+		vlan_id = le16_to_cpu(msg_cmd->vlan);
+		return hclge_set_vlan_filter(handle, proto, vlan_id,
+					     msg_cmd->is_kill);
 	case HCLGE_MBX_VLAN_RX_OFF_CFG:
 		return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
 	case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
@@ -411,15 +489,17 @@ static void hclge_get_basic_info(struct hclge_vport *vport,
 	struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
 	struct hclge_basic_info *basic_info;
 	unsigned int i;
+	u32 pf_caps;
 
 	basic_info = (struct hclge_basic_info *)resp_msg->data;
 	for (i = 0; i < kinfo->tc_info.num_tc; i++)
 		basic_info->hw_tc_map |= BIT(i);
 
+	pf_caps = le32_to_cpu(basic_info->pf_caps);
 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
-		hnae3_set_bit(basic_info->pf_caps,
-			      HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);
+		hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);
 
+	basic_info->pf_caps = cpu_to_le32(pf_caps);
 	resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
 }
 
@@ -427,19 +507,15 @@ static void hclge_get_vf_queue_info(struct hclge_vport *vport,
 				    struct hclge_respond_to_vf_msg *resp_msg)
 {
 #define HCLGE_TQPS_RSS_INFO_LEN		6
-#define HCLGE_TQPS_ALLOC_OFFSET		0
-#define HCLGE_TQPS_RSS_SIZE_OFFSET	2
-#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET	4
 
+	struct hclge_mbx_vf_queue_info *queue_info;
 	struct hclge_dev *hdev = vport->back;
 
 	/* get the queue related info */
-	memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
-	       &vport->alloc_tqps, sizeof(u16));
-	memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
-	       &vport->nic.kinfo.rss_size, sizeof(u16));
-	memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
-	       &hdev->rx_buf_len, sizeof(u16));
+	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data;
+	queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps);
+	queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size);
+	queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len);
 	resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
 }
 
@@ -454,16 +530,15 @@ static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
 				     struct hclge_respond_to_vf_msg *resp_msg)
 {
 #define HCLGE_TQPS_DEPTH_INFO_LEN	4
-#define HCLGE_TQPS_NUM_TX_DESC_OFFSET	0
-#define HCLGE_TQPS_NUM_RX_DESC_OFFSET	2
 
+	struct hclge_mbx_vf_queue_depth *queue_depth;
 	struct hclge_dev *hdev = vport->back;
 
 	/* get the queue depth info */
-	memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
-	       &hdev->num_tx_desc, sizeof(u16));
-	memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
-	       &hdev->num_rx_desc, sizeof(u16));
+	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data;
+	queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc);
+	queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc);
+
 	resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
 }
 
@@ -488,10 +563,9 @@ int hclge_push_vf_link_status(struct hclge_vport *vport)
 #define HCLGE_VF_LINK_STATE_UP		1U
 #define HCLGE_VF_LINK_STATE_DOWN	0U
 
+	struct hclge_mbx_link_status link_info;
 	struct hclge_dev *hdev = vport->back;
 	u16 link_status;
-	u8 msg_data[9];
-	u16 duplex;
 
 	/* mac.link can only be 0 or 1 */
 	switch (vport->vf_info.link_state) {
@@ -507,14 +581,13 @@ int hclge_push_vf_link_status(struct hclge_vport *vport)
 		break;
 	}
 
-	duplex = hdev->hw.mac.duplex;
-	memcpy(&msg_data[0], &link_status, sizeof(u16));
-	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
-	memcpy(&msg_data[6], &duplex, sizeof(u16));
-	msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN;
+	link_info.link_status = cpu_to_le16(link_status);
+	link_info.speed = cpu_to_le32(hdev->hw.mac.speed);
+	link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex);
+	link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN;
 
 	/* send this requested info to VF */
-	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+	return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info),
 				  HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
 }
 
@@ -522,22 +595,22 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
 				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
 #define HCLGE_SUPPORTED   1
+	struct hclge_mbx_link_mode link_mode;
 	struct hclge_dev *hdev = vport->back;
 	unsigned long advertising;
 	unsigned long supported;
 	unsigned long send_data;
-	u8 msg_data[10] = {};
 	u8 dest_vfid;
 
 	advertising = hdev->hw.mac.advertising[0];
 	supported = hdev->hw.mac.supported[0];
 	dest_vfid = mbx_req->mbx_src_vfid;
-	msg_data[0] = mbx_req->msg.data[0];
+	send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported :
+							      advertising;
+	link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]);
+	link_mode.link_mode = cpu_to_le64(send_data);
 
-	send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;
-
-	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
-	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+	hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode),
 			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
 }
 
@@ -551,7 +624,7 @@ static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
 	u16 queue_id;
 	int ret;
 
-	memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+	queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
 	resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
 	resp_msg->len = sizeof(u8);
 
@@ -587,9 +660,11 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport)
 static int hclge_set_vf_mtu(struct hclge_vport *vport,
 			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
+	struct hclge_mbx_mtu_info *mtu_info;
 	u32 mtu;
 
-	memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));
+	mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data;
+	mtu = le32_to_cpu(mtu_info->mtu);
 
 	return hclge_set_vport_mtu(vport, mtu);
 }
@@ -602,7 +677,7 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
 	struct hclge_dev *hdev = vport->back;
 	u16 queue_id, qid_in_pf;
 
-	memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+	queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
 	if (queue_id >= handle->kinfo.num_tqps) {
 		dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
 			queue_id, mbx_req->mbx_src_vfid);
@@ -610,7 +685,7 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
 	}
 
 	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
-	memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
+	*(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf);
 	resp_msg->len = sizeof(qid_in_pf);
 	return 0;
 }
@@ -755,6 +830,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
 								req);
 			break;
+		case HCLGE_MBX_GET_RING_VECTOR_MAP:
+			ret = hclge_get_vf_ring_vector_map(vport, req,
+							   &resp_msg);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to get VF ring vector map\n",
+					ret);
+			break;
 		case HCLGE_MBX_SET_PROMISC_MODE:
 			hclge_set_vf_promisc_mode(vport, req);
 			break;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
index 5b0b71bd6120..8510b88d4982 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -62,7 +62,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
 
 	TP_fast_assign(
 		__entry->vfid = req->dest_vfid;
-		__entry->code = req->msg.code;
+		__entry->code = le16_to_cpu(req->msg.code);
 		__assign_str(pciname, pci_name(hdev->pdev));
 		__assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
 		memcpy(__entry->mbx_data, req,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 342d7cdf6285..5eaf09ea4009 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -189,8 +189,8 @@ static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
 
 	basic_info = (struct hclge_basic_info *)resp_msg;
 	hdev->hw_tc_map = basic_info->hw_tc_map;
-	hdev->mbx_api_version = basic_info->mbx_api_version;
-	caps = basic_info->pf_caps;
+	hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
+	caps = le32_to_cpu(basic_info->pf_caps);
 	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
 
@@ -223,10 +223,8 @@ static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_TQPS_RSS_INFO_LEN	6
-#define HCLGEVF_TQPS_ALLOC_OFFSET	0
-#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
-#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4
 
+	struct hclge_mbx_vf_queue_info *queue_info;
 	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
 	struct hclge_vf_to_pf_msg send_msg;
 	int status;
@@ -241,12 +239,10 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 		return status;
 	}
 
-	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
-	       sizeof(u16));
-	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
-	       sizeof(u16));
-	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
-	       sizeof(u16));
+	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
+	hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
+	hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
+	hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);
 
 	return 0;
 }
@@ -254,9 +250,8 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
-#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
-#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2
 
+	struct hclge_mbx_vf_queue_depth *queue_depth;
 	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
 	struct hclge_vf_to_pf_msg send_msg;
 	int ret;
@@ -271,10 +266,9 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
 		return ret;
 	}
 
-	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
-	       sizeof(u16));
-	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
-	       sizeof(u16));
+	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
+	hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
+	hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);
 
 	return 0;
 }
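On the VF side the offset #defines disappear the same way: the raw mailbox payload is overlaid with a packed little-endian struct and each field is converted exactly once. A runnable userspace reduction of that overlay-and-convert pattern; the payload bytes are invented:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #pragma pack(1)
    struct demo_queue_info {	/* shape mirrors hclge_mbx_vf_queue_info */
    	uint16_t num_tqps;	/* little-endian on the wire */
    	uint16_t rss_size;
    	uint16_t rx_buf_len;
    };
    #pragma pack()

    int main(void)
    {
    	/* 6-byte mailbox payload as it would arrive from the PF (LE) */
    	uint8_t resp_msg[6] = { 0x08, 0x00, 0x04, 0x00, 0x00, 0x08 };
    	struct demo_queue_info info;

    	/* overlay the struct on the buffer, then convert field by field */
    	memcpy(&info, resp_msg, sizeof(info));
    	printf("tqps=%u rss=%u buf=%u\n",
    	       le16toh(info.num_tqps), le16toh(info.rss_size),
    	       le16toh(info.rx_buf_len));
    	return 0;
    }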
@@ -288,11 +282,11 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
 	int ret;
 
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
-	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+	*(__le16 *)send_msg.data = cpu_to_le16(queue_id);
 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
 				   sizeof(resp_data));
 	if (!ret)
-		qid_in_pf = *(u16 *)resp_data;
+		qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);
 
 	return qid_in_pf;
 }
@@ -1245,11 +1239,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 				   __be16 proto, u16 vlan_id,
 				   bool is_kill)
 {
-#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
-#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
-#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3
-
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclge_mbx_vlan_filter *vlan_filter;
 	struct hclge_vf_to_pf_msg send_msg;
 	int ret;
 
@@ -1271,11 +1262,11 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
 			       HCLGE_MBX_VLAN_FILTER);
-	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
-	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
-	       sizeof(vlan_id));
-	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
-	       sizeof(proto));
+	vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
+	vlan_filter->is_kill = is_kill;
+	vlan_filter->vlan_id = cpu_to_le16(vlan_id);
+	vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));
+
 	/* when remove hw vlan filter failed, record the vlan id,
 	 * and try to remove it from hw later, to be consistence
 	 * with stack.
@@ -1347,7 +1338,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
 
 	for (i = 1; i < handle->kinfo.num_tqps; i++) {
 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
-		memcpy(send_msg.data, &i, sizeof(i));
+		*(__le16 *)send_msg.data = cpu_to_le16(i);
 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
 		if (ret)
 			return ret;
@@ -1359,10 +1350,13 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclge_mbx_mtu_info *mtu_info;
 	struct hclge_vf_to_pf_msg send_msg;
 
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
-	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
+	mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
+	mtu_info->mtu = cpu_to_le32(new_mtu);
+
 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
 }
 
@@ -2963,7 +2957,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		goto err_config;
 	}
 
-	/* ensure vf tbl list as empty before init*/
+	/* ensure vf tbl list as empty before init */
 	ret = hclgevf_clear_vport_list(hdev);
 	if (ret) {
 		dev_err(&pdev->dev,
@@ -3315,7 +3309,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
 		for (i = 0; i < reg_um; i++)
 			*reg++ = hclgevf_read_dev(&hdev->hw,
 						  ring_reg_addr_list[i] +
-						  0x200 * j);
+						  HCLGEVF_TQP_REG_SIZE * j);
 		for (i = 0; i < separator_num; i++)
 			*reg++ = SEPARATOR_VALUE;
 	}
@@ -3333,7 +3327,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
 }
 
 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
-					u8 *port_base_vlan_info, u8 data_size)
+					struct hclge_mbx_port_base_vlan *port_base_vlan)
 {
 	struct hnae3_handle *nic = &hdev->nic;
 	struct hclge_vf_to_pf_msg send_msg;
@@ -3358,7 +3352,7 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
 	/* send msg to PF and wait update port based vlan info */
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
 			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
-	memcpy(send_msg.data, port_base_vlan_info, data_size);
+	memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
 	if (!ret) {
 		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 4b00fd44f118..59ca6c794d6d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -293,5 +293,5 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
-					u8 *port_base_vlan_info, u8 data_size);
+					struct hclge_mbx_port_base_vlan *port_base_vlan);
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index d5e0a3f762f7..bbf7b14079de 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
 static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
 {
 	/* this function should be called with mbx_resp.mbx_mutex held
-	 * to prtect the received_response from race condition
+	 * to protect the received_response from race condition
 	 */
 	hdev->mbx_resp.received_resp = false;
 	hdev->mbx_resp.origin_mbx_msg = 0;
@@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
 /* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
  * message to PF.
  * @hdev: pointer to struct hclgevf_dev
- * @resp_msg: pointer to store the original message type and response status
- * @len: the resp_msg data array length.
+ * @code0: the message opcode VF send to PF.
+ * @code1: the message sub-opcode VF send to PF.
+ * @resp_data: pointer to store response data from PF to VF.
+ * @resp_len: the length of resp_data from PF to VF.
  */
 static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
 				u8 *resp_data, u16 resp_len)
@@ -122,7 +124,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
 	if (need_resp) {
 		mutex_lock(&hdev->mbx_resp.mbx_mutex);
 		hclgevf_reset_mbx_resp_status(hdev);
-		req->match_id = hdev->mbx_resp.match_id;
+		req->match_id = cpu_to_le16(hdev->mbx_resp.match_id);
 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 		if (status) {
 			dev_err(&hdev->pdev->dev,
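match_id, now carried little-endian in both directions, is the sequence number that pairs a VF request with its PF response, so a reply that arrives after the request already timed out is discarded instead of being taken as the answer to the next request. A small runnable model of the matching rule used by hclgevf_handle_mbx_response() below (single outstanding request, as in the driver):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct resp_state {
    	bool received;
    	uint16_t match_id;	/* id of the request we are waiting for */
    };

    /* Accept a reply only if it answers the currently pending request.
     * match_id == 0 models an older PF that does not support matching. */
    static void handle_response(struct resp_state *st, uint16_t resp_match_id)
    {
    	if (resp_match_id == 0 || resp_match_id == st->match_id)
    		st->received = true;
    	else
    		printf("stale response %u dropped (waiting for %u)\n",
    		       resp_match_id, st->match_id);
    }

    int main(void)
    {
    	struct resp_state st = { .received = false, .match_id = 7 };

    	handle_response(&st, 6);	/* late answer to a timed-out request */
    	handle_response(&st, 7);	/* the one we are waiting for */
    	printf("received=%d\n", st.received);
    	return 0;
    }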
@@ -160,27 +162,29 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
 static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
 					struct hclge_mbx_pf_to_vf_cmd *req)
 {
+	u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode);
+	u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code);
 	struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
+	u16 resp_status = le16_to_cpu(req->msg.resp_status);
+	u16 match_id = le16_to_cpu(req->match_id);
 
 	if (resp->received_resp)
 		dev_warn(&hdev->pdev->dev,
-			 "VF mbx resp flag not clear(%u)\n",
-			 req->msg.vf_mbx_msg_code);
-
-	resp->origin_mbx_msg =
-		(req->msg.vf_mbx_msg_code << 16);
-	resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
-	resp->resp_status =
-		hclgevf_resp_to_errno(req->msg.resp_status);
+			 "VF mbx resp flag not clear(%u)\n",
+			 vf_mbx_msg_code);
+
+	resp->origin_mbx_msg = (vf_mbx_msg_code << 16);
+	resp->origin_mbx_msg |= vf_mbx_msg_subcode;
+	resp->resp_status = hclgevf_resp_to_errno(resp_status);
 
 	memcpy(resp->additional_info, req->msg.resp_data,
 	       HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
 
-	if (req->match_id) {
+	if (match_id) {
 		/* If match_id is not zero, it means PF support match_id.
 		 * if the match_id is right, VF get the right response, or
 		 * ignore the response. and driver will clear hdev->mbx_resp
 		 * when send next message which need response.
 		 */
-		if (req->match_id == resp->match_id)
+		if (match_id == resp->match_id)
 			resp->received_resp = true;
 	} else {
 		resp->received_resp = true;
@@ -197,7 +201,7 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
 	    HCLGE_MBX_MAX_ARQ_MSG_NUM) {
 		dev_warn(&hdev->pdev->dev,
 			 "Async Q full, dropping msg(%u)\n",
-			 req->msg.code);
+			 le16_to_cpu(req->msg.code));
 		return;
 	}
 
@@ -216,6 +220,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 	struct hclge_comm_cmq_ring *crq;
 	struct hclge_desc *desc;
 	u16 flag;
+	u16 code;
 
 	crq = &hdev->hw.hw.cmq.crq;
 
@@ -230,10 +235,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
 
 		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+		code = le16_to_cpu(req->msg.code);
 		if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
 			dev_warn(&hdev->pdev->dev,
 				 "dropped invalid mailbox message, code = %u\n",
-				 req->msg.code);
+				 code);
 
 			/* dropping/not processing this invalid message */
 			crq->desc[crq->next_to_use].flag = 0;
@@ -249,7 +255,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		 * timeout and simultaneously queue the async messages for later
 		 * prcessing in context of mailbox task i.e. the slow path.
 		 */
-		switch (req->msg.code) {
+		switch (code) {
 		case HCLGE_MBX_PF_VF_RESP:
 			hclgevf_handle_mbx_response(hdev, req);
 			break;
@@ -263,7 +269,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		default:
 			dev_err(&hdev->pdev->dev,
 				"VF received unsupported(%u) mbx msg from PF\n",
-				req->msg.code);
+				code);
 			break;
 		}
 		crq->desc[crq->next_to_use].flag = 0;
@@ -285,14 +291,18 @@ static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
 
 void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 {
+	struct hclge_mbx_port_base_vlan *vlan_info;
+	struct hclge_mbx_link_status *link_info;
+	struct hclge_mbx_link_mode *link_mode;
 	enum hnae3_reset_type reset_type;
 	u16 link_status, state;
-	u16 *msg_q, *vlan_info;
+	__le16 *msg_q;
+	u16 opcode;
 	u8 duplex;
 	u32 speed;
 	u32 tail;
 	u8 flag;
-	u8 idx;
+	u16 idx;
 
 	tail = hdev->arq.tail;
 
@@ -306,13 +316,14 @@
 		}
 
 		msg_q = hdev->arq.msg_q[hdev->arq.head];
-
-		switch (msg_q[0]) {
+		opcode = le16_to_cpu(msg_q[0]);
+		switch (opcode) {
 		case HCLGE_MBX_LINK_STAT_CHANGE:
-			link_status = msg_q[1];
-			memcpy(&speed, &msg_q[2], sizeof(speed));
-			duplex = (u8)msg_q[4];
-			flag = (u8)msg_q[5];
+			link_info = (struct hclge_mbx_link_status *)(msg_q + 1);
+			link_status = le16_to_cpu(link_info->link_status);
+			speed = le32_to_cpu(link_info->speed);
+			duplex = (u8)le16_to_cpu(link_info->duplex);
+			flag = link_info->flag;
 
 			/* update upper layer with new link link status */
 			hclgevf_update_speed_duplex(hdev, speed, duplex);
@@ -324,13 +335,14 @@
 			break;
 		case HCLGE_MBX_LINK_STAT_MODE:
-			idx = (u8)msg_q[1];
+			link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1);
+			idx = le16_to_cpu(link_mode->idx);
 			if (idx)
-				memcpy(&hdev->hw.mac.supported, &msg_q[2],
-				       sizeof(unsigned long));
+				hdev->hw.mac.supported =
+					le64_to_cpu(link_mode->link_mode);
 			else
-				memcpy(&hdev->hw.mac.advertising, &msg_q[2],
-				       sizeof(unsigned long));
+				hdev->hw.mac.advertising =
+					le64_to_cpu(link_mode->link_mode);
 			break;
 		case HCLGE_MBX_ASSERTING_RESET:
 			/* PF has asserted reset hence VF should go in pending
@@ -338,25 +350,27 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			 * has been completely reset. After this stack should
 			 * eventually be re-initialized.
 			 */
-			reset_type = (enum hnae3_reset_type)msg_q[1];
+			reset_type =
+				(enum hnae3_reset_type)le16_to_cpu(msg_q[1]);
 			set_bit(reset_type, &hdev->reset_pending);
 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 			hclgevf_reset_task_schedule(hdev);
 
 			break;
 		case HCLGE_MBX_PUSH_VLAN_INFO:
-			state = msg_q[1];
-			vlan_info = &msg_q[1];
+			vlan_info =
+				(struct hclge_mbx_port_base_vlan *)(msg_q + 1);
+			state = le16_to_cpu(vlan_info->state);
 			hclgevf_update_port_base_vlan_info(hdev, state,
-							   (u8 *)vlan_info, 8);
+							   vlan_info);
 			break;
 		case HCLGE_MBX_PUSH_PROMISC_INFO:
-			hclgevf_parse_promisc_info(hdev, msg_q[1]);
+			hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1]));
 			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"fetched unsupported(%u) message from arq\n",
-				msg_q[0]);
+				opcode);
 			break;
 		}
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
index e4bfb6191fef..5d4895bb57a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -29,7 +29,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
 
 	TP_fast_assign(
 		__entry->vfid = req->dest_vfid;
-		__entry->code = req->msg.code;
+		__entry->code = le16_to_cpu(req->msg.code);
 		__assign_str(pciname, pci_name(hdev->pdev));
 		__assign_str(devname, &hdev->nic.kinfo.netdev->name);
 		memcpy(__entry->mbx_data, req,
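With the ARQ slots typed as __le16 words, each async push is decoded by converting the opcode word and overlaying one of the packed mailbox structs on the words that follow, as hclgevf_mbx_async_handler() now does. A runnable userspace reduction of the link-status case; the opcode value and payload are invented:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #pragma pack(1)
    struct demo_link_status {	/* shape mirrors hclge_mbx_link_status */
    	uint16_t link_status;
    	uint32_t speed;
    	uint16_t duplex;
    	uint8_t flag;
    };
    #pragma pack()

    #define DEMO_LINK_STAT_CHANGE 1	/* stand-in opcode, not the real value */

    int main(void)
    {
    	/* one ARQ slot: opcode word followed by the pushed payload, all LE */
    	uint16_t msg_q[8];
    	struct demo_link_status out, in = {
    		.link_status = htole16(1),
    		.speed = htole32(10000),
    		.duplex = htole16(1),
    		.flag = 1,
    	};

    	msg_q[0] = htole16(DEMO_LINK_STAT_CHANGE);
    	memcpy(&msg_q[1], &in, sizeof(in));

    	if (le16toh(msg_q[0]) == DEMO_LINK_STAT_CHANGE) {
    		memcpy(&out, &msg_q[1], sizeof(out));
    		printf("link=%u speed=%u duplex=%u\n",
    		       le16toh(out.link_status), le32toh(out.speed),
    		       le16toh(out.duplex));
    	}
    	return 0;
    }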