Diffstat (limited to 'drivers/net/ethernet/hisilicon')
 drivers/net/ethernet/hisilicon/hibmcge/Makefile | 3
 drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h | 160
 drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c | 168
 drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h | 12
 drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c | 349
 drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h | 11
 drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c | 193
 drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h | 14
 drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c | 483
 drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h | 5
 drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c | 133
 drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h | 6
 drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c | 75
 drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c | 306
 drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c | 84
 drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h | 2
 drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h | 155
 drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 181
 drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h | 7
 drivers/net/ethernet/hisilicon/hip04_eth.c | 4
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 109
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 28
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 67
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 5
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 1
 drivers/net/ethernet/hisilicon/hns/hns_enet.c | 4
 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4
 drivers/net/ethernet/hisilicon/hns3/hnae3.c | 15
 drivers/net/ethernet/hisilicon/hns3/hnae3.h | 25
 drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 2
 drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c | 6
 drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h | 4
 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 1084
 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h | 16
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 164
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 6
 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 110
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 1370
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 1
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 145
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 4
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 7
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 4
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 30
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h | 2
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c | 9
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 84
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 2
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c | 36
 drivers/net/ethernet/hisilicon/hns_mdio.c | 2
51 files changed, 3548 insertions, 2150 deletions
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index ae58ac38c206..1a9da564b306 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -5,4 +5,5 @@
obj-$(CONFIG_HIBMCGE) += hibmcge.o
-hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o
+hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
+ hbg_debugfs.o hbg_err.o hbg_diagnose.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index 96daf058d387..ea09a09c451b 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -4,6 +4,7 @@
#ifndef __HBG_COMMON_H
#define __HBG_COMMON_H
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include "hbg_reg.h"
@@ -33,6 +34,16 @@ enum hbg_tx_state {
enum hbg_nic_state {
HBG_NIC_STATE_EVENT_HANDLING = 0,
+ HBG_NIC_STATE_RESETTING,
+ HBG_NIC_STATE_RESET_FAIL,
+ HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */
+ HBG_NIC_STATE_NP_LINK_FAIL,
+};
+
+enum hbg_reset_type {
+ HBG_RESET_TYPE_NONE = 0,
+ HBG_RESET_TYPE_FLR,
+ HBG_RESET_TYPE_FUNCTION,
};
struct hbg_buffer {
@@ -72,6 +83,7 @@ enum hbg_hw_event_type {
HBG_HW_EVENT_NONE = 0,
HBG_HW_EVENT_INIT, /* driver is loading */
HBG_HW_EVENT_RESET,
+ HBG_HW_EVENT_CORE_RESET,
};
struct hbg_dev_specs {
@@ -84,6 +96,7 @@ struct hbg_dev_specs {
u32 vlan_layers;
u32 max_mtu;
u32 min_mtu;
+ u32 uc_mac_num;
u32 max_frame_len;
u32 rx_buf_size;
@@ -94,14 +107,17 @@ struct hbg_irq_info {
u32 mask;
bool re_enable;
bool need_print;
- u64 count;
+ bool need_reset;
- void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info);
+ void (*irq_handle)(struct hbg_priv *priv,
+ const struct hbg_irq_info *info);
};
struct hbg_vector {
char name[HBG_VECTOR_NUM][32];
- struct hbg_irq_info *info_array;
+
+ u64 *stats_array;
+ const struct hbg_irq_info *info_array;
u32 info_array_len;
};
@@ -114,6 +130,135 @@ struct hbg_mac {
u32 duplex;
u32 autoneg;
u32 link_status;
+ u32 pause_autoneg;
+};
+
+struct hbg_mac_table_entry {
+ u8 addr[ETH_ALEN];
+};
+
+struct hbg_mac_filter {
+ struct hbg_mac_table_entry *mac_table;
+ u32 table_max_len;
+ bool enabled;
+};
+
+/* saved for restore after reset */
+struct hbg_user_def {
+ struct ethtool_pauseparam pause_param;
+};
+
+struct hbg_stats {
+ u64 rx_desc_drop;
+ u64 rx_desc_l2_err_cnt;
+ u64 rx_desc_pkt_len_err_cnt;
+ u64 rx_desc_l3l4_err_cnt;
+ u64 rx_desc_l3_wrong_head_cnt;
+ u64 rx_desc_l3_csum_err_cnt;
+ u64 rx_desc_l3_len_err_cnt;
+ u64 rx_desc_l3_zero_ttl_cnt;
+ u64 rx_desc_l3_other_cnt;
+ u64 rx_desc_l4_err_cnt;
+ u64 rx_desc_l4_wrong_head_cnt;
+ u64 rx_desc_l4_len_err_cnt;
+ u64 rx_desc_l4_csum_err_cnt;
+ u64 rx_desc_l4_zero_port_num_cnt;
+ u64 rx_desc_l4_other_cnt;
+ u64 rx_desc_frag_cnt;
+ u64 rx_desc_ip_ver_err_cnt;
+ u64 rx_desc_ipv4_pkt_cnt;
+ u64 rx_desc_ipv6_pkt_cnt;
+ u64 rx_desc_no_ip_pkt_cnt;
+ u64 rx_desc_ip_pkt_cnt;
+ u64 rx_desc_tcp_pkt_cnt;
+ u64 rx_desc_udp_pkt_cnt;
+ u64 rx_desc_vlan_pkt_cnt;
+ u64 rx_desc_icmp_pkt_cnt;
+ u64 rx_desc_arp_pkt_cnt;
+ u64 rx_desc_rarp_pkt_cnt;
+ u64 rx_desc_multicast_pkt_cnt;
+ u64 rx_desc_broadcast_pkt_cnt;
+ u64 rx_desc_ipsec_pkt_cnt;
+ u64 rx_desc_ip_opt_pkt_cnt;
+ u64 rx_desc_key_not_match_cnt;
+
+ u64 rx_octets_total_ok_cnt;
+ u64 rx_uc_pkt_cnt;
+ u64 rx_mc_pkt_cnt;
+ u64 rx_bc_pkt_cnt;
+ u64 rx_vlan_pkt_cnt;
+ u64 rx_octets_bad_cnt;
+ u64 rx_octets_total_filt_cnt;
+ u64 rx_filt_pkt_cnt;
+ u64 rx_trans_pkt_cnt;
+ u64 rx_framesize_64;
+ u64 rx_framesize_65_127;
+ u64 rx_framesize_128_255;
+ u64 rx_framesize_256_511;
+ u64 rx_framesize_512_1023;
+ u64 rx_framesize_1024_1518;
+ u64 rx_framesize_bt_1518;
+ u64 rx_fcs_error_cnt;
+ u64 rx_data_error_cnt;
+ u64 rx_align_error_cnt;
+ u64 rx_pause_macctl_frame_cnt;
+ u64 rx_unknown_macctl_frame_cnt;
+ /* crc ok, > max_frm_size, < 2max_frm_size */
+ u64 rx_frame_long_err_cnt;
+ /* crc fail, > max_frm_size, < 2max_frm_size */
+ u64 rx_jabber_err_cnt;
+ /* > 2max_frm_size */
+ u64 rx_frame_very_long_err_cnt;
+ /* < 64byte, >= short_runts_thr */
+ u64 rx_frame_runt_err_cnt;
+ /* < short_runts_thr */
+ u64 rx_frame_short_err_cnt;
+ /* PCU: dropped when the RX FIFO is full.*/
+ u64 rx_overflow_cnt;
+ /* GMAC: the count of overflows of the RX FIFO */
+ u64 rx_overrun_cnt;
+ /* PCU: the count of buffer alloc errors in RX */
+ u64 rx_bufrq_err_cnt;
+ /* PCU: the count of write descriptor errors in RX */
+ u64 rx_we_err_cnt;
+ /* GMAC: the count of pkts that contain PAD but length is not 64 */
+ u64 rx_lengthfield_err_cnt;
+ u64 rx_fail_comma_cnt;
+
+ u64 rx_dma_err_cnt;
+ u64 rx_fifo_less_empty_thrsld_cnt;
+
+ u64 tx_octets_total_ok_cnt;
+ u64 tx_uc_pkt_cnt;
+ u64 tx_mc_pkt_cnt;
+ u64 tx_bc_pkt_cnt;
+ u64 tx_vlan_pkt_cnt;
+ u64 tx_octets_bad_cnt;
+ u64 tx_trans_pkt_cnt;
+ u64 tx_pause_frame_cnt;
+ u64 tx_framesize_64;
+ u64 tx_framesize_65_127;
+ u64 tx_framesize_128_255;
+ u64 tx_framesize_256_511;
+ u64 tx_framesize_512_1023;
+ u64 tx_framesize_1024_1518;
+ u64 tx_framesize_bt_1518;
+ /* GMAC: the count of times that frames fail to be transmitted
+ * due to internal errors.
+ */
+ u64 tx_underrun_err_cnt;
+ u64 tx_add_cs_fail_cnt;
+ /* PCU: the count of buffer free errors in TX */
+ u64 tx_bufrl_err_cnt;
+ u64 tx_crc_err_cnt;
+ u64 tx_drop_cnt;
+ u64 tx_excessive_length_drop_cnt;
+
+ u64 tx_timeout_cnt;
+ u64 tx_dma_err_cnt;
+
+ u64 np_link_fail_cnt;
+ u64 reset_fail_cnt;
};
struct hbg_priv {
@@ -126,6 +271,15 @@ struct hbg_priv {
struct hbg_vector vectors;
struct hbg_ring tx_ring;
struct hbg_ring rx_ring;
+ struct hbg_mac_filter filter;
+ enum hbg_reset_type reset_type;
+ struct hbg_user_def user_def;
+ struct hbg_stats stats;
+ unsigned long last_update_stats_time;
+ struct delayed_work service_task;
};
+void hbg_err_reset_task_schedule(struct hbg_priv *priv);
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
new file mode 100644
index 000000000000..01ad82d2f5cc
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+#include "hbg_common.h"
+#include "hbg_debugfs.h"
+#include "hbg_hw.h"
+#include "hbg_irq.h"
+#include "hbg_txrx.h"
+
+static struct dentry *hbg_dbgfs_root;
+
+struct hbg_dbg_info {
+ const char *name;
+ int (*read)(struct seq_file *seq, void *data);
+};
+
+#define state_str_true_false(p, s) str_true_false(test_bit(s, &(p)->state))
+
+static void hbg_dbg_ring(struct hbg_priv *priv, struct hbg_ring *ring,
+ struct seq_file *s)
+{
+ u32 irq_mask = ring->dir == HBG_DIR_TX ? HBG_INT_MSK_TX_B :
+ HBG_INT_MSK_RX_B;
+
+ seq_printf(s, "ring used num: %u\n",
+ hbg_get_queue_used_num(ring));
+ seq_printf(s, "ring max num: %u\n", ring->len);
+ seq_printf(s, "ring head: %u, tail: %u\n", ring->head, ring->tail);
+ seq_printf(s, "fifo used num: %u\n",
+ hbg_hw_get_fifo_used_num(priv, ring->dir));
+ seq_printf(s, "fifo max num: %u\n",
+ hbg_get_spec_fifo_max_num(priv, ring->dir));
+ seq_printf(s, "irq enabled: %s\n",
+ str_true_false(hbg_hw_irq_is_enabled(priv, irq_mask)));
+}
+
+static int hbg_dbg_tx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->tx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_rx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->rx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ const struct hbg_irq_info *info;
+ u32 i;
+
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ seq_printf(s,
+ "%-20s: enabled: %-5s, reset: %-5s, logged: %-5s, count: %llu\n",
+ info->name,
+ str_true_false(hbg_hw_irq_is_enabled(priv,
+ info->mask)),
+ str_true_false(info->need_reset),
+ str_true_false(info->need_print),
+ priv->vectors.stats_array[i]);
+ }
+
+ return 0;
+}
+
+static int hbg_dbg_mac_table(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_mac_filter *filter;
+ u32 i;
+
+ filter = &priv->filter;
+ seq_printf(s, "mac addr max count: %u\n", filter->table_max_len);
+ seq_printf(s, "filter enabled: %s\n", str_true_false(filter->enabled));
+
+ for (i = 0; i < filter->table_max_len; i++) {
+ if (is_zero_ether_addr(filter->mac_table[i].addr))
+ continue;
+
+ seq_printf(s, "[%u] %pM\n", i, filter->mac_table[i].addr);
+ }
+
+ return 0;
+}
+
+static const char * const reset_type_str[] = {"None", "FLR", "Function"};
+
+static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ bool np_link_fail;
+
+ seq_printf(s, "event handling state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING));
+ seq_printf(s, "resetting state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESETTING));
+ seq_printf(s, "reset fail state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL));
+ seq_printf(s, "last reset type: %s\n",
+ reset_type_str[priv->reset_type]);
+ seq_printf(s, "need reset state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET));
+
+ np_link_fail = !hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+ seq_printf(s, "np_link fail state: %s\n", str_true_false(np_link_fail));
+
+ return 0;
+}
+
+static const struct hbg_dbg_info hbg_dbg_infos[] = {
+ { "tx_ring", hbg_dbg_tx_ring },
+ { "rx_ring", hbg_dbg_rx_ring },
+ { "irq_info", hbg_dbg_irq_info },
+ { "mac_table", hbg_dbg_mac_table },
+ { "nic_state", hbg_dbg_nic_state },
+};
+
+static void hbg_debugfs_uninit(void *data)
+{
+ debugfs_remove_recursive((struct dentry *)data);
+}
+
+void hbg_debugfs_init(struct hbg_priv *priv)
+{
+ const char *name = pci_name(priv->pdev);
+ struct device *dev = &priv->pdev->dev;
+ struct dentry *root;
+ u32 i;
+
+ root = debugfs_create_dir(name, hbg_dbgfs_root);
+
+ for (i = 0; i < ARRAY_SIZE(hbg_dbg_infos); i++)
+ debugfs_create_devm_seqfile(dev, hbg_dbg_infos[i].name,
+ root, hbg_dbg_infos[i].read);
+
+ /* Ignore the failure because debugfs is not a key feature. */
+ devm_add_action_or_reset(dev, hbg_debugfs_uninit, root);
+}
+
+void hbg_debugfs_register(void)
+{
+ hbg_dbgfs_root = debugfs_create_dir("hibmcge", NULL);
+}
+
+void hbg_debugfs_unregister(void)
+{
+ debugfs_remove_recursive(hbg_dbgfs_root);
+ hbg_dbgfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
new file mode 100644
index 000000000000..80670d66bbeb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_DEBUGFS_H
+#define __HBG_DEBUGFS_H
+
+void hbg_debugfs_register(void);
+void hbg_debugfs_unregister(void);
+
+void hbg_debugfs_init(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
new file mode 100644
index 000000000000..c0ce74cf7382
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include "hbg_common.h"
+#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+#include "hbg_diagnose.h"
+
+#define HBG_MSG_DATA_MAX_NUM 64
+
+struct hbg_diagnose_message {
+ u32 opcode;
+ u32 status;
+ u32 data_num;
+ struct hbg_priv *priv;
+
+ u32 data[HBG_MSG_DATA_MAX_NUM];
+};
+
+#define HBG_HW_PUSH_WAIT_TIMEOUT_US (2 * 1000 * 1000)
+#define HBG_HW_PUSH_WAIT_INTERVAL_US (1 * 1000)
+
+enum hbg_push_cmd {
+ HBG_PUSH_CMD_IRQ = 0,
+ HBG_PUSH_CMD_STATS,
+ HBG_PUSH_CMD_LINK,
+};
+
+struct hbg_push_stats_info {
+ /* id is used to match the name of the current stats item.
+ * and is used for pretty print on BMC
+ */
+ u32 id;
+ u64 offset;
+};
+
+struct hbg_push_irq_info {
+ /* id is used to match the name of the current irq.
+ * and is used for pretty print on BMC
+ */
+ u32 id;
+ u32 mask;
+};
+
+#define HBG_PUSH_IRQ_I(name, id) {id, HBG_INT_MSK_##name##_B}
+static const struct hbg_push_irq_info hbg_push_irq_list[] = {
+ HBG_PUSH_IRQ_I(RX, 0),
+ HBG_PUSH_IRQ_I(TX, 1),
+ HBG_PUSH_IRQ_I(TX_PKT_CPL, 2),
+ HBG_PUSH_IRQ_I(MAC_MII_FIFO_ERR, 3),
+ HBG_PUSH_IRQ_I(MAC_PCS_RX_FIFO_ERR, 4),
+ HBG_PUSH_IRQ_I(MAC_PCS_TX_FIFO_ERR, 5),
+ HBG_PUSH_IRQ_I(MAC_APP_RX_FIFO_ERR, 6),
+ HBG_PUSH_IRQ_I(MAC_APP_TX_FIFO_ERR, 7),
+ HBG_PUSH_IRQ_I(SRAM_PARITY_ERR, 8),
+ HBG_PUSH_IRQ_I(TX_AHB_ERR, 9),
+ HBG_PUSH_IRQ_I(RX_BUF_AVL, 10),
+ HBG_PUSH_IRQ_I(REL_BUF_ERR, 11),
+ HBG_PUSH_IRQ_I(TXCFG_AVL, 12),
+ HBG_PUSH_IRQ_I(TX_DROP, 13),
+ HBG_PUSH_IRQ_I(RX_DROP, 14),
+ HBG_PUSH_IRQ_I(RX_AHB_ERR, 15),
+ HBG_PUSH_IRQ_I(MAC_FIFO_ERR, 16),
+ HBG_PUSH_IRQ_I(RBREQ_ERR, 17),
+ HBG_PUSH_IRQ_I(WE_ERR, 18),
+};
+
+#define HBG_PUSH_STATS_I(name, id) {id, HBG_STATS_FIELD_OFF(name)}
+static const struct hbg_push_stats_info hbg_push_stats_list[] = {
+ HBG_PUSH_STATS_I(rx_desc_drop, 0),
+ HBG_PUSH_STATS_I(rx_desc_l2_err_cnt, 1),
+ HBG_PUSH_STATS_I(rx_desc_pkt_len_err_cnt, 2),
+ HBG_PUSH_STATS_I(rx_desc_l3_wrong_head_cnt, 3),
+ HBG_PUSH_STATS_I(rx_desc_l3_csum_err_cnt, 4),
+ HBG_PUSH_STATS_I(rx_desc_l3_len_err_cnt, 5),
+ HBG_PUSH_STATS_I(rx_desc_l3_zero_ttl_cnt, 6),
+ HBG_PUSH_STATS_I(rx_desc_l3_other_cnt, 7),
+ HBG_PUSH_STATS_I(rx_desc_l4_err_cnt, 8),
+ HBG_PUSH_STATS_I(rx_desc_l4_wrong_head_cnt, 9),
+ HBG_PUSH_STATS_I(rx_desc_l4_len_err_cnt, 10),
+ HBG_PUSH_STATS_I(rx_desc_l4_csum_err_cnt, 11),
+ HBG_PUSH_STATS_I(rx_desc_l4_zero_port_num_cnt, 12),
+ HBG_PUSH_STATS_I(rx_desc_l4_other_cnt, 13),
+ HBG_PUSH_STATS_I(rx_desc_frag_cnt, 14),
+ HBG_PUSH_STATS_I(rx_desc_ip_ver_err_cnt, 15),
+ HBG_PUSH_STATS_I(rx_desc_ipv4_pkt_cnt, 16),
+ HBG_PUSH_STATS_I(rx_desc_ipv6_pkt_cnt, 17),
+ HBG_PUSH_STATS_I(rx_desc_no_ip_pkt_cnt, 18),
+ HBG_PUSH_STATS_I(rx_desc_ip_pkt_cnt, 19),
+ HBG_PUSH_STATS_I(rx_desc_tcp_pkt_cnt, 20),
+ HBG_PUSH_STATS_I(rx_desc_udp_pkt_cnt, 21),
+ HBG_PUSH_STATS_I(rx_desc_vlan_pkt_cnt, 22),
+ HBG_PUSH_STATS_I(rx_desc_icmp_pkt_cnt, 23),
+ HBG_PUSH_STATS_I(rx_desc_arp_pkt_cnt, 24),
+ HBG_PUSH_STATS_I(rx_desc_rarp_pkt_cnt, 25),
+ HBG_PUSH_STATS_I(rx_desc_multicast_pkt_cnt, 26),
+ HBG_PUSH_STATS_I(rx_desc_broadcast_pkt_cnt, 27),
+ HBG_PUSH_STATS_I(rx_desc_ipsec_pkt_cnt, 28),
+ HBG_PUSH_STATS_I(rx_desc_ip_opt_pkt_cnt, 29),
+ HBG_PUSH_STATS_I(rx_desc_key_not_match_cnt, 30),
+ HBG_PUSH_STATS_I(rx_octets_total_ok_cnt, 31),
+ HBG_PUSH_STATS_I(rx_uc_pkt_cnt, 32),
+ HBG_PUSH_STATS_I(rx_mc_pkt_cnt, 33),
+ HBG_PUSH_STATS_I(rx_bc_pkt_cnt, 34),
+ HBG_PUSH_STATS_I(rx_vlan_pkt_cnt, 35),
+ HBG_PUSH_STATS_I(rx_octets_bad_cnt, 36),
+ HBG_PUSH_STATS_I(rx_octets_total_filt_cnt, 37),
+ HBG_PUSH_STATS_I(rx_filt_pkt_cnt, 38),
+ HBG_PUSH_STATS_I(rx_trans_pkt_cnt, 39),
+ HBG_PUSH_STATS_I(rx_framesize_64, 40),
+ HBG_PUSH_STATS_I(rx_framesize_65_127, 41),
+ HBG_PUSH_STATS_I(rx_framesize_128_255, 42),
+ HBG_PUSH_STATS_I(rx_framesize_256_511, 43),
+ HBG_PUSH_STATS_I(rx_framesize_512_1023, 44),
+ HBG_PUSH_STATS_I(rx_framesize_1024_1518, 45),
+ HBG_PUSH_STATS_I(rx_framesize_bt_1518, 46),
+ HBG_PUSH_STATS_I(rx_fcs_error_cnt, 47),
+ HBG_PUSH_STATS_I(rx_data_error_cnt, 48),
+ HBG_PUSH_STATS_I(rx_align_error_cnt, 49),
+ HBG_PUSH_STATS_I(rx_frame_long_err_cnt, 50),
+ HBG_PUSH_STATS_I(rx_jabber_err_cnt, 51),
+ HBG_PUSH_STATS_I(rx_pause_macctl_frame_cnt, 52),
+ HBG_PUSH_STATS_I(rx_unknown_macctl_frame_cnt, 53),
+ HBG_PUSH_STATS_I(rx_frame_very_long_err_cnt, 54),
+ HBG_PUSH_STATS_I(rx_frame_runt_err_cnt, 55),
+ HBG_PUSH_STATS_I(rx_frame_short_err_cnt, 56),
+ HBG_PUSH_STATS_I(rx_overflow_cnt, 57),
+ HBG_PUSH_STATS_I(rx_bufrq_err_cnt, 58),
+ HBG_PUSH_STATS_I(rx_we_err_cnt, 59),
+ HBG_PUSH_STATS_I(rx_overrun_cnt, 60),
+ HBG_PUSH_STATS_I(rx_lengthfield_err_cnt, 61),
+ HBG_PUSH_STATS_I(rx_fail_comma_cnt, 62),
+ HBG_PUSH_STATS_I(rx_dma_err_cnt, 63),
+ HBG_PUSH_STATS_I(rx_fifo_less_empty_thrsld_cnt, 64),
+ HBG_PUSH_STATS_I(tx_octets_total_ok_cnt, 65),
+ HBG_PUSH_STATS_I(tx_uc_pkt_cnt, 66),
+ HBG_PUSH_STATS_I(tx_mc_pkt_cnt, 67),
+ HBG_PUSH_STATS_I(tx_bc_pkt_cnt, 68),
+ HBG_PUSH_STATS_I(tx_vlan_pkt_cnt, 69),
+ HBG_PUSH_STATS_I(tx_octets_bad_cnt, 70),
+ HBG_PUSH_STATS_I(tx_trans_pkt_cnt, 71),
+ HBG_PUSH_STATS_I(tx_pause_frame_cnt, 72),
+ HBG_PUSH_STATS_I(tx_framesize_64, 73),
+ HBG_PUSH_STATS_I(tx_framesize_65_127, 74),
+ HBG_PUSH_STATS_I(tx_framesize_128_255, 75),
+ HBG_PUSH_STATS_I(tx_framesize_256_511, 76),
+ HBG_PUSH_STATS_I(tx_framesize_512_1023, 77),
+ HBG_PUSH_STATS_I(tx_framesize_1024_1518, 78),
+ HBG_PUSH_STATS_I(tx_framesize_bt_1518, 79),
+ HBG_PUSH_STATS_I(tx_underrun_err_cnt, 80),
+ HBG_PUSH_STATS_I(tx_add_cs_fail_cnt, 81),
+ HBG_PUSH_STATS_I(tx_bufrl_err_cnt, 82),
+ HBG_PUSH_STATS_I(tx_crc_err_cnt, 83),
+ HBG_PUSH_STATS_I(tx_drop_cnt, 84),
+ HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85),
+ HBG_PUSH_STATS_I(tx_dma_err_cnt, 86),
+ HBG_PUSH_STATS_I(reset_fail_cnt, 87),
+};
+
+static int hbg_push_msg_send(struct hbg_priv *priv,
+ struct hbg_diagnose_message *msg)
+{
+ u32 header = 0;
+ u32 i;
+
+ if (msg->data_num == 0)
+ return 0;
+
+ for (i = 0; i < msg->data_num && i < HBG_MSG_DATA_MAX_NUM; i++)
+ hbg_reg_write(priv,
+ HBG_REG_MSG_DATA_BASE_ADDR + i * sizeof(u32),
+ msg->data[i]);
+
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_OPCODE_M, msg->opcode);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_DATA_NUM_M, msg->data_num);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_RESP_CODE_M, ETIMEDOUT);
+
+ /* start status */
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_STATUS_M, 1);
+
+ /* write header msg to start push */
+ hbg_reg_write(priv, HBG_REG_MSG_HEADER_ADDR, header);
+
+ /* wait done */
+ readl_poll_timeout(priv->io_base + HBG_REG_MSG_HEADER_ADDR, header,
+ !FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header),
+ HBG_HW_PUSH_WAIT_INTERVAL_US,
+ HBG_HW_PUSH_WAIT_TIMEOUT_US);
+
+ msg->status = FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header);
+ return -(int)FIELD_GET(HBG_REG_MSG_HEADER_RESP_CODE_M, header);
+}
+
+static int hbg_push_data(struct hbg_priv *priv,
+ u32 opcode, u32 *data, u32 data_num)
+{
+ struct hbg_diagnose_message msg = {0};
+ u32 data_left_num;
+ u32 i, j;
+ int ret;
+
+ msg.priv = priv;
+ msg.opcode = opcode;
+ for (i = 0; i < data_num / HBG_MSG_DATA_MAX_NUM + 1; i++) {
+ if (i * HBG_MSG_DATA_MAX_NUM >= data_num)
+ break;
+
+ data_left_num = data_num - i * HBG_MSG_DATA_MAX_NUM;
+ for (j = 0; j < data_left_num && j < HBG_MSG_DATA_MAX_NUM; j++)
+ msg.data[j] = data[i * HBG_MSG_DATA_MAX_NUM + j];
+
+ msg.data_num = j;
+ ret = hbg_push_msg_send(priv, &msg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hbg_push_data_u64(struct hbg_priv *priv, u32 opcode,
+ u64 *data, u32 data_num)
+{
+ /* The length of u64 is twice that of u32,
+ * so the data_num must be multiplied by 2.
+ */
+ return hbg_push_data(priv, opcode, (u32 *)data, data_num * 2);
+}
+
+static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask)
+{
+ u32 i = 0;
+
+ for (i = 0; i < vectors->info_array_len; i++)
+ if (vectors->info_array[i].mask == mask)
+ return vectors->stats_array[i];
+
+ return 0;
+}
+
+static int hbg_push_irq_cnt(struct hbg_priv *priv)
+{
+ /* An id needs to be added for each data.
+ * Therefore, the data_num must be multiplied by 2.
+ */
+ u32 data_num = ARRAY_SIZE(hbg_push_irq_list) * 2;
+ struct hbg_vector *vectors = &priv->vectors;
+ const struct hbg_push_irq_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* An id needs to be added for each data.
+ * So i + 2 for each loop.
+ */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_irq_list[j++];
+ data[i] = info->id;
+ data[i + 1] = hbg_get_irq_stats(vectors, info->mask);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_IRQ, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+static int hbg_push_link_status(struct hbg_priv *priv)
+{
+ u32 link_status[2];
+
+ /* phy link status */
+ link_status[0] = priv->mac.phydev->link;
+ /* mac link status */
+ link_status[1] = hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+
+ return hbg_push_data(priv, HBG_PUSH_CMD_LINK,
+ link_status, ARRAY_SIZE(link_status));
+}
+
+static int hbg_push_stats(struct hbg_priv *priv)
+{
+ /* An id needs to be added for each data.
+ * Therefore, the data_num must be multiplied by 2.
+ */
+ u64 data_num = ARRAY_SIZE(hbg_push_stats_list) * 2;
+ struct hbg_stats *stats = &priv->stats;
+ const struct hbg_push_stats_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* An id needs to be added for each data.
+ * So i + 2 for each loop.
+ */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_stats_list[j++];
+ data[i] = info->id;
+ data[i + 1] = HBG_STATS_R(stats, info->offset);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_STATS, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+void hbg_diagnose_message_push(struct hbg_priv *priv)
+{
+ int ret;
+
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ /* only 1 is the right value */
+ if (hbg_reg_read(priv, HBG_REG_PUSH_REQ_ADDR) != 1)
+ return;
+
+ ret = hbg_push_irq_cnt(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push irq cnt, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_link_status(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push link status, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_stats(priv);
+ if (ret)
+ dev_err(&priv->pdev->dev,
+ "failed to push stats, ret = %d\n", ret);
+
+push_done:
+ hbg_reg_write(priv, HBG_REG_PUSH_REQ_ADDR, 0);
+}
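
A note on the push format used above: each counter or IRQ entry is emitted as an {id, value} pair of u64 words, the pair stream is then viewed as u32 words, and hbg_push_data() slices it into messages of at most HBG_MSG_DATA_MAX_NUM words per header write. The following stand-alone sketch (illustration only, not driver code) simply replays that chunking arithmetic:

#include <stdio.h>

#define HBG_MSG_DATA_MAX_NUM 64

/* Mirror the slicing loop in hbg_push_data(): print how a stream of
 * data_num u32 words is split into push messages of at most 64 words.
 */
static void show_chunking(unsigned int data_num)
{
	unsigned int i, left;

	for (i = 0; i * HBG_MSG_DATA_MAX_NUM < data_num; i++) {
		left = data_num - i * HBG_MSG_DATA_MAX_NUM;
		printf("msg %u carries %u words\n", i,
		       left < HBG_MSG_DATA_MAX_NUM ? left : HBG_MSG_DATA_MAX_NUM);
	}
}

int main(void)
{
	/* 88 stats entries -> 176 u64 words -> 352 u32 words -> 6 messages */
	show_chunking(88 * 2 * 2);
	return 0;
}
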
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
new file mode 100644
index 000000000000..ba04c6d8c03d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+#ifndef __HBG_DIAGNOSE_H
+#define __HBG_DIAGNOSE_H
+
+#include "hbg_common.h"
+
+void hbg_diagnose_message_push(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
new file mode 100644
index 000000000000..83cf75bf7a17
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
+#include "hbg_hw.h"
+
+static void hbg_restore_mac_table(struct hbg_priv *priv)
+{
+ struct hbg_mac_filter *filter = &priv->filter;
+ u64 addr;
+ u32 i;
+
+ for (i = 0; i < filter->table_max_len; i++)
+ if (!is_zero_ether_addr(filter->mac_table[i].addr)) {
+ addr = ether_addr_to_u64(filter->mac_table[i].addr);
+ hbg_hw_set_uc_addr(priv, addr, i);
+ }
+
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_restore_user_def_settings(struct hbg_priv *priv)
+{
+ /* The index of host mac is always 0. */
+ u64 rx_pause_addr = ether_addr_to_u64(priv->filter.mac_table[0].addr);
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ hbg_restore_mac_table(priv);
+ hbg_hw_set_mtu(priv, priv->netdev->mtu);
+ hbg_hw_set_pause_enable(priv, pause_param->tx_pause,
+ pause_param->rx_pause);
+ hbg_hw_set_rx_pause_mac_addr(priv, rx_pause_addr);
+}
+
+int hbg_rebuild(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_restore_user_def_settings(priv);
+ return 0;
+}
+
+static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
+
+ if (netif_running(priv->netdev)) {
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ dev_warn(&priv->pdev->dev,
+ "failed to reset because port is up\n");
+ return -EBUSY;
+ }
+
+ netif_device_detach(priv->netdev);
+
+ priv->reset_type = type;
+ clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
+ if (ret) {
+ priv->stats.reset_fail_cnt++;
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ }
+
+ return ret;
+}
+
+static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ if (!test_bit(HBG_NIC_STATE_RESETTING, &priv->state) ||
+ type != priv->reset_type)
+ return 0;
+
+ ret = hbg_rebuild(priv);
+ if (ret) {
+ priv->stats.reset_fail_cnt++;
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
+ return ret;
+ }
+
+ netif_device_attach(priv->netdev);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+
+ dev_info(&priv->pdev->dev, "reset done\n");
+ return ret;
+}
+
+int hbg_reset(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
+ if (ret)
+ return ret;
+
+ return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION);
+}
+
+void hbg_err_reset(struct hbg_priv *priv)
+{
+ bool running;
+
+ rtnl_lock();
+ running = netif_running(priv->netdev);
+ if (running)
+ dev_close(priv->netdev);
+
+ if (hbg_reset(priv))
+ goto err_unlock;
+
+ if (running)
+ dev_open(priv->netdev, NULL);
+
+err_unlock:
+ rtnl_unlock();
+}
+
+static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "failed to re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ hbg_err_reset(priv);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
+}
+
+static void hbg_pci_err_reset_done(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
+}
+
+static const struct pci_error_handlers hbg_pci_err_handler = {
+ .error_detected = hbg_pci_err_detected,
+ .slot_reset = hbg_pci_err_slot_reset,
+ .reset_prepare = hbg_pci_err_reset_prepare,
+ .reset_done = hbg_pci_err_reset_done,
+};
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv)
+{
+ pdrv->err_handler = &hbg_pci_err_handler;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
new file mode 100644
index 000000000000..fb9fbe7004e8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_ERR_H
+#define __HBG_ERR_H
+
+#include <linux/pci.h>
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv);
+int hbg_reset(struct hbg_priv *priv);
+int hbg_rebuild(struct hbg_priv *priv);
+void hbg_err_reset(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index c3370114aef3..1d62ff913737 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -3,12 +3,495 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+
+struct hbg_ethtool_stats {
+ char name[ETH_GSTRING_LEN];
+ unsigned long offset;
+ u32 reg; /* set to 0 if stats is not updated via dump reg */
+};
+
+#define HBG_STATS_I(stats) { #stats, HBG_STATS_FIELD_OFF(stats), 0}
+#define HBG_STATS_REG_I(stats, reg) { #stats, HBG_STATS_FIELD_OFF(stats), reg}
+
+static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = {
+ HBG_STATS_I(rx_desc_l2_err_cnt),
+ HBG_STATS_I(rx_desc_pkt_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l3_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l3_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_zero_ttl_cnt),
+ HBG_STATS_I(rx_desc_l3_other_cnt),
+ HBG_STATS_I(rx_desc_l4_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l4_len_err_cnt),
+ HBG_STATS_I(rx_desc_l4_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l4_zero_port_num_cnt),
+ HBG_STATS_I(rx_desc_l4_other_cnt),
+ HBG_STATS_I(rx_desc_ip_ver_err_cnt),
+ HBG_STATS_I(rx_desc_ipv4_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipv6_pkt_cnt),
+ HBG_STATS_I(rx_desc_no_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_tcp_pkt_cnt),
+ HBG_STATS_I(rx_desc_udp_pkt_cnt),
+ HBG_STATS_I(rx_desc_vlan_pkt_cnt),
+ HBG_STATS_I(rx_desc_icmp_pkt_cnt),
+ HBG_STATS_I(rx_desc_arp_pkt_cnt),
+ HBG_STATS_I(rx_desc_rarp_pkt_cnt),
+ HBG_STATS_I(rx_desc_multicast_pkt_cnt),
+ HBG_STATS_I(rx_desc_broadcast_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipsec_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_opt_pkt_cnt),
+ HBG_STATS_I(rx_desc_key_not_match_cnt),
+
+ HBG_STATS_REG_I(rx_octets_bad_cnt, HBG_REG_RX_OCTETS_BAD_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_filt_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR),
+ HBG_STATS_REG_I(rx_uc_pkt_cnt, HBG_REG_RX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_vlan_pkt_cnt, HBG_REG_RX_TAGGED_ADDR),
+ HBG_STATS_REG_I(rx_filt_pkt_cnt, HBG_REG_RX_FILT_PKT_CNT_ADDR),
+ HBG_STATS_REG_I(rx_data_error_cnt, HBG_REG_RX_DATA_ERR_ADDR),
+ HBG_STATS_REG_I(rx_frame_long_err_cnt, HBG_REG_RX_LONG_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_jabber_err_cnt, HBG_REG_RX_JABBER_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_frame_very_long_err_cnt,
+ HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_runt_err_cnt, HBG_REG_RX_RUNT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_short_err_cnt, HBG_REG_RX_SHORT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overflow_cnt, HBG_REG_RX_OVER_FLOW_CNT_ADDR),
+ HBG_STATS_REG_I(rx_bufrq_err_cnt, HBG_REG_RX_BUFRQ_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_we_err_cnt, HBG_REG_RX_WE_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overrun_cnt, HBG_REG_RX_OVERRUN_CNT_ADDR),
+ HBG_STATS_REG_I(rx_lengthfield_err_cnt,
+ HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fail_comma_cnt, HBG_REG_RX_FAIL_COMMA_CNT_ADDR),
+ HBG_STATS_I(rx_dma_err_cnt),
+ HBG_STATS_I(rx_fifo_less_empty_thrsld_cnt),
+
+ HBG_STATS_REG_I(tx_uc_pkt_cnt, HBG_REG_TX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_vlan_pkt_cnt, HBG_REG_TX_TAGGED_ADDR),
+ HBG_STATS_REG_I(tx_octets_bad_cnt, HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR),
+
+ HBG_STATS_REG_I(tx_underrun_err_cnt, HBG_REG_TX_UNDERRUN_ADDR),
+ HBG_STATS_REG_I(tx_add_cs_fail_cnt, HBG_REG_TX_CS_FAIL_CNT_ADDR),
+ HBG_STATS_REG_I(tx_bufrl_err_cnt, HBG_REG_TX_BUFRL_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(tx_crc_err_cnt, HBG_REG_TX_CRC_ERROR_ADDR),
+ HBG_STATS_REG_I(tx_drop_cnt, HBG_REG_TX_DROP_CNT_ADDR),
+ HBG_STATS_REG_I(tx_excessive_length_drop_cnt,
+ HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR),
+ HBG_STATS_I(tx_dma_err_cnt),
+ HBG_STATS_I(tx_timeout_cnt),
+ HBG_STATS_I(reset_fail_cnt),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = {
+ HBG_STATS_I(rx_desc_frag_cnt),
+ HBG_STATS_REG_I(rx_framesize_64, HBG_REG_RX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_65_127,
+ HBG_REG_RX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_128_255,
+ HBG_REG_RX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_256_511,
+ HBG_REG_RX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_512_1023,
+ HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_1024_1518,
+ HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_bt_1518,
+ HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_64, HBG_REG_TX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_65_127,
+ HBG_REG_TX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_128_255,
+ HBG_REG_TX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_256_511,
+ HBG_REG_TX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_512_1023,
+ HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_1024_1518,
+ HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_bt_1518,
+ HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_mac_stats_info[] = {
+ HBG_STATS_REG_I(rx_mc_pkt_cnt, HBG_REG_RX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_bc_pkt_cnt, HBG_REG_RX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_align_error_cnt, HBG_REG_RX_ALIGN_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_ok_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_OK_ADDR),
+ HBG_STATS_REG_I(rx_trans_pkt_cnt, HBG_REG_RX_TRANS_PKG_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fcs_error_cnt, HBG_REG_RX_FCS_ERRORS_ADDR),
+ HBG_STATS_REG_I(tx_mc_pkt_cnt, HBG_REG_TX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_bc_pkt_cnt, HBG_REG_TX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_octets_total_ok_cnt,
+ HBG_REG_OCTETS_TRANSMITTED_OK_ADDR),
+ HBG_STATS_REG_I(tx_trans_pkt_cnt, HBG_REG_TX_TRANS_PKG_CNT_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_ctrl_stats_info[] = {
+ HBG_STATS_REG_I(rx_pause_macctl_frame_cnt,
+ HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR),
+ HBG_STATS_REG_I(tx_pause_frame_cnt, HBG_REG_TX_PAUSE_FRAMES_ADDR),
+ HBG_STATS_REG_I(rx_unknown_macctl_frame_cnt,
+ HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR),
+};
+
+enum hbg_reg_dump_type {
+ HBG_DUMP_REG_TYPE_SPEC = 0,
+ HBG_DUMP_REG_TYPE_MDIO,
+ HBG_DUMP_REG_TYPE_GMAC,
+ HBG_DUMP_REG_TYPE_PCU,
+};
+
+struct hbg_reg_info {
+ u32 type;
+ u32 offset;
+ u32 val;
+};
+
+#define HBG_DUMP_SPEC_I(offset) {HBG_DUMP_REG_TYPE_SPEC, offset, 0}
+#define HBG_DUMP_MDIO_I(offset) {HBG_DUMP_REG_TYPE_MDIO, offset, 0}
+#define HBG_DUMP_GMAC_I(offset) {HBG_DUMP_REG_TYPE_GMAC, offset, 0}
+#define HBG_DUMP_PCU_I(offset) {HBG_DUMP_REG_TYPE_PCU, offset, 0}
+
+static const struct hbg_reg_info hbg_dump_reg_infos[] = {
+ /* dev specs */
+ HBG_DUMP_SPEC_I(HBG_REG_SPEC_VALID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_EVENT_REQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_PHY_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_HIGH_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_UC_MAC_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MDIO_FREQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAX_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MIN_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_TX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_RX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_VLAN_LAYERS_ADDR),
+
+ /* mdio */
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_COMMAND_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_ADDR_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_WDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_RDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_STA_ADDR),
+
+ /* gmac */
+ HBG_DUMP_GMAC_I(HBG_REG_DUPLEX_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FC_TX_TIMER_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_LOW_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_HIGH_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MAX_FRAME_SIZE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_MODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PAUSE_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_AN_NEG_STATE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_TRANSMIT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_REC_FILT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LINE_LOOP_BACK_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_CF_CRC_STRIP_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MODE_CHANGE_EN_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LOOP_REG_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_RECV_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_VLAN_CODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_5_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_5_ADDR),
+
+ /* pcu */
+ HBG_DUMP_PCU_I(HBG_REG_TX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CFG_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_TX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_MAX_FRAME_LEN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DEBUG_ST_MCH_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_CURR_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_HIST_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_CFF_DATA_NUM_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_TX_PAUSE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CFF_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUF_SIZE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_PKT_MODE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST0_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST1_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST2_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_RST_EN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_CLR_ADDR),
+};
+
+static const u32 hbg_dump_type_base_array[] = {
+ [HBG_DUMP_REG_TYPE_SPEC] = 0,
+ [HBG_DUMP_REG_TYPE_MDIO] = HBG_REG_MDIO_BASE,
+ [HBG_DUMP_REG_TYPE_GMAC] = HBG_REG_SGMII_BASE,
+ [HBG_DUMP_REG_TYPE_PCU] = HBG_REG_SGMII_BASE,
+};
+
+static int hbg_ethtool_get_regs_len(struct net_device *netdev)
+{
+ return ARRAY_SIZE(hbg_dump_reg_infos) * sizeof(struct hbg_reg_info);
+}
+
+static void hbg_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_reg_info *info;
+ u32 i, offset = 0;
+
+ regs->version = 0;
+ for (i = 0; i < ARRAY_SIZE(hbg_dump_reg_infos); i++) {
+ info = data + offset;
+
+ *info = hbg_dump_reg_infos[i];
+ info->val = hbg_reg_read(priv, info->offset);
+ info->offset -= hbg_dump_type_base_array[info->type];
+
+ offset += sizeof(*info);
+ }
+}
+
+static void hbg_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &param->tx_pause, &param->rx_pause);
+}
+
+static int hbg_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ priv->mac.pause_autoneg = param->autoneg;
+ phy_set_asym_pause(priv->mac.phydev, param->rx_pause, param->tx_pause);
+
+ if (!param->autoneg)
+ hbg_hw_set_pause_enable(priv, param->tx_pause, param->rx_pause);
+
+ priv->user_def.pause_param = *param;
+ return 0;
+}
+
+static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (*flags != ETH_RESET_DEDICATED)
+ return -EOPNOTSUPP;
+
+ *flags = 0;
+ return hbg_reset(priv);
+}
+
+static void hbg_update_stats_by_info(struct hbg_priv *priv,
+ const struct hbg_ethtool_stats *info,
+ u32 info_len)
+{
+ const struct hbg_ethtool_stats *stats;
+ u32 i;
+
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ for (i = 0; i < info_len; i++) {
+ stats = &info[i];
+ if (!stats->reg)
+ continue;
+
+ HBG_STATS_U(&priv->stats, stats->offset,
+ hbg_reg_read(priv, stats->reg));
+ }
+}
+
+void hbg_update_stats(struct hbg_priv *priv)
+{
+ hbg_update_stats_by_info(priv, hbg_ethtool_stats_info,
+ ARRAY_SIZE(hbg_ethtool_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_rmon_stats_info,
+ ARRAY_SIZE(hbg_ethtool_rmon_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_mac_stats_info,
+ ARRAY_SIZE(hbg_ethtool_mac_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_ctrl_stats_info,
+ ARRAY_SIZE(hbg_ethtool_ctrl_stats_info));
+}
+
+static int hbg_ethtool_get_sset_count(struct net_device *netdev, int stringset)
+{
+ if (stringset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(hbg_ethtool_stats_info);
+}
+
+static void hbg_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ ethtool_puts(&data, hbg_ethtool_stats_info[i].name);
+}
+
+static void hbg_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ u32 i;
+
+ hbg_update_stats(priv);
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ *data++ = HBG_STATS_R(&priv->stats,
+ hbg_ethtool_stats_info[i].offset);
+}
+
+static void hbg_ethtool_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *epstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ epstats->rx_pause_frames = stats->rx_pause_macctl_frame_cnt;
+ epstats->tx_pause_frames = stats->tx_pause_frame_cnt;
+}
+
+static void hbg_ethtool_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *emstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ emstats->FramesTransmittedOK = stats->tx_trans_pkt_cnt;
+ emstats->FramesReceivedOK = stats->rx_trans_pkt_cnt;
+ emstats->FrameCheckSequenceErrors = stats->rx_fcs_error_cnt;
+ emstats->AlignmentErrors = stats->rx_align_error_cnt;
+ emstats->OctetsTransmittedOK = stats->tx_octets_total_ok_cnt;
+ emstats->OctetsReceivedOK = stats->rx_octets_total_ok_cnt;
+
+ emstats->MulticastFramesXmittedOK = stats->tx_mc_pkt_cnt;
+ emstats->BroadcastFramesXmittedOK = stats->tx_bc_pkt_cnt;
+ emstats->MulticastFramesReceivedOK = stats->rx_mc_pkt_cnt;
+ emstats->BroadcastFramesReceivedOK = stats->rx_bc_pkt_cnt;
+ emstats->InRangeLengthErrors = stats->rx_fcs_error_cnt +
+ stats->rx_jabber_err_cnt +
+ stats->rx_unknown_macctl_frame_cnt +
+ stats->rx_bufrq_err_cnt +
+ stats->rx_we_err_cnt;
+ emstats->OutOfRangeLengthField = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt +
+ stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ emstats->FrameTooLongErrors = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+}
+
+static void
+hbg_ethtool_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *ecstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *s = &priv->stats;
+
+ hbg_update_stats(priv);
+ ecstats->MACControlFramesTransmitted = s->tx_pause_frame_cnt;
+ ecstats->MACControlFramesReceived = s->rx_pause_macctl_frame_cnt;
+ ecstats->UnsupportedOpcodesReceived = s->rx_unknown_macctl_frame_cnt;
+}
+
+static const struct ethtool_rmon_hist_range hbg_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4095 },
+};
+
+static void
+hbg_ethtool_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ rmon_stats->undersize_pkts = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt;
+ rmon_stats->oversize_pkts = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ rmon_stats->fragments = stats->rx_desc_frag_cnt;
+ rmon_stats->hist[0] = stats->rx_framesize_64;
+ rmon_stats->hist[1] = stats->rx_framesize_65_127;
+ rmon_stats->hist[2] = stats->rx_framesize_128_255;
+ rmon_stats->hist[3] = stats->rx_framesize_256_511;
+ rmon_stats->hist[4] = stats->rx_framesize_512_1023;
+ rmon_stats->hist[5] = stats->rx_framesize_1024_1518;
+ rmon_stats->hist[6] = stats->rx_framesize_bt_1518;
+
+ rmon_stats->hist_tx[0] = stats->tx_framesize_64;
+ rmon_stats->hist_tx[1] = stats->tx_framesize_65_127;
+ rmon_stats->hist_tx[2] = stats->tx_framesize_128_255;
+ rmon_stats->hist_tx[3] = stats->tx_framesize_256_511;
+ rmon_stats->hist_tx[4] = stats->tx_framesize_512_1023;
+ rmon_stats->hist_tx[5] = stats->tx_framesize_1024_1518;
+ rmon_stats->hist_tx[6] = stats->tx_framesize_bt_1518;
+
+ *ranges = hbg_rmon_ranges;
+}
static const struct ethtool_ops hbg_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = hbg_ethtool_get_regs_len,
+ .get_regs = hbg_ethtool_get_regs,
+ .get_pauseparam = hbg_ethtool_get_pauseparam,
+ .set_pauseparam = hbg_ethtool_set_pauseparam,
+ .reset = hbg_ethtool_reset,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_sset_count = hbg_ethtool_get_sset_count,
+ .get_strings = hbg_ethtool_get_strings,
+ .get_ethtool_stats = hbg_ethtool_get_stats,
+ .get_pause_stats = hbg_ethtool_get_pause_stats,
+ .get_eth_mac_stats = hbg_ethtool_get_eth_mac_stats,
+ .get_eth_ctrl_stats = hbg_ethtool_get_eth_ctrl_stats,
+ .get_rmon_stats = hbg_ethtool_get_rmon_stats,
};
void hbg_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
index 628707ec2686..e173155b146a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
@@ -6,6 +6,11 @@
#include <linux/netdevice.h>
+#define HBG_STATS_FIELD_OFF(f) (offsetof(struct hbg_stats, f))
+#define HBG_STATS_R(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
+#define HBG_STATS_U(p, offset, val) (HBG_STATS_R(p, offset) += (val))
+
void hbg_ethtool_set_ops(struct net_device *netdev);
+void hbg_update_stats(struct hbg_priv *priv);
#endif
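
The three macros above carry the whole ethtool stats machinery: each table entry stores an offsetof() into struct hbg_stats, and the read/update helpers dereference that offset at runtime. A minimal user-space sketch of the same pattern, using a trimmed stand-in for struct hbg_stats (the real definition lives in hbg_common.h):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Two-field stand-in for the real struct hbg_stats. */
struct hbg_stats {
	uint64_t rx_desc_drop;
	uint64_t tx_timeout_cnt;
};

#define HBG_STATS_FIELD_OFF(f)		(offsetof(struct hbg_stats, f))
#define HBG_STATS_R(p, offset)		(*(uint64_t *)((uint8_t *)(p) + (offset)))
#define HBG_STATS_U(p, offset, val)	(HBG_STATS_R(p, offset) += (val))

int main(void)
{
	struct hbg_stats stats = { 0 };
	size_t off = HBG_STATS_FIELD_OFF(tx_timeout_cnt);

	/* accumulate into the field behind the offset, as
	 * hbg_update_stats_by_info() does with register reads
	 */
	HBG_STATS_U(&stats, off, 3);
	printf("%llu\n", (unsigned long long)HBG_STATS_R(&stats, off)); /* 3 */
	return 0;
}
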
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 05295c2ad439..d0aa0661ecd4 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
@@ -11,12 +12,21 @@
#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000)
#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000)
+#define HBG_MAC_LINK_WAIT_TIMEOUT_US (500 * 1000)
+#define HBG_MAC_LINK_WAIT_INTERVAL_US (5 * 1000)
/* little endian or big endian.
* ctrl means packet description, data means skb packet data
*/
#define HBG_ENDIAN_CTRL_LE_DATA_BE 0x0
#define HBG_PCU_FRAME_LEN_PLUS 4
+#define HBG_FIFO_TX_FULL_THRSLD 0x3F0
+#define HBG_FIFO_TX_EMPTY_THRSLD 0x1F0
+#define HBG_FIFO_RX_FULL_THRSLD 0x240
+#define HBG_FIFO_RX_EMPTY_THRSLD 0x190
+#define HBG_CFG_FIFO_FULL_THRSLD 0x10
+#define HBG_CFG_FIFO_EMPTY_THRSLD 0x01
+
static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
{
return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
@@ -67,6 +77,8 @@ static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
+ specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);
+
mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);
@@ -135,9 +147,13 @@ void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
}
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr)
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
{
- hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr);
+ u32 addr;
+
+ /* mac addr is u64, so the addr offset is 0x8 */
+ addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8);
+ hbg_reg_write64(priv, addr, mac_addr);
}
static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
@@ -161,8 +177,21 @@ static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
- hbg_hw_set_pcu_max_frame_len(priv, mtu);
- hbg_hw_set_mac_max_frame_len(priv, mtu);
+ /* Setting burst_len BIT(29) to 1 improves TX performance, but packet
+ * drops occur when mtu > 2000.  So BIT(29) is cleared when mtu > 2000.
+ */
+ u32 burst_len_bit = (mtu > 2000) ? 0 : 1;
+ u32 frame_len;
+
+ frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
+ ETH_HLEN + ETH_FCS_LEN;
+
+ hbg_hw_set_pcu_max_frame_len(priv, frame_len);
+ hbg_hw_set_mac_max_frame_len(priv, frame_len);
+
+ hbg_reg_write_field(priv, HBG_REG_BRUST_LENGTH_ADDR,
+ HBG_REG_BRUST_LENGTH_B, burst_len_bit);
}
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
@@ -201,10 +230,99 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
+ u32 link_status;
+ int ret;
+
+ hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
+
hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
HBG_REG_PORT_MODE_M, speed);
hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
HBG_REG_DUPLEX_B, duplex);
+
+ hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);
+
+ hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+
+ /* wait MAC link up */
+ ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
+ link_status,
+ FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B,
+ link_status),
+ HBG_MAC_LINK_WAIT_INTERVAL_US,
+ HBG_MAC_LINK_WAIT_TIMEOUT_US);
+ if (ret)
+ hbg_np_link_fail_task_schedule(priv);
+}
+
+/* only support uc filter */
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
+{
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);
+
+ /* only uc filter is supported, so set all bits of mc mask reg to 1 */
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_0, U64_MAX);
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_1, U64_MAX);
+}
+
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
+{
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B, rx_en);
+
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B, rx_en);
+}
+
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
+{
+ *tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B);
+ *rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B);
+}
+
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
+{
+ hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
+}
+
+static void hbg_hw_set_fifo_thrsld(struct hbg_priv *priv,
+ u32 full, u32 empty, enum hbg_dir dir)
+{
+ u32 value = 0;
+
+ value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_EMPTY_M, empty);
+
+ if (dir & HBG_DIR_TX)
+ hbg_reg_write(priv, HBG_REG_TX_FIFO_THRSLD_ADDR, value);
+
+ if (dir & HBG_DIR_RX)
+ hbg_reg_write(priv, HBG_REG_RX_FIFO_THRSLD_ADDR, value);
+}
+
+static void hbg_hw_set_cfg_fifo_thrsld(struct hbg_priv *priv,
+ u32 full, u32 empty, enum hbg_dir dir)
+{
+ u32 value;
+
+ value = hbg_reg_read(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR);
+
+ if (dir & HBG_DIR_TX) {
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M, empty);
+ }
+
+ if (dir & HBG_DIR_RX) {
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M, empty);
+ }
+
+ hbg_reg_write(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR, value);
}
static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
@@ -267,5 +385,12 @@ int hbg_hw_init(struct hbg_priv *priv)
hbg_hw_init_rx_control(priv);
hbg_hw_init_transmit_ctrl(priv);
+
+ hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_TX_FULL_THRSLD,
+ HBG_FIFO_TX_EMPTY_THRSLD, HBG_DIR_TX);
+ hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_RX_FULL_THRSLD,
+ HBG_FIFO_RX_EMPTY_THRSLD, HBG_DIR_RX);
+ hbg_hw_set_cfg_fifo_thrsld(priv, HBG_CFG_FIFO_FULL_THRSLD,
+ HBG_CFG_FIFO_EMPTY_THRSLD, HBG_DIR_TX_RX);
return 0;
}
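For reference, a minimal sketch of the frame-length arithmetic used in hbg_hw_set_mtu() above, assuming the standard header sizes from <linux/if_ether.h> and <linux/if_vlan.h> (ETH_HLEN = 14, ETH_FCS_LEN = 4, VLAN_HLEN = 4). The helper below is hypothetical and not part of this patch:

#include <linux/if_ether.h>
#include <linux/if_vlan.h>

/* Hypothetical helper mirroring the computation above; with mtu = 1500 and
 * vlan_layers = 2 it returns 1500 + 2 * 4 + 14 + 4 = 1526.
 */
static u32 hbg_example_frame_len(u16 mtu, u32 vlan_layers)
{
	return mtu + VLAN_HLEN * vlan_layers + ETH_HLEN + ETH_FCS_LEN;
}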
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
index 14fb39241c93..a4a049b5121d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
@@ -51,9 +51,13 @@ bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask);
void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable);
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu);
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr);
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index);
u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable);
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en);
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en);
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr);
#endif
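A hedged usage sketch for the pause helpers declared above; the round-trip wrapper is illustrative scaffolding, and only the two hbg_hw_*() calls come from this patch:

/* Illustrative only: read the current hardware pause state into an
 * ethtool_pauseparam and write it back unchanged.
 */
static void hbg_example_pause_roundtrip(struct hbg_priv *priv,
					struct ethtool_pauseparam *param)
{
	hbg_hw_get_pause_enable(priv, &param->tx_pause, &param->rx_pause);
	hbg_hw_set_pause_enable(priv, param->tx_pause, param->rx_pause);
}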
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index 25dd25f096fe..8af0bc4cca21 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -6,54 +6,65 @@
#include "hbg_hw.h"
static void hbg_irq_handle_err(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
if (irq_info->need_print)
dev_err(&priv->pdev->dev,
"receive error interrupt: %s\n", irq_info->name);
+
+ if (irq_info->need_reset)
+ hbg_err_reset_task_schedule(priv);
}
static void hbg_irq_handle_tx(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
napi_schedule(&priv->tx_ring.napi);
}
static void hbg_irq_handle_rx(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
napi_schedule(&priv->rx_ring.napi);
}
-#define HBG_TXRX_IRQ_I(name, handle) \
- {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle}
-#define HBG_ERR_IRQ_I(name, need_print) \
- {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err}
-
-static struct hbg_irq_info hbg_irqs[] = {
- HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx),
- HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx),
- HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true),
- HBG_ERR_IRQ_I(TX_AHB_ERR, true),
- HBG_ERR_IRQ_I(RX_BUF_AVL, false),
- HBG_ERR_IRQ_I(REL_BUF_ERR, true),
- HBG_ERR_IRQ_I(TXCFG_AVL, false),
- HBG_ERR_IRQ_I(TX_DROP, false),
- HBG_ERR_IRQ_I(RX_DROP, false),
- HBG_ERR_IRQ_I(RX_AHB_ERR, true),
- HBG_ERR_IRQ_I(MAC_FIFO_ERR, false),
- HBG_ERR_IRQ_I(RBREQ_ERR, false),
- HBG_ERR_IRQ_I(WE_ERR, false),
+static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
+ const struct hbg_irq_info *irq_info)
+{
+ priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+}
+
+#define HBG_IRQ_I(name, handle) \
+ {#name, HBG_INT_MSK_##name##_B, false, false, false, handle}
+#define HBG_ERR_IRQ_I(name, need_print, need_reset) \
+ {#name, HBG_INT_MSK_##name##_B, true, need_print, \
+ need_reset, hbg_irq_handle_err}
+
+static const struct hbg_irq_info hbg_irqs[] = {
+ HBG_IRQ_I(RX, hbg_irq_handle_rx),
+ HBG_IRQ_I(TX, hbg_irq_handle_tx),
+ HBG_ERR_IRQ_I(TX_PKT_CPL, true, true),
+ HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true, false),
+ HBG_ERR_IRQ_I(TX_AHB_ERR, true, true),
+ HBG_IRQ_I(RX_BUF_AVL, hbg_irq_handle_rx_buf_val),
+ HBG_ERR_IRQ_I(REL_BUF_ERR, true, false),
+ HBG_ERR_IRQ_I(TXCFG_AVL, false, false),
+ HBG_ERR_IRQ_I(TX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_AHB_ERR, true, false),
+ HBG_ERR_IRQ_I(MAC_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(RBREQ_ERR, true, true),
+ HBG_ERR_IRQ_I(WE_ERR, true, true),
};
static irqreturn_t hbg_irq_handle(int irq_num, void *p)
{
- struct hbg_irq_info *info;
+ const struct hbg_irq_info *info;
struct hbg_priv *priv = p;
u32 status;
u32 i;
@@ -68,7 +79,7 @@ static irqreturn_t hbg_irq_handle(int irq_num, void *p)
hbg_hw_irq_enable(priv, info->mask, false);
hbg_hw_irq_clear(priv, info->mask);
- info->count++;
+ priv->vectors.stats_array[i]++;
if (info->irq_handle)
info->irq_handle(priv, info);
@@ -121,6 +132,12 @@ int hbg_irq_init(struct hbg_priv *priv)
irq_names_map[i]);
}
+ vectors->stats_array = devm_kcalloc(&priv->pdev->dev,
+ ARRAY_SIZE(hbg_irqs),
+ sizeof(u64), GFP_KERNEL);
+ if (!vectors->stats_array)
+ return -ENOMEM;
+
vectors->info_array = hbg_irqs;
vectors->info_array_len = ARRAY_SIZE(hbg_irqs);
return 0;
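A simplified sketch of the table-driven dispatch that the hbg_irqs[] array above feeds; it mirrors the pattern used by hbg_irq_handle() but omits the mask/clear details, so treat it as an assumption rather than a copy of the handler:

static void hbg_example_dispatch(struct hbg_priv *priv, u32 status)
{
	const struct hbg_irq_info *info;
	u32 i;

	/* For each asserted status bit, bump the per-vector counter and
	 * call the matching handler from the info array.
	 */
	for (i = 0; i < priv->vectors.info_array_len; i++) {
		info = &priv->vectors.info_array[i];
		if (!(status & info->mask))
			continue;

		priv->vectors.stats_array[i]++;
		if (info->irq_handle)
			info->irq_handle(priv, info);
	}
}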
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index 75505fb5cc4a..2e64dc1ab355 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -5,18 +5,23 @@
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/phy.h>
#include "hbg_common.h"
+#include "hbg_diagnose.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
#include "hbg_irq.h"
#include "hbg_mdio.h"
#include "hbg_txrx.h"
+#include "hbg_debugfs.h"
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu);
+#define HBG_SUPPORT_FEATURES (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_RXCSUM)
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
- struct hbg_irq_info *info;
+ const struct hbg_irq_info *info;
u32 i;
for (i = 0; i < priv->vectors.info_array_len; i++) {
@@ -55,11 +60,7 @@ static int hbg_hw_txrx_clear(struct hbg_priv *priv)
return ret;
/* After reset, regs need to be reconfigured */
- hbg_hw_init(priv);
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(priv->netdev->dev_addr));
- hbg_change_mtu(priv, priv->netdev->mtu);
-
- return 0;
+ return hbg_rebuild(priv);
}
static int hbg_net_stop(struct net_device *netdev)
@@ -74,31 +75,127 @@ static int hbg_net_stop(struct net_device *netdev)
return hbg_hw_txrx_clear(priv);
}
+static void hbg_update_promisc_mode(struct net_device *netdev, bool overflow)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ /* The MAC filter is enabled only when the table has not overflowed
+ * and IFF_PROMISC is not set in netdev->flags.
+ * Otherwise, the filter is disabled.
+ */
+ priv->filter.enabled = !(overflow || (netdev->flags & IFF_PROMISC));
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_set_mac_to_mac_table(struct hbg_priv *priv,
+ u32 index, const u8 *addr)
+{
+ if (addr) {
+ ether_addr_copy(priv->filter.mac_table[index].addr, addr);
+ hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index);
+ } else {
+ eth_zero_addr(priv->filter.mac_table[index].addr);
+ hbg_hw_set_uc_addr(priv, 0, index);
+ }
+}
+
+static int hbg_get_index_from_mac_table(struct hbg_priv *priv,
+ const u8 *addr, u32 *index)
+{
+ u32 i;
+
+ for (i = 0; i < priv->filter.table_max_len; i++)
+ if (ether_addr_equal(priv->filter.mac_table[i].addr, addr)) {
+ *index = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* already exists */
+ if (!hbg_get_index_from_mac_table(priv, addr, &index))
+ return 0;
+
+ for (index = 0; index < priv->filter.table_max_len; index++)
+ if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) {
+ hbg_set_mac_to_mac_table(priv, index, addr);
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* does not exist */
+ if (hbg_get_index_from_mac_table(priv, addr, &index))
+ return;
+
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+}
+
+static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ return hbg_add_mac_to_filter(priv, addr);
+}
+
+static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (ether_addr_equal(netdev->dev_addr, (u8 *)addr))
+ return 0;
+
+ hbg_del_mac_from_filter(priv, addr);
+ return 0;
+}
+
+static void hbg_net_set_rx_mode(struct net_device *netdev)
+{
+ int ret;
+
+ ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync);
+
+ /* If ret != 0, overflow has occurred */
+ hbg_update_promisc_mode(netdev, !!ret);
+}
+
static int hbg_net_set_mac_address(struct net_device *netdev, void *addr)
{
struct hbg_priv *priv = netdev_priv(netdev);
u8 *mac_addr;
+ bool exists;
+ u32 index;
mac_addr = ((struct sockaddr *)addr)->sa_data;
if (!is_valid_ether_addr(mac_addr))
return -EADDRNOTAVAIL;
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(mac_addr));
- dev_addr_set(netdev, mac_addr);
+ /* The host MAC address always uses index 0.
+ * If the new MAC address already exists in the table,
+ * delete the existing entry and install the address at index 0.
+ */
+ exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index);
+ hbg_set_mac_to_mac_table(priv, 0, mac_addr);
+ if (exists)
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+ hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr));
+ dev_addr_set(netdev, mac_addr);
return 0;
}
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu)
-{
- u32 frame_len;
-
- frame_len = new_mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
- ETH_HLEN + ETH_FCS_LEN;
- hbg_hw_set_mtu(priv, frame_len);
-}
-
static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -106,12 +203,12 @@ static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
return -EBUSY;
- hbg_change_mtu(priv, new_mtu);
- WRITE_ONCE(netdev->mtu, new_mtu);
-
dev_dbg(&priv->pdev->dev,
"change mtu from %u to %u\n", netdev->mtu, new_mtu);
+ hbg_hw_set_mtu(priv, new_mtu);
+ WRITE_ONCE(netdev->mtu, new_mtu);
+
return 0;
}
@@ -122,6 +219,10 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
char *buf = ring->tout_log_buf;
u32 pos = 0;
+ priv->stats.tx_timeout_cnt++;
+
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "tx_timeout cnt: %llu\n", priv->stats.tx_timeout_cnt);
pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
"ring used num: %u, fifo used num: %u\n",
hbg_get_queue_used_num(ring),
@@ -134,6 +235,39 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
netdev_info(netdev, "%s", buf);
}
+static void hbg_net_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *h_stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ dev_get_tstats64(netdev, stats);
+
+ /* fifo empty */
+ stats->tx_fifo_errors += h_stats->tx_drop_cnt;
+
+ stats->tx_dropped += h_stats->tx_excessive_length_drop_cnt +
+ h_stats->tx_drop_cnt;
+ stats->tx_errors += h_stats->tx_add_cs_fail_cnt +
+ h_stats->tx_bufrl_err_cnt +
+ h_stats->tx_underrun_err_cnt +
+ h_stats->tx_crc_err_cnt;
+ stats->rx_errors += h_stats->rx_data_error_cnt;
+ stats->multicast += h_stats->rx_mc_pkt_cnt;
+ stats->rx_dropped += h_stats->rx_desc_drop;
+ stats->rx_length_errors += h_stats->rx_frame_very_long_err_cnt +
+ h_stats->rx_frame_long_err_cnt +
+ h_stats->rx_frame_runt_err_cnt +
+ h_stats->rx_frame_short_err_cnt +
+ h_stats->rx_lengthfield_err_cnt;
+ stats->rx_frame_errors += h_stats->rx_desc_l2_err_cnt +
+ h_stats->rx_desc_l3l4_err_cnt;
+ stats->rx_fifo_errors += h_stats->rx_overflow_cnt +
+ h_stats->rx_overrun_cnt;
+ stats->rx_crc_errors += h_stats->rx_fcs_error_cnt;
+}
+
static const struct net_device_ops hbg_netdev_ops = {
.ndo_open = hbg_net_open,
.ndo_stop = hbg_net_stop,
@@ -142,8 +276,93 @@ static const struct net_device_ops hbg_netdev_ops = {
.ndo_set_mac_address = hbg_net_set_mac_address,
.ndo_change_mtu = hbg_net_change_mtu,
.ndo_tx_timeout = hbg_net_tx_timeout,
+ .ndo_set_rx_mode = hbg_net_set_rx_mode,
+ .ndo_get_stats64 = hbg_net_get_stats,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
};
+static void hbg_service_task(struct work_struct *work)
+{
+ struct hbg_priv *priv = container_of(work, struct hbg_priv,
+ service_task.work);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state))
+ hbg_err_reset(priv);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state))
+ hbg_fix_np_link_fail(priv);
+
+ hbg_diagnose_message_push(priv);
+
+ /* The statistics registers are u32.
+ * To prevent them from overflowing,
+ * the driver dumps the statistics every 30 seconds.
+ */
+ if (time_after(jiffies, priv->last_update_stats_time + 30 * HZ)) {
+ hbg_update_stats(priv);
+ priv->last_update_stats_time = jiffies;
+ }
+
+ schedule_delayed_work(&priv->service_task,
+ msecs_to_jiffies(MSEC_PER_SEC));
+}
+
+void hbg_err_reset_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NEED_RESET, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+static void hbg_cancel_delayed_work_sync(void *data)
+{
+ cancel_delayed_work_sync(data);
+}
+
+static int hbg_delaywork_init(struct hbg_priv *priv)
+{
+ INIT_DELAYED_WORK(&priv->service_task, hbg_service_task);
+ schedule_delayed_work(&priv->service_task, 0);
+ return devm_add_action_or_reset(&priv->pdev->dev,
+ hbg_cancel_delayed_work_sync,
+ &priv->service_task);
+}
+
+static int hbg_mac_filter_init(struct hbg_priv *priv)
+{
+ struct hbg_dev_specs *dev_specs = &priv->dev_specs;
+ struct hbg_mac_filter *filter = &priv->filter;
+ struct hbg_mac_table_entry *tmp_table;
+
+ tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num,
+ sizeof(*tmp_table), GFP_KERNEL);
+ if (!tmp_table)
+ return -ENOMEM;
+
+ filter->mac_table = tmp_table;
+ filter->table_max_len = dev_specs->uc_mac_num;
+ filter->enabled = true;
+
+ hbg_hw_set_mac_filter_enable(priv, filter->enabled);
+ return 0;
+}
+
+static void hbg_init_user_def(struct hbg_priv *priv)
+{
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ priv->mac.pause_autoneg = HBG_STATUS_ENABLE;
+
+ pause_param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &pause_param->tx_pause,
+ &pause_param->rx_pause);
+}
+
static int hbg_init(struct hbg_priv *priv)
{
int ret;
@@ -160,7 +379,21 @@ static int hbg_init(struct hbg_priv *priv)
if (ret)
return ret;
- return hbg_mdio_init(priv);
+ ret = hbg_mdio_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_mac_filter_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_delaywork_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_debugfs_init(priv);
+ hbg_init_user_def(priv);
+ return 0;
}
static int hbg_pci_init(struct pci_dev *pdev)
@@ -216,13 +449,18 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ /* set default features */
+ netdev->features |= HBG_SUPPORT_FEATURES;
+ netdev->hw_features |= HBG_SUPPORT_FEATURES;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netdev->max_mtu = priv->dev_specs.max_mtu;
netdev->min_mtu = priv->dev_specs.min_mtu;
netdev->netdev_ops = &hbg_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- hbg_change_mtu(priv, ETH_DATA_LEN);
+ hbg_hw_set_mtu(priv, ETH_DATA_LEN);
hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr);
hbg_ethtool_set_ops(netdev);
@@ -245,7 +483,27 @@ static struct pci_driver hbg_driver = {
.id_table = hbg_pci_tbl,
.probe = hbg_probe,
};
-module_pci_driver(hbg_driver);
+
+static int __init hbg_module_init(void)
+{
+ int ret;
+
+ hbg_debugfs_register();
+ hbg_set_pci_err_handler(&hbg_driver);
+ ret = pci_register_driver(&hbg_driver);
+ if (ret)
+ hbg_debugfs_unregister();
+
+ return ret;
+}
+module_init(hbg_module_init);
+
+static void __exit hbg_module_exit(void)
+{
+ pci_unregister_driver(&hbg_driver);
+ hbg_debugfs_unregister();
+}
+module_exit(hbg_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
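A minimal restatement of the unicast-filter policy implemented by hbg_update_promisc_mode() above; the pure helper is illustrative only and not driver code:

/* The hardware unicast filter stays enabled only while the address table
 * has not overflowed and the interface is not in promiscuous mode.
 */
static bool hbg_example_filter_enabled(bool table_overflow, unsigned int flags)
{
	return !(table_overflow || (flags & IFF_PROMISC));
}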
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index a3479fba8501..8b7b476ed7fb 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -2,6 +2,8 @@
// Copyright (c) 2024 Hisilicon Limited.
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/rtnetlink.h>
#include "hbg_common.h"
#include "hbg_hw.h"
#include "hbg_mdio.h"
@@ -17,6 +19,9 @@
#define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000)
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
+#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
+#define HBG_NO_PHY 0xFF
+
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd);
@@ -114,6 +119,47 @@ static void hbg_mdio_init_hw(struct hbg_priv *priv)
hbg_mdio_set_command(mac, cmd);
}
+static void hbg_flowctrl_cfg(struct hbg_priv *priv)
+{
+ struct phy_device *phydev = priv->mac.phydev;
+ bool rx_pause;
+ bool tx_pause;
+
+ if (!priv->mac.pause_autoneg)
+ return;
+
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
+ hbg_hw_set_pause_enable(priv, tx_pause, rx_pause);
+}
+
+void hbg_fix_np_link_fail(struct hbg_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ rtnl_lock();
+
+ if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) {
+ dev_err(dev, "failed to fix the MAC link status\n");
+ priv->stats.np_link_fail_cnt = 0;
+ goto unlock;
+ }
+
+ if (!priv->mac.phydev->link)
+ goto unlock;
+
+ priv->stats.np_link_fail_cnt++;
+ dev_err(dev, "failed to link between MAC and PHY, try to fix...\n");
+
+ /* Replace phy_reset() with phy_stop() and phy_start(),
+ * as suggested by Andrew.
+ */
+ hbg_phy_stop(priv);
+ hbg_phy_start(priv);
+
+unlock:
+ rtnl_unlock();
+}
+
static void hbg_phy_adjust_link(struct net_device *netdev)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -140,6 +186,7 @@ static void hbg_phy_adjust_link(struct net_device *netdev)
priv->mac.duplex = phydev->duplex;
priv->mac.autoneg = phydev->autoneg;
hbg_hw_adjust_link(priv, speed, phydev->duplex);
+ hbg_flowctrl_cfg(priv);
}
priv->mac.link_status = phydev->link;
@@ -168,6 +215,7 @@ static int hbg_phy_connect(struct hbg_priv *priv)
return ret;
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_support_asym_pause(phydev);
phy_attached_info(phydev);
return 0;
@@ -183,6 +231,39 @@ void hbg_phy_stop(struct hbg_priv *priv)
phy_stop(priv->mac.phydev);
}
+static void hbg_fixed_phy_uninit(void *data)
+{
+ fixed_phy_unregister((struct phy_device *)data);
+}
+
+static int hbg_fixed_phy_init(struct hbg_priv *priv)
+{
+ struct fixed_phy_status hbg_fixed_phy_status = {
+ .link = 1,
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ .pause = 1,
+ .asym_pause = 1,
+ };
+ struct device *dev = &priv->pdev->dev;
+ struct phy_device *phydev;
+ int ret;
+
+ phydev = fixed_phy_register(&hbg_fixed_phy_status, NULL);
+ if (IS_ERR(phydev)) {
+ dev_err_probe(dev, PTR_ERR(phydev),
+ "failed to register fixed PHY device\n");
+ return PTR_ERR(phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, hbg_fixed_phy_uninit, phydev);
+ if (ret)
+ return ret;
+
+ priv->mac.phydev = phydev;
+ return hbg_phy_connect(priv);
+}
+
int hbg_mdio_init(struct hbg_priv *priv)
{
struct device *dev = &priv->pdev->dev;
@@ -192,6 +273,9 @@ int hbg_mdio_init(struct hbg_priv *priv)
int ret;
mac->phy_addr = priv->dev_specs.phy_addr;
+ if (mac->phy_addr == HBG_NO_PHY)
+ return hbg_fixed_phy_init(priv);
+
mdio_bus = devm_mdiobus_alloc(dev);
if (!mdio_bus)
return dev_err_probe(dev, -ENOMEM,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
index febd02a309c7..f3771c1bbd34 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
@@ -9,4 +9,6 @@
int hbg_mdio_init(struct hbg_priv *priv);
void hbg_phy_start(struct hbg_priv *priv);
void hbg_phy_stop(struct hbg_priv *priv);
+void hbg_fix_np_link_fail(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index 57d81c6d7633..a39d1e796e4a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -10,12 +10,21 @@
#define HBG_REG_MAC_ID_ADDR 0x0008
#define HBG_REG_PHY_ID_ADDR 0x000C
#define HBG_REG_MAC_ADDR_ADDR 0x0010
+#define HBG_REG_MAC_ADDR_HIGH_ADDR 0x0014
+#define HBG_REG_UC_MAC_NUM_ADDR 0x0018
#define HBG_REG_MDIO_FREQ_ADDR 0x0024
#define HBG_REG_MAX_MTU_ADDR 0x0028
#define HBG_REG_MIN_MTU_ADDR 0x002C
#define HBG_REG_TX_FIFO_NUM_ADDR 0x0030
#define HBG_REG_RX_FIFO_NUM_ADDR 0x0034
#define HBG_REG_VLAN_LAYERS_ADDR 0x0038
+#define HBG_REG_PUSH_REQ_ADDR 0x00F0
+#define HBG_REG_MSG_HEADER_ADDR 0x00F4
+#define HBG_REG_MSG_HEADER_OPCODE_M GENMASK(7, 0)
+#define HBG_REG_MSG_HEADER_STATUS_M GENMASK(11, 8)
+#define HBG_REG_MSG_HEADER_DATA_NUM_M GENMASK(19, 12)
+#define HBG_REG_MSG_HEADER_RESP_CODE_M GENMASK(27, 20)
+#define HBG_REG_MSG_DATA_BASE_ADDR 0x0100
/* MDIO */
#define HBG_REG_MDIO_BASE 0x8000
@@ -28,6 +37,7 @@
#define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10)
#define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5)
#define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0)
+#define HBG_REG_MDIO_ADDR_ADDR (HBG_REG_MDIO_BASE + 0x0004)
#define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008)
#define HBG_REG_MDIO_WDATA_M GENMASK(15, 0)
#define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C)
@@ -36,6 +46,10 @@
/* GMAC */
#define HBG_REG_SGMII_BASE 0x10000
#define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008)
+#define HBG_REG_FD_FC_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x000C)
+#define HBG_REG_FC_TX_TIMER_ADDR (HBG_REG_SGMII_BASE + 0x001C)
+#define HBG_REG_FD_FC_ADDR_LOW_ADDR (HBG_REG_SGMII_BASE + 0x0020)
+#define HBG_REG_FD_FC_ADDR_HIGH_ADDR (HBG_REG_SGMII_BASE + 0x0024)
#define HBG_REG_DUPLEX_B BIT(0)
#define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C)
#define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040)
@@ -43,20 +57,97 @@
#define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044)
#define HBG_REG_PORT_ENABLE_RX_B BIT(1)
#define HBG_REG_PORT_ENABLE_TX_B BIT(2)
+#define HBG_REG_PAUSE_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0048)
+#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0)
+#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1)
+#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058)
+#define HBG_REG_AN_NEG_STATE_NP_LINK_OK_B BIT(15)
#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
+#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
+#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B BIT(4)
+#define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080)
+#define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084)
+#define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088)
+#define HBG_REG_RX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x008C)
+#define HBG_REG_RX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0090)
+#define HBG_REG_RX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0094)
+#define HBG_REG_RX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0098)
+#define HBG_REG_RX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x009C)
+#define HBG_REG_RX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A0)
+#define HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A4)
+#define HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A8)
+#define HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00AC)
+#define HBG_REG_RX_FCS_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00B0)
+#define HBG_REG_RX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x00B4)
+#define HBG_REG_RX_DATA_ERR_ADDR (HBG_REG_SGMII_BASE + 0x00B8)
+#define HBG_REG_RX_ALIGN_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00BC)
+#define HBG_REG_RX_LONG_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C0)
+#define HBG_REG_RX_JABBER_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C4)
+#define HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00C8)
+#define HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00CC)
+#define HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D0)
+#define HBG_REG_RX_RUNT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D4)
+#define HBG_REG_RX_SHORT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D8)
+#define HBG_REG_RX_FILT_PKT_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00E8)
+#define HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR (HBG_REG_SGMII_BASE + 0x00EC)
+#define HBG_REG_OCTETS_TRANSMITTED_OK_ADDR (HBG_REG_SGMII_BASE + 0x0100)
+#define HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0104)
+#define HBG_REG_TX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0108)
+#define HBG_REG_TX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x010C)
+#define HBG_REG_TX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0110)
+#define HBG_REG_TX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0114)
+#define HBG_REG_TX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0118)
+#define HBG_REG_TX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x011C)
+#define HBG_REG_TX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0120)
+#define HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0124)
+#define HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0128)
+#define HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x012C)
+#define HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR (HBG_REG_SGMII_BASE + 0x014C)
+#define HBG_REG_TX_UNDERRUN_ADDR (HBG_REG_SGMII_BASE + 0x0150)
+#define HBG_REG_TX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x0154)
+#define HBG_REG_TX_CRC_ERROR_ADDR (HBG_REG_SGMII_BASE + 0x0158)
+#define HBG_REG_TX_PAUSE_FRAMES_ADDR (HBG_REG_SGMII_BASE + 0x015C)
+#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8)
#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
#define HBG_REG_CF_CRC_STRIP_B BIT(0)
#define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4)
#define HBG_REG_MODE_CHANGE_EN_B BIT(0)
+#define HBG_REG_LOOP_REG_ADDR (HBG_REG_SGMII_BASE + 0x01DC)
#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
+#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8)
+#define HBG_REG_RX_OVERRUN_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01EC)
+#define HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F4)
+#define HBG_REG_RX_FAIL_COMMA_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F8)
+#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200)
+#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204)
+#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208)
+#define HBG_REG_STATION_ADDR_HIGH_1_ADDR (HBG_REG_SGMII_BASE + 0x020C)
#define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210)
#define HBG_REG_STATION_ADDR_HIGH_2_ADDR (HBG_REG_SGMII_BASE + 0x0214)
+#define HBG_REG_STATION_ADDR_LOW_3_ADDR (HBG_REG_SGMII_BASE + 0x0218)
+#define HBG_REG_STATION_ADDR_HIGH_3_ADDR (HBG_REG_SGMII_BASE + 0x021C)
+#define HBG_REG_STATION_ADDR_LOW_4_ADDR (HBG_REG_SGMII_BASE + 0x0220)
+#define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224)
+#define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228)
+#define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C)
+#define HBG_REG_STATION_ADDR_LOW_MSK_0 (HBG_REG_SGMII_BASE + 0x0230)
+#define HBG_REG_STATION_ADDR_LOW_MSK_1 (HBG_REG_SGMII_BASE + 0x0238)
/* PCU */
+#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420)
+#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424)
+#define HBG_REG_FIFO_THRSLD_FULL_M GENMASK(25, 16)
+#define HBG_REG_FIFO_THRSLD_EMPTY_M GENMASK(9, 0)
+#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428)
+#define HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M GENMASK(31, 24)
+#define HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M GENMASK(23, 16)
+#define HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M GENMASK(15, 8)
+#define HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M GENMASK(7, 0)
#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C)
#define HBG_INT_MSK_WE_ERR_B BIT(31)
#define HBG_INT_MSK_RBREQ_ERR_B BIT(30)
@@ -74,20 +165,34 @@
#define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17)
#define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16)
#define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15)
+#define HBG_INT_MSK_TX_PKT_CPL_B BIT(14)
#define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */
#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
#define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438)
+#define HBG_REG_TX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x043C)
+#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440)
#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_TX_DROP_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0448)
+#define HBG_REG_RX_OVER_FLOW_CNT_ADDR (HBG_REG_SGMII_BASE + 0x044C)
+#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450)
+#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454)
+#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_TX_CS_FAIL_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0460)
+#define HBG_REG_RX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0464)
+#define HBG_REG_TX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0468)
+#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470)
#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490)
#define HBG_REG_TX_CFF_ADDR_3_ADDR (HBG_REG_SGMII_BASE + 0x0494)
#define HBG_REG_RX_CFF_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x04A0)
+#define HBG_REG_BRUST_LENGTH_ADDR (HBG_REG_SGMII_BASE + 0x04C4)
+#define HBG_REG_BRUST_LENGTH_B BIT(29)
#define HBG_REG_RX_BUF_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x04E4)
#define HBG_REG_RX_BUF_SIZE_M GENMASK(15, 0)
#define HBG_REG_BUS_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04E8)
@@ -101,6 +206,13 @@
#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_RX_BUFRQ_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x058C)
+#define HBG_REG_TX_BUFRL_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0590)
+#define HBG_REG_RX_WE_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0594)
+#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4)
+#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8)
+#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC)
+#define HBG_REG_BUS_RST_EN_ADDR (HBG_REG_SGMII_BASE + 0x0688)
#define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694)
#define HBG_REG_IND_INTR_MASK_B BIT(0)
#define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698)
@@ -139,5 +251,48 @@ struct hbg_rx_desc {
};
#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16)
+#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12)
+#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30)
+#define HBG_RX_DESC_W4_IPSEC_B BIT(29)
+#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28)
+#define HBG_RX_DESC_W4_L4_ERR_CODE_M GENMASK(26, 23)
+#define HBG_RX_DESC_W4_FRAG_B BIT(22)
+#define HBG_RX_DESC_W4_OPT_B BIT(21)
+#define HBG_RX_DESC_W4_IP_VERSION_ERR_B BIT(20)
+#define HBG_RX_DESC_W4_BRD_CST_B BIT(19)
+#define HBG_RX_DESC_W4_MUL_CST_B BIT(18)
+#define HBG_RX_DESC_W4_ARP_B BIT(17)
+#define HBG_RX_DESC_W4_RARP_B BIT(16)
+#define HBG_RX_DESC_W4_ICMP_B BIT(15)
+#define HBG_RX_DESC_W4_VLAN_FLAG_B BIT(14)
+#define HBG_RX_DESC_W4_DROP_B BIT(13)
+#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9)
+#define HBG_RX_DESC_W4_L2_ERR_B BIT(8)
+#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7)
+
+enum hbg_l3_err_code {
+ HBG_L3_OK = 0,
+ HBG_L3_WRONG_HEAD,
+ HBG_L3_CSUM_ERR,
+ HBG_L3_LEN_ERR,
+ HBG_L3_ZERO_TTL,
+ HBG_L3_RSVD,
+};
+
+enum hbg_l4_err_code {
+ HBG_L4_OK = 0,
+ HBG_L4_WRONG_HEAD,
+ HBG_L4_LEN_ERR,
+ HBG_L4_CSUM_ERR,
+ HBG_L4_ZERO_PORT_NUM,
+ HBG_L4_RSVD,
+};
+
+enum hbg_pkt_type_code {
+ HBG_NO_IP_PKT = 0,
+ HBG_IP_PKT,
+ HBG_TCP_PKT,
+ HBG_UDP_PKT,
+};
#endif
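A hedged sketch of how the new RX descriptor word-4 fields above are typically decoded with FIELD_GET() from <linux/bitfield.h>; the helper is illustrative and assumes desc->word4 holds the raw descriptor word:

#include <linux/bitfield.h>

/* Illustrative decode of the L3/L4 error codes from an RX descriptor. */
static bool hbg_example_desc_has_l3l4_err(const struct hbg_rx_desc *desc)
{
	u32 l3 = FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4);
	u32 l4 = FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4);

	return l3 != HBG_L3_OK || l4 != HBG_L4_OK;
}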
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
index f4f256a0dfea..8d814c8f19ea 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -38,8 +38,14 @@ static int hbg_dma_map(struct hbg_buffer *buffer)
buffer->skb_dma = dma_map_single(&priv->pdev->dev,
buffer->skb->data, buffer->skb_len,
buffer_to_dma_dir(buffer));
- if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
+ if (buffer->dir == HBG_DIR_RX)
+ priv->stats.rx_dma_err_cnt++;
+ else
+ priv->stats.tx_dma_err_cnt++;
+
return -ENOMEM;
+ }
return 0;
}
@@ -195,6 +201,173 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
return packet_done;
}
+static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM);
+
+ skb->ip_summed = rx_checksum_offload ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
+ if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
+ !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)))
+ return true;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) {
+ case HBG_L3_OK:
+ break;
+ case HBG_L3_WRONG_HEAD:
+ priv->stats.rx_desc_l3_wrong_head_cnt++;
+ return false;
+ case HBG_L3_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l3_csum_err_cnt++;
+
+ /* Don't drop packets on checksum validation failure,
+ * as suggested by Jakub.
+ */
+ break;
+ case HBG_L3_LEN_ERR:
+ priv->stats.rx_desc_l3_len_err_cnt++;
+ return false;
+ case HBG_L3_ZERO_TTL:
+ priv->stats.rx_desc_l3_zero_ttl_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l3_other_cnt++;
+ return false;
+ }
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) {
+ case HBG_L4_OK:
+ break;
+ case HBG_L4_WRONG_HEAD:
+ priv->stats.rx_desc_l4_wrong_head_cnt++;
+ return false;
+ case HBG_L4_LEN_ERR:
+ priv->stats.rx_desc_l4_len_err_cnt++;
+ return false;
+ case HBG_L4_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l4_csum_err_cnt++;
+
+ /* Don't drop packets on checksum validation failure,
+ * as suggested by Jakub.
+ */
+ break;
+ case HBG_L4_ZERO_PORT_NUM:
+ priv->stats.rx_desc_l4_zero_port_num_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l4_other_cnt++;
+ return false;
+ }
+
+ return true;
+}
+
+static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) {
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ return;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_ip_ver_err_cnt++;
+ return;
+ }
+
+ /* 0:ipv4, 1:ipv6 */
+ if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
+ priv->stats.rx_desc_ipv6_pkt_cnt++;
+ else
+ priv->stats.rx_desc_ipv4_pkt_cnt++;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) {
+ case HBG_IP_PKT:
+ priv->stats.rx_desc_ip_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
+ priv->stats.rx_desc_ip_opt_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
+ priv->stats.rx_desc_frag_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
+ priv->stats.rx_desc_icmp_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
+ priv->stats.rx_desc_ipsec_pkt_cnt++;
+ break;
+ case HBG_TCP_PKT:
+ priv->stats.rx_desc_tcp_pkt_cnt++;
+ break;
+ case HBG_UDP_PKT:
+ priv->stats.rx_desc_udp_pkt_cnt++;
+ break;
+ default:
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ break;
+ }
+}
+
+static void hbg_update_rx_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) {
+ priv->stats.rx_desc_key_not_match_cnt++;
+ return;
+ }
+
+ if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4))
+ priv->stats.rx_desc_broadcast_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4))
+ priv->stats.rx_desc_multicast_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4))
+ priv->stats.rx_desc_vlan_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) {
+ priv->stats.rx_desc_arp_pkt_cnt++;
+ return;
+ } else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) {
+ priv->stats.rx_desc_rarp_pkt_cnt++;
+ return;
+ }
+
+ hbg_update_rx_ip_protocol_stats(priv, desc);
+}
+
+static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) >
+ priv->dev_specs.max_frame_len)) {
+ priv->stats.rx_desc_pkt_len_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) !=
+ priv->dev_specs.mac_id ||
+ FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) {
+ priv->stats.rx_desc_drop++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_l2_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) {
+ priv->stats.rx_desc_l3l4_err_cnt++;
+ return false;
+ }
+
+ hbg_update_rx_protocol_stats(priv, desc);
+ return true;
+}
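Restating the checksum policy from hbg_rx_check_l3l4_error() above as a small illustrative helper (not driver code): CHECKSUM_UNNECESSARY is reported only when RX checksum offload is enabled and the descriptor flagged no L3/L4 checksum error.

#include <linux/skbuff.h>

/* Illustrative only: the ip_summed value chosen for a received frame. */
static u8 hbg_example_ip_summed(bool rxcsum_enabled, bool csum_err)
{
	if (!rxcsum_enabled || csum_err)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}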
+
static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
struct hbg_ring *ring = &priv->rx_ring;
@@ -257,8 +430,12 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
- hbg_dma_unmap(buffer);
+ if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
+ hbg_buffer_free(buffer);
+ goto next_buffer;
+ }
+ hbg_dma_unmap(buffer);
skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
skb_put(buffer->skb, pkt_len);
buffer->skb->protocol = eth_type_trans(buffer->skb,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
index 2883a5899ae2..8b6110599e10 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
@@ -29,7 +29,12 @@ static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir)
static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring)
{
- return (ring->ntu + ring->len - ring->ntc) % ring->len;
+ u32 len = READ_ONCE(ring->len);
+
+ if (!len)
+ return 0;
+
+ return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len;
}
netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev);
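A worked example of the modular arithmetic in hbg_get_queue_used_num() above: with len = 8, ntc = 7 and ntu = 2 the ring holds (2 + 8 - 7) % 8 = 3 in-flight entries, and the len == 0 guard avoids a division by zero while the ring is being torn down. The standalone helper below is illustrative only:

static u32 hbg_example_used_num(u32 ntu, u32 ntc, u32 len)
{
	return len ? (ntu + len - ntc) % len : 0;
}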
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index a376d4bdf281..18376bcc718a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -934,8 +934,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->chan = arg.args[1] * RX_DESC_NUM;
priv->group = arg.args[2];
- hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
/* BQL will try to keep the TX queue as short as possible, but it can't
* be faster than tx_coalesce_usecs, so we need a fast timeout here,
* but also long enough to gather up enough frames to ensure we don't
@@ -944,7 +942,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
*/
priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
priv->tx_coalesce_usecs = 200;
- priv->tx_coalesce_timer.function = tx_done;
+ hrtimer_setup(&priv->tx_coalesce_timer, tx_done, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->map = syscon_node_to_regmap(arg.np);
of_node_put(arg.np);
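A hedged before/after sketch of the hrtimer API conversion applied to hip04 above; the owner structure and callback below are hypothetical:

#include <linux/hrtimer.h>

struct example_owner {
	struct hrtimer timer;
};

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void example_timer_setup(struct example_owner *owner)
{
	/* Before: hrtimer_init() followed by a separate .function assignment.
	 * After: one hrtimer_setup() call installs clock, mode and callback.
	 */
	hrtimer_setup(&owner->timer, example_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}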
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 851490346261..6b6ced37e490 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3019,115 +3019,6 @@ static struct platform_driver g_dsaf_driver = {
module_platform_driver(g_dsaf_driver);
-/**
- * hns_dsaf_roce_reset - reset dsaf and roce
- * @dsaf_fwnode: Pointer to framework node for the dasf
- * @dereset: false - request reset , true - drop reset
- * return 0 - success , negative -fail
- */
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
-{
- struct dsaf_device *dsaf_dev;
- struct platform_device *pdev;
- u32 mp;
- u32 sl;
- u32 credit;
- int i;
- static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- };
- static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- };
-
- /* find the platform device corresponding to fwnode */
- if (is_of_node(dsaf_fwnode)) {
- pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
- } else if (is_acpi_device_node(dsaf_fwnode)) {
- pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
- } else {
- pr_err("fwnode is neither OF or ACPI type\n");
- return -EINVAL;
- }
-
- /* check if we were a success in fetching pdev */
- if (!pdev) {
- pr_err("couldn't find platform device for node\n");
- return -ENODEV;
- }
-
- /* retrieve the dsaf_device from the driver data */
- dsaf_dev = dev_get_drvdata(&pdev->dev);
- if (!dsaf_dev) {
- dev_err(&pdev->dev, "dsaf_dev is NULL\n");
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* now, make sure we are running on compatible SoC */
- if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
- dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
- dsaf_dev->ae_dev.name);
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* do reset or de-reset according to the flag */
- if (!dereset) {
- /* reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- false);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
- } else {
- /* configure dsaf tx roce correspond to port map and sl map */
- mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(mp, 7 << i * 3, i * 3,
- port_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
-
- sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(sl, 3 << i * 2, i * 2,
- sl_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
-
- /* de-reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- true);
- msleep(SRST_TIME_INTERVAL);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
-
- /* enable dsaf channel rocee credit */
- credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
-
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
- }
-
- put_device(&pdev->dev);
-
- return 0;
-}
-EXPORT_SYMBOL(hns_dsaf_roce_reset);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HNS DSAF driver");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0eb03dff1a8b..653dfbb25d1b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -42,29 +42,6 @@ struct hns_mac_cb;
#define HNS_MAX_WAIT_CNT 10000
-enum dsaf_roce_port_mode {
- DSAF_ROCE_6PORT_MODE,
- DSAF_ROCE_4PORT_MODE,
- DSAF_ROCE_2PORT_MODE,
- DSAF_ROCE_CHAN_MODE_NUM,
-};
-
-enum dsaf_roce_port_num {
- DSAF_ROCE_PORT_0,
- DSAF_ROCE_PORT_1,
- DSAF_ROCE_PORT_2,
- DSAF_ROCE_PORT_3,
- DSAF_ROCE_PORT_4,
- DSAF_ROCE_PORT_5,
-};
-
-enum dsaf_roce_qos_sl {
- DSAF_ROCE_SL_0,
- DSAF_ROCE_SL_1,
- DSAF_ROCE_SL_2,
- DSAF_ROCE_SL_3,
-};
-
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -307,9 +284,6 @@ struct dsaf_misc_op {
void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
- void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
- bool dereset);
- void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
@@ -463,6 +437,4 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 5df19c604d09..91391a49fcea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -326,69 +326,6 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
HNS_XGE_RESET_FUNC, port, dereset);
}
-/**
- * hns_dsaf_srst_chns - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- u32 reg_addr;
-
- if (!dereset)
- reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
- else
- reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
-
- dsaf_write_sub(dsaf_dev, reg_addr, msk);
-}
-
-/**
- * hns_dsaf_srst_chns_acpi - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_DSAF_CHN_RESET_FUNC,
- msk, dereset);
-}
-
-static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
-{
- if (!dereset) {
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
- } else {
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
- msleep(20);
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
- }
-}
-
-static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_ROCE_RESET_FUNC, 0, dereset);
-}
-
static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
bool dereset)
{
@@ -729,8 +666,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
misc_op->ppe_srst = hns_ppe_srst_by_port;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
misc_op->get_phy_if = hns_mac_get_phy_if;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -746,8 +681,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 46af467aa596..635b3a95dd82 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -195,11 +195,6 @@ void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}
-void hns_rcb_start(struct hnae_queue *q, u32 val)
-{
- hns_rcb_ring_enable_hw(q, val);
-}
-
/**
*hns_rcb_common_init_commit_hw - make rcb common init completed
*@rcb_common: rcb common device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 0f4cc184ef39..68f81547dfb4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -116,7 +116,6 @@ int hns_rcb_buf_size2type(u32 buf_size);
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
-void hns_rcb_start(struct hnae_queue *q, u32 val);
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 42bb341fd80b..e905f10b894e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1402,7 +1402,7 @@ static void hns_nic_net_down(struct net_device *ndev)
if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
return;
- (void)del_timer_sync(&priv->service_timer);
+ (void)timer_delete_sync(&priv->service_timer);
netif_tx_stop_all_queues(ndev);
netif_carrier_off(ndev);
netif_tx_disable(ndev);
@@ -2075,7 +2075,7 @@ static void hns_nic_task_schedule(struct hns_nic_priv *priv)
static void hns_nic_service_timer(struct timer_list *t)
{
- struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
+ struct hns_nic_priv *priv = timer_container_of(priv, t, service_timer);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
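A minimal sketch of the from_timer() to timer_container_of() rename seen above; the enclosing structure is hypothetical:

#include <linux/timer.h>

struct example_priv {
	struct timer_list service_timer;
};

static void example_timer_cb(struct timer_list *t)
{
	/* timer_container_of() recovers the enclosing structure from the
	 * timer_list pointer, exactly as from_timer() did.
	 */
	struct example_priv *priv = timer_container_of(priv, t, service_timer);

	mod_timer(&priv->service_timer, jiffies + HZ);
}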
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 6c458f037262..60a586a951a0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -266,9 +266,9 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
if (err)
goto out;
- err = phy_loopback(phy_dev, true);
+ err = phy_loopback(phy_dev, true, 0);
} else {
- err = phy_loopback(phy_dev, false);
+ err = phy_loopback(phy_dev, false, 0);
if (err)
goto out;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 9a63fbc69408..b25fb400f476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -40,6 +40,21 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
*/
static DEFINE_MUTEX(hnae3_common_lock);
+/* ensure that drivers are unloaded one by one */
+static DEFINE_MUTEX(hnae3_unload_lock);
+
+void hnae3_acquire_unload_lock(void)
+{
+ mutex_lock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_acquire_unload_lock);
+
+void hnae3_release_unload_lock(void)
+{
+ mutex_unlock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_release_unload_lock);
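A hedged sketch of how a module built on hnae3 is expected to use the unload lock exported above in its exit path; the teardown placeholder is an assumption, not code from this patch:

static void __exit example_module_exit(void)
{
	/* Serialize this module's teardown against other hnae3-based
	 * modules being unloaded at the same time.
	 */
	hnae3_acquire_unload_lock();
	/* ... unregister this module's client/ae_algo here ... */
	hnae3_release_unload_lock();
}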
+
static bool hnae3_client_match(enum hnae3_client_type client_type)
{
if (client_type == HNAE3_CLIENT_KNIC ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 710a8f9f2248..3b548f71fa8a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -339,6 +339,10 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+#define hnae3_seq_file_to_ae_dev(s) (dev_get_drvdata((s)->private))
+#define hnae3_seq_file_to_handle(s) \
+ (((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->handle)
+
enum hnae3_tc_map_mode {
HNAE3_TC_MAP_MODE_PRIO,
HNAE3_TC_MAP_MODE_DSCP,
@@ -434,8 +438,11 @@ struct hnae3_ae_dev {
u32 dev_version;
DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM);
void *priv;
+ struct hnae3_handle *handle;
};
+typedef int (*read_func)(struct seq_file *s, void *data);
+
/* This struct defines the operation on the handle.
*
* init_ae_dev(): (mandatory)
@@ -580,8 +587,6 @@ struct hnae3_ae_dev {
* Delete clsflower rule
* cls_flower_active
* Check if any cls flower rule exist
- * dbg_read_cmd
- * Execute debugfs read command.
* set_tx_hwts_info
* Save information for 1588 tx packet
* get_rx_hwts
@@ -594,6 +599,8 @@ struct hnae3_ae_dev {
* Get wake on lan info
* set_wol
* Config wake on lan
+ * dbg_get_read_func
+ * Return the read func for debugfs seq file
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -690,9 +697,9 @@ struct hnae3_ae_ops {
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc);
int (*set_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ const struct ethtool_rxfh_fields *cmd);
int (*get_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ struct ethtool_rxfh_fields *cmd);
int (*get_tc_size)(struct hnae3_handle *handle);
@@ -748,8 +755,6 @@ struct hnae3_ae_ops {
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
- int (*dbg_read_cmd)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -796,6 +801,9 @@ struct hnae3_ae_ops {
struct ethtool_wolinfo *wol);
int (*set_wol)(struct hnae3_handle *handle,
struct ethtool_wolinfo *wol);
+ int (*dbg_get_read_func)(struct hnae3_handle *handle,
+ enum hnae3_dbg_cmd cmd,
+ read_func *func);
};
struct hnae3_dcb_ops {
@@ -916,9 +924,6 @@ struct hnae3_handle {
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
- /* protects concurrent contention between debugfs commands */
- struct mutex dbgfs_lock;
- char **dbgfs_buf;
/* Network interface message level enabled bits */
u32 msg_enable;
@@ -966,4 +971,6 @@ int hnae3_register_client(struct hnae3_client *client);
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev,
unsigned int inited);
+void hnae3_acquire_unload_lock(void);
+void hnae3_release_unload_lock(void);
#endif
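A hedged example of a debugfs read callback matching the read_func typedef and the seq-file helper macros added above; the printed field is chosen for illustration only:

#include <linux/seq_file.h>

/* Illustrative callback of type read_func: resolve the handle from the
 * seq_file and print one of its fields.
 */
static int example_dbg_read(struct seq_file *s, void *data)
{
	struct hnae3_handle *h = hnae3_seq_file_to_handle(s);

	seq_printf(s, "msg_enable: 0x%x\n", h->msg_enable);
	return 0;
}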
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index 4ad4e8ab2f1f..37396ca4ecfc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -348,7 +348,7 @@ static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
- return head == hw->cmq.csq.next_to_use;
+ return head == (u32)hw->cmq.csq.next_to_use;
}
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
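The (u32) cast in hclge_comm_cmd_csq_done() only makes the signedness of the comparison explicit; a minimal sketch, assuming next_to_use is a signed int as in struct hclge_comm_cmq_ring:

	#include <linux/types.h>

	/* head comes from a register read and is u32; casting the ring index
	 * keeps the comparison unsigned, avoiding a mixed-sign comparison
	 * warning without changing behaviour for valid (non-negative) indexes.
	 */
	static inline bool example_csq_done(u32 head, int next_to_use)
	{
		return head == (u32)next_to_use;
	}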
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
index 4e2bb6556b1c..1eca53aaf598 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -151,7 +151,7 @@ EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key);
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_comm_rss_input_tuple_cmd *req;
struct hclge_desc desc;
@@ -422,7 +422,7 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
}
EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key);
-static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+static u8 hclge_comm_get_rss_hash_bits(const struct ethtool_rxfh_fields *nfc)
{
u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
@@ -448,7 +448,7 @@ static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
}
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
index cdafa63fe38b..cbc02b50c6e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -108,7 +108,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key);
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req);
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
@@ -129,5 +129,5 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc);
+ const struct ethtool_rxfh_fields *nfc);
#endif
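A minimal sketch (not part of the patch) of a caller for the retyped RSS-tuple ops; the wrapper name and where the ethtool_rxfh_fields pointer comes from are assumptions, only the hnae3_ae_ops signatures match the series:

	#include <linux/errno.h>
	#include <linux/ethtool.h>
	#include "hnae3.h"

	/* Forward the ethtool RX flow-hash fields to the ae_dev-specific op,
	 * which now takes const struct ethtool_rxfh_fields directly.
	 */
	static int example_set_rss_fields(struct hnae3_handle *h,
					  const struct ethtool_rxfh_fields *fields)
	{
		if (!h->ae_algo->ops->set_rss_tuple)
			return -EOPNOTSUPP;

		return h->ae_algo->ops->set_rss_tuple(h, fields);
	}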
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 807eb3bbb11c..0255c8acb744 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -3,6 +3,8 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include "hnae3.h"
#include "hns3_debugfs.h"
@@ -39,323 +41,279 @@ static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = {
};
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd);
-static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd);
static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
{
.name = "tm_nodes",
.cmd = HNAE3_DBG_CMD_TM_NODES,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_priority",
.cmd = HNAE3_DBG_CMD_TM_PRI,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_qset",
.cmd = HNAE3_DBG_CMD_TM_QSET,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_map",
.cmd = HNAE3_DBG_CMD_TM_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_pg",
.cmd = HNAE3_DBG_CMD_TM_PG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_port",
.cmd = HNAE3_DBG_CMD_TM_PORT,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tc_sch_info",
.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_pause_cfg",
.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_pri_map",
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_dscp_map",
.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "dev_info",
.cmd = HNAE3_DBG_CMD_DEV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "tx_bd_queue",
.cmd = HNAE3_DBG_CMD_TX_BD,
.dentry = HNS3_DBG_DENTRY_TX_BD,
- .buf_len = HNS3_DBG_READ_LEN_5MB,
.init = hns3_dbg_bd_file_init,
},
{
.name = "rx_bd_queue",
.cmd = HNAE3_DBG_CMD_RX_BD,
.dentry = HNS3_DBG_DENTRY_RX_BD,
- .buf_len = HNS3_DBG_READ_LEN_4MB,
.init = hns3_dbg_bd_file_init,
},
{
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mc",
.cmd = HNAE3_DBG_CMD_MAC_MC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mng_tbl",
.cmd = HNAE3_DBG_CMD_MNG_TBL,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "loopback",
.cmd = HNAE3_DBG_CMD_LOOPBACK,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "interrupt_info",
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "reset_info",
.cmd = HNAE3_DBG_CMD_RESET_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "imp_info",
.cmd = HNAE3_DBG_CMD_IMP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ncl_config",
.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mac_tnl_status",
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "bios_common",
.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ssu",
.cmd = HNAE3_DBG_CMD_REG_SSU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "igu_egu",
.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rpu",
.cmd = HNAE3_DBG_CMD_REG_RPU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ncsi",
.cmd = HNAE3_DBG_CMD_REG_NCSI,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rtc",
.cmd = HNAE3_DBG_CMD_REG_RTC,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ppp",
.cmd = HNAE3_DBG_CMD_REG_PPP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rcb",
.cmd = HNAE3_DBG_CMD_REG_RCB,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mac",
.cmd = HNAE3_DBG_CMD_REG_MAC,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "dcb",
.cmd = HNAE3_DBG_CMD_REG_DCB,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "queue_map",
.cmd = HNAE3_DBG_CMD_QUEUE_MAP,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "rx_queue_info",
.cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "tx_queue_info",
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "service_task_info",
.cmd = HNAE3_DBG_CMD_SERV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "vlan_config",
.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ptp_info",
.cmd = HNAE3_DBG_CMD_PTP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "fd_counter",
.cmd = HNAE3_DBG_CMD_FD_COUNTER,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "umv_info",
.cmd = HNAE3_DBG_CMD_UMV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "page_pool_info",
.cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "coalesce_info",
.cmd = HNAE3_DBG_CMD_COAL_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
};
@@ -420,71 +378,17 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}
};
-static const struct hns3_dbg_item coal_info_items[] = {
- { "VEC_ID", 2 },
- { "ALGO_STATE", 2 },
- { "PROFILE_ID", 2 },
- { "CQE_MODE", 2 },
- { "TUNE_STATE", 2 },
- { "STEPS_LEFT", 2 },
- { "STEPS_RIGHT", 2 },
- { "TIRED", 2 },
- { "SW_GL", 2 },
- { "SW_QL", 2 },
- { "HW_GL", 2 },
- { "HW_QL", 2 },
-};
-
static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
static const char * const
dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
-static void hns3_dbg_fill_content(char *content, u16 len,
- const struct hns3_dbg_item *items,
- const char **result, u16 size)
-{
-#define HNS3_DBG_LINE_END_LEN 2
- char *pos = content;
- u16 item_len;
- u16 i;
-
- if (!len) {
- return;
- } else if (len <= HNS3_DBG_LINE_END_LEN) {
- *pos++ = '\0';
- return;
- }
-
- memset(content, ' ', len);
- len -= HNS3_DBG_LINE_END_LEN;
-
- for (i = 0; i < size; i++) {
- item_len = strlen(items[i].name) + items[i].interval;
- if (len < item_len)
- break;
-
- if (result) {
- if (item_len < strlen(result[i]))
- break;
- memcpy(pos, result[i], strlen(result[i]));
- } else {
- memcpy(pos, items[i].name, strlen(items[i].name));
- }
- pos += item_len;
- len -= item_len;
- }
- *pos++ = '\n';
- *pos++ = '\0';
-}
-
static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
- char **result, int i, bool is_tx)
+ struct seq_file *s, int i, bool is_tx)
{
unsigned int gl_offset, ql_offset;
struct hns3_enet_coalesce *coal;
unsigned int reg_val;
- unsigned int j = 0;
struct dim *dim;
bool ql_enable;
@@ -502,191 +406,96 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
ql_enable = tqp_vector->rx_group.coal.ql_enable;
}
- sprintf(result[j++], "%d", i);
- sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
- dim_state_str[dim->state] : "unknown");
- sprintf(result[j++], "%u", dim->profile_ix);
- sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
- dim_cqe_mode_str[dim->mode] : "unknown");
- sprintf(result[j++], "%s",
- dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
- dim_tune_stat_str[dim->tune_state] : "unknown");
- sprintf(result[j++], "%u", dim->steps_left);
- sprintf(result[j++], "%u", dim->steps_right);
- sprintf(result[j++], "%u", dim->tired);
- sprintf(result[j++], "%u", coal->int_gl);
- sprintf(result[j++], "%u", coal->int_ql);
+ seq_printf(s, "%-8d", i);
+ seq_printf(s, "%-12s", dim->state < ARRAY_SIZE(dim_state_str) ?
+ dim_state_str[dim->state] : "unknown");
+ seq_printf(s, "%-12u", dim->profile_ix);
+ seq_printf(s, "%-10s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
+ dim_cqe_mode_str[dim->mode] : "unknown");
+ seq_printf(s, "%-12s", dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
+ dim_tune_stat_str[dim->tune_state] : "unknown");
+ seq_printf(s, "%-12u%-13u%-7u%-7u%-7u", dim->steps_left,
+ dim->steps_right, dim->tired, coal->int_gl, coal->int_ql);
reg_val = readl(tqp_vector->mask_addr + gl_offset) &
HNS3_VECTOR_GL_MASK;
- sprintf(result[j++], "%u", reg_val);
+ seq_printf(s, "%-7u", reg_val);
if (ql_enable) {
reg_val = readl(tqp_vector->mask_addr + ql_offset) &
HNS3_VECTOR_QL_MASK;
- sprintf(result[j++], "%u", reg_val);
+ seq_printf(s, "%u\n", reg_val);
} else {
- sprintf(result[j++], "NA");
+ seq_puts(s, "NA\n");
}
}
-static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len,
- int *pos, bool is_tx)
+static void hns3_dump_coal_info(struct seq_file *s, bool is_tx)
{
- char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(coal_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_enet_tqp_vector *tqp_vector;
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(coal_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "%s interrupt coalesce info:\n", is_tx ? "tx" : "rx");
- *pos += scnprintf(buf + *pos, len - *pos,
- "%s interrupt coalesce info:\n",
- is_tx ? "tx" : "rx");
- hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
- NULL, ARRAY_SIZE(coal_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "VEC_ID ALGO_STATE PROFILE_ID CQE_MODE TUNE_STATE ");
+ seq_puts(s, "STEPS_LEFT STEPS_RIGHT TIRED SW_GL SW_QL ");
+ seq_puts(s, "HW_GL HW_QL\n");
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
- hns3_get_coal_info(tqp_vector, result, i, is_tx);
- hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
- (const char **)result,
- ARRAY_SIZE(coal_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ hns3_get_coal_info(tqp_vector, s, i, is_tx);
}
}
-static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_coal_info(struct seq_file *s, void *data)
{
- int pos = 0;
-
- hns3_dump_coal_info(h, buf, len, &pos, true);
- pos += scnprintf(buf + pos, len - pos, "\n");
- hns3_dump_coal_info(h, buf, len, &pos, false);
+ hns3_dump_coal_info(s, true);
+ seq_puts(s, "\n");
+ hns3_dump_coal_info(s, false);
return 0;
}
-static const struct hns3_dbg_item tx_spare_info_items[] = {
- { "QUEUE_ID", 2 },
- { "COPYBREAK", 2 },
- { "LEN", 7 },
- { "NTU", 4 },
- { "NTC", 4 },
- { "LTC", 4 },
- { "DMA", 17 },
-};
-
-static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
- int len, u32 ring_num, int *pos)
-{
- char data_str[ARRAY_SIZE(tx_spare_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_tx_spare *tx_spare = ring->tx_spare;
- char *result[ARRAY_SIZE(tx_spare_info_items)];
- char content[HNS3_DBG_INFO_LEN];
- u32 i, j;
-
- if (!tx_spare) {
- *pos += scnprintf(buf + *pos, len - *pos,
- "tx spare buffer is not enabled\n");
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(tx_spare_info_items); i++)
- result[i] = &data_str[i][0];
-
- *pos += scnprintf(buf + *pos, len - *pos, "tx spare buffer info\n");
- hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items,
- NULL, ARRAY_SIZE(tx_spare_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
-
- for (i = 0; i < ring_num; i++) {
- j = 0;
- sprintf(result[j++], "%u", i);
- sprintf(result[j++], "%u", ring->tx_copybreak);
- sprintf(result[j++], "%u", tx_spare->len);
- sprintf(result[j++], "%u", tx_spare->next_to_use);
- sprintf(result[j++], "%u", tx_spare->next_to_clean);
- sprintf(result[j++], "%u", tx_spare->last_to_clean);
- sprintf(result[j++], "%pad", &tx_spare->dma);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_spare_info_items,
- (const char **)result,
- ARRAY_SIZE(tx_spare_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
- }
-}
-
-static const struct hns3_dbg_item rx_queue_info_items[] = {
- { "QUEUE_ID", 2 },
- { "BD_NUM", 2 },
- { "BD_LEN", 2 },
- { "TAIL", 2 },
- { "HEAD", 2 },
- { "FBDNUM", 2 },
- { "PKTNUM", 5 },
- { "COPYBREAK", 2 },
- { "RING_EN", 2 },
- { "RX_RING_EN", 2 },
- { "BASE_ADDR", 10 },
-};
-
static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
- struct hnae3_ae_dev *ae_dev, char **result,
- u32 index)
+ struct seq_file *s, u32 index)
{
+ struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
+ void __iomem *base = ring->tqp->io_base;
u32 base_add_l, base_add_h;
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_NUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_LEN_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_TAIL_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_HEAD_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_FBDNUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%u", ring->rx_copybreak);
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-10u", index);
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_BD_NUM_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_BD_LEN_REG));
+ seq_printf(s, "%-6u",
+ readl_relaxed(base + HNS3_RING_RX_RING_TAIL_REG));
+ seq_printf(s, "%-6u",
+ readl_relaxed(base + HNS3_RING_RX_RING_HEAD_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_FBDNUM_REG));
+ seq_printf(s, "%-11u", readl_relaxed(base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
+ seq_printf(s, "%-11u", ring->rx_copybreak);
+ seq_printf(s, "%-9s",
+ str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-12s", str_on_off(readl_relaxed(base +
+ HNS3_RING_RX_EN_REG)));
else
- sprintf(result[j++], "%s", "NA");
+ seq_printf(s, "%-12s", "NA");
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_L_REG);
- sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+ base_add_h = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_L_REG);
+ seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
}
-static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
- char *buf, int len)
+static int hns3_dbg_rx_queue_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- char *result[ARRAY_SIZE(rx_queue_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -694,12 +503,9 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(rx_queue_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "QUEUE_ID BD_NUM BD_LEN TAIL HEAD FBDNUM ");
+ seq_puts(s, "PKTNUM COPYBREAK RING_EN RX_RING_EN BASE_ADDR\n");
- hns3_dbg_fill_content(content, sizeof(content), rx_queue_info_items,
- NULL, ARRAY_SIZE(rx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
* to prevent reference to invalid memory. And need to ensure
@@ -710,86 +516,51 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- hns3_dump_rx_queue_info(ring, ae_dev, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- rx_queue_info_items,
- (const char **)result,
- ARRAY_SIZE(rx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_rx_queue_info(ring, s, i);
}
return 0;
}
-static const struct hns3_dbg_item tx_queue_info_items[] = {
- { "QUEUE_ID", 2 },
- { "BD_NUM", 2 },
- { "TC", 2 },
- { "TAIL", 2 },
- { "HEAD", 2 },
- { "FBDNUM", 2 },
- { "OFFSET", 2 },
- { "PKTNUM", 5 },
- { "RING_EN", 2 },
- { "TX_RING_EN", 2 },
- { "BASE_ADDR", 10 },
-};
-
static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
- struct hnae3_ae_dev *ae_dev, char **result,
- u32 index)
+ struct seq_file *s, u32 index)
{
+ struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
+ void __iomem *base = ring->tqp->io_base;
u32 base_add_l, base_add_h;
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_NUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TC_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TAIL_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_HEAD_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_FBDNUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_OFFSET_REG));
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
-
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-10u", index);
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_BD_NUM_REG));
+ seq_printf(s, "%-4u", readl_relaxed(base + HNS3_RING_TX_RING_TC_REG));
+ seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_TAIL_REG));
+ seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_HEAD_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_FBDNUM_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_OFFSET_REG));
+ seq_printf(s, "%-11u",
+ readl_relaxed(base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
+ seq_printf(s, "%-9s",
+ str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-12s",
+ str_on_off(readl_relaxed(base +
+ HNS3_RING_TX_EN_REG)));
else
- sprintf(result[j++], "%s", "NA");
+ seq_printf(s, "%-12s", "NA");
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_L_REG);
- sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+ base_add_h = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_L_REG);
+ seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
}
-static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
- char *buf, int len)
+static int hns3_dbg_tx_queue_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- char *result[ARRAY_SIZE(tx_queue_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -797,12 +568,8 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(tx_queue_info_items); i++)
- result[i] = &data_str[i][0];
-
- hns3_dbg_fill_content(content, sizeof(content), tx_queue_info_items,
- NULL, ARRAY_SIZE(tx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_puts(s, "QUEUE_ID BD_NUM TC TAIL HEAD FBDNUM OFFSET ");
+ seq_puts(s, "PKTNUM RING_EN TX_RING_EN BASE_ADDR\n");
for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
@@ -814,339 +581,213 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[i];
- hns3_dump_tx_queue_info(ring, ae_dev, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_queue_info_items,
- (const char **)result,
- ARRAY_SIZE(tx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_tx_queue_info(ring, s, i);
}
- hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos);
-
return 0;
}
-static const struct hns3_dbg_item queue_map_items[] = {
- { "local_queue_id", 2 },
- { "global_queue_id", 2 },
- { "vector_id", 2 },
-};
-
-static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_queue_map(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(queue_map_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(queue_map_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
- int pos = 0;
- int j;
u32 i;
if (!h->ae_algo->ops->get_global_queue_id)
return -EOPNOTSUPP;
- for (i = 0; i < ARRAY_SIZE(queue_map_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "local_queue_id global_queue_id vector_id\n");
- hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
- NULL, ARRAY_SIZE(queue_map_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!priv->ring || !priv->ring[i].tqp_vector)
continue;
- j = 0;
- sprintf(result[j++], "%u", i);
- sprintf(result[j++], "%u",
- h->ae_algo->ops->get_global_queue_id(h, i));
- sprintf(result[j++], "%d",
- priv->ring[i].tqp_vector->vector_irq);
- hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
- (const char **)result,
- ARRAY_SIZE(queue_map_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%-16u%-17u%d\n", i,
+ h->ae_algo->ops->get_global_queue_id(h, i),
+ priv->ring[i].tqp_vector->vector_irq);
}
return 0;
}
-static const struct hns3_dbg_item rx_bd_info_items[] = {
- { "BD_IDX", 3 },
- { "L234_INFO", 2 },
- { "PKT_LEN", 3 },
- { "SIZE", 4 },
- { "RSS_HASH", 4 },
- { "FD_ID", 2 },
- { "VLAN_TAG", 2 },
- { "O_DM_VLAN_ID_FB", 2 },
- { "OT_VLAN_TAG", 2 },
- { "BD_BASE_INFO", 2 },
- { "PTYPE", 2 },
- { "HW_CSUM", 2 },
-};
-
static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
- struct hns3_desc *desc, char **result, int idx)
+ struct hns3_desc *desc, struct seq_file *s,
+ int idx)
{
- unsigned int j = 0;
-
- sprintf(result[j++], "%d", idx);
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
+ seq_printf(s, "%-9d%#-11x%-10u%-8u%#-12x%-7u%-10u%-17u%-13u%#-14x",
+ idx, le32_to_cpu(desc->rx.l234_info),
+ le16_to_cpu(desc->rx.pkt_len), le16_to_cpu(desc->rx.size),
+ le32_to_cpu(desc->rx.rss_hash), le16_to_cpu(desc->rx.fd_id),
+ le16_to_cpu(desc->rx.vlan_tag),
+ le16_to_cpu(desc->rx.o_dm_vlan_id_fb),
+ le16_to_cpu(desc->rx.ot_vlan_tag),
+ le32_to_cpu(desc->rx.bd_base_info));
+
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ol_info = le32_to_cpu(desc->rx.ol_info);
- sprintf(result[j++], "%5lu", hnae3_get_field(ol_info,
- HNS3_RXD_PTYPE_M,
- HNS3_RXD_PTYPE_S));
- sprintf(result[j++], "%7u", le16_to_cpu(desc->csum));
+ seq_printf(s, "%-7lu%-9u\n",
+ hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S),
+ le16_to_cpu(desc->csum));
} else {
- sprintf(result[j++], "NA");
- sprintf(result[j++], "NA");
+ seq_puts(s, "NA NA\n");
}
}
-static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+static int hns3_dbg_rx_bd_info(struct seq_file *s, void *private)
{
- char data_str[ARRAY_SIZE(rx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_nic_priv *priv = d->handle->priv;
- char *result[ARRAY_SIZE(rx_bd_info_items)];
- char content[HNS3_DBG_INFO_LEN];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
struct hns3_desc *desc;
unsigned int i;
- int pos = 0;
- if (d->qid >= d->handle->kinfo.num_tqps) {
- dev_err(&d->handle->pdev->dev,
- "queue%u is not in use\n", d->qid);
+ if (data->qid >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
return -EINVAL;
}
- for (i = 0; i < ARRAY_SIZE(rx_bd_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "Queue %u rx bd info:\n", data->qid);
+ seq_puts(s, "BD_IDX L234_INFO PKT_LEN SIZE ");
+ seq_puts(s, "RSS_HASH FD_ID VLAN_TAG O_DM_VLAN_ID_FB ");
+ seq_puts(s, "OT_VLAN_TAG BD_BASE_INFO PTYPE HW_CSUM\n");
- pos += scnprintf(buf + pos, len - pos,
- "Queue %u rx bd info:\n", d->qid);
- hns3_dbg_fill_content(content, sizeof(content), rx_bd_info_items,
- NULL, ARRAY_SIZE(rx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
-
- ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps];
+ ring = &priv->ring[data->qid + data->handle->kinfo.num_tqps];
for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i];
- hns3_dump_rx_bd_info(priv, desc, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- rx_bd_info_items, (const char **)result,
- ARRAY_SIZE(rx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_rx_bd_info(priv, desc, s, i);
}
return 0;
}
-static const struct hns3_dbg_item tx_bd_info_items[] = {
- { "BD_IDX", 2 },
- { "ADDRESS", 13 },
- { "VLAN_TAG", 2 },
- { "SIZE", 2 },
- { "T_CS_VLAN_TSO", 2 },
- { "OT_VLAN_TAG", 3 },
- { "TV", 5 },
- { "OLT_VLAN_LEN", 2 },
- { "PAYLEN_OL4CS", 2 },
- { "BD_FE_SC_VLD", 2 },
- { "MSS_HW_CSUM", 0 },
-};
-
-static void hns3_dump_tx_bd_info(struct hns3_desc *desc, char **result, int idx)
+static void hns3_dump_tx_bd_info(struct hns3_desc *desc, struct seq_file *s,
+ int idx)
{
- unsigned int j = 0;
-
- sprintf(result[j++], "%d", idx);
- sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
- sprintf(result[j++], "%#x",
- le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
- sprintf(result[j++], "%u",
- le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
- sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
+ seq_printf(s, "%-8d%#-20llx%-10u%-6u%#-15x%-14u%-7u%-16u%#-14x%#-14x%-11u\n",
+ idx, le64_to_cpu(desc->addr),
+ le16_to_cpu(desc->tx.vlan_tag),
+ le16_to_cpu(desc->tx.send_size),
+ le32_to_cpu(desc->tx.type_cs_vlan_tso_len),
+ le16_to_cpu(desc->tx.outer_vlan_tag),
+ le16_to_cpu(desc->tx.tv),
+ le32_to_cpu(desc->tx.ol_type_vlan_len_msec),
+ le32_to_cpu(desc->tx.paylen_ol4cs),
+ le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri),
+ le16_to_cpu(desc->tx.mss_hw_csum));
}
-static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+static int hns3_dbg_tx_bd_info(struct seq_file *s, void *private)
{
- char data_str[ARRAY_SIZE(tx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_nic_priv *priv = d->handle->priv;
- char *result[ARRAY_SIZE(tx_bd_info_items)];
- char content[HNS3_DBG_INFO_LEN];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
struct hns3_desc *desc;
unsigned int i;
- int pos = 0;
- if (d->qid >= d->handle->kinfo.num_tqps) {
- dev_err(&d->handle->pdev->dev,
- "queue%u is not in use\n", d->qid);
+ if (data->qid >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
return -EINVAL;
}
- for (i = 0; i < ARRAY_SIZE(tx_bd_info_items); i++)
- result[i] = &data_str[i][0];
-
- pos += scnprintf(buf + pos, len - pos,
- "Queue %u tx bd info:\n", d->qid);
- hns3_dbg_fill_content(content, sizeof(content), tx_bd_info_items,
- NULL, ARRAY_SIZE(tx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "Queue %u tx bd info:\n", data->qid);
+ seq_puts(s, "BD_IDX ADDRESS VLAN_TAG SIZE ");
+ seq_puts(s, "T_CS_VLAN_TSO OT_VLAN_TAG TV OLT_VLAN_LEN ");
+ seq_puts(s, "PAYLEN_OL4CS BD_FE_SC_VLD MSS_HW_CSUM\n");
- ring = &priv->ring[d->qid];
+ ring = &priv->ring[data->qid];
for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i];
- hns3_dump_tx_bd_info(desc, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_bd_info_items, (const char **)result,
- ARRAY_SIZE(tx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_tx_bd_info(desc, s, i);
}
return 0;
}
-static void
-hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
+static void hns3_dbg_dev_caps(struct hnae3_handle *h, struct seq_file *s)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const char * const str[] = {"no", "yes"};
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
unsigned long *caps = ae_dev->caps;
u32 i, state;
- *pos += scnprintf(buf + *pos, len - *pos, "dev capability:\n");
+ seq_puts(s, "dev capability:\n");
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
- hns3_dbg_cap[i].name, str[state]);
+ seq_printf(s, "%s: %s\n", hns3_dbg_cap[i].name,
+ str_yes_no(state));
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
}
-static void
-hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
+static void hns3_dbg_dev_specs(struct hnae3_handle *h, struct seq_file *s)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
struct hnae3_knic_private_info *kinfo = &h->kinfo;
struct net_device *dev = kinfo->netdev;
- *pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n");
- *pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n",
- dev_specs->mac_entry_num);
- *pos += scnprintf(buf + *pos, len - *pos, "MNG entry num: %u\n",
- dev_specs->mng_entry_num);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX non tso bd num: %u\n",
- dev_specs->max_non_tso_bd_num);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS ind tbl size: %u\n",
- dev_specs->rss_ind_tbl_size);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS key size: %u\n",
- dev_specs->rss_key_size);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS size: %u\n",
- kinfo->rss_size);
- *pos += scnprintf(buf + *pos, len - *pos, "Allocated RSS size: %u\n",
- kinfo->req_rss_size);
- *pos += scnprintf(buf + *pos, len - *pos,
- "Task queue pairs numbers: %u\n",
- kinfo->num_tqps);
- *pos += scnprintf(buf + *pos, len - *pos, "RX buffer length: %u\n",
- kinfo->rx_buf_len);
- *pos += scnprintf(buf + *pos, len - *pos, "Desc num per TX queue: %u\n",
- kinfo->num_tx_desc);
- *pos += scnprintf(buf + *pos, len - *pos, "Desc num per RX queue: %u\n",
- kinfo->num_rx_desc);
- *pos += scnprintf(buf + *pos, len - *pos,
- "Total number of enabled TCs: %u\n",
- kinfo->tc_info.num_tc);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX INT QL: %u\n",
- dev_specs->int_ql_max);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX INT GL: %u\n",
- dev_specs->max_int_gl);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX TM RATE: %u\n",
- dev_specs->max_tm_rate);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
- dev_specs->max_qset_num);
- *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
- dev_specs->umv_size);
- *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
- dev_specs->mc_mac_size);
- *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
- dev_specs->mac_stats_num);
- *pos += scnprintf(buf + *pos, len - *pos,
- "TX timeout threshold: %d seconds\n",
- dev->watchdog_timeo / HZ);
- *pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n",
- dev_specs->hilink_version);
+ seq_puts(s, "dev_spec:\n");
+ seq_printf(s, "MAC entry num: %u\n", dev_specs->mac_entry_num);
+ seq_printf(s, "MNG entry num: %u\n", dev_specs->mng_entry_num);
+ seq_printf(s, "MAX non tso bd num: %u\n",
+ dev_specs->max_non_tso_bd_num);
+ seq_printf(s, "RSS ind tbl size: %u\n", dev_specs->rss_ind_tbl_size);
+ seq_printf(s, "RSS key size: %u\n", dev_specs->rss_key_size);
+ seq_printf(s, "RSS size: %u\n", kinfo->rss_size);
+ seq_printf(s, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ seq_printf(s, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+ seq_printf(s, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ seq_printf(s, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ seq_printf(s, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ seq_printf(s, "Total number of enabled TCs: %u\n",
+ kinfo->tc_info.num_tc);
+ seq_printf(s, "MAX INT QL: %u\n", dev_specs->int_ql_max);
+ seq_printf(s, "MAX INT GL: %u\n", dev_specs->max_int_gl);
+ seq_printf(s, "MAX TM RATE: %u\n", dev_specs->max_tm_rate);
+ seq_printf(s, "MAX QSET number: %u\n", dev_specs->max_qset_num);
+ seq_printf(s, "umv size: %u\n", dev_specs->umv_size);
+ seq_printf(s, "mc mac size: %u\n", dev_specs->mc_mac_size);
+ seq_printf(s, "MAC statistics number: %u\n", dev_specs->mac_stats_num);
+ seq_printf(s, "TX timeout threshold: %d seconds\n",
+ dev->watchdog_timeo / HZ);
+ seq_printf(s, "mac tunnel number: %u\n", dev_specs->tnl_num);
+ seq_printf(s, "Hilink Version: %u\n", dev_specs->hilink_version);
}
-static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_dev_info(struct seq_file *s, void *data)
{
- int pos = 0;
-
- hns3_dbg_dev_caps(h, buf, len, &pos);
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
- hns3_dbg_dev_specs(h, buf, len, &pos);
+ hns3_dbg_dev_caps(h, s);
+ hns3_dbg_dev_specs(h, s);
return 0;
}
-static const struct hns3_dbg_item page_pool_info_items[] = {
- { "QUEUE_ID", 2 },
- { "ALLOCATE_CNT", 2 },
- { "FREE_CNT", 6 },
- { "POOL_SIZE(PAGE_NUM)", 2 },
- { "ORDER", 2 },
- { "NUMA_ID", 2 },
- { "MAX_LEN", 2 },
-};
-
static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
- char **result, u32 index)
+ struct seq_file *s, u32 index)
{
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u",
- READ_ONCE(ring->page_pool->pages_state_hold_cnt));
- sprintf(result[j++], "%d",
- atomic_read(&ring->page_pool->pages_state_release_cnt));
- sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
- sprintf(result[j++], "%u", ring->page_pool->p.order);
- sprintf(result[j++], "%d", ring->page_pool->p.nid);
- sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+ seq_printf(s, "%-10u%-14u%-14d%-21u%-7u%-9d%uK\n",
+ index,
+ READ_ONCE(ring->page_pool->pages_state_hold_cnt),
+ atomic_read(&ring->page_pool->pages_state_release_cnt),
+ ring->page_pool->p.pool_size,
+ ring->page_pool->p.order,
+ ring->page_pool->p.nid,
+ ring->page_pool->p.max_len / 1024);
}
-static int
-hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_page_pool_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -1159,176 +800,44 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "QUEUE_ID ALLOCATE_CNT FREE_CNT ");
+ seq_puts(s, "POOL_SIZE(PAGE_NUM) ORDER NUMA_ID MAX_LEN\n");
- hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
- NULL, ARRAY_SIZE(page_pool_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM;
+
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- hns3_dump_page_pool_info(ring, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- page_pool_info_items,
- (const char **)result,
- ARRAY_SIZE(page_pool_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_page_pool_info(ring, s, i);
}
return 0;
}
-static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
-{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
- if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) {
- *index = i;
- return 0;
- }
- }
-
- dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n",
- dbg_data->cmd);
- return -EINVAL;
-}
-
-static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
- {
- .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
- .dbg_dump = hns3_dbg_queue_map,
- },
- {
- .cmd = HNAE3_DBG_CMD_DEV_INFO,
- .dbg_dump = hns3_dbg_dev_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_TX_BD,
- .dbg_dump_bd = hns3_dbg_tx_bd_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_RX_BD,
- .dbg_dump_bd = hns3_dbg_rx_bd_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
- .dbg_dump = hns3_dbg_rx_queue_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
- .dbg_dump = hns3_dbg_tx_queue_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
- .dbg_dump = hns3_dbg_page_pool_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_COAL_INFO,
- .dbg_dump = hns3_dbg_coal_info,
- },
-};
-
-static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
- enum hnae3_dbg_cmd cmd, char *buf, int len)
-{
- const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops;
- const struct hns3_dbg_func *cmd_func;
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd_func); i++) {
- if (cmd == hns3_dbg_cmd_func[i].cmd) {
- cmd_func = &hns3_dbg_cmd_func[i];
- if (cmd_func->dbg_dump)
- return cmd_func->dbg_dump(dbg_data->handle, buf,
- len);
- else
- return cmd_func->dbg_dump_bd(dbg_data, buf,
- len);
- }
- }
-
- if (!ops->dbg_read_cmd)
- return -EOPNOTSUPP;
-
- return ops->dbg_read_cmd(dbg_data->handle, cmd, buf, len);
-}
-
-static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
+static int hns3_dbg_bd_info_show(struct seq_file *s, void *private)
{
- struct hns3_dbg_data *dbg_data = filp->private_data;
- struct hnae3_handle *handle = dbg_data->handle;
- struct hns3_nic_priv *priv = handle->priv;
- ssize_t size = 0;
- char **save_buf;
- char *read_buf;
- u32 index;
- int ret;
-
- ret = hns3_dbg_get_cmd_index(dbg_data, &index);
- if (ret)
- return ret;
-
- mutex_lock(&handle->dbgfs_lock);
- save_buf = &handle->dbgfs_buf[index];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
- test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (*save_buf) {
- read_buf = *save_buf;
- } else {
- read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!read_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
- /* save the buffer addr until the last read operation */
- *save_buf = read_buf;
+ if (data->cmd == HNAE3_DBG_CMD_TX_BD)
+ return hns3_dbg_tx_bd_info(s, private);
+ else if (data->cmd == HNAE3_DBG_CMD_RX_BD)
+ return hns3_dbg_rx_bd_info(s, private);
- /* get data ready for the first time to read */
- ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
- read_buf, hns3_dbg_cmd[index].buf_len);
- if (ret)
- goto out;
- }
-
- size = simple_read_from_buffer(buffer, count, ppos, read_buf,
- strlen(read_buf));
- if (size > 0) {
- mutex_unlock(&handle->dbgfs_lock);
- return size;
- }
-
-out:
- /* free the buffer for the last read operation */
- if (*save_buf) {
- kvfree(*save_buf);
- *save_buf = NULL;
- }
-
- mutex_unlock(&handle->dbgfs_lock);
- return ret;
+ return -EOPNOTSUPP;
}
-
-static const struct file_operations hns3_dbg_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = hns3_dbg_read,
-};
+DEFINE_SHOW_ATTRIBUTE(hns3_dbg_bd_info);
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
{
- struct dentry *entry_dir;
struct hns3_dbg_data *data;
+ struct dentry *entry_dir;
u16 max_queue_num;
unsigned int i;
@@ -1347,45 +856,77 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
data[i].qid = i;
sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
debugfs_create_file(name, 0400, entry_dir, &data[i],
- &hns3_dbg_fops);
+ &hns3_dbg_bd_info_fops);
}
return 0;
}
-static int
-hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
+static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd)
{
- struct hns3_dbg_data *data;
+ struct device *dev = &handle->pdev->dev;
struct dentry *entry_dir;
+ read_func func = NULL;
+
+ switch (hns3_dbg_cmd[cmd].cmd) {
+ case HNAE3_DBG_CMD_TX_QUEUE_INFO:
+ func = hns3_dbg_tx_queue_info;
+ break;
+ case HNAE3_DBG_CMD_RX_QUEUE_INFO:
+ func = hns3_dbg_rx_queue_info;
+ break;
+ case HNAE3_DBG_CMD_QUEUE_MAP:
+ func = hns3_dbg_queue_map;
+ break;
+ case HNAE3_DBG_CMD_PAGE_POOL_INFO:
+ func = hns3_dbg_page_pool_info;
+ break;
+ case HNAE3_DBG_CMD_COAL_INFO:
+ func = hns3_dbg_coal_info;
+ break;
+ case HNAE3_DBG_CMD_DEV_INFO:
+ func = hns3_dbg_dev_info;
+ break;
+ default:
+ return -EINVAL;
+ }
- data = devm_kzalloc(&handle->pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
+ debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
+ func);
+
+ return 0;
+}
+
+static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd)
+{
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
+ struct device *dev = &handle->pdev->dev;
+ struct dentry *entry_dir;
+ read_func func;
+ int ret;
+
+ if (!ops->dbg_get_read_func)
+ return 0;
+
+ ret = ops->dbg_get_read_func(handle, hns3_dbg_cmd[cmd].cmd, &func);
+ if (ret)
+ return ret;
- data->handle = handle;
- data->cmd = hns3_dbg_cmd[cmd].cmd;
entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
- debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir,
- data, &hns3_dbg_fops);
+ debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
+ func);
return 0;
}
int hns3_dbg_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
const char *name = pci_name(handle->pdev);
int ret;
u32 i;
- handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
- ARRAY_SIZE(hns3_dbg_cmd),
- sizeof(*handle->dbgfs_buf),
- GFP_KERNEL);
- if (!handle->dbgfs_buf)
- return -ENOMEM;
-
hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
debugfs_create_dir(name, hns3_dbgfs_root);
handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1395,8 +936,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
debugfs_create_dir(hns3_dbg_dentry[i].name,
handle->hnae3_dbgfs);
- mutex_init(&handle->dbgfs_lock);
-
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1425,24 +964,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
out:
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
- mutex_destroy(&handle->dbgfs_lock);
return ret;
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
{
- u32 i;
-
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
- if (handle->dbgfs_buf[i]) {
- kvfree(handle->dbgfs_buf[i]);
- handle->dbgfs_buf[i] = NULL;
- }
-
- mutex_destroy(&handle->dbgfs_lock);
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
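A standalone sketch (names are illustrative) of the seq_file pattern the file now relies on: debugfs_create_devm_seqfile() passes the struct device as seq_file::private, which is what hnae3_seq_file_to_ae_dev() depends on via dev_get_drvdata():

	#include <linux/debugfs.h>
	#include <linux/device.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *s, void *data)
	{
		struct device *dev = s->private;	/* set by the devm seqfile helper */

		seq_printf(s, "device: %s\n", dev_name(dev));
		return 0;
	}

	static void example_register(struct device *dev, struct dentry *parent)
	{
		/* the dentry is torn down with the parent directory, as
		 * hns3_dbg_uninit() does via debugfs_remove_recursive()
		 */
		debugfs_create_devm_seqfile(dev, "example", parent, example_show);
	}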
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index 4a5ef8a90a10..57c9d3fc1b27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -6,15 +6,6 @@
#include "hnae3.h"
-#define HNS3_DBG_READ_LEN 65536
-#define HNS3_DBG_READ_LEN_128KB 0x20000
-#define HNS3_DBG_READ_LEN_1MB 0x100000
-#define HNS3_DBG_READ_LEN_4MB 0x400000
-#define HNS3_DBG_READ_LEN_5MB 0x500000
-#define HNS3_DBG_WRITE_LEN 1024
-
-#define HNS3_DBG_DATA_STR_LEN 32
-#define HNS3_DBG_INFO_LEN 256
#define HNS3_DBG_ITEM_NAME_LEN 32
#define HNS3_DBG_FILE_NAME_LEN 16
@@ -49,16 +40,9 @@ struct hns3_dbg_cmd_info {
const char *name;
enum hnae3_dbg_cmd cmd;
enum hns3_dbg_dentry_type dentry;
- u32 buf_len;
int (*init)(struct hnae3_handle *handle, unsigned int cmd);
};
-struct hns3_dbg_func {
- enum hnae3_dbg_cmd cmd;
- int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len);
- int (*dbg_dump_bd)(struct hns3_dbg_data *data, char *buf, int len);
-};
-
struct hns3_dbg_cap_info {
const char *name;
enum HNAE3_DEV_CAP_BITS cap_bit;
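The hns3_get_ae_dev()/hns3_get_ops() accessors used throughout the converted code are defined elsewhere (hns3_enet.h) and are not shown in this diff; based on the call sites they replace, they are presumably thin wrappers along these lines (an assumption, not the actual definitions):

	#include <linux/pci.h>
	#include "hnae3.h"

	/* Presumed shape, inferred from the pci_get_drvdata(h->pdev) and
	 * h->ae_algo->ops expressions these calls replace.
	 */
	static inline struct hnae3_ae_dev *hns3_get_ae_dev(struct hnae3_handle *h)
	{
		return pci_get_drvdata(h->pdev);
	}

	static inline const struct hnae3_ae_ops *hns3_get_ops(struct hnae3_handle *h)
	{
		return h->ae_algo->ops;
	}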
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 43377a7b2426..bfa5568baa92 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
@@ -473,20 +474,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
writel(mask_en, tqp_vector->mask_addr);
}
-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
enable_irq(tqp_vector->vector_irq);
-
- /* enable vector */
- hns3_mask_vector_irq(tqp_vector, 1);
}
-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
- /* disable vector */
- hns3_mask_vector_irq(tqp_vector, 0);
-
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
cancel_work_sync(&tqp_vector->rx_group.dim.work);
@@ -553,9 +548,9 @@ void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
@@ -707,11 +702,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
return 0;
}
+static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+}
+
+static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_disable(&priv->tqp_vector[i]);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int i, j;
int ret;
ret = hns3_nic_reset_all_ring(h);
@@ -720,23 +746,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- /* enable the vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- /* enable rcb */
- for (j = 0; j < h->kinfo.num_tqps; j++)
- hns3_tqp_enable(h->kinfo.tqp[j]);
+ hns3_enable_irqs_and_tqps(netdev);
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret) {
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- while (j--)
- hns3_tqp_disable(h->kinfo.tqp[j]);
-
- for (j = i - 1; j >= 0; j--)
- hns3_vector_disable(&priv->tqp_vector[j]);
+ hns3_disable_irqs_and_tqps(netdev);
}
return ret;
@@ -823,17 +839,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
- int i;
-
- /* disable vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
- /* disable rcb */
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(netdev);
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
@@ -953,7 +961,7 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ops->request_update_promisc_mode)
ops->request_update_promisc_mode(handle);
@@ -1032,6 +1040,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_tx_spare *tx_spare;
struct page *page;
dma_addr_t dma;
@@ -1073,6 +1083,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
+ ring->tx_copybreak = priv->tx_copybreak;
return;
dma_mapping_error:
@@ -1297,7 +1308,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(skb->dev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
union l4_hdr_info l4;
/* device version above V3(include V3), the hardware can
@@ -1497,7 +1508,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
* VLAN enabled, only one VLAN header is allowed in skb, otherwise it
* will cause RAS error.
*/
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (unlikely(skb_vlan_tagged_multi(skb) &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
handle->port_base_vlan_state ==
@@ -1683,8 +1694,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
#define HNS3_LIKELY_BD_NUM 1
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
- unsigned int frag_buf_num;
- int k, sizeoflast;
+ unsigned int frag_buf_num, k;
+ int sizeoflast;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
desc->addr = cpu_to_le64(dma);
@@ -1856,7 +1867,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
unsigned int bd_num, u8 max_non_tso_bd_num)
{
unsigned int tot_len = 0;
- int i;
+ unsigned int i;
for (i = 0; i < max_non_tso_bd_num - 1U; i++)
tot_len += bd_size[i];
@@ -1884,7 +1895,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
- int i;
+ u32 i;
for (i = 0; i < MAX_SKB_FRAGS; i++)
size[i] = skb_frag_size(&shinfo->frags[i]);
@@ -2099,7 +2110,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit
* is updated.
*/
@@ -2115,7 +2126,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
*/
smp_store_release(&ring->last_to_use, ring->next_to_use);
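The two comment fixes above only correct the spelling of smp_load_acquire(); the pairing itself is unchanged. A standalone sketch of that pairing, with illustrative function names (only last_to_use, next_to_use and next_to_clean come from struct hns3_enet_ring):

/* producer side (TX xmit path) */
static void ring_publish(struct hns3_enet_ring *ring)
{
	/* every descriptor write completes before last_to_use is published */
	smp_store_release(&ring->last_to_use, ring->next_to_use);
}

/* consumer side (hns3_nic_reclaim_desc()) */
static bool ring_has_work(struct hns3_enet_ring *ring)
{
	/* pairs with the smp_store_release() above */
	return smp_load_acquire(&ring->last_to_use) != ring->next_to_clean;
}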
@@ -2200,9 +2211,9 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
struct sg_table *sgt;
- int i, bd_num = 0;
+ int bd_num = 0;
dma_addr_t dma;
- u32 cb_len;
+ u32 cb_len, i;
int nents;
if (skb_has_frag_list(skb))
@@ -2440,7 +2451,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
h->ae_algo->ops->cls_flower_active(h)) {
netdev_err(netdev,
- "there are offloaded TC filters active, cannot disable HW TC offload");
+ "there are offloaded TC filters active, cannot disable HW TC offload\n");
return -EINVAL;
}
@@ -2452,7 +2463,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
- netdev->features = features;
return 0;
}
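Dropping the netdev->features assignment is safe because the core commits the new feature set itself once .ndo_set_features() succeeds. A paraphrase (not an exact quote) of the relevant logic in __netdev_update_features(); core_commit_features() is an illustrative name:

static void core_commit_features(struct net_device *dev,
				 netdev_features_t features)
{
	int err = 0;

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

	/* the core, not the driver, writes dev->features on success */
	if (!err)
		dev->features = features;
}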
@@ -2538,7 +2548,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
struct hnae3_handle *handle = priv->ae_handle;
struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
- unsigned int idx;
+ int idx;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
@@ -2764,7 +2774,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
static int hns3_get_timeout_queue(struct net_device *ndev)
{
- int i;
+ unsigned int i;
/* Find the stopped queue the same way the stack does */
for (i = 0; i < ndev->num_tx_queues; i++) {
@@ -2845,7 +2855,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring;
- int timeout_queue;
+ u32 timeout_queue;
timeout_queue = hns3_get_timeout_queue(ndev);
if (timeout_queue >= ndev->num_tx_queues) {
@@ -3815,7 +3825,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
__be16 type = skb->protocol;
struct tcphdr *th;
- int depth = 0;
+ u32 depth = 0;
while (eth_type_vlan(type)) {
struct vlan_hdr *vh;
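Most of the counter type changes in this file switch loop indices to unsigned types so they match the unsigned bounds they run against (num_tx_queues, MAX_SKB_FRAGS, ARRAY_SIZE(), kinfo->num_tqps). A hypothetical fragment, not driver code, showing the mismatch being removed:

	int i;				/* old: signed counter */
	unsigned int j;			/* new: matches the bound's type */

	for (i = 0; i < ndev->num_tx_queues; i++)	/* signed vs unsigned */
		;
	for (j = 0; j < ndev->num_tx_queues; j++)	/* types agree */
		;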
@@ -4741,7 +4751,7 @@ map_ring_fail:
static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
@@ -4868,6 +4878,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
devm_kfree(&pdev->dev, priv->tqp_vector);
}
+static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
+{
+#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
+#define HNS3_MAX_PACKET_SIZE (64 * 1024)
+
+ struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
+ struct hnae3_handle *handle = priv->ae_handle;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return;
+
+ if (!(domain && iommu_is_dma_domain(domain)))
+ return;
+
+ priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
+ priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
+
+ if (priv->tx_copybreak < priv->min_tx_copybreak)
+ priv->tx_copybreak = priv->min_tx_copybreak;
+ if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
+ handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
+}
+
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
unsigned int ring_type)
{
@@ -5101,6 +5135,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int i, j;
int ret;
+ hns3_update_tx_spare_buf_config(priv);
for (i = 0; i < ring_num; i++) {
ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
@@ -5220,7 +5255,7 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
enum dim_cq_period_mode mode, bool is_tx)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hnae3_handle *handle = priv->ae_handle;
int i;
@@ -5258,7 +5293,7 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
static void hns3_state_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -5293,6 +5328,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
struct net_device *netdev;
int ret;
+ ae_dev->handle = handle;
+
handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
&max_rss_size);
netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
@@ -5305,6 +5342,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+ priv->min_tx_copybreak = 0;
+ priv->min_tx_spare_buf_size = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
@@ -5865,8 +5904,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5877,11 +5914,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
netif_carrier_off(ndev);
netif_tx_disable(ndev);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(ndev);
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
@@ -5897,7 +5930,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5913,11 +5945,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_enable(h->kinfo.tqp[i]);
+ hns3_enable_irqs_and_tqps(ndev);
netif_tx_wake_all_queues(ndev);
@@ -5939,7 +5967,7 @@ static const struct hns3_hw_error_info hns3_hw_err[] = {
static void hns3_process_hw_error(struct hnae3_handle *handle,
enum hnae3_hw_error_type type)
{
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
if (hns3_hw_err[i].type == type) {
@@ -5966,8 +5994,8 @@ static int __init hns3_init_module(void)
{
int ret;
- pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
- pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
+ pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
+ pr_debug("%s: %s\n", hns3_driver_name, hns3_copyright);
client.type = HNAE3_CLIENT_KNIC;
snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
@@ -6003,9 +6031,11 @@ module_init(hns3_init_module);
*/
static void __exit hns3_exit_module(void)
{
+ hnae3_acquire_unload_lock();
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
hns3_dbg_unregister_debugfs();
+ hnae3_release_unload_lock();
}
module_exit(hns3_exit_module);
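hnae3_acquire_unload_lock()/hnae3_release_unload_lock() come from the hnae3 core; their definition is not part of this hunk. A plausible shape, assuming a plain mutex serializing module unload against concurrent ae_dev setup/teardown (an assumption, not the actual hnae3 implementation):

static DEFINE_MUTEX(hnae3_unload_lock);

void hnae3_acquire_unload_lock(void)
{
	mutex_lock(&hnae3_unload_lock);
}
EXPORT_SYMBOL(hnae3_acquire_unload_lock);

void hnae3_release_unload_lock(void)
{
	mutex_unlock(&hnae3_unload_lock);
}
EXPORT_SYMBOL(hnae3_release_unload_lock);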
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d36c4ed16d8d..933e3527ed82 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -596,6 +596,8 @@ struct hns3_nic_priv {
struct hns3_enet_coalesce rx_coal;
u32 tx_copybreak;
u32 rx_copybreak;
+ u32 min_tx_copybreak;
+ u32 min_tx_spare_buf_size;
};
union l3_hdr_info {
@@ -621,7 +623,7 @@ struct hns3_reset_type_map {
enum hnae3_reset_type rst_type;
};
-static inline int ring_space(struct hns3_enet_ring *ring)
+static inline u32 ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
* hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
@@ -692,7 +694,7 @@ static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
- for (pos = (head).ring; (pos); pos = (pos)->next)
+ for ((pos) = (head).ring; (pos); (pos) = (pos)->next)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
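hns3_get_ae_dev() and hns3_get_ops(), used throughout the .c changes above, are accessors whose definitions fall outside the hunks shown here. Given exactly what they replace at the call sites, their presumed shape is (an assumption, not quoted from the patch):

static inline struct hnae3_ae_dev *hns3_get_ae_dev(struct hnae3_handle *handle)
{
	return pci_get_drvdata(handle->pdev);
}

static inline const struct hnae3_ae_ops *hns3_get_ops(struct hnae3_handle *handle)
{
	return handle->ae_algo->ops;
}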
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b771a2daba43..d5454e126c85 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/phy.h>
#include <linux/sfp.h>
@@ -85,7 +86,7 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset);
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -170,7 +171,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* the purpose of mac or serdes selftest.
*/
handle = hns3_get_handle(ndev);
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
@@ -435,7 +436,7 @@ static void hns3_self_test(struct net_device *ndev,
data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
if (hns3_nic_resetting(ndev)) {
- netdev_err(ndev, "dev resetting!");
+ netdev_err(ndev, "dev resetting!\n");
goto failure;
}
@@ -488,7 +489,7 @@ static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = {
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
if (!ops->get_sset_count)
return -EOPNOTSUPP;
@@ -539,8 +540,8 @@ static void hns3_get_strings_tqps(struct hnae3_handle *handle, u8 **data)
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
- int i;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
+ u32 i;
if (!ops->get_strings)
return;
@@ -568,7 +569,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hns3_nic_priv *nic_priv = handle->priv;
struct hns3_enet_ring *ring;
u8 *stat;
- int i, j;
+ u32 i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -691,7 +692,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return;
@@ -705,7 +706,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return -EOPNOTSUPP;
@@ -724,7 +725,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
static void hns3_get_ksettings(struct hnae3_handle *h,
struct ethtool_link_ksettings *cmd)
{
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
/* 1.auto_neg & speed & duplex from cmd */
if (ops->get_ksettings_an_result)
@@ -750,7 +751,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
const struct hnae3_ae_ops *ops;
u8 module_type;
u8 media_type;
@@ -793,7 +794,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
break;
default:
- netdev_warn(netdev, "Unknown media type");
+ netdev_warn(netdev, "Unknown media type\n");
return 0;
}
@@ -813,7 +814,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
u32 lane_num;
@@ -841,7 +842,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
if (cmd->base.duplex == DUPLEX_HALF &&
media_type != HNAE3_MEDIA_TYPE_COPPER) {
netdev_err(netdev,
- "only copper port supports half duplex!");
+ "only copper port supports half duplex!\n");
return -EINVAL;
}
@@ -860,8 +861,8 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
int ret;
/* Chip don't support this mode. */
@@ -931,7 +932,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
return ae_dev->dev_specs.rss_ind_tbl_size;
}
@@ -953,7 +954,7 @@ static int hns3_set_rss(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
@@ -977,6 +978,16 @@ static int hns3_set_rss(struct net_device *netdev,
rxfh->hfunc);
}
+static int hns3_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -987,10 +998,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tqps;
return 0;
- case ETHTOOL_GRXFH:
- if (h->ae_algo->ops->get_rss_tuple)
- return h->ae_algo->ops->get_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_GRXCLSRLCNT:
if (h->ae_algo->ops->get_fd_rule_cnt)
return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
@@ -1023,8 +1030,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
{
enum hnae3_reset_type rst_type = HNAE3_NONE_RESET;
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
const struct hns3_reset_type_map *rst_type_map;
enum ethtool_reset_flags rst_flags;
u32 i, size;
@@ -1188,7 +1195,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
@@ -1198,7 +1205,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
return 0;
netdev_dbg(netdev, "Changing tx push from %s to %s\n",
- old_state ? "on" : "off", tx_push ? "on" : "off");
+ str_on_off(old_state), str_on_off(tx_push));
if (tx_push)
set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
@@ -1274,15 +1281,22 @@ static int hns3_set_ringparam(struct net_device *ndev,
return ret;
}
+static int hns3_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- if (h->ae_algo->ops->set_rss_tuple)
- return h->ae_algo->ops->set_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_SRXCLSRLINS:
if (h->ae_algo->ops->add_fd_entry)
return h->ae_algo->ops->add_fd_entry(h, cmd);
@@ -1299,7 +1313,7 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
static int hns3_nway_reset(struct net_device *netdev)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct phy_device *phy = netdev->phydev;
int autoneg;
@@ -1307,7 +1321,7 @@ static int hns3_nway_reset(struct net_device *netdev)
return 0;
if (hns3_nic_resetting(netdev)) {
- netdev_err(netdev, "dev resetting!");
+ netdev_err(netdev, "dev resetting!\n");
return -EBUSY;
}
@@ -1376,7 +1390,7 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
u32 rx_gl, tx_gl;
if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
@@ -1448,7 +1462,7 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
!ae_dev->dev_specs.int_ql_max) {
@@ -1472,7 +1486,7 @@ hns3_check_cqe_coalesce_param(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
!hnae3_ae_dev_cq_supported(ae_dev)) {
@@ -1648,8 +1662,8 @@ static void hns3_get_fec_stats(struct net_device *netdev,
struct ethtool_fec_stats *fec_stats)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
return;
@@ -1699,8 +1713,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 fec_ability;
u8 fec_mode;
@@ -1724,8 +1738,8 @@ static int hns3_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u32 fec_mode;
if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
@@ -1746,8 +1760,8 @@ static int hns3_get_module_info(struct net_device *netdev,
#define HNS3_SFF_8636_V1_3 0x03
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct hns3_sfp_type sfp_type;
int ret;
@@ -1796,8 +1810,8 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
!ops->get_module_eeprom)
@@ -1923,7 +1937,7 @@ static int hns3_set_tunable(struct net_device *netdev,
int i, ret = 0;
if (hns3_nic_resetting(netdev) || !priv->ring) {
- netdev_err(netdev, "failed to set tunable value, dev resetting!");
+ netdev_err(netdev, "failed to set tunable value, dev resetting!\n");
return -EBUSY;
}
@@ -2104,6 +2118,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
@@ -2141,6 +2157,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
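RSS hash-field (flow hash) configuration moves out of the generic .get_rxnfc/.set_rxnfc handlers (the ETHTOOL_GRXFH/SRXFH cases removed above) and into the dedicated .get_rxfh_fields/.set_rxfh_fields ethtool operations. The minimal handler shape, using the same signatures as the new hns3 callbacks; the example_* names are placeholders:

static int example_get_rxfh_fields(struct net_device *netdev,
				   struct ethtool_rxfh_fields *cmd)
{
	/* report which header fields are hashed for the queried flow type */
	return 0;
}

static int example_set_rxfh_fields(struct net_device *netdev,
				   const struct ethtool_rxfh_fields *cmd,
				   struct netlink_ext_ack *extack)
{
	/* program the RSS hash tuple described by cmd */
	return 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_rxfh_fields	= example_get_rxfh_fields,
	.set_rxfh_fields	= example_set_rxfh_fields,
};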
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index debf143e9940..b76d25074e99 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/sched/clock.h>
+#include <linux/string_choices.h>
#include "hclge_debugfs.h"
#include "hclge_err.h"
@@ -11,7 +12,9 @@
#include "hclge_tm.h"
#include "hnae3.h"
-static const char * const state_str[] = { "off", "on" };
+#define hclge_seq_file_to_hdev(s) \
+ (((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->priv)
+
static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
@@ -721,48 +724,6 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
-/* make sure: len(name) + interval >= maxlen(item data) + 2,
- * for example, name = "pkt_num"(len: 7), the prototype of item data is u32,
- * and print as "%u"(maxlen: 10), so the interval should be at least 5.
- */
-static void hclge_dbg_fill_content(char *content, u16 len,
- const struct hclge_dbg_item *items,
- const char **result, u16 size)
-{
-#define HCLGE_DBG_LINE_END_LEN 2
- char *pos = content;
- u16 item_len;
- u16 i;
-
- if (!len) {
- return;
- } else if (len <= HCLGE_DBG_LINE_END_LEN) {
- *pos++ = '\0';
- return;
- }
-
- memset(content, ' ', len);
- len -= HCLGE_DBG_LINE_END_LEN;
-
- for (i = 0; i < size; i++) {
- item_len = strlen(items[i].name) + items[i].interval;
- if (len < item_len)
- break;
-
- if (result) {
- if (item_len < strlen(result[i]))
- break;
- memcpy(pos, result[i], strlen(result[i]));
- } else {
- memcpy(pos, items[i].name, strlen(items[i].name));
- }
- pos += item_len;
- len -= item_len;
- }
- *pos++ = '\n';
- *pos++ = '\0';
-}
-
static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
{
if (id)
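The dump callbacks below are converted from filling a caller-supplied buf/len with scnprintf() into seq_file show functions, with hclge_seq_file_to_hdev() above recovering the hclge_dev from the ae_dev attached to the seq_file. A generic sketch of how such a show function gets wired up (the real registration lives in the hnae3/hns3 debugfs code, not in this hunk; the example name and the use of DEFINE_SHOW_ATTRIBUTE are assumptions):

static int example_show(struct seq_file *s, void *data)
{
	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);

	seq_printf(s, "num_tqps: %u\n", hdev->num_tqps);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);

/* registered roughly as:
 * debugfs_create_file("example", 0400, parent, ae_dev, &example_fops);
 * the ae_dev passed as private data is what hnae3_seq_file_to_ae_dev()
 * hands back inside the show callback.
 */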
@@ -826,14 +787,14 @@ int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
static int
hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
const struct hclge_dbg_reg_type_info *reg_info,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ u32 index, entry, i, cnt, min_num;
struct hclge_desc *desc_src;
- u32 index, entry, i, cnt;
- int bd_num, min_num, ret;
struct hclge_desc *desc;
+ int bd_num, ret;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -846,13 +807,12 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
- *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
- cnt++, dfx_message->message);
+ seq_printf(s, "item%u = %s\n", cnt++, dfx_message->message);
for (i = 0; i < cnt; i++)
- *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
+ seq_printf(s, "item%u\t", i);
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
dfx_message = reg_info->dfx_msg;
@@ -867,10 +827,9 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
if (i > 0 && !entry)
desc++;
- *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
- le32_to_cpu(desc->data[entry]));
+ seq_printf(s, "%#x\t", le32_to_cpu(desc->data[entry]));
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
}
kfree(desc_src);
@@ -880,14 +839,14 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
static int
hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
const struct hclge_dbg_reg_type_info *reg_info,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
struct hclge_desc *desc_src;
- int bd_num, min_num, ret;
+ int bd_num, min_num, ret, i;
struct hclge_desc *desc;
- u32 entry, i;
+ u32 entry;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -914,9 +873,8 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
if (!dfx_message->flag)
continue;
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
- dfx_message->message,
- le32_to_cpu(desc->data[entry]));
+ seq_printf(s, "%s: %#x\n", dfx_message->message,
+ le32_to_cpu(desc->data[entry]));
}
kfree(desc_src);
@@ -940,8 +898,8 @@ static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
};
-static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
@@ -962,16 +920,15 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
offset = hclge_dbg_mac_en_status[i].offset;
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
- hclge_dbg_mac_en_status[i].message,
- hnae3_get_bit(loop_en, offset));
+ seq_printf(s, "%s: %#x\n", hclge_dbg_mac_en_status[i].message,
+ hnae3_get_bit(loop_en, offset));
}
return 0;
}
-static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
@@ -988,16 +945,14 @@ static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
- le16_to_cpu(req->max_frm_size));
- *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
- req->min_frm_size);
+ seq_printf(s, "max_frame_size: %u\n", le16_to_cpu(req->max_frm_size));
+ seq_printf(s, "min_frame_size: %u\n", req->min_frm_size);
return 0;
}
-static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev,
+ struct seq_file *s)
{
#define HCLGE_MAC_SPEED_SHIFT 0
#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
@@ -1018,33 +973,31 @@ static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
- *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
- hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
- HCLGE_MAC_SPEED_SHIFT));
- *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
- hnae3_get_bit(req->speed_dup,
- HCLGE_MAC_DUPLEX_SHIFT));
+ seq_printf(s, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ seq_printf(s, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
return 0;
}
-static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_mac_enable_status(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_mac_frame_size(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
+ return hclge_dbg_dump_mac_speed_duplex(hdev, s);
}
-static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1055,8 +1008,8 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
+ seq_puts(s, "qset_id roce_qset_mask nic_qset_mask ");
+ seq_puts(s, "qset_shaping_pass qset_bp_status\n");
for (qset_id = 0; qset_id < qset_num; qset_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
HCLGE_OPC_QSET_DFX_STS);
@@ -1065,17 +1018,14 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%04u %#x %#x %#x %#x\n",
- qset_id, req.bit0, req.bit1, req.bit2,
- req.bit3);
+ seq_printf(s, "%04u %#-16x%#-15x%#-19x%#-x\n",
+ qset_id, req.bit0, req.bit1, req.bit2, req.bit3);
}
return 0;
}
-static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
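With seq_file in place, the column padding that hclge_dbg_fill_content() and the hclge_dbg_item tables used to provide comes straight from printf field widths; for example "%#-16x" prints the value with a 0x prefix, left-justified in a 16-character column. A standalone illustration of the idiom (plain userspace printf, which accepts the same flags as seq_printf):

#include <stdio.h>

int main(void)
{
	/* header and data rows align because both use the same widths */
	printf("%-8s%-16s%s\n", "ID", "MASK", "STATUS");
	printf("%-8u%#-16x%#x\n", 3u, 0x1fu, 1u);
	return 0;
}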
@@ -1086,8 +1036,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
+ seq_puts(s, "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
for (pri_id = 0; pri_id < pri_num; pri_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
HCLGE_OPC_PRI_DFX_STS);
@@ -1096,24 +1045,21 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%03u %#x %#x %#x\n",
- pri_id, req.bit0, req.bit1, req.bit2);
+ seq_printf(s, "%03u %#-10x%#-19x%#-x\n",
+ pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
}
-static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
+ seq_puts(s, "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
HCLGE_OPC_PG_DFX_STS);
@@ -1122,47 +1068,41 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%03u %#x %#x %#x\n",
- pg_id, req.bit0, req.bit1, req.bit2);
+ seq_printf(s, "%03u %#-9x%#-18x%#-x\n",
+ pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
}
-static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_desc desc;
u16 nq_id;
int ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
+ seq_puts(s, "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
HCLGE_OPC_SCH_NQ_CNT);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
- nq_id, le32_to_cpu(desc.data[1]));
+ seq_printf(s, "%04u %#-19x",
+ nq_id, le32_to_cpu(desc.data[1]));
ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
HCLGE_OPC_SCH_RQ_CNT);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- " %#x\n",
- le32_to_cpu(desc.data[1]));
+ seq_printf(s, "%#-x\n", le32_to_cpu(desc.data[1]));
}
return 0;
}
-static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1176,16 +1116,13 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
- req.bit0);
- *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
- req.bit1);
+ seq_printf(s, "port_mask: %#x\n", req.bit0);
+ seq_printf(s, "port_shaping_pass: %#x\n", req.bit1);
return 0;
}
-static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_desc desc[2];
u8 port_id = 0;
@@ -1196,32 +1133,23 @@ static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
- le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "SCH_NIC_NUM: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "SCH_ROCE_NUM: %#x\n", le32_to_cpu(desc[0].data[2]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
HCLGE_OPC_TM_INTERNAL_STS);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
- le32_to_cpu(desc[0].data[2]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "sch_roce_fifo_afull_gap: %#x\n",
- le32_to_cpu(desc[0].data[3]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "tx_private_waterline: %#x\n",
- le32_to_cpu(desc[0].data[4]));
- *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
- le32_to_cpu(desc[0].data[5]));
- *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
- le32_to_cpu(desc[1].data[0]));
- *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
- le32_to_cpu(desc[1].data[1]));
+ seq_printf(s, "pri_bp: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "fifo_dfx_info: %#x\n", le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "sch_roce_fifo_afull_gap: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ seq_printf(s, "tx_private_waterline: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ seq_printf(s, "tm_bypass_en: %#x\n", le32_to_cpu(desc[0].data[5]));
+ seq_printf(s, "SSU_TM_BYPASS_EN: %#x\n", le32_to_cpu(desc[1].data[0]));
+ seq_printf(s, "SSU_RESERVE_CFG: %#x\n", le32_to_cpu(desc[1].data[1]));
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
return 0;
@@ -1231,65 +1159,60 @@ static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
- le32_to_cpu(desc[0].data[2]));
- *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
- le32_to_cpu(desc[0].data[3]));
- *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
- le32_to_cpu(desc[0].data[4]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
- le32_to_cpu(desc[0].data[5]));
+ seq_printf(s, "TC_MAP_SEL: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "IGU_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "MAC_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[3]));
+ seq_printf(s, "IGU_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ seq_printf(s, "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
return 0;
}
-static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_dcb(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_qset(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_pri(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_pg(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_queue(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_port(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
+ return hclge_dbg_dump_dcb_tm(hdev, s);
}
-static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
- enum hnae3_dbg_cmd cmd, char *buf, int len)
+static int hclge_dbg_dump_reg_cmd(enum hnae3_dbg_cmd cmd, struct seq_file *s)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
const struct hclge_dbg_reg_type_info *reg_info;
- int pos = 0, ret = 0;
- int i;
+ int ret = 0;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
reg_info = &hclge_dbg_reg_info[i];
if (cmd == reg_info->cmd) {
if (cmd == HNAE3_DBG_CMD_REG_TQP)
- return hclge_dbg_dump_reg_tqp(hdev, reg_info,
- buf, len, &pos);
+ return hclge_dbg_dump_reg_tqp(hdev,
+ reg_info, s);
- ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
- len, &pos);
+ ret = hclge_dbg_dump_reg_common(hdev, reg_info, s);
if (ret)
break;
}
@@ -1298,12 +1221,57 @@ static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
return ret;
}
-static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_bios_reg_cmd(struct seq_file *s, void *data)
{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_BIOS_COMMON, s);
+}
+
+static int hclge_dbg_dump_ssu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_SSU, s);
+}
+
+static int hclge_dbg_dump_igu_egu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_IGU_EGU, s);
+}
+
+static int hclge_dbg_dump_rpu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RPU, s);
+}
+
+static int hclge_dbg_dump_ncsi_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_NCSI, s);
+}
+
+static int hclge_dbg_dump_rtc_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RTC, s);
+}
+
+static int hclge_dbg_dump_ppp_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_PPP, s);
+}
+
+static int hclge_dbg_dump_rcb_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RCB, s);
+}
+
+static int hclge_dbg_dump_tqp_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_TQP, s);
+}
+
+static int hclge_dbg_dump_tc(struct seq_file *s, void *data)
+{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_ets_tc_weight_cmd *ets_weight;
+ const char *sch_mode_str;
struct hclge_desc desc;
- char *sch_mode_str;
- int pos = 0;
int ret;
u8 i;
@@ -1323,72 +1291,37 @@ static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
- hdev->tm_info.num_tc);
- pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
- ets_weight->weight_offset);
+ seq_printf(s, "enabled tc number: %u\n", hdev->tm_info.num_tc);
+ seq_printf(s, "weight_offset: %u\n", ets_weight->weight_offset);
- pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
+ seq_puts(s, "TC MODE WEIGHT\n");
for (i = 0; i < HNAE3_MAX_TC; i++) {
sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
- pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
- i, sch_mode_str, ets_weight->tc_weight[i]);
+ seq_printf(s, "%u %4s %3u\n", i, sch_mode_str,
+ ets_weight->tc_weight[i]);
}
return 0;
}
-static const struct hclge_dbg_item tm_pg_items[] = {
- { "ID", 2 },
- { "PRI_MAP", 2 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "C_IR_B", 2 },
- { "C_IR_U", 2 },
- { "C_IR_S", 2 },
- { "C_BS_B", 2 },
- { "C_BS_S", 2 },
- { "C_FLAG", 2 },
- { "C_RATE(Mbps)", 2 },
- { "P_IR_B", 2 },
- { "P_IR_U", 2 },
- { "P_IR_S", 2 },
- { "P_BS_B", 2 },
- { "P_BS_S", 2 },
- { "P_FLAG", 2 },
- { "P_RATE(Mbps)", 0 }
-};
-
-static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
- char **result, u8 *index)
+static void hclge_dbg_fill_shaper_content(struct seq_file *s,
+ struct hclge_tm_shaper_para *para)
{
- sprintf(result[(*index)++], "%3u", para->ir_b);
- sprintf(result[(*index)++], "%3u", para->ir_u);
- sprintf(result[(*index)++], "%3u", para->ir_s);
- sprintf(result[(*index)++], "%3u", para->bs_b);
- sprintf(result[(*index)++], "%3u", para->bs_s);
- sprintf(result[(*index)++], "%3u", para->flag);
- sprintf(result[(*index)++], "%6u", para->rate);
+ seq_printf(s, "%-8u%-8u%-8u%-8u%-8u%-8u%-14u", para->ir_b, para->ir_u,
+ para->ir_s, para->bs_b, para->bs_s, para->flag, para->rate);
}
-static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
- char *buf, int len)
+static int hclge_dbg_dump_tm_pg(struct seq_file *s, void *data)
{
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
- char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
- u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
- char content[HCLGE_DBG_TM_INFO_LEN];
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u8 pg_id, sch_mode, weight, pri_bit_map;
+ const char *sch_mode_str;
int ret;
- for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
- result[i] = data_str;
- data_str += HCLGE_DBG_DATA_STR_LEN;
- }
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
- NULL, ARRAY_SIZE(tm_pg_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_puts(s, "ID PRI_MAP MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ seq_puts(s, "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U P_IR_S ");
+ seq_puts(s, "P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
@@ -1418,68 +1351,41 @@ static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%02u", pg_id);
- sprintf(result[j++], "0x%02x", pri_bit_map);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
- hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
- (const char **)result,
- ARRAY_SIZE(tm_pg_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%02u 0x%-7x%-6s%-6u", pg_id, pri_bit_map,
+ sch_mode_str, weight);
+ hclge_dbg_fill_shaper_content(s, &c_shaper_para);
+ hclge_dbg_fill_shaper_content(s, &p_shaper_para);
+ seq_puts(s, "\n");
}
return 0;
}
-static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
-{
- char *data_str;
- int ret;
-
- data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
- HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
- if (!data_str)
- return -ENOMEM;
-
- ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
-
- kfree(data_str);
-
- return ret;
-}
-
-static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_port(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_tm_shaper_para shaper_para;
- int pos = 0;
int ret;
ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
- pos += scnprintf(buf + pos, len - pos,
- "%3u %3u %3u %3u %3u %1u %6u\n",
- shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
- shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
- shaper_para.rate);
+ seq_puts(s, "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
+ seq_printf(s, "%3u %3u %3u %3u %3u %1u %6u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
return 0;
}
static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
- char *buf, int len)
+ struct seq_file *s)
{
u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
struct hclge_bp_to_qs_map_cmd *map;
struct hclge_desc desc;
- int pos = 0;
u8 group_id;
u8 grp_num;
u16 i = 0;
@@ -1505,27 +1411,27 @@ static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
}
- pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
+ seq_puts(s, "INDEX | TM BP QSET MAPPING:\n");
for (group_id = 0; group_id < grp_num / 8; group_id++) {
- pos += scnprintf(buf + pos, len - pos,
- "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_mapping[i + 7],
- qset_mapping[i + 6], qset_mapping[i + 5],
- qset_mapping[i + 4], qset_mapping[i + 3],
- qset_mapping[i + 2], qset_mapping[i + 1],
- qset_mapping[i]);
+ seq_printf(s,
+ "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+ group_id * 256, qset_mapping[i + 7],
+ qset_mapping[i + 6], qset_mapping[i + 5],
+ qset_mapping[i + 4], qset_mapping[i + 3],
+ qset_mapping[i + 2], qset_mapping[i + 1],
+ qset_mapping[i]);
i += 8;
}
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_map(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u16 queue_id;
u16 qset_id;
u8 link_vld;
- int pos = 0;
u8 pri_id;
u8 tc_id;
int ret;
@@ -1544,32 +1450,28 @@ static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
- pos += scnprintf(buf + pos, len - pos,
- "%04u %4u %3u %2u\n",
- queue_id, qset_id, pri_id, tc_id);
+ seq_puts(s, "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
+ seq_printf(s, "%04u %4u %3u %2u\n",
+ queue_id, qset_id, pri_id, tc_id);
if (!hnae3_dev_dcb_supported(hdev))
continue;
- ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
- len - pos);
+ ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, s);
if (ret < 0)
return ret;
- pos += ret;
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
}
return 0;
}
-static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_nodes(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_tm_nodes_cmd *nodes;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
@@ -1582,65 +1484,36 @@ static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
nodes = (struct hclge_tm_nodes_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
- pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
- nodes->pg_base_id, nodes->pg_num);
- pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
- nodes->pri_base_id, nodes->pri_num);
- pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
- le16_to_cpu(nodes->qset_base_id),
- le16_to_cpu(nodes->qset_num));
- pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
- le16_to_cpu(nodes->queue_base_id),
- le16_to_cpu(nodes->queue_num));
+ seq_puts(s, " BASE_ID MAX_NUM\n");
+ seq_printf(s, "PG %4u %4u\n", nodes->pg_base_id,
+ nodes->pg_num);
+ seq_printf(s, "PRI %4u %4u\n", nodes->pri_base_id,
+ nodes->pri_num);
+ seq_printf(s, "QSET %4u %4u\n",
+ le16_to_cpu(nodes->qset_base_id),
+ le16_to_cpu(nodes->qset_num));
+ seq_printf(s, "QUEUE %4u %4u\n",
+ le16_to_cpu(nodes->queue_base_id),
+ le16_to_cpu(nodes->queue_num));
return 0;
}
-static const struct hclge_dbg_item tm_pri_items[] = {
- { "ID", 4 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "C_IR_B", 2 },
- { "C_IR_U", 2 },
- { "C_IR_S", 2 },
- { "C_BS_B", 2 },
- { "C_BS_S", 2 },
- { "C_FLAG", 2 },
- { "C_RATE(Mbps)", 2 },
- { "P_IR_B", 2 },
- { "P_IR_U", 2 },
- { "P_IR_S", 2 },
- { "P_BS_B", 2 },
- { "P_BS_S", 2 },
- { "P_FLAG", 2 },
- { "P_RATE(Mbps)", 0 }
-};
-
-static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_pri(struct seq_file *s, void *data)
{
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
- char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
- char content[HCLGE_DBG_TM_INFO_LEN];
- u8 pri_num, sch_mode, weight, i, j;
- char *data_str;
- int pos, ret;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u8 pri_num, sch_mode, weight, i;
+ const char *sch_mode_str;
+ int ret;
ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
return ret;
- data_str = kcalloc(ARRAY_SIZE(tm_pri_items), HCLGE_DBG_DATA_STR_LEN,
- GFP_KERNEL);
- if (!data_str)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
- result[i] = &data_str[i * HCLGE_DBG_DATA_STR_LEN];
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
- NULL, ARRAY_SIZE(tm_pri_items));
- pos = scnprintf(buf, len, "%s", content);
+ seq_puts(s, "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ seq_puts(s, "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U P_IR_S ");
+ seq_puts(s, "P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
for (i = 0; i < pri_num; i++) {
ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
@@ -1666,59 +1539,31 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%04u", i);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
- hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
- hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
- (const char **)result,
- ARRAY_SIZE(tm_pri_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%04u %-6s%-6u", i, sch_mode_str, weight);
+ hclge_dbg_fill_shaper_content(s, &c_shaper_para);
+ hclge_dbg_fill_shaper_content(s, &p_shaper_para);
+ seq_puts(s, "\n");
}
out:
- kfree(data_str);
return ret;
}
-static const struct hclge_dbg_item tm_qset_items[] = {
- { "ID", 4 },
- { "MAP_PRI", 2 },
- { "LINK_VLD", 2 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "IR_B", 2 },
- { "IR_U", 2 },
- { "IR_S", 2 },
- { "BS_B", 2 },
- { "BS_S", 2 },
- { "FLAG", 2 },
- { "RATE(Mbps)", 0 }
-};
-
-static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_qset(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 priority, link_vld, sch_mode, weight;
struct hclge_tm_shaper_para shaper_para;
- char content[HCLGE_DBG_TM_INFO_LEN];
+ const char *sch_mode_str;
u16 qset_num, i;
- int ret, pos;
- u8 j;
+ int ret;
ret = hclge_tm_get_qset_num(hdev, &qset_num);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
- result[i] = &data_str[i][0];
-
- hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
- NULL, ARRAY_SIZE(tm_qset_items));
- pos = scnprintf(buf, len, "%s", content);
+ seq_puts(s, "ID MAP_PRI LINK_VLD MODE DWRR IR_B IR_U IR_S ");
+ seq_puts(s, "BS_B BS_S FLAG RATE(Mbps)\n");
for (i = 0; i < qset_num; i++) {
ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
@@ -1740,29 +1585,22 @@ static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%04u", i);
- sprintf(result[j++], "%4u", priority);
- sprintf(result[j++], "%4u", link_vld);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
-
- hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
- (const char **)result,
- ARRAY_SIZE(tm_qset_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%04u %-9u%-10u%-6s%-6u", i, priority, link_vld,
+ sch_mode_str, weight);
+ seq_printf(s, "%-6u%-6u%-6u%-6u%-6u%-6u%-14u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
}
return 0;
}
-static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_pause_cfg(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
@@ -1775,23 +1613,21 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
- pause_param->pause_trans_gap);
- pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
- le16_to_cpu(pause_param->pause_trans_time));
+ seq_printf(s, "pause_trans_gap: 0x%x\n", pause_param->pause_trans_gap);
+ seq_printf(s, "pause_trans_time: 0x%x\n",
+ le16_to_cpu(pause_param->pause_trans_time));
return 0;
}
#define HCLGE_DBG_TC_MASK 0x0F
-static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_pri_map(struct seq_file *s, void *data)
{
#define HCLGE_DBG_TC_BIT_WIDTH 4
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_qos_pri_map_cmd *pri_map;
struct hclge_desc desc;
- int pos = 0;
u8 *pri_tc;
u8 tc, i;
int ret;
@@ -1806,33 +1642,33 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
- pri_map->vlan_pri);
- pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
+ seq_printf(s, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
+ seq_puts(s, "PRI TC\n");
pri_tc = (u8 *)pri_map;
for (i = 0; i < HNAE3_MAX_TC; i++) {
tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
tc &= HCLGE_DBG_TC_MASK;
- pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
+ seq_printf(s, "%u %u\n", i, tc);
}
return 0;
}
-static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_dscp_map(struct seq_file *s, void *data)
{
- struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ struct hnae3_knic_private_info *kinfo;
u8 *req0 = (u8 *)desc[0].data;
u8 *req1 = (u8 *)desc[1].data;
u8 dscp_tc[HNAE3_MAX_DSCP];
- int pos, ret;
+ int ret;
u8 i, j;
- pos = scnprintf(buf, len, "tc map mode: %s\n",
- tc_map_mode_str[kinfo->tc_map_mode]);
+ kinfo = &hdev->vport[0].nic.kinfo;
+
+ seq_printf(s, "tc map mode: %s\n", tc_map_mode_str[kinfo->tc_map_mode]);
if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
return 0;
@@ -1847,7 +1683,7 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+ seq_puts(s, "\nDSCP PRIO TC\n");
/* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */
for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
@@ -1865,18 +1701,17 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
continue;
- pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
- i, kinfo->dscp_prio[i], dscp_tc[i]);
+ seq_printf(s, " %2u %u %u\n", i, kinfo->dscp_prio[i],
+ dscp_tc[i]);
}
return 0;
}
-static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
struct hclge_desc desc;
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
@@ -1889,19 +1724,17 @@ static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- pos += scnprintf(buf + pos, len - pos,
- "tx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
+ seq_printf(s, "tx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
struct hclge_desc desc;
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
@@ -1912,26 +1745,24 @@ static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(rx_buf_cmd->buf_num[i]));
+ seq_printf(s, "rx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
- pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
- le16_to_cpu(rx_buf_cmd->shared_buf));
+ seq_printf(s, "rx_share_buf: 0x%x\n",
+ le16_to_cpu(rx_buf_cmd->shared_buf));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_com_wl *rx_com_wl;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
@@ -1943,21 +1774,19 @@ static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
}
rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "\n");
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_wl: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_com_wl->com_wl.high),
- le16_to_cpu(rx_com_wl->com_wl.low));
+ seq_puts(s, "\n");
+ seq_printf(s, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_com_wl *rx_packet_cnt;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
@@ -1969,20 +1798,18 @@ static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
}
rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
- pos += scnprintf(buf + pos, len - pos,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_packet_cnt->com_wl.high),
- le16_to_cpu(rx_packet_cnt->com_wl.low));
+ seq_printf(s, "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_priv_wl_buf *rx_priv_wl;
struct hclge_desc desc[2];
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
@@ -1997,28 +1824,25 @@ static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
- le16_to_cpu(rx_priv_wl->tc_wl[i].high),
- le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+ seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
- i + HCLGE_TC_NUM_ONE_DESC,
- le16_to_cpu(rx_priv_wl->tc_wl[i].high),
- le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+ seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- return pos;
+ return 0;
}
static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
- char *buf, int len)
+ struct seq_file *s)
{
struct hclge_rx_com_thrd *rx_com_thrd;
struct hclge_desc desc[2];
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
@@ -2031,86 +1855,75 @@ static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
- le16_to_cpu(rx_com_thrd->com_thrd[i].high),
- le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
- i + HCLGE_TC_NUM_ONE_DESC,
- le16_to_cpu(rx_com_thrd->com_thrd[i].high),
- le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_buf_cfg(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
if (!hnae3_dev_dcb_supported(hdev))
return 0;
- ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
- len - pos);
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, s);
if (ret < 0)
return ret;
return 0;
}
-static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mng_table(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_ethertype_idx_rd_cmd *req0;
struct hclge_desc desc;
u32 msg_egress_port;
- int pos = 0;
int ret, i;
- pos += scnprintf(buf + pos, len - pos,
- "entry mac_addr mask ether ");
- pos += scnprintf(buf + pos, len - pos,
- "mask vlan mask i_map i_dir e_type ");
- pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
+ seq_puts(s, "entry mac_addr mask ether ");
+ seq_puts(s, "mask vlan mask i_map i_dir e_type ");
+ seq_puts(s, "pf_id vf_id q_id drop\n");
for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
true);
- req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
+ req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)desc.data;
req0->index = cpu_to_le16(i);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2123,46 +1936,40 @@ static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
if (!req0->resp_code)
continue;
- pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
- le16_to_cpu(req0->index), req0->mac_addr);
+ seq_printf(s, "%02u %pM ",
+ le16_to_cpu(req0->index), req0->mac_addr);
- pos += scnprintf(buf + pos, len - pos,
- "%x %04x %x %04x ",
- !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
- le16_to_cpu(req0->ethter_type),
- !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
- le16_to_cpu(req0->vlan_tag) &
- HCLGE_DBG_MNG_VLAN_TAG);
+ seq_printf(s, "%x %04x %x %04x ",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ le16_to_cpu(req0->ethter_type),
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ le16_to_cpu(req0->vlan_tag) &
+ HCLGE_DBG_MNG_VLAN_TAG);
- pos += scnprintf(buf + pos, len - pos,
- "%x %02x %02x ",
- !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
- req0->i_port_bitmap, req0->i_port_direction);
+ seq_printf(s, "%x %02x %02x ",
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
msg_egress_port = le16_to_cpu(req0->egress_port);
- pos += scnprintf(buf + pos, len - pos,
- "%x %x %02x %04x %x\n",
- !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
- msg_egress_port & HCLGE_DBG_MNG_PF_ID,
- (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
- le16_to_cpu(req0->egress_queue),
- !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
+ seq_printf(s, "%x %x %02x %04x %x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
}
return 0;
}
-#define HCLGE_DBG_TCAM_BUF_SIZE 256
-
static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
- char *tcam_buf,
+ struct seq_file *s,
struct hclge_dbg_tcam_msg tcam_msg)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
struct hclge_fd_tcam_config_3_cmd *req3;
struct hclge_desc desc[3];
- int pos = 0;
int ret, i;
__le32 *req;
@@ -2184,27 +1991,23 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
if (ret)
return ret;
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
- tcam_msg.loc);
+ seq_printf(s, "read result tcam key %s(%u):\n",
+ sel_x ? "x" : "y", tcam_msg.loc);
/* tcam_data0 ~ tcam_data1 */
req = (__le32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
/* tcam_data2 ~ tcam_data7 */
req = (__le32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
/* tcam_data8 ~ tcam_data12 */
req = (__le32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
return ret;
}
@@ -2228,14 +2031,13 @@ static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
return cnt;
}
-static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_fd_tcam(struct seq_file *s, void *data)
{
- u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_dbg_tcam_msg tcam_msg;
int i, ret, rule_cnt;
u16 *rule_locs;
- char *tcam_buf;
- int pos = 0;
+ u32 rule_num;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
@@ -2243,6 +2045,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
return -EOPNOTSUPP;
}
+ rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
if (!hdev->hclge_fd_rule_num || !rule_num)
return 0;
@@ -2250,12 +2053,6 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
if (!rule_locs)
return -ENOMEM;
- tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
- if (!tcam_buf) {
- kfree(rule_locs);
- return -ENOMEM;
- }
-
rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
if (rule_cnt < 0) {
ret = rule_cnt;
@@ -2269,38 +2066,34 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
tcam_msg.stage = HCLGE_FD_STAGE_1;
tcam_msg.loc = rule_locs[i];
- ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
+ ret = hclge_dbg_fd_tcam_read(hdev, true, s, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key x, ret = %d\n", ret);
goto out;
}
- pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
-
- ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
+ ret = hclge_dbg_fd_tcam_read(hdev, false, s, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key y, ret = %d\n", ret);
goto out;
}
- pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
}
out:
- kfree(tcam_buf);
kfree(rule_locs);
return ret;
}
-static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_fd_counter(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
struct hclge_fd_ad_cnt_read_cmd *req;
char str_id[HCLGE_DBG_ID_LEN];
struct hclge_desc desc;
- int pos = 0;
int ret;
u64 cnt;
u8 i;
@@ -2308,8 +2101,7 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
- pos += scnprintf(buf + pos, len - pos,
- "func_id\thit_times\n");
+ seq_puts(s, "func_id\thit_times\n");
for (i = 0; i < func_num; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
@@ -2323,8 +2115,7 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
}
cnt = le64_to_cpu(req->cnt);
hclge_dbg_get_func_id_str(str_id, i);
- pos += scnprintf(buf + pos, len - pos,
- "%s\t%llu\n", str_id, cnt);
+ seq_printf(s, "%s\t%llu\n", str_id, cnt);
}
return 0;
@@ -2375,74 +2166,95 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
-static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_seq_dump_rst_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u32 i, offset;
+
+ seq_printf(s, "PF reset count: %u\n", hdev->rst_stats.pf_rst_cnt);
+ seq_printf(s, "FLR reset count: %u\n", hdev->rst_stats.flr_rst_cnt);
+ seq_printf(s, "GLOBAL reset count: %u\n",
+ hdev->rst_stats.global_rst_cnt);
+ seq_printf(s, "IMP reset count: %u\n", hdev->rst_stats.imp_rst_cnt);
+ seq_printf(s, "reset done count: %u\n", hdev->rst_stats.reset_done_cnt);
+ seq_printf(s, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ seq_printf(s, "reset count: %u\n", hdev->rst_stats.reset_cnt);
+ seq_printf(s, "reset fail count: %u\n", hdev->rst_stats.reset_fail_cnt);
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
+ offset = hclge_dbg_rst_info[i].offset;
+ seq_printf(s, "%s: 0x%x\n",
+ hclge_dbg_rst_info[i].message,
+ hclge_read_dev(&hdev->hw, offset));
+ }
+
+ seq_printf(s, "hdev state: 0x%lx\n", hdev->state);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_serv_info(struct seq_file *s, void *data)
+{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
unsigned long rem_nsec;
- int pos = 0;
u64 lc;
lc = local_clock();
rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
- pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
- (unsigned long)lc, rem_nsec / 1000);
- pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
- jiffies_to_msecs(jiffies - hdev->last_serv_processed));
- pos += scnprintf(buf + pos, len - pos,
- "last_service_task_processed: %lu(jiffies)\n",
- hdev->last_serv_processed);
- pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
- hdev->serv_processed_cnt);
+ seq_printf(s, "local_clock: [%5lu.%06lu]\n",
+ (unsigned long)lc, rem_nsec / 1000);
+ seq_printf(s, "delta: %u(ms)\n",
+ jiffies_to_msecs(jiffies - hdev->last_serv_processed));
+ seq_printf(s, "last_service_task_processed: %lu(jiffies)\n",
+ hdev->last_serv_processed);
+ seq_printf(s, "last_service_task_cnt: %lu\n", hdev->serv_processed_cnt);
return 0;
}
-static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_interrupt(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
- pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
- hdev->num_nic_msi);
- pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
- hdev->num_roce_msi);
- pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
- hdev->num_msi_used);
- pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
- hdev->num_msi_left);
+ seq_printf(s, "num_nic_msi: %u\n", hdev->num_nic_msi);
+ seq_printf(s, "num_roce_msi: %u\n", hdev->num_roce_msi);
+ seq_printf(s, "num_msi_used: %u\n", hdev->num_msi_used);
+ seq_printf(s, "num_msi_left: %u\n", hdev->num_msi_left);
return 0;
}
-static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
- char *buf, int len, u32 bd_num)
+static void hclge_dbg_imp_info_data_print(struct seq_file *s,
+ struct hclge_desc *desc_src,
+ u32 bd_num)
{
#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
struct hclge_desc *desc_index = desc_src;
u32 offset = 0;
- int pos = 0;
u32 i, j;
- pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+ seq_puts(s, "offset | data\n");
for (i = 0; i < bd_num; i++) {
j = 0;
while (j < HCLGE_DESC_DATA_LEN - 1) {
- pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
- offset);
- pos += scnprintf(buf + pos, len - pos, "0x%08x ",
- le32_to_cpu(desc_index->data[j++]));
- pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
- le32_to_cpu(desc_index->data[j++]));
+ seq_printf(s, "0x%04x | ", offset);
+ seq_printf(s, "0x%08x ",
+ le32_to_cpu(desc_index->data[j++]));
+ seq_printf(s, "0x%08x\n",
+ le32_to_cpu(desc_index->data[j++]));
offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
}
desc_index++;
}
}
-static int
-hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_get_imp_stats_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_get_imp_bd_cmd *req;
struct hclge_desc *desc_src;
struct hclge_desc desc;
@@ -2479,7 +2291,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
return ret;
}
- hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
+ hclge_dbg_imp_info_data_print(s, desc_src, bd_num);
kfree(desc_src);
@@ -2490,7 +2302,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
#define HCLGE_CMD_DATA_NUM 6
@@ -2502,9 +2314,8 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
if (i == 0 && j == 0)
continue;
- *pos += scnprintf(buf + *pos, len - *pos,
- "0x%04x | 0x%08x\n", offset,
- le32_to_cpu(desc[i].data[j]));
+ seq_printf(s, "0x%04x | 0x%08x\n", offset,
+ le32_to_cpu(desc[i].data[j]));
offset += sizeof(u32);
*index -= sizeof(u32);
@@ -2515,19 +2326,18 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
}
}
-static int
-hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_ncl_config(struct seq_file *s, void *data)
{
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
- int pos = 0;
u32 data0;
int ret;
- pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+ seq_puts(s, "offset | data\n");
while (index > 0) {
data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
@@ -2540,27 +2350,26 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
+ hclge_ncl_config_data_print(desc, &index, s);
}
return 0;
}
-static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_loopback(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct phy_device *phydev = hdev->hw.mac.phydev;
struct hclge_config_mac_mode_cmd *req_app;
struct hclge_common_lb_cmd *req_common;
struct hclge_desc desc;
u8 loopback_en;
- int pos = 0;
int ret;
req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
req_common = (struct hclge_common_lb_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
- hdev->hw.mac.mac_id);
+ seq_printf(s, "mac id: %u\n", hdev->hw.mac.mac_id);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2572,8 +2381,7 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
HCLGE_MAC_APP_LP_B);
- pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
- state_str[loopback_en]);
+ seq_printf(s, "app loopback: %s\n", str_on_off(loopback_en));
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2584,24 +2392,22 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
return ret;
}
- loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
- pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
- state_str[loopback_en]);
+ loopback_en = req_common->enable &
+ HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ seq_printf(s, "serdes serial loopback: %s\n", str_on_off(loopback_en));
loopback_en = req_common->enable &
- HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
- pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
- state_str[loopback_en]);
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
+ seq_printf(s, "serdes parallel loopback: %s\n",
+ str_on_off(loopback_en));
if (phydev) {
loopback_en = phydev->loopback_enabled;
- pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
} else if (hnae3_dev_phy_imp_supported(hdev)) {
loopback_en = req_common->enable &
HCLGE_CMD_GE_PHY_INNER_LOOP_B;
- pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
}
return 0;
@@ -2610,107 +2416,75 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
* @hdev: pointer to struct hclge_dev
*/
-static int
-hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_tnl_status(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_tnl_stats stats;
unsigned long rem_nsec;
- int pos = 0;
- pos += scnprintf(buf + pos, len - pos,
- "Recently generated mac tnl interruption:\n");
+ seq_puts(s, "Recently generated mac tnl interruption:\n");
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
- pos += scnprintf(buf + pos, len - pos,
- "[%07lu.%03lu] status = 0x%x\n",
- (unsigned long)stats.time, rem_nsec / 1000,
- stats.status);
+ seq_printf(s, "[%07lu.%03lu] status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
}
return 0;
}
-
-static const struct hclge_dbg_item mac_list_items[] = {
- { "FUNC_ID", 2 },
- { "MAC_ADDR", 12 },
- { "STATE", 2 },
-};
-
-static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
- bool is_unicast)
+static void hclge_dbg_dump_mac_list(struct seq_file *s, bool is_unicast)
{
- char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
- char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
- char *result[ARRAY_SIZE(mac_list_items)];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_node *mac_node, *tmp;
struct hclge_vport *vport;
struct list_head *list;
u32 func_id;
- int pos = 0;
- int i;
- for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
- result[i] = &data_str[i][0];
-
- pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
- is_unicast ? "UC" : "MC");
- hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
- NULL, ARRAY_SIZE(mac_list_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%s MAC_LIST:\n", is_unicast ? "UC" : "MC");
+ seq_puts(s, "FUNC_ID MAC_ADDR STATE\n");
for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
vport = &hdev->vport[func_id];
list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
spin_lock_bh(&vport->mac_list_lock);
list_for_each_entry_safe(mac_node, tmp, list, node) {
- i = 0;
- result[i++] = hclge_dbg_get_func_id_str(str_id,
- func_id);
- sprintf(result[i++], "%pM", mac_node->mac_addr);
- sprintf(result[i++], "%5s",
- hclge_mac_state_str[mac_node->state]);
- hclge_dbg_fill_content(content, sizeof(content),
- mac_list_items,
- (const char **)result,
- ARRAY_SIZE(mac_list_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ if (func_id)
+ seq_printf(s, "vf%-7u", func_id - 1U);
+ else
+ seq_puts(s, "pf ");
+ seq_printf(s, "%pM ", mac_node->mac_addr);
+ seq_printf(s, "%5s\n",
+ hclge_mac_state_str[mac_node->state]);
}
spin_unlock_bh(&vport->mac_list_lock);
}
}
-static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_umv_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 func_num = pci_num_vf(hdev->pdev) + 1;
struct hclge_vport *vport;
- int pos = 0;
u8 i;
- pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
- hdev->num_alloc_vport);
- pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
- hdev->max_umv_size);
- pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
- hdev->wanted_umv_size);
- pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
- hdev->priv_umv_size);
+ seq_printf(s, "num_alloc_vport : %u\n", hdev->num_alloc_vport);
+ seq_printf(s, "max_umv_size : %u\n", hdev->max_umv_size);
+ seq_printf(s, "wanted_umv_size : %u\n", hdev->wanted_umv_size);
+ seq_printf(s, "priv_umv_size : %u\n", hdev->priv_umv_size);
mutex_lock(&hdev->vport_lock);
- pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
- hdev->share_umv_size);
+ seq_printf(s, "share_umv_size : %u\n", hdev->share_umv_size);
for (i = 0; i < func_num; i++) {
vport = &hdev->vport[i];
- pos += scnprintf(buf + pos, len - pos,
- "vport(%u) used_umv_num : %u\n",
- i, vport->used_umv_num);
+ seq_printf(s, "vport(%u) used_umv_num : %u\n",
+ i, vport->used_umv_num);
}
mutex_unlock(&hdev->vport_lock);
- pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
- hdev->used_mc_mac_num);
+ seq_printf(s, "used_mc_mac_num : %u\n", hdev->used_mc_mac_num);
return 0;
}
@@ -2852,38 +2626,12 @@ static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
return 0;
}
-static const struct hclge_dbg_item vlan_filter_items[] = {
- { "FUNC_ID", 2 },
- { "I_VF_VLAN_FILTER", 2 },
- { "E_VF_VLAN_FILTER", 2 },
- { "PORT_VLAN_FILTER_BYPASS", 0 }
-};
-
-static const struct hclge_dbg_item vlan_offload_items[] = {
- { "FUNC_ID", 2 },
- { "PVID", 4 },
- { "ACCEPT_TAG1", 2 },
- { "ACCEPT_TAG2", 2 },
- { "ACCEPT_UNTAG1", 2 },
- { "ACCEPT_UNTAG2", 2 },
- { "INSERT_TAG1", 2 },
- { "INSERT_TAG2", 2 },
- { "SHIFT_TAG", 2 },
- { "STRIP_TAG1", 2 },
- { "STRIP_TAG2", 2 },
- { "DROP_TAG1", 2 },
- { "DROP_TAG2", 2 },
- { "PRI_ONLY_TAG1", 2 },
- { "PRI_ONLY_TAG2", 0 }
-};
-
-static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev,
+ struct seq_file *s)
{
- char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
- const char *result[ARRAY_SIZE(vlan_filter_items)];
- u8 i, j, vlan_fe, bypass, ingress, egress;
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ u8 i, vlan_fe, bypass, ingress, egress;
+ char str_id[HCLGE_DBG_ID_LEN];
int ret;
ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
@@ -2893,14 +2641,11 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
- *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
- state_str[ingress]);
- *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
- state_str[egress]);
+ seq_printf(s, "I_PORT_VLAN_FILTER: %s\n", str_on_off(ingress));
+ seq_printf(s, "E_PORT_VLAN_FILTER: %s\n", str_on_off(egress));
- hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
- NULL, ARRAY_SIZE(vlan_filter_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "FUNC_ID I_VF_VLAN_FILTER E_VF_VLAN_FILTER ");
+ seq_puts(s, "PORT_VLAN_FILTER_BYPASS\n");
for (i = 0; i < func_num; i++) {
ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
@@ -2913,37 +2658,32 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
if (ret)
return ret;
- j = 0;
- result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = state_str[ingress];
- result[j++] = state_str[egress];
- result[j++] =
- test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
- hdev->ae_dev->caps) ? state_str[bypass] : "NA";
- hclge_dbg_fill_content(content, sizeof(content),
- vlan_filter_items, result,
- ARRAY_SIZE(vlan_filter_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ seq_printf(s, "%-9s%-18s%-18s%s\n",
+ hclge_dbg_get_func_id_str(str_id, i),
+ str_on_off(ingress), str_on_off(egress),
+ test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ?
+ str_on_off(bypass) : "NA");
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
return 0;
}
-static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev,
+ struct seq_file *s)
{
- char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
- const char *result[ARRAY_SIZE(vlan_offload_items)];
- char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
struct hclge_dbg_vlan_cfg vlan_cfg;
+ char str_id[HCLGE_DBG_ID_LEN];
int ret;
- u8 i, j;
+ u8 i;
- hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
- NULL, ARRAY_SIZE(vlan_offload_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "FUNC_ID PVID ACCEPT_TAG1 ACCEPT_TAG2 ACCEPT_UNTAG1 ");
+ seq_puts(s, "ACCEPT_UNTAG2 INSERT_TAG1 INSERT_TAG2 SHIFT_TAG ");
+ seq_puts(s, "STRIP_TAG1 STRIP_TAG2 DROP_TAG1 DROP_TAG2 ");
+ seq_puts(s, "PRI_ONLY_TAG1 PRI_ONLY_TAG2\n");
for (i = 0; i < func_num; i++) {
ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
@@ -2954,107 +2694,92 @@ static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
if (ret)
return ret;
- sprintf(str_pvid, "%u", vlan_cfg.pvid);
- j = 0;
- result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = str_pvid;
- result[j++] = state_str[vlan_cfg.accept_tag1];
- result[j++] = state_str[vlan_cfg.accept_tag2];
- result[j++] = state_str[vlan_cfg.accept_untag1];
- result[j++] = state_str[vlan_cfg.accept_untag2];
- result[j++] = state_str[vlan_cfg.insert_tag1];
- result[j++] = state_str[vlan_cfg.insert_tag2];
- result[j++] = state_str[vlan_cfg.shift_tag];
- result[j++] = state_str[vlan_cfg.strip_tag1];
- result[j++] = state_str[vlan_cfg.strip_tag2];
- result[j++] = state_str[vlan_cfg.drop_tag1];
- result[j++] = state_str[vlan_cfg.drop_tag2];
- result[j++] = state_str[vlan_cfg.pri_only1];
- result[j++] = state_str[vlan_cfg.pri_only2];
-
- hclge_dbg_fill_content(content, sizeof(content),
- vlan_offload_items, result,
- ARRAY_SIZE(vlan_offload_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_printf(s, "%-9s", hclge_dbg_get_func_id_str(str_id, i));
+ seq_printf(s, "%-6u", vlan_cfg.pvid);
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.accept_tag1));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.accept_tag2));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag1));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag2));
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag1));
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag2));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.shift_tag));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag1));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag2));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag1));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag2));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.pri_only1));
+ seq_printf(s, "%s\n", str_on_off(vlan_cfg.pri_only2));
}
return 0;
}
-static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_vlan_config(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_vlan_filter_config(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
+ return hclge_dbg_dump_vlan_offload_config(hdev, s);
}
-static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_ptp_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_ptp *ptp = hdev->ptp;
u32 sw_cfg = ptp->ptp_cfg;
unsigned int tx_start;
unsigned int last_rx;
- int pos = 0;
u32 hw_cfg;
int ret;
- pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
- ptp->info.name);
- pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
- "yes" : "no");
- pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
- "yes" : "no");
- pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
- "yes" : "no");
+ seq_printf(s, "phc %s's debug info:\n", ptp->info.name);
+ seq_printf(s, "ptp enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
+ seq_printf(s, "ptp tx enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags)));
+ seq_printf(s, "ptp rx enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags)));
last_rx = jiffies_to_msecs(ptp->last_rx);
- pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
- last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
- pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
+ seq_printf(s, "last rx time: %lu.%lu\n",
+ last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
+ seq_printf(s, "rx count: %lu\n", ptp->rx_cnt);
tx_start = jiffies_to_msecs(ptp->tx_start);
- pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
- tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
- pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
- pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
- ptp->tx_skipped);
- pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
- ptp->tx_timeout);
- pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
- ptp->last_tx_seqid);
+ seq_printf(s, "last tx start time: %lu.%lu\n",
+ tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
+ seq_printf(s, "tx count: %lu\n", ptp->tx_cnt);
+ seq_printf(s, "tx skipped count: %lu\n", ptp->tx_skipped);
+ seq_printf(s, "tx timeout count: %lu\n", ptp->tx_timeout);
+ seq_printf(s, "last tx seqid: %u\n", ptp->last_tx_seqid);
+
ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
- sw_cfg, hw_cfg);
+ seq_printf(s, "sw_cfg: %#x, hw_cfg: %#x\n", sw_cfg, hw_cfg);
- pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
- ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
+ seq_printf(s, "tx type: %d, rx filter: %d\n",
+ ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
return 0;
}
-static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_uc(struct seq_file *s, void *data)
{
- hclge_dbg_dump_mac_list(hdev, buf, len, true);
+ hclge_dbg_dump_mac_list(s, true);
return 0;
}
-static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_mc(struct seq_file *s, void *data)
{
- hclge_dbg_dump_mac_list(hdev, buf, len, false);
+ hclge_dbg_dump_mac_list(s, false);
return 0;
}
@@ -3062,156 +2787,156 @@ static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
{
.cmd = HNAE3_DBG_CMD_TM_NODES,
- .dbg_dump = hclge_dbg_dump_tm_nodes,
+ .dbg_read_func = hclge_dbg_dump_tm_nodes,
},
{
.cmd = HNAE3_DBG_CMD_TM_PRI,
- .dbg_dump = hclge_dbg_dump_tm_pri,
+ .dbg_read_func = hclge_dbg_dump_tm_pri,
},
{
.cmd = HNAE3_DBG_CMD_TM_QSET,
- .dbg_dump = hclge_dbg_dump_tm_qset,
+ .dbg_read_func = hclge_dbg_dump_tm_qset,
},
{
.cmd = HNAE3_DBG_CMD_TM_MAP,
- .dbg_dump = hclge_dbg_dump_tm_map,
+ .dbg_read_func = hclge_dbg_dump_tm_map,
},
{
.cmd = HNAE3_DBG_CMD_TM_PG,
- .dbg_dump = hclge_dbg_dump_tm_pg,
+ .dbg_read_func = hclge_dbg_dump_tm_pg,
},
{
.cmd = HNAE3_DBG_CMD_TM_PORT,
- .dbg_dump = hclge_dbg_dump_tm_port,
+ .dbg_read_func = hclge_dbg_dump_tm_port,
},
{
.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
- .dbg_dump = hclge_dbg_dump_tc,
+ .dbg_read_func = hclge_dbg_dump_tc,
},
{
.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
- .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
+ .dbg_read_func = hclge_dbg_dump_qos_pause_cfg,
},
{
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
- .dbg_dump = hclge_dbg_dump_qos_pri_map,
+ .dbg_read_func = hclge_dbg_dump_qos_pri_map,
},
{
.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
- .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ .dbg_read_func = hclge_dbg_dump_qos_dscp_map,
},
{
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
- .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
+ .dbg_read_func = hclge_dbg_dump_qos_buf_cfg,
},
{
.cmd = HNAE3_DBG_CMD_MAC_UC,
- .dbg_dump = hclge_dbg_dump_mac_uc,
+ .dbg_read_func = hclge_dbg_dump_mac_uc,
},
{
.cmd = HNAE3_DBG_CMD_MAC_MC,
- .dbg_dump = hclge_dbg_dump_mac_mc,
+ .dbg_read_func = hclge_dbg_dump_mac_mc,
},
{
.cmd = HNAE3_DBG_CMD_MNG_TBL,
- .dbg_dump = hclge_dbg_dump_mng_table,
+ .dbg_read_func = hclge_dbg_dump_mng_table,
},
{
.cmd = HNAE3_DBG_CMD_LOOPBACK,
- .dbg_dump = hclge_dbg_dump_loopback,
+ .dbg_read_func = hclge_dbg_dump_loopback,
},
{
.cmd = HNAE3_DBG_CMD_PTP_INFO,
- .dbg_dump = hclge_dbg_dump_ptp_info,
+ .dbg_read_func = hclge_dbg_dump_ptp_info,
},
{
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
- .dbg_dump = hclge_dbg_dump_interrupt,
+ .dbg_read_func = hclge_dbg_dump_interrupt,
},
{
.cmd = HNAE3_DBG_CMD_RESET_INFO,
- .dbg_dump = hclge_dbg_dump_rst_info,
+ .dbg_read_func = hclge_dbg_seq_dump_rst_info,
},
{
.cmd = HNAE3_DBG_CMD_IMP_INFO,
- .dbg_dump = hclge_dbg_get_imp_stats_info,
+ .dbg_read_func = hclge_dbg_get_imp_stats_info,
},
{
.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
- .dbg_dump = hclge_dbg_dump_ncl_config,
+ .dbg_read_func = hclge_dbg_dump_ncl_config,
},
{
.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_bios_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_SSU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ssu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_igu_egu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RPU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rpu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_NCSI,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ncsi_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RTC,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rtc_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_PPP,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ppp_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RCB,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rcb_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_TQP,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_tqp_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_MAC,
- .dbg_dump = hclge_dbg_dump_mac,
+ .dbg_read_func = hclge_dbg_dump_mac,
},
{
.cmd = HNAE3_DBG_CMD_REG_DCB,
- .dbg_dump = hclge_dbg_dump_dcb,
+ .dbg_read_func = hclge_dbg_dump_dcb,
},
{
.cmd = HNAE3_DBG_CMD_FD_TCAM,
- .dbg_dump = hclge_dbg_dump_fd_tcam,
+ .dbg_read_func = hclge_dbg_dump_fd_tcam,
},
{
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
- .dbg_dump = hclge_dbg_dump_mac_tnl_status,
+ .dbg_read_func = hclge_dbg_dump_mac_tnl_status,
},
{
.cmd = HNAE3_DBG_CMD_SERV_INFO,
- .dbg_dump = hclge_dbg_dump_serv_info,
+ .dbg_read_func = hclge_dbg_dump_serv_info,
},
{
.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
- .dbg_dump = hclge_dbg_dump_vlan_config,
+ .dbg_read_func = hclge_dbg_dump_vlan_config,
},
{
.cmd = HNAE3_DBG_CMD_FD_COUNTER,
- .dbg_dump = hclge_dbg_dump_fd_counter,
+ .dbg_read_func = hclge_dbg_dump_fd_counter,
},
{
.cmd = HNAE3_DBG_CMD_UMV_INFO,
- .dbg_dump = hclge_dbg_dump_umv_info,
+ .dbg_read_func = hclge_dbg_dump_umv_info,
},
};
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len)
+int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ read_func *func)
{
struct hclge_vport *vport = hclge_get_vport(handle);
const struct hclge_dbg_func *cmd_func;
@@ -3221,11 +2946,8 @@ int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
if (cmd == hclge_dbg_cmd_func[i].cmd) {
cmd_func = &hclge_dbg_cmd_func[i];
- if (cmd_func->dbg_dump)
- return cmd_func->dbg_dump(hdev, buf, len);
- else
- return cmd_func->dbg_dump_reg(hdev, cmd, buf,
- len);
+ *func = cmd_func->dbg_read_func;
+ return 0;
}
}
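For context on the conversion above — each dump callback now writes directly to a seq_file instead of filling a caller-provided buffer and returning a position — here is a minimal sketch of how such a show function is typically exposed through debugfs. The names, the private-data choice and the init helper are illustrative assumptions, not taken from this driver (the real hook-up lives in hns3_debugfs.c):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Illustrative show function with the same signature as the
 * dbg_read_func callbacks registered in hclge_dbg_cmd_func[].
 */
static int example_dbg_show(struct seq_file *s, void *data)
{
	/* assumption: hdev handed in as debugfs private data */
	struct hclge_dev *hdev = s->private;

	seq_printf(s, "num_msi_used: %u\n", hdev->num_msi_used);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_dbg);

/* Hypothetical init helper: single_open() wires s->private to the
 * data pointer passed here, so the show function above can use it.
 */
static void example_dbg_init(struct hclge_dev *hdev, struct dentry *root)
{
	debugfs_create_file("example", 0400, root, hdev,
			    &example_dbg_fops);
}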
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 2b998cbed826..317f79efd54c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -92,6 +92,7 @@ struct hclge_dbg_func {
int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len);
int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd,
char *buf, int len);
+ read_func dbg_read_func;
};
struct hclge_dbg_status_dfx_info {
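The read_func type behind the new dbg_read_func member is declared elsewhere in the series; judging from how hclge_dbg_get_read_func() hands out seq_file show functions through it, it is presumably a typedef along these lines (a sketch inferred from usage, not the verbatim declaration):

typedef int (*read_func)(struct seq_file *s, void *data);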
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 05942fa78b11..f209a05e2033 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6,6 +6,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -489,7 +490,7 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
- * so GFP_ATOMIC is more suitalbe here
+ * so GFP_ATOMIC is more suitable here
*/
desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
@@ -581,7 +582,7 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
int size, u64 *data)
{
u64 *buf = data;
- u32 i;
+ int i;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
@@ -598,7 +599,7 @@ static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
const struct hclge_comm_stats_str strs[],
int size, u8 **data)
{
- u32 i;
+ int i;
if (stringset != ETH_SS_STATS)
return;
@@ -2357,7 +2358,7 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
for (i = 0; i < 2; i++) {
hclge_cmd_setup_basic_desc(&desc[i],
HCLGE_OPC_RX_COM_THRD_ALLOC, false);
- req = (struct hclge_rx_com_thrd *)&desc[i].data;
+ req = (struct hclge_rx_com_thrd *)desc[i].data;
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
@@ -2623,7 +2624,7 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lan
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
- if (!mac->support_autoneg && mac->speed == speed &&
+ if (!mac->support_autoneg && mac->speed == (u32)speed &&
mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
@@ -2651,7 +2652,7 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
if (ret)
return ret;
- hdev->hw.mac.req_speed = speed;
+ hdev->hw.mac.req_speed = (u32)speed;
hdev->hw.mac.req_duplex = duplex;
return 0;
@@ -3445,7 +3446,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed;
+ u32 speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -3574,6 +3575,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
return ret;
}
+static void hclge_set_reset_pending(struct hclge_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+	 * type does not need to be set as pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3594,7 +3606,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
@@ -3604,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3759,7 +3771,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, hdev->misc_vector.name, hdev);
+ IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4052,7 +4064,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
- set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
hclge_reset_task_schedule(hdev);
break;
default:
@@ -4086,6 +4098,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
if (hdev->reset_type != HNAE3_NONE_RESET &&
rst_level < hdev->reset_type)
return HNAE3_NONE_RESET;
@@ -4227,7 +4241,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
return false;
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->rst_stats.reset_fail_cnt++;
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, hdev->reset_type);
dev_info(&hdev->pdev->dev,
"re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
@@ -4470,14 +4484,26 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGE_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
+ BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
+
struct hclge_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
+ /* To prevent reset triggered by hclge_reset_event */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
+
set_bit(rst_type, &hdev->default_reset_request);
}
static void hclge_reset_timer(struct timer_list *t)
{
- struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclge_dev *hdev = timer_container_of(hdev, t, reset_timer);
/* if default_reset_request has no value, it means that this reset
* request has already been handled, so just return here
@@ -4846,7 +4872,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4864,7 +4890,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
u8 tuple_sets;
@@ -6963,7 +6989,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node2;
- int cnt = 0;
+ u32 cnt = 0;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
@@ -7849,7 +7875,7 @@ static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
if (ret)
return ret;
- return phy_loopback(phydev, true);
+ return phy_loopback(phydev, true, 0);
}
static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
@@ -7857,7 +7883,7 @@ static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
{
int ret;
- ret = phy_loopback(phydev, false);
+ ret = phy_loopback(phydev, false, 0);
if (ret)
return ret;
@@ -7974,7 +8000,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
ret = hclge_tqp_enable(handle, en);
if (ret)
dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -8197,14 +8223,14 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
- desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32;
if (clr)
- desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
}
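
Switching the VF-ID masks from (1 << bit_num) to (1U << bit_num) matters when bit_num can reach 31: shifting a signed int into the sign bit is undefined behaviour in C, while the unsigned literal keeps the shift well defined. A tiny sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int bit_num = 31;
	uint32_t mask = ~(1U << bit_num);	/* well defined: 0x7fffffff */

	printf("mask = 0x%08x\n", mask);
	return 0;
}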
@@ -9266,7 +9292,7 @@ static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
static int init_mgr_tbl(struct hclge_dev *hdev)
{
int ret;
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
@@ -9550,33 +9576,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
return false;
}
-int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport,
+ bool request_en)
{
- struct hclge_dev *hdev = vport->back;
bool need_en;
int ret;
- mutex_lock(&hdev->vport_lock);
-
- vport->req_vlan_fltr_en = request_en;
-
need_en = hclge_need_enable_vport_vlan_filter(vport);
- if (need_en == vport->cur_vlan_fltr_en) {
- mutex_unlock(&hdev->vport_lock);
+ if (need_en == vport->cur_vlan_fltr_en)
return 0;
- }
ret = hclge_set_vport_vlan_filter(vport, need_en);
- if (ret) {
- mutex_unlock(&hdev->vport_lock);
+ if (ret)
return ret;
- }
vport->cur_vlan_fltr_en = need_en;
+ return 0;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+ vport->req_vlan_fltr_en = request_en;
+ ret = __hclge_enable_vport_vlan_filter(vport, request_en);
mutex_unlock(&hdev->vport_lock);
- return 0;
+ return ret;
}
static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
@@ -10597,16 +10626,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
&vport->state))
continue;
- ret = hclge_enable_vport_vlan_filter(vport,
- vport->req_vlan_fltr_en);
+ mutex_lock(&hdev->vport_lock);
+ ret = __hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to sync vlan filter state for vport%u, ret = %d\n",
vport->vport_id, ret);
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
+ mutex_unlock(&hdev->vport_lock);
return;
}
+ mutex_unlock(&hdev->vport_lock);
}
}
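
The VLAN-filter refactor above follows the usual lock-split idiom: __hclge_enable_vport_vlan_filter() assumes vport_lock is already held, while the public wrapper and the sync task each take the lock themselves. A pthread-based sketch of the same shape, where the names and the "hardware write" are placeholders:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;
static bool cur_fltr_en;

/* Caller must hold vport_lock. */
static int __enable_vlan_filter(bool request_en)
{
	if (request_en == cur_fltr_en)
		return 0;
	cur_fltr_en = request_en;	/* pretend the hardware write succeeded */
	return 0;
}

/* Public entry point: takes the lock around the bare helper. */
static int enable_vlan_filter(bool request_en)
{
	int ret;

	pthread_mutex_lock(&vport_lock);
	ret = __enable_vlan_filter(request_en);
	pthread_mutex_unlock(&vport_lock);
	return ret;
}

int main(void)
{
	int ret = enable_vlan_filter(true);

	printf("ret = %d, cur = %d\n", ret, cur_fltr_en);
	return ret;
}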
@@ -10687,7 +10719,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
mutex_lock(&hdev->vport_lock);
/* VF's mps must fit within hdev->mps */
- if (vport->vport_id && max_frm_size > hdev->mps) {
+ if (vport->vport_id && (u32)max_frm_size > hdev->mps) {
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
} else if (vport->vport_id) {
@@ -10698,7 +10730,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater than VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
- if (max_frm_size < hdev->vport[i].mps) {
+ if ((u32)max_frm_size < hdev->vport[i].mps) {
dev_err(&hdev->pdev->dev,
"failed to set pf mtu for less than vport %d, mps = %u.\n",
i, hdev->vport[i].mps);
@@ -11174,9 +11206,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.dcb_ets_active));
dev_info(dev, "MQPRIO %s\n",
- handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.mqprio_active));
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
@@ -11188,7 +11220,7 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.reset_cnt;
+ u32 rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
ret = client->ops->init_instance(&vport->nic);
@@ -11232,7 +11264,7 @@ static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hclge_dev *hdev = ae_dev->priv;
struct hnae3_client *client;
- int rst_cnt;
+ u32 rst_cnt;
int ret;
if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
@@ -11397,7 +11429,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev,
- "can't set consistent PCI DMA");
+ "can't set consistent PCI DMA\n");
goto err_disable_device;
}
dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
@@ -11466,7 +11498,7 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_REMOVING, &hdev->state);
if (hdev->reset_timer.function)
- del_timer_sync(&hdev->reset_timer);
+ timer_delete_sync(&hdev->reset_timer);
if (hdev->service_task.work.func)
cancel_delayed_work_sync(&hdev->service_task);
}
@@ -11881,9 +11913,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_init_rxd_adv_layout(hdev);
- /* Enable MISC vector(vector0) */
- hclge_enable_vector(&hdev->misc_vector, true);
-
ret = hclge_init_wol(hdev);
if (ret)
dev_warn(&pdev->dev,
@@ -11896,6 +11925,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
+ /* Enable MISC vector(vector0) */
+ enable_irq(hdev->misc_vector.vector_irq);
+ hclge_enable_vector(&hdev->misc_vector, true);
+
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -11949,7 +11982,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret) {
dev_err(&hdev->pdev->dev,
"Set vf %d mac spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -11957,7 +11990,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret)
dev_err(&hdev->pdev->dev,
"Set vf %d vlan spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -12061,7 +12094,7 @@ static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
- max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ max_tx_rate < 0 || (u32)max_tx_rate > hdev->hw.mac.max_speed) {
dev_err(&hdev->pdev->dev,
"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
@@ -12086,7 +12119,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
if (!vport)
return -EINVAL;
- if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ if (!force && (u32)max_tx_rate == vport->vf_info.max_tx_rate)
return 0;
ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
@@ -12301,7 +12334,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- synchronize_irq(hdev->misc_vector.vector_irq);
+ disable_irq(hdev->misc_vector.vector_irq);
/* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
@@ -12837,7 +12870,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
- .dbg_read_cmd = hclge_dbg_read_cmd,
+ .dbg_get_read_func = hclge_dbg_get_read_func,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
@@ -12877,7 +12910,7 @@ static struct hnae3_ae_algo ae_algo = {
static int __init hclge_init(void)
{
- pr_info("%s is initializing\n", HCLGE_NAME);
+ pr_debug("%s is initializing\n", HCLGE_NAME);
hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
@@ -12892,9 +12925,11 @@ static int __init hclge_init(void)
static void __exit hclge_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
+ hnae3_release_unload_lock();
}
module_init(hclge_init);
module_exit(hclge_exit);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index b9fc719880bb..032b472d2368 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1142,8 +1142,8 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len);
+int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ read_func *func);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 59c863306657..c7ff12a6c076 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -749,16 +749,17 @@ static int hclge_get_rss_key(struct hclge_vport *vport,
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
struct hclge_comm_rss_cfg *rss_cfg;
+ int rss_hash_key_size;
u8 index;
index = mbx_req->msg.data[0];
rss_cfg = &hdev->rss_cfg;
+ rss_hash_key_size = sizeof(rss_cfg->rss_hash_key);
/* Check the query index of rss_hash_key from VF, make sure it does
* not exceed the size of rss_hash_key.
*/
- if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
- sizeof(rss_cfg->rss_hash_key)) {
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > rss_hash_key_size) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
@@ -800,7 +801,7 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
- u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
+ int tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
return tail == hw->hw.cmq.crq.next_to_use;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 80079657afeb..96553109f44c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -151,7 +151,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
mdio_bus->parent = &hdev->pdev->dev;
mdio_bus->priv = hdev;
- mdio_bus->phy_mask = ~(1 << mac->phy_addr);
+ mdio_bus->phy_mask = ~(1U << mac->phy_addr);
ret = mdiobus_register(mdio_bus);
if (ret) {
dev_err(mdio_bus->parent,
@@ -258,7 +258,7 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return;
- phy_loopback(phydev, false);
+ phy_loopback(phydev, false, 0);
phy_start(phydev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 5505caea88e9..4bd52eab3914 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -2,6 +2,7 @@
// Copyright (c) 2021 Hisilicon Limited.
#include <linux/skbuff.h>
+#include <linux/string_choices.h>
#include "hclge_main.h"
#include "hnae3.h"
@@ -58,6 +59,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
struct hclge_dev *hdev = vport->back;
struct hclge_ptp *ptp = hdev->ptp;
+ if (!ptp)
+ return false;
+
if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
ptp->tx_skipped++;
@@ -223,7 +227,7 @@ static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
if (ret)
dev_err(&hdev->pdev->dev,
"failed to %s ptp interrupt, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -436,6 +440,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
ptp->info.settime64 = hclge_ptp_settime;
ptp->info.n_alarm = 0;
+
+ spin_lock_init(&ptp->lock);
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hdev->ptp = ptp;
+
ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
if (IS_ERR(ptp->clock)) {
dev_err(&hdev->pdev->dev,
@@ -447,12 +458,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
return -ENODEV;
}
- spin_lock_init(&ptp->lock);
- ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
- ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
- ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
- hdev->ptp = ptp;
-
return 0;
}
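
Moving spin_lock_init(), the io_base setup, the default timestamp config and the hdev->ptp assignment ahead of ptp_clock_register() follows the initialize-before-publish rule: registration may invoke a callback before it returns, so everything the callback touches must already be valid. A generic sketch of that ordering, with illustrative names:

#include <stdio.h>

struct clock_ops {
	void (*gettime)(void *priv);
};

static void my_gettime(void *priv)
{
	int *ready = priv;

	printf("callback sees ready = %d\n", *ready);
}

/* Stand-in for a registration call that may fire a callback immediately. */
static void clock_register(const struct clock_ops *ops, void *priv)
{
	ops->gettime(priv);
}

int main(void)
{
	static const struct clock_ops ops = { .gettime = my_gettime };
	int ready = 0;

	ready = 1;			/* initialize state first ... */
	clock_register(&ops, &ready);	/* ... then publish it */
	return 0;
}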
@@ -480,7 +485,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_get_cycle(hdev);
if (ret)
- return ret;
+ goto out;
}
ret = hclge_ptp_int_en(hdev, true);
@@ -492,14 +497,14 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init freq, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts mode, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ktime_get_real_ts64(&ts);
@@ -507,7 +512,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts time, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
@@ -515,6 +520,9 @@ int hclge_ptp_init(struct hclge_dev *hdev)
return 0;
+out_clear_int:
+ clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+ hclge_ptp_int_en(hdev, false);
out:
hclge_ptp_destroy_clock(hdev);
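
The added out_clear_int label gives hclge_ptp_init() a second unwind step: a failure after interrupts were enabled now disables them again before falling through to the original clock-destroy path. The pattern, stripped down to userspace stubs:

#include <stdio.h>

static int enable_int(void)     { puts("int enabled");      return 0; }
static void disable_int(void)   { puts("int disabled"); }
static int init_freq(void)      { puts("freq init failed"); return -1; }
static void destroy_clock(void) { puts("clock destroyed"); }

static int ptp_init(void)
{
	int ret;

	ret = enable_int();
	if (ret)
		goto out;

	ret = init_freq();
	if (ret)
		goto out_clear_int;	/* undo the enable before the common path */

	return 0;

out_clear_int:
	disable_int();
out:
	destroy_clock();
	return ret;
}

int main(void)
{
	return ptp_init() ? 1 : 0;
}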
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 63483636c074..61faddcc3dd0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -25,7 +25,7 @@ struct ifreq;
#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
#define HCLGE_PTP_TIME_SEC_L_REG 0x54
#define HCLGE_PTP_TIME_NSEC_REG 0x58
-#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TIME_NSEC_MASK 0x3fffffffLL
#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
#define HCLGE_PTP_TIME_SYNC_REG 0x5C
#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
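
The nanosecond mask changes from GENMASK(29, 0) to an explicit 0x3fffffffLL. The assumed motivation is type width: GENMASK() yields an unsigned long, which is only 32 bits on 32-bit builds, while the LL literal is at least 64 bits everywhere, which is the safer choice when the mask takes part in 64-bit time arithmetic. A two-line check:

#include <stdio.h>

int main(void)
{
	printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
	printf("sizeof(0x3fffffffLL) = %zu\n", sizeof(0x3fffffffLL));
	return 0;
}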
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
index 43c1c18fa81f..8c057192aae6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
@@ -510,9 +510,9 @@ out:
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
struct hnae3_knic_private_info *kinfo)
{
-#define HCLGE_RING_REG_OFFSET 0x200
#define HCLGE_RING_INT_REG_OFFSET 0x4
+ struct hnae3_queue *tqp;
int i, j, reg_num;
int data_num_sum;
u32 *reg = data;
@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < kinfo->num_tqps; j++) {
reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
+ tqp = kinfo->tqp[j];
for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGE_RING_REG_OFFSET * j);
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGE_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
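
Both register-dump paths (PF here, VF later in this diff) stop computing ring addresses as "PF base + j * fixed stride" and instead read through each queue's own io_base with readl_relaxed(). A userspace sketch of the addressing change, with MMIO simulated by plain arrays and all offsets illustrative:

#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES	2
#define NUM_RING_REGS	3

/* One simulated register window per queue instead of one global BAR
 * indexed with a per-ring stride.
 */
static uint32_t fake_window[NUM_QUEUES][0x80];

static uint32_t read_reg(const volatile uint32_t *io_base, uint32_t offset)
{
	return io_base[offset / 4];
}

int main(void)
{
	static const uint32_t ring_reg_addr_list[NUM_RING_REGS] = { 0x0, 0x4, 0x8 };
	unsigned int i, j;

	fake_window[1][1] = 0xabcd;	/* seed queue 1, offset 0x4 */

	for (j = 0; j < NUM_QUEUES; j++) {
		const volatile uint32_t *io_base = fake_window[j];

		for (i = 0; i < NUM_RING_REGS; i++)
			printf("q%u reg[0x%x] = 0x%x\n", j,
			       (unsigned int)ring_reg_addr_list[i],
			       (unsigned int)read_reg(io_base, ring_reg_addr_list[i]));
	}
	return 0;
}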
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 2f6ffb88e700..8fcf220a120d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -606,7 +606,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
@@ -624,7 +624,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 tuple_sets;
@@ -1292,9 +1292,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
rtnl_unlock();
}
-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1303,6 +1302,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
+ if (ret)
+ return ret;
+
+ hdev->rxvtag_strip_en = enable;
+ return 0;
+}
+
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
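
hclgevf_en_hw_strip_rxvtag() is split above so the mailbox command lives in a bare _cmd helper and the wrapper records the requested state in hdev->rxvtag_strip_en; the reset path then replays the cached value instead of forcing stripping back on. A sketch of that caching idea, with the structure and names simplified:

#include <stdbool.h>
#include <stdio.h>

struct vf_dev {
	bool rxvtag_strip_en;
};

static int send_strip_cmd(bool enable)
{
	printf("mailbox: rx vlan strip -> %s\n", enable ? "on" : "off");
	return 0;
}

static int en_hw_strip_rxvtag(struct vf_dev *hdev, bool enable)
{
	int ret = send_strip_cmd(enable);

	if (ret)
		return ret;
	hdev->rxvtag_strip_en = enable;	/* remember for the next reset */
	return 0;
}

static int reset_hdev(struct vf_dev *hdev)
{
	/* replay the cached setting rather than forcing "true" */
	return send_strip_cmd(hdev->rxvtag_strip_en);
}

int main(void)
{
	struct vf_dev hdev = { .rxvtag_strip_en = true };

	en_hw_strip_rxvtag(&hdev, false);
	return reset_hdev(&hdev);
}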
@@ -1393,6 +1405,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
return ret;
}
+static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+ * type does not need to be set as pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1542,7 +1565,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_type);
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1662,6 +1685,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
return rst_level;
}
@@ -1671,14 +1696,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclgevf_dev *hdev = ae_dev->priv;
- dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
-
if (hdev->default_reset_request)
hdev->reset_level =
hclgevf_get_reset_level(&hdev->default_reset_request);
else
hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
+ hdev->reset_level);
+
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
@@ -1689,8 +1715,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGEVF_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
+ BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
+ BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
+
struct hclgevf_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
+ /* To prevent a reset triggered by hclgevf_reset_event */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
set_bit(rst_type, &hdev->default_reset_request);
}
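
Both hclge_set_def_reset_request() and its VF counterpart now validate the requested type against a whitelist bitmask and record HNAE3_NONE_RESET for anything else, so the later reset event handling treats the bogus request as a no-op. The check in isolation, with illustrative enum values:

#include <stdio.h>

enum reset_type { RESET_NONE, RESET_FUNC, RESET_GLOBAL, RESET_IMP, RESET_MAX };

#define SUPPORTED_RESETS \
	((1U << RESET_FUNC) | (1U << RESET_GLOBAL) | (1U << RESET_IMP))

static unsigned long default_reset_request;

static void set_def_reset_request(enum reset_type type)
{
	if (!((1U << type) & SUPPORTED_RESETS)) {
		/* record "none" so the event handler ignores the request */
		default_reset_request |= 1UL << RESET_NONE;
		fprintf(stderr, "unsupported reset type %d\n", type);
		return;
	}
	default_reset_request |= 1UL << type;
}

int main(void)
{
	set_def_reset_request(RESET_MAX);	/* rejected */
	set_def_reset_request(RESET_FUNC);	/* recorded */
	printf("request bitmap: 0x%lx\n", default_reset_request);
	return 0;
}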
@@ -1847,14 +1885,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
*/
if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
- set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- set_bit(hdev->reset_level, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_level);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
hclgevf_reset_task_schedule(hdev);
@@ -1977,7 +2015,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
- set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
@@ -2019,7 +2057,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
static void hclgevf_reset_timer(struct timer_list *t)
{
- struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclgevf_dev *hdev = timer_container_of(hdev, t, reset_timer);
hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
hclgevf_reset_task_schedule(hdev);
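
The from_timer() to timer_container_of() rename in both drivers is mechanical; either way the enclosing structure is recovered from the embedded timer pointer, i.e. plain container_of() arithmetic. A userspace demonstration of that arithmetic:

#include <stddef.h>
#include <stdio.h>

/* container_of in plain C: recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { int expires; };
struct device { int id; struct timer reset_timer; };

static void timer_cb(struct timer *t)
{
	struct device *dev = container_of(t, struct device, reset_timer);

	printf("timer fired for device %d\n", dev->id);
}

int main(void)
{
	struct device d = { .id = 7 };

	timer_cb(&d.reset_timer);
	return 0;
}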
@@ -2178,12 +2216,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
tc_valid, tc_size);
}
-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
+ bool rxvtag_strip_en)
{
struct hnae3_handle *nic = &hdev->nic;
int ret;
- ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+ ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to enable rx vlan offload, ret = %d\n", ret);
@@ -2287,6 +2326,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+ /* timer needs to be initialized before misc irq */
+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
@@ -2424,7 +2465,7 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.rst_cnt;
+ u32 rst_cnt = hdev->rst_stats.rst_cnt;
int ret;
ret = client->ops->init_instance(&hdev->nic);
@@ -2584,7 +2625,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
- dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
goto err_disable_device;
}
@@ -2851,7 +2892,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2966,7 +3007,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2986,7 +3027,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
- timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
return 0;
@@ -3054,11 +3094,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
- struct hnae3_handle *nic = &hdev->nic;
- struct hnae3_knic_private_info *kinfo = &nic->kinfo;
-
- return min_t(u32, hdev->rss_size_max,
- hdev->num_tqps / kinfo->tc_info.num_tc);
+ return min(hdev->rss_size_max, hdev->num_tqps);
}
/**
@@ -3383,8 +3419,10 @@ static int __init hclgevf_init(void)
static void __exit hclgevf_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
+ hnae3_release_unload_lock();
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);
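
Both module exit paths now bracket unregistration and workqueue destruction with hnae3_acquire_unload_lock()/hnae3_release_unload_lock(). The lock's implementation is outside this diff; the presumed intent is simply to serialize teardown against another context that uses the framework, along these lines (userspace sketch, names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t unload_lock = PTHREAD_MUTEX_INITIALIZER;

static void unregister_algo(void)    { puts("algo unregistered"); }
static void destroy_workqueue_(void) { puts("workqueue destroyed"); }

static void module_exit_path(void)
{
	pthread_mutex_lock(&unload_lock);
	unregister_algo();
	destroy_workqueue_();
	pthread_mutex_unlock(&unload_lock);
}

int main(void)
{
	module_exit_path();
	return 0;
}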
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index cccef3228461..0208425ab594 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -253,6 +253,7 @@ struct hclgevf_dev {
int *vector_irq;
bool gro_en;
+ bool rxvtag_strip_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 85c2a634c8f9..f5c99ca54369 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -159,7 +159,7 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->hw.cmq.crq.next_to_use;
+ return tail == (u32)hw->hw.cmq.crq.next_to_use;
}
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 6db415d8b917..9de01e344e27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -123,40 +123,42 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
void *data)
{
-#define HCLGEVF_RING_REG_OFFSET 0x200
#define HCLGEVF_RING_INT_REG_OFFSET 0x4
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, j, reg_um;
+ struct hnae3_queue *tqp;
+ int i, j, reg_num;
u32 *reg = data;
*version = hdev->fw_version;
reg += hclgevf_reg_get_header(reg);
/* fetching per-VF registers values from VF PCIe register space */
- reg_um = ARRAY_SIZE(cmdq_reg_addr_list);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- reg_um = ARRAY_SIZE(common_reg_addr_list);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- reg_um = ARRAY_SIZE(ring_reg_addr_list);
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
- for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGEVF_RING_REG_OFFSET * j);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_num, reg);
+ tqp = &hdev->htqp[j].q;
+ for (i = 0; i < reg_num; i++)
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGEVF_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
- reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR,
+ reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
HCLGEVF_RING_INT_REG_OFFSET * j);
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index a1aa6c1f966e..6812be8dc64f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -640,7 +640,7 @@ static struct platform_driver hns_mdio_driver = {
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
- .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
+ .acpi_match_table = hns_mdio_acpi_match,
},
};