Diffstat (limited to 'drivers/net/ethernet/hisilicon')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c              |   2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h                |  20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c         | 101
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c            |  56
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h            |  16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c         |   8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c     | 165
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h     |  14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c     |  27
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 330
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c     |   4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c    | 333
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h    |  15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c     |  31
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c      | 215
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h      |  54
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c   | 204
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h   |  20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c  | 180
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h  |   8
20 files changed, 1262 insertions(+), 541 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 858cb293152a..5d7824d2b4d4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1502,7 +1502,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- assert(skb->queue_mapping < ndev->ae_handle->q_num);
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
return hns_nic_net_xmit_hw(ndev, skb,
&tx_ring_data(priv, skb->queue_mapping));
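A note on why this one-liner matters: struct net_device has no ae_handle member, so the old expression dereferenced a nonexistent field; the handle lives in the driver's private struct. The driver's assert() compiles away unless DEBUG is defined, roughly as in the sketch below (paraphrased from hnae.h, not part of this diff), which is why the stale expression could sit in the tree without breaking normal builds:

#ifdef DEBUG
#define assert(expr) \
	do { \
		if (!(expr)) \
			pr_err("Assertion failed! %s, %s, %s, line %d\n", \
			       #expr, __FILE__, __func__, __LINE__); \
	} while (0)
#else
#define assert(expr)	/* no-op: the argument is never even evaluated */
#endif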
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a7daf6d4511e..e9e60a935f40 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -272,7 +272,7 @@ struct hnae3_ring_chain_node {
};
#define HNAE3_IS_TX_RING(node) \
- (((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX)
+ (((node)->flag & 1 << HNAE3_RING_TYPE_B) == HNAE3_RING_TYPE_TX)
/* device specification info from firmware */
struct hnae3_dev_specs {
@@ -284,13 +284,14 @@ struct hnae3_dev_specs {
u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
+ u16 max_frm_size;
+ u16 max_qset_num;
};
struct hnae3_client_ops {
int (*init_instance)(struct hnae3_handle *handle);
void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
void (*link_status_change)(struct hnae3_handle *handle, bool state);
- int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
void (*process_hw_error)(struct hnae3_handle *handle,
@@ -410,8 +411,6 @@ struct hnae3_ae_dev {
* Get the len of the regs dump
* get_rss_key_size()
* Get rss key size
- * get_rss_indir_size()
- * Get rss indirection table size
* get_rss()
* Get rss table
* set_rss()
@@ -465,6 +464,8 @@ struct hnae3_ae_dev {
* Delete clsflower rule
* cls_flower_active
* Check if any cls flower rule exist
+ * dbg_read_cmd
+ * Execute debugfs read command.
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -553,7 +554,6 @@ struct hnae3_ae_ops {
int (*get_regs_len)(struct hnae3_handle *handle);
u32 (*get_rss_key_size)(struct hnae3_handle *handle);
- u32 (*get_rss_indir_size)(struct hnae3_handle *handle);
int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key,
u8 *hfunc);
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
@@ -620,6 +620,8 @@ struct hnae3_ae_ops {
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
+ int (*dbg_read_cmd)(struct hnae3_handle *handle, const char *cmd_buf,
+ char *buf, int len);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -773,9 +775,13 @@ struct hnae3_handle {
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
#define hnae3_set_bit(origin, shift, val) \
- hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+ hnae3_set_field(origin, 0x1 << (shift), shift, val)
#define hnae3_get_bit(origin, shift) \
- hnae3_get_field((origin), (0x1 << (shift)), (shift))
+ hnae3_get_field(origin, 0x1 << (shift), shift)
+
+#define HNAE3_DBG_TM_NODES "tm_nodes"
+#define HNAE3_DBG_TM_PRI "tm_priority"
+#define HNAE3_DBG_TM_QSET "tm_qset"
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 9d4e9c053a8f..dd11c57027bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -7,7 +7,7 @@
#include "hnae3.h"
#include "hns3_enet.h"
-#define HNS3_DBG_READ_LEN 256
+#define HNS3_DBG_READ_LEN 65536
#define HNS3_DBG_WRITE_LEN 1024
static struct dentry *hns3_dbgfs_root;
@@ -162,7 +162,7 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
continue;
dev_info(&h->pdev->dev,
- " %4d %4d %4d\n",
+ " %4d %4u %4d\n",
i, global_qid, priv->ring[i].tqp_vector->vector_irq);
}
@@ -389,6 +389,9 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h)
kinfo->tc_info.num_tc);
dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
+ dev_info(priv->dev, "MAX frame size: %u\n", dev_specs->max_frm_size);
+ dev_info(priv->dev, "MAX TM RATE: %uMbps\n", dev_specs->max_tm_rate);
+ dev_info(priv->dev, "MAX QSET number: %u\n", dev_specs->max_qset_num);
}
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
@@ -420,6 +423,30 @@ static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
return (*ppos = len);
}
+static int hns3_dbg_check_cmd(struct hnae3_handle *handle, char *cmd_buf)
+{
+ int ret = 0;
+
+ if (strncmp(cmd_buf, "help", 4) == 0)
+ hns3_dbg_help(handle);
+ else if (strncmp(cmd_buf, "queue info", 10) == 0)
+ ret = hns3_dbg_queue_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "queue map", 9) == 0)
+ ret = hns3_dbg_queue_map(handle);
+ else if (strncmp(cmd_buf, "bd info", 7) == 0)
+ ret = hns3_dbg_bd_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "dev capability", 14) == 0)
+ hns3_dbg_dev_caps(handle);
+ else if (strncmp(cmd_buf, "dev spec", 8) == 0)
+ hns3_dbg_dev_specs(handle);
+ else if (handle->ae_algo->ops->dbg_run_cmd)
+ ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+ else
+ ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -427,7 +454,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
struct hns3_nic_priv *priv = handle->priv;
char *cmd_buf, *cmd_buf_tmp;
int uncopied_bytes;
- int ret = 0;
+ int ret;
if (*ppos != 0)
return 0;
@@ -458,23 +485,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
count = cmd_buf_tmp - cmd_buf + 1;
}
- if (strncmp(cmd_buf, "help", 4) == 0)
- hns3_dbg_help(handle);
- else if (strncmp(cmd_buf, "queue info", 10) == 0)
- ret = hns3_dbg_queue_info(handle, cmd_buf);
- else if (strncmp(cmd_buf, "queue map", 9) == 0)
- ret = hns3_dbg_queue_map(handle);
- else if (strncmp(cmd_buf, "bd info", 7) == 0)
- ret = hns3_dbg_bd_info(handle, cmd_buf);
- else if (strncmp(cmd_buf, "dev capability", 14) == 0)
- hns3_dbg_dev_caps(handle);
- else if (strncmp(cmd_buf, "dev spec", 8) == 0)
- hns3_dbg_dev_specs(handle);
- else if (handle->ae_algo->ops->dbg_run_cmd)
- ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
- else
- ret = -EOPNOTSUPP;
-
+ ret = hns3_dbg_check_cmd(handle, cmd_buf);
if (ret)
hns3_dbg_help(handle);
@@ -484,6 +495,39 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
return count;
}
+static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct hnae3_handle *handle = filp->private_data;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hns3_nic_priv *priv = handle->priv;
+ char *cmd_buf, *read_buf;
+ ssize_t size = 0;
+ int ret = 0;
+
+ read_buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
+ if (!read_buf)
+ return -ENOMEM;
+
+ cmd_buf = filp->f_path.dentry->d_iname;
+
+ if (ops->dbg_read_cmd)
+ ret = ops->dbg_read_cmd(handle, cmd_buf, read_buf,
+ HNS3_DBG_READ_LEN);
+
+ if (ret) {
+ dev_info(priv->dev, "unknown command\n");
+ goto out;
+ }
+
+ size = simple_read_from_buffer(buffer, count, ppos, read_buf,
+ strlen(read_buf));
+
+out:
+ kfree(read_buf);
+ return size;
+}
+
static const struct file_operations hns3_dbg_cmd_fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -491,14 +535,31 @@ static const struct file_operations hns3_dbg_cmd_fops = {
.write = hns3_dbg_cmd_write,
};
+static const struct file_operations hns3_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hns3_dbg_read,
+};
+
void hns3_dbg_init(struct hnae3_handle *handle)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const char *name = pci_name(handle->pdev);
+ struct dentry *entry_dir;
handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
&hns3_dbg_cmd_fops);
+
+ entry_dir = debugfs_create_dir("tm", handle->hnae3_dbgfs);
+ if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2)
+ debugfs_create_file(HNAE3_DBG_TM_NODES, 0600, entry_dir, handle,
+ &hns3_dbg_fops);
+ debugfs_create_file(HNAE3_DBG_TM_PRI, 0600, entry_dir, handle,
+ &hns3_dbg_fops);
+ debugfs_create_file(HNAE3_DBG_TM_QSET, 0600, entry_dir, handle,
+ &hns3_dbg_fops);
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
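The new read path above is deliberately generic: every file created with hns3_dbg_fops shares one handler, and the file's own name (d_iname) is passed down as the command string, so adding a new dump only means creating another debugfs file. A minimal self-contained sketch of the same pattern, with hypothetical example_* names standing in for ops->dbg_read_cmd():

static void example_fill(const char *name, char *buf, int len)
{
	/* format some state keyed by the file's own name */
	scnprintf(buf, len, "%s: contents go here\n", name);
}

static ssize_t example_dbg_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *buf;
	ssize_t size;

	buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* the debugfs file name doubles as the command selector */
	example_fill(filp->f_path.dentry->d_iname, buf, HNS3_DBG_READ_LEN);

	/* honours *ppos and short reads; copies at most strlen(buf) bytes */
	size = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
	kfree(buf);
	return size;
}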
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 405e49033417..bf4302a5cf95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -32,7 +32,7 @@
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"
-#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
+#define hns3_set_field(origin, shift, val) ((origin) |= (val) << (shift))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
#define hns3_rl_err(fmt, ...) \
@@ -1070,7 +1070,7 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
* HW checksum of the non-IP packets and GSO packets is handled at
* different place in the following code
*/
- if (skb->csum_not_inet || skb_is_gso(skb) ||
+ if (skb_csum_is_sctp(skb) || skb_is_gso(skb) ||
!test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
return false;
@@ -2329,7 +2329,7 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
pci_ers_result_t ret;
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
+ dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -2800,12 +2800,6 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
-static bool hns3_page_is_reusable(struct page *page)
-{
- return page_to_nid(page) == numa_mem_id() &&
- !page_is_pfmemalloc(page);
-}
-
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
{
return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
@@ -2823,10 +2817,11 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
- /* Avoid re-using remote pages, or the stack is still using the page
- * when page_offset rollback to zero, flag default unreuse
+ /* Avoid re-using remote and pfmemalloc pages, or the stack is still
+ * using the page when page_offset rollback to zero, flag default
+ * unreuse
*/
- if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
+ if (!dev_page_is_reusable(desc_cb->priv) ||
(!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
return;
@@ -3083,8 +3078,8 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
- /* We can reuse buffer as-is, just make sure it is local */
- if (likely(hns3_page_is_reusable(desc_cb->priv)))
+ /* We can reuse buffer as-is, just make sure it is reusable */
+ if (dev_page_is_reusable(desc_cb->priv))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
__page_frag_cache_drain(desc_cb->priv,
@@ -4089,7 +4084,7 @@ out_when_alloc_ring_memory:
return -ENOMEM;
}
-int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
+static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
@@ -4098,7 +4093,6 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
hns3_fini_ring(&priv->ring[i]);
hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
}
- return 0;
}
/* Set mac addr if it is configured. or leave it to the AE driver */
@@ -4286,8 +4280,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
hns3_dbg_init(handle);
- /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
- netdev->max_mtu = HNS3_MAX_MTU;
+ netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
@@ -4327,7 +4320,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
@@ -4353,9 +4345,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_nic_dealloc_vector_data(priv);
- ret = hns3_uninit_all_ring(priv);
- if (ret)
- netdev_err(netdev, "uninit ring error\n");
+ hns3_uninit_all_ring(priv);
hns3_put_ring_config(priv);
@@ -4384,20 +4374,6 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
}
}
-static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct net_device *ndev = kinfo->netdev;
-
- if (tc > HNAE3_MAX_TC)
- return -EINVAL;
-
- if (!ndev)
- return -ENODEV;
-
- return hns3_nic_set_real_num_queue(ndev);
-}
-
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
@@ -4664,7 +4640,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
@@ -4682,13 +4657,11 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_nic_dealloc_vector_data(priv);
- ret = hns3_uninit_all_ring(priv);
- if (ret)
- netdev_err(netdev, "uninit ring error\n");
+ hns3_uninit_all_ring(priv);
hns3_put_ring_config(priv);
- return ret;
+ return 0;
}
static int hns3_reset_notify(struct hnae3_handle *handle,
@@ -4834,7 +4807,6 @@ static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
.link_status_change = hns3_link_status_change,
- .setup_tc = hns3_client_setup_tc,
.reset_notify = hns3_reset_notify,
.process_hw_error = hns3_process_hw_error,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 0a7b606e7c93..d069b04ee587 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -56,9 +56,8 @@ enum hns3_nic_state {
#define HNS3_RING_MIN_PENDING 72
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
-#define HNS3_MAC_MAX_FRAME 9728
-#define HNS3_MAX_MTU \
- (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
+#define HNS3_MAX_MTU(max_frm_size) \
+ ((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
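Worked example for the new parameterized macro, using the default 9728-byte frame size:

/* HNS3_MAX_MTU(9728) = 9728 - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN)
 *                    = 9728 - (14 + 4 + 2 * 4)
 *                    = 9702
 * which matches the "(ETH_MIN_MTU(kernel default) - 9702)" comment
 * dropped from hns3_client_init() in hns3_enet.c above.
 */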
@@ -555,7 +554,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
}
#define hns3_read_dev(a, reg) \
- hns3_read_reg((a)->io_base, (reg))
+ hns3_read_reg((a)->io_base, reg)
static inline bool hns3_nic_resetting(struct net_device *netdev)
{
@@ -565,7 +564,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
}
#define hns3_write_dev(a, reg, value) \
- hns3_write_reg((a)->io_base, (reg), (value))
+ hns3_write_reg((a)->io_base, reg, value)
#define ring_to_dev(ring) ((ring)->dev)
@@ -589,15 +588,15 @@ static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
- for (pos = (head).ring; pos; pos = pos->next)
+ for (pos = (head).ring; (pos); pos = (pos)->next)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
-#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
-#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_usec_to_reg(int_rl) ((int_rl) >> 2)
#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
void hns3_ethtool_set_ops(struct net_device *netdev);
@@ -606,7 +605,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
-int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index e2fc443fe92c..adcec4ea7cb9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -456,7 +456,7 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_",
+ n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%u_",
prefix, i);
size_left = (ETH_GSTRING_LEN - 1) - n1;
@@ -859,11 +859,9 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- if (!h->ae_algo->ops->get_rss_indir_size)
- return 0;
-
- return h->ae_algo->ops->get_rss_indir_size(h);
+ return ae_dev->dev_specs.rss_ind_tbl_size;
}
static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index b728be4737f8..1bd0ddfaec4d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -189,38 +189,53 @@ static bool hclge_is_special_opcode(u16 opcode)
return false;
}
-static int hclge_cmd_convert_err_code(u16 desc_ret)
+struct errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
+ int num)
{
- switch (desc_ret) {
- case HCLGE_CMD_EXEC_SUCCESS:
- return 0;
- case HCLGE_CMD_NO_AUTH:
- return -EPERM;
- case HCLGE_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case HCLGE_CMD_QUEUE_FULL:
- return -EXFULL;
- case HCLGE_CMD_NEXT_ERR:
- return -ENOSR;
- case HCLGE_CMD_UNEXE_ERR:
- return -ENOTBLK;
- case HCLGE_CMD_PARA_ERR:
- return -EINVAL;
- case HCLGE_CMD_RESULT_ERR:
- return -ERANGE;
- case HCLGE_CMD_TIMEOUT:
- return -ETIME;
- case HCLGE_CMD_HILINK_ERR:
- return -ENOLINK;
- case HCLGE_CMD_QUEUE_ILLEGAL:
- return -ENXIO;
- case HCLGE_CMD_INVALID:
- return -EBADR;
- default:
- return -EIO;
+ struct hclge_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
}
}
+static int hclge_cmd_convert_err_code(u16 desc_ret)
+{
+ struct errcode hclge_cmd_errcode[] = {
+ {HCLGE_CMD_EXEC_SUCCESS, 0},
+ {HCLGE_CMD_NO_AUTH, -EPERM},
+ {HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+ {HCLGE_CMD_QUEUE_FULL, -EXFULL},
+ {HCLGE_CMD_NEXT_ERR, -ENOSR},
+ {HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
+ {HCLGE_CMD_PARA_ERR, -EINVAL},
+ {HCLGE_CMD_RESULT_ERR, -ERANGE},
+ {HCLGE_CMD_TIMEOUT, -ETIME},
+ {HCLGE_CMD_HILINK_ERR, -ENOLINK},
+ {HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
+ {HCLGE_CMD_INVALID, -EBADR},
+ };
+ u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclge_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
int num, int ntc)
{
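The switch-to-table conversion trades a jump table for a linear scan, which is harmless on this slow error path (a dozen entries, consulted once per failed command). One possible refinement, not part of this patch, is making the table static const so it lives in .rodata instead of being rebuilt on the stack on every call:

/* sketch of a possible follow-up, not in this diff */
static const struct errcode hclge_cmd_errcode[] = {
	{ HCLGE_CMD_EXEC_SUCCESS,  0 },
	{ HCLGE_CMD_NO_AUTH,       -EPERM },
	{ HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
	/* ... remaining IMP codes as above ... */
};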
@@ -244,6 +259,44 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
return hclge_cmd_convert_err_code(desc_ret);
}
+static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
+ int num, int ntc)
+{
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+ bool is_completed = false;
+ u32 timeout = 0;
+ int handle, ret;
+
+ /**
+ * If the command is sync, wait for the firmware to write back,
+ * if multi descriptors to be sent, use the first one to check
+ */
+ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclge_cmd_csq_done(hw)) {
+ is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclge_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclge_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
+}
+
/**
* hclge_cmd_send - send command to command queue
* @hw: pointer to the hw struct
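For reference, the extracted hclge_cmd_check_result() helper preserves the original completion semantics exactly:

/* Sync commands busy-poll hclge_cmd_csq_done() in 1 us steps, for at
 * most hw->cmq.tx_timeout iterations; a timeout yields -EBADE. The CSQ
 * is then cleaned unconditionally, so descriptors are reclaimed even
 * when the command failed or timed out.
 */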
@@ -257,11 +310,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_cmq_ring *csq = &hw->cmq.csq;
- struct hclge_desc *desc_to_use;
- bool complete = false;
- u32 timeout = 0;
- int handle = 0;
- int retval;
+ int ret;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -285,49 +334,17 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
* which will be use for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
+
+ hclge_cmd_copy_desc(hw, desc, num);
/* Write to hardware */
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
- /**
- * If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclge_cmd_csq_done(hw)) {
- complete = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!complete)
- retval = -EBADE;
- else
- retval = hclge_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclge_cmd_csq_clean(hw);
- if (handle < 0)
- retval = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
+ ret = hclge_cmd_check_result(hw, desc, num, ntc);
spin_unlock_bh(&hw->cmq.csq.lock);
- return retval;
+ return ret;
}
static void hclge_set_default_capability(struct hclge_dev *hdev)
@@ -363,6 +380,15 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
}
+static __le32 hclge_build_api_caps(void)
+{
+ u32 api_caps = 0;
+
+ hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return cpu_to_le32(api_caps);
+}
+
static enum hclge_cmd_status
hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
{
@@ -373,6 +399,7 @@ hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
resp = (struct hclge_query_version_cmd *)desc.data;
+ resp->api_caps = hclge_build_api_caps();
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
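Since HCLGE_API_CAP_FLEX_RSS_TBL_B is the first (and so far only) enumerator, the capability word reduces to a single bit. The effect is to advertise to firmware, inside the FW-version query itself, that this driver can cope with a flexible RSS indirection table size rather than assuming the historical fixed one:

/* hclge_build_api_caps() == cpu_to_le32(BIT(HCLGE_API_CAP_FLEX_RSS_TBL_B))
 *                        == cpu_to_le32(0x1)
 * carried in the formerly reserved third word of
 * struct hclge_query_version_cmd (now 'api_caps').
 */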
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index edfadb5cb1c3..ff52a65b4cff 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -160,6 +160,7 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_TM_NODES = 0x0816,
HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
HCLGE_OPC_QSET_DFX_STS = 0x0844,
HCLGE_OPC_PRI_DFX_STS = 0x0845,
@@ -385,11 +386,15 @@ enum HCLGE_CAP_BITS {
HCLGE_CAP_UDP_TUNNEL_CSUM_B,
};
+enum HCLGE_API_CAP_BITS {
+ HCLGE_API_CAP_FLEX_RSS_TBL_B,
+};
+
#define HCLGE_QUERY_CAP_LENGTH 3
struct hclge_query_version_cmd {
__le32 firmware;
__le32 hardware;
- __le32 rsv;
+ __le32 api_caps;
__le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
};
@@ -1126,7 +1131,8 @@ struct hclge_dev_specs_0_cmd {
#define HCLGE_DEF_MAX_INT_GL 0x1FE0U
struct hclge_dev_specs_1_cmd {
- __le32 rsv0;
+ __le16 max_frm_size;
+ __le16 max_qset_num;
__le16 max_int_gl;
u8 rsv1[18];
};
@@ -1138,9 +1144,9 @@ static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
}
#define hclge_write_dev(a, reg, value) \
- hclge_write_reg((a)->io_base, (reg), (value))
+ hclge_write_reg((a)->io_base, reg, value)
#define hclge_read_dev(a, reg) \
- hclge_read_reg((a)->io_base, (reg))
+ hclge_read_reg((a)->io_base, reg)
static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index e08d11b8ecf1..5bf5db91d16c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -176,29 +176,6 @@ static int hclge_map_update(struct hclge_dev *hdev)
return hclge_rss_init_hw(hdev);
}
-static int hclge_client_setup_tc(struct hclge_dev *hdev)
-{
- struct hclge_vport *vport = hdev->vport;
- struct hnae3_client *client;
- struct hnae3_handle *handle;
- int ret;
- u32 i;
-
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- handle = &vport[i].nic;
- client = handle->client;
-
- if (!client || !client->ops || !client->ops->setup_tc)
- continue;
-
- ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
int ret;
@@ -257,10 +234,6 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
goto err_out;
- ret = hclge_client_setup_tc(hdev);
- if (ret)
- goto err_out;
-
ret = hclge_notify_init_up(hdev);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 8f6dea5198cf..6b1d197df881 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -696,17 +696,16 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
struct hclge_qs_to_pri_link_cmd *map;
struct hclge_tqp_tx_queue_tc_cmd *tc;
+ u16 group_id, queue_id, qset_id;
enum hclge_opcode_type cmd;
+ u8 grp_num, pri_id, tc_id;
struct hclge_desc desc;
- int queue_id, group_id;
- int tc_id, qset_id;
- int pri_id, ret;
u16 qs_id_l;
u16 qs_id_h;
- u8 grp_num;
+ int ret;
u32 i;
- ret = kstrtouint(cmd_buf, 0, &queue_id);
+ ret = kstrtou16(cmd_buf, 0, &queue_id);
queue_id = (ret != 0) ? 0 : queue_id;
cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
@@ -754,7 +753,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
tc_id = tc->tc_id & 0x7;
dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
- dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
+ dev_info(&hdev->pdev->dev, "%04u | %04u | %02u | %02u\n",
queue_id, qset_id, pri_id, tc_id);
if (!hnae3_dev_dcb_supported(hdev)) {
@@ -800,6 +799,140 @@ err_tm_map_cmd_send:
cmd, ret);
}
+static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int pos = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump tm nodes, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+
+ pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
+ pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
+ nodes->pg_base_id, nodes->pg_num);
+ pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
+ nodes->pri_base_id, nodes->pri_num);
+ pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
+ le16_to_cpu(nodes->qset_base_id),
+ le16_to_cpu(nodes->qset_num));
+ pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
+ le16_to_cpu(nodes->queue_base_id),
+ le16_to_cpu(nodes->queue_num));
+
+ return 0;
+}
+
+static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_pri_shaper_para c_shaper_para;
+ struct hclge_pri_shaper_para p_shaper_para;
+ u8 pri_num, sch_mode, weight;
+ char *sch_mode_str;
+ int pos = 0;
+ int ret;
+ u8 i;
+
+ ret = hclge_tm_get_pri_num(hdev, &pri_num);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ pos += scnprintf(buf + pos, len - pos,
+ "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U ");
+ pos += scnprintf(buf + pos, len - pos,
+ "P_IR_S P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
+
+ for (i = 0; i < pri_num; i++) {
+ ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_weight(hdev, i, &weight);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_shaper(hdev, i,
+ HCLGE_OPC_TM_PRI_C_SHAPPING,
+ &c_shaper_para);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_shaper(hdev, i,
+ HCLGE_OPC_TM_PRI_P_SHAPPING,
+ &p_shaper_para);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+
+ pos += scnprintf(buf + pos, len - pos,
+ "%04u %4s %3u %3u %3u %3u ",
+ i, sch_mode_str, weight, c_shaper_para.ir_b,
+ c_shaper_para.ir_u, c_shaper_para.ir_s);
+ pos += scnprintf(buf + pos, len - pos,
+ "%3u %3u %1u %6u ",
+ c_shaper_para.bs_b, c_shaper_para.bs_s,
+ c_shaper_para.flag, c_shaper_para.rate);
+ pos += scnprintf(buf + pos, len - pos,
+ "%3u %3u %3u %3u %3u ",
+ p_shaper_para.ir_b, p_shaper_para.ir_u,
+ p_shaper_para.ir_s, p_shaper_para.bs_b,
+ p_shaper_para.bs_s);
+ pos += scnprintf(buf + pos, len - pos, "%1u %6u\n",
+ p_shaper_para.flag, p_shaper_para.rate);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+{
+ u8 priority, link_vld, sch_mode, weight;
+ char *sch_mode_str;
+ int ret, pos;
+ u16 qset_num;
+ u16 i;
+
+ ret = hclge_tm_get_qset_num(hdev, &qset_num);
+ if (ret)
+ return ret;
+
+ pos = scnprintf(buf, len, "ID MAP_PRI LINK_VLD MODE DWRR\n");
+
+ for (i = 0; i < qset_num; i++) {
+ ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_weight(hdev, i, &weight);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+ pos += scnprintf(buf + pos, len - pos,
+ "%04u %4u %1u %4s %3u\n",
+ i, priority, link_vld, sch_mode_str, weight);
+ }
+
+ return 0;
+}
+
static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
{
struct hclge_cfg_pause_param_cmd *pause_param;
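All three tm dump helpers above rely on the scnprintf() accumulation idiom: unlike snprintf(), scnprintf() returns the number of characters actually written (excluding the trailing NUL), so pos can never overrun len and len - pos can never go negative. A minimal illustration of the idiom:

static int example_dump(char *buf, int len)
{
	int pos = 0;
	int i;

	pos += scnprintf(buf + pos, len - pos, "ID  SQUARE\n");
	for (i = 0; i < 4; i++)
		pos += scnprintf(buf + pos, len - pos, "%02d  %6d\n",
				 i, i * i);
	return pos;	/* total bytes written, always < len */
}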
@@ -851,39 +984,39 @@ static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
}
-static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
- struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
- struct hclge_rx_priv_wl_buf *rx_priv_wl;
- struct hclge_rx_com_wl *rx_packet_cnt;
- struct hclge_rx_com_thrd *rx_com_thrd;
- struct hclge_rx_com_wl *rx_com_wl;
- enum hclge_opcode_type cmd;
- struct hclge_desc desc[2];
+ struct hclge_desc desc;
int i, ret;
- cmd = HCLGE_OPC_TX_BUFF_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
-
- tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+ tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
- cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+ struct hclge_desc desc;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "\n");
- rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+ rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
le16_to_cpu(rx_buf_cmd->buf_num[i]));
@@ -891,43 +1024,61 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
le16_to_cpu(rx_buf_cmd->shared_buf));
- cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_wl *rx_com_wl;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
- rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
dev_info(&hdev->pdev->dev, "\n");
dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
le16_to_cpu(rx_com_wl->com_wl.high),
le16_to_cpu(rx_com_wl->com_wl.low));
- cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_wl *rx_packet_cnt;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
- rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
dev_info(&hdev->pdev->dev,
"rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
le16_to_cpu(rx_packet_cnt->com_wl.high),
le16_to_cpu(rx_packet_cnt->com_wl.low));
- dev_info(&hdev->pdev->dev, "\n");
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports rx priv wl\n");
- return;
- }
- cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
- hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_priv_wl_buf *rx_priv_wl;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
@@ -944,13 +1095,21 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
le16_to_cpu(rx_priv_wl->tc_wl[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
- hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_thrd *rx_com_thrd;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
@@ -967,6 +1126,52 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
i + HCLGE_TC_NUM_ONE_DESC,
le16_to_cpu(rx_com_thrd->com_thrd[i].high),
le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+
+ return 0;
+}
+
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+ enum hclge_opcode_type cmd;
+ int ret;
+
+ cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports rx priv wl\n");
+ return;
+ }
+
+ cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
return;
err_qos_cmd_send:
@@ -1465,8 +1670,6 @@ static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
const char *cmd_buf)
{
-#define HCLGE_MAX_QSET_NUM 1024
-
u16 qsid;
int ret;
@@ -1476,9 +1679,9 @@ static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
return;
}
- if (qsid >= HCLGE_MAX_QSET_NUM) {
- dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
- qsid);
+ if (qsid >= hdev->ae_dev->dev_specs.max_qset_num) {
+ dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-%u]\n",
+ qsid, hdev->ae_dev->dev_specs.max_qset_num - 1);
return;
}
@@ -1591,3 +1794,22 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
return 0;
}
+
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+ char *buf, int len)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (strncmp(cmd_buf, HNAE3_DBG_TM_NODES,
+ strlen(HNAE3_DBG_TM_NODES)) == 0)
+ return hclge_dbg_dump_tm_nodes(hdev, buf, len);
+ else if (strncmp(cmd_buf, HNAE3_DBG_TM_PRI,
+ strlen(HNAE3_DBG_TM_PRI)) == 0)
+ return hclge_dbg_dump_tm_pri(hdev, buf, len);
+ else if (strncmp(cmd_buf, HNAE3_DBG_TM_QSET,
+ strlen(HNAE3_DBG_TM_QSET)) == 0)
+ return hclge_dbg_dump_tm_qset(hdev, buf, len);
+
+ return -EINVAL;
+}
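hclge_dbg_read_cmd() closes the loop started in hns3_debugfs.c: the file names defined in hnae3.h arrive here verbatim and select a dump helper; anything else returns -EINVAL, which the generic read handler reports as "unknown command".

/* tm_nodes    -> hclge_dbg_dump_tm_nodes()
 * tm_priority -> hclge_dbg_dump_tm_pri()
 * tm_qset     -> hclge_dbg_dump_tm_qset()
 * (other)     -> -EINVAL
 */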
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 9ee55ee0487d..0ca7f1b984bf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1073,7 +1073,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
* This function querys number of mpf and pf buffer descriptors.
*/
static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
- int *mpf_bd_num, int *pf_bd_num)
+ u32 *mpf_bd_num, u32 *pf_bd_num)
{
struct device *dev = &hdev->pdev->dev;
u32 mpf_min_bd_num, pf_min_bd_num;
@@ -1102,7 +1102,7 @@ static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
*mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
*pf_bd_num = le32_to_cpu(desc_bd.data[1]);
if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) {
- dev_err(dev, "Invalid bd num: mpf(%d), pf(%d)\n",
+ dev_err(dev, "Invalid bd num: mpf(%u), pf(%u)\n",
*mpf_bd_num, *pf_bd_num);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c242883fea5d..34b744df6709 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
+#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -24,7 +25,7 @@
#include "hnae3.h"
#define HCLGE_NAME "hclge"
-#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_BUF_SIZE_UNIT 256U
@@ -55,8 +56,6 @@
#define HCLGE_LINK_STATUS_MS 10
-#define HCLGE_VF_VPORT_START_NUM 1
-
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
@@ -628,7 +627,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -636,7 +635,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -930,7 +929,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
return 0;
}
-static int hclge_parse_speed(int speed_cmd, int *speed)
+static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
switch (speed_cmd) {
case 6:
@@ -1373,6 +1372,8 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
+ ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1391,7 +1392,9 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+ ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
+ ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1406,8 +1409,12 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ if (!dev_specs->max_qset_num)
+ dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ if (!dev_specs->max_frm_size)
+ dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
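hclge_check_dev_specs() carries the compatibility rule for every spec field added in this series: firmware that predates a field reports it as zero, and the driver backfills the compiled-in default, so the two new fields behave exactly like the old constants on old firmware:

/* old firmware reports 0 -> fall back to the historical constants:
 *   max_frm_size == 0  ->  HCLGE_MAC_MAX_FRAME (9728, the value the
 *                          old fixed HNS3_MAC_MAX_FRAME used)
 *   max_qset_num == 0  ->  HCLGE_MAX_QSET_NUM (1024, matching the
 *                          hard-coded limit removed from
 *                          hclge_debugfs.c)
 */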
@@ -4237,11 +4244,6 @@ static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
return HCLGE_RSS_KEY_SIZE;
}
-static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
-{
- return HCLGE_RSS_IND_TBL_SIZE;
-}
-
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
@@ -4283,6 +4285,7 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
{
struct hclge_rss_indirection_table_cmd *req;
struct hclge_desc desc;
+ int rss_cfg_tbl_num;
u8 rss_msb_oft;
u8 rss_msb_val;
int ret;
@@ -4291,8 +4294,10 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
u32 j;
req = (struct hclge_rss_indirection_table_cmd *)desc.data;
+ rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGE_RSS_CFG_TBL_SIZE;
- for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
hclge_cmd_setup_basic_desc
(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
@@ -4398,6 +4403,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
int i;
@@ -4422,7 +4428,7 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
/* Get indirect table */
if (indir)
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = vport->rss_indirection_tbl[i];
return 0;
@@ -4431,6 +4437,7 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u8 hash_algo;
@@ -4462,7 +4469,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
/* Update the shadow RSS table with user specified qids */
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
@@ -4494,22 +4501,12 @@ static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
return hash_sets;
}
-static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
+ struct ethtool_rxnfc *nfc,
+ struct hclge_rss_input_tuple_cmd *req)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
u8 tuple_sets;
- int ret;
-
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
@@ -4554,6 +4551,32 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return -EINVAL;
}
+ return 0;
+}
+
+static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
+
+ ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init rss tuple cmd, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -4573,52 +4596,69 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return 0;
}
-static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
+ u8 *tuple_sets)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- u8 tuple_sets;
-
- nfc->data = 0;
-
- switch (nfc->flow_type) {
+ switch (flow_type) {
case TCP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
- tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
+ *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
break;
default:
return -EINVAL;
}
- if (!tuple_sets)
- return 0;
+ return 0;
+}
+
+static u64 hclge_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
if (tuple_sets & HCLGE_D_PORT_BIT)
- nfc->data |= RXH_L4_B_2_3;
+ tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGE_S_PORT_BIT)
- nfc->data |= RXH_L4_B_0_1;
+ tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGE_D_IP_BIT)
- nfc->data |= RXH_IP_DST;
+ tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGE_S_IP_BIT)
- nfc->data |= RXH_IP_SRC;
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
+
+static int hclge_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u8 tuple_sets;
+ int ret;
+
+ nfc->data = 0;
+
+ ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclge_convert_rss_tuple(tuple_sets);
return 0;
}
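The split into hclge_get_vport_rss_tuple() plus hclge_convert_rss_tuple() separates lookup from translation without changing the reported bits. A worked example of the conversion:

/* Example: ipv4_tcp_en == HCLGE_S_IP_BIT | HCLGE_D_IP_BIT |
 *                         HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT
 * converts to RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
 * i.e. what "ethtool -n <dev> rx-flow-hash tcp4" reports as hashing
 * on src/dst IP plus src/dst L4 ports.
 */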
@@ -4703,14 +4743,15 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
int i, j;
for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport[j].rss_indirection_tbl[i] =
i % vport[j].alloc_rss_size;
}
}
-static void hclge_rss_init_cfg(struct hclge_dev *hdev)
+static int hclge_rss_init_cfg(struct hclge_dev *hdev)
{
+ u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport;
@@ -4718,6 +4759,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ u16 *rss_ind_tbl;
+
vport[i].rss_tuple_sets.ipv4_tcp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv4_udp_en =
@@ -4739,11 +4782,19 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_algo = rss_algo;
+ rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ vport[i].rss_indirection_tbl = rss_ind_tbl;
memcpy(vport[i].rss_hash_key, hclge_hash_key,
HCLGE_RSS_KEY_SIZE);
}
hclge_rss_indir_init_cfg(hdev);
+
+ return 0;
}
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
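With the indirection table now sized from dev_specs.rss_ind_tbl_size and devm-allocated per vport (so it is released automatically with the device), the default layout produced by hclge_rss_indir_init_cfg() is unchanged, a plain round-robin spread:

/* e.g. rss_ind_tbl_size == 512, alloc_rss_size == 16:
 *   rss_indirection_tbl[i] = i % 16  ->  0, 1, ..., 15, 0, 1, ...
 * so hash buckets are distributed evenly across the allocated queues.
 */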
@@ -5491,12 +5542,10 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
BIT(INNER_IP_TOS);
/* check whether src/dst ip address used */
- if (!spec->ip6src[0] && !spec->ip6src[1] &&
- !spec->ip6src[2] && !spec->ip6src[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
*unused_tuple |= BIT(INNER_SRC_IP);
- if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
- !spec->ip6dst[2] && !spec->ip6dst[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
*unused_tuple |= BIT(INNER_DST_IP);
if (!spec->psrc)
@@ -5521,12 +5570,10 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* check whether src/dst ip address used */
- if (!spec->ip6src[0] && !spec->ip6src[1] &&
- !spec->ip6src[2] && !spec->ip6src[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
*unused_tuple |= BIT(INNER_SRC_IP);
- if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
- !spec->ip6dst[2] && !spec->ip6dst[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
*unused_tuple |= BIT(INNER_DST_IP);
if (!spec->l4_proto)
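ipv6_addr_any() (from <net/ipv6.h>, newly included at the top of this file) is the canonical form of the open-coded four-word comparison it replaces: it returns true only when the whole 128-bit address is zero, roughly as below (simplified; the in-tree version also has a 64-bit fast path):

static inline bool ipv6_addr_any_sketch(const struct in6_addr *a)
{
	return (a->s6_addr32[0] | a->s6_addr32[1] |
		a->s6_addr32[2] | a->s6_addr32[3]) == 0;
}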
@@ -5578,7 +5625,7 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
if (fs->m_ext.vlan_tci &&
be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
dev_err(&hdev->pdev->dev,
- "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
return -EINVAL;
}
@@ -8306,36 +8353,18 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev)
}
}
-void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_build_del_list(struct list_head *list,
+ bool is_del_list,
+ struct list_head *tmp_del_list)
{
- int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
struct hclge_mac_node *mac_cfg, *tmp;
- struct hclge_dev *hdev = vport->back;
- struct list_head tmp_del_list, *list;
- int ret;
-
- if (mac_type == HCLGE_MAC_ADDR_UC) {
- list = &vport->uc_mac_list;
- unsync = hclge_rm_uc_addr_common;
- } else {
- list = &vport->mc_mac_list;
- unsync = hclge_rm_mc_addr_common;
- }
-
- INIT_LIST_HEAD(&tmp_del_list);
-
- if (!is_del_list)
- set_bit(vport->vport_id, hdev->vport_config_block);
-
- spin_lock_bh(&vport->mac_list_lock);
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
switch (mac_cfg->state) {
case HCLGE_MAC_TO_DEL:
case HCLGE_MAC_ACTIVE:
list_del(&mac_cfg->node);
- list_add_tail(&mac_cfg->node, &tmp_del_list);
+ list_add_tail(&mac_cfg->node, tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
if (is_del_list) {
@@ -8345,10 +8374,18 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
break;
}
}
+}
- spin_unlock_bh(&vport->mac_list_lock);
+static void hclge_unsync_del_list(struct hclge_vport *vport,
+ int (*unsync)(struct hclge_vport *vport,
+ const unsigned char *addr),
+ bool is_del_list,
+ struct list_head *tmp_del_list)
+{
+ struct hclge_mac_node *mac_cfg, *tmp;
+ int ret;
- list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+ list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
ret = unsync(vport, mac_cfg->mac_addr);
if (!ret || ret == -ENOENT) {
/* clear all mac addr from hardware, but remain these
@@ -8366,6 +8403,35 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
mac_cfg->state = HCLGE_MAC_TO_DEL;
}
}
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_build_del_list(list, is_del_list, &tmp_del_list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
spin_lock_bh(&vport->mac_list_lock);
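
The apparent motivation for the split above: hclge_unsync_del_list() issues a firmware command per entry, so it is kept outside the BH-disabled mac_list_lock section, and only the cheap list manipulation runs under the lock. In outline:

	spin_lock_bh(&vport->mac_list_lock);
	hclge_build_del_list(list, is_del_list, &tmp_del_list);	/* list ops only */
	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list); /* fw I/O */
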
@@ -8772,32 +8838,16 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan,
- __be16 proto)
+static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ struct hclge_desc *desc)
{
- struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
- struct hclge_desc desc[2];
u8 vf_byte_val;
u8 vf_byte_off;
int ret;
- /* if vf vlan table is full, firmware will close vf vlan filter, it
- * is unable and unnecessary to add new vlan id to vf vlan filter.
- * If spoof check is enable, and vf vlan is full, it shouldn't add
- * new vlan, because tx packets with these vlan id will be dropped.
- */
- if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
- if (vport->vf_info.spoofchk && vlan) {
- dev_err(&hdev->pdev->dev,
- "Can't add vlan due to spoof check is on and vf vlan table is full\n");
- return -EPERM;
- }
- return 0;
- }
-
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
hclge_cmd_setup_basic_desc(&desc[1],
@@ -8827,12 +8877,22 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return ret;
}
+ return 0;
+}
+
+static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_vf_cfg_cmd *req;
+
+ req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+
if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY 2
- if (!req0->resp_code || req0->resp_code == 1)
+ if (!req->resp_code || req->resp_code == 1)
return 0;
- if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
set_bit(vfid, hdev->vf_vlan_full);
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
@@ -8841,10 +8901,10 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
dev_err(&hdev->pdev->dev,
"Add vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
- if (!req0->resp_code)
+ if (!req->resp_code)
return 0;
/* vf vlan filter is disabled when vf vlan table is full,
@@ -8852,17 +8912,46 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
* Just return 0 without warning, avoid massive verbose
 * print logs when unloading.
*/
- if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
+ if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
return 0;
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
}
return -EIO;
}
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ __be16 proto)
+{
+ struct hclge_vport *vport = &hdev->vport[vfid];
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* If the vf vlan table is full, firmware will close the vf vlan
+ * filter, so it is invalid and unnecessary to add a new vlan id to
+ * the vf vlan filter. If spoof check is enabled and the vf vlan
+ * table is full, no new vlan should be added either, because tx
+ * packets with these vlan ids will be dropped.
+ */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
+ return 0;
+ }
+
+ ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
+ if (ret)
+ return ret;
+
+ return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
+}
+
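
Taken together, hclge_set_vf_vlan_common() is now a three-step sequence: the table-full/spoof-check guard above, the command build/send in hclge_set_vf_vlan_filter_cmd(), and the response parse in hclge_check_vf_vlan_cmd_status(). The resp_code values the parser distinguishes, as encoded in the hunks above:

	/* add  (is_kill == false): 0 or 1 -> success;
	 *			    2 (HCLGE_VF_VLAN_NO_ENTRY) -> vf vlan table
	 *			    full, fw disables the vf vlan filter;
	 *			    anything else -> -EIO
	 * kill (is_kill == true):  0 -> success;
	 *			    1 (HCLGE_VF_VLAN_DEL_NO_FOUND) -> entry
	 *			    already absent, treated as success;
	 *			    anything else -> -EIO
	 */
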
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
u16 vlan_id, bool is_kill)
{
@@ -9664,7 +9753,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* HW supports 2 layer vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
- max_frm_size > HCLGE_MAC_MAX_FRAME)
+ max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
return -EINVAL;
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
@@ -9813,12 +9902,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
+ struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
+ queue_id);
+ return;
+ }
+
queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
@@ -10581,7 +10677,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- hclge_rss_init_cfg(hdev);
+ ret = hclge_rss_init_cfg(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
ret = hclge_rss_init_hw(hdev);
if (ret) {
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
@@ -10809,7 +10910,7 @@ static void hclge_reset_vf_rate(struct hclge_dev *hdev)
}
}
-static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
@@ -10830,7 +10931,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
struct hclge_dev *hdev = vport->back;
int ret;
- ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+ ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
if (ret)
return ret;
@@ -11072,6 +11173,7 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
@@ -11115,11 +11217,12 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
goto out;
/* Reinitializes the rss indirect table according to the new RSS size */
- rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
+ GFP_KERNEL);
if (!rss_indir)
return -ENOMEM;
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_indir[i] = i % kinfo->rss_size;
ret = hclge_set_rss(handle, rss_indir, NULL, 0);
@@ -11799,7 +11902,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_get_rss_key_size,
- .get_rss_indir_size = hclge_get_rss_indir_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,
@@ -11850,6 +11952,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd,
+ .dbg_read_cmd = hclge_dbg_read_cmd,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ca46bc9110d7..19d7f28773f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -17,6 +17,8 @@
#define HCLGE_MAX_PF_NUM 8
+#define HCLGE_VF_VPORT_START_NUM 1
+
#define HCLGE_RD_FIRST_STATS_NUM 2
#define HCLGE_RD_OTHER_STATS_NUM 4
@@ -44,15 +46,12 @@
#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
#define HCLGE_CMDQ_RX_TAIL_REG 0x27024
#define HCLGE_CMDQ_RX_HEAD_REG 0x27028
-#define HCLGE_CMDQ_INTR_SRC_REG 0x27100
#define HCLGE_CMDQ_INTR_STS_REG 0x27104
#define HCLGE_CMDQ_INTR_EN_REG 0x27108
#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
/* bar registers for common func */
#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
-#define HCLGE_RAS_OTHER_STS_REG 0x20B00
-#define HCLGE_FUNC_RESET_STS_REG 0x20C00
#define HCLGE_GRO_EN_REG 0x28000
/* bar registers for rcb */
@@ -97,8 +96,6 @@
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
-#define HCLGE_RSS_CFG_TBL_NUM \
- (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
@@ -148,6 +145,8 @@
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
+#define HCLGE_MAX_QSET_NUM 1024
+
enum HLCGE_PORT_TYPE {
HOST_PORT,
NETWORK_PORT
@@ -726,7 +725,7 @@ struct hclge_vf_vlan_cfg {
* x = (~k) & v
* y = (k ^ ~v) & k
*/
-#define calc_x(x, k, v) ((x) = (~(k) & (v)))
+#define calc_x(x, k, v) (x = ~(k) & (v))
#define calc_y(y, k, v) \
do { \
const typeof(k) _k_ = (k); \
@@ -922,7 +921,7 @@ struct hclge_vport {
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
/* User configured lookup table entries */
- u16 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+ u16 *rss_indirection_tbl;
int rss_algo; /* User configured hash algorithm */
/* User configured rss tuple sets */
struct hclge_rss_tuple_cfg rss_tuple_sets;
@@ -1006,6 +1005,8 @@ int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+ char *buf, int len);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 754c09ada901..51a36e74f088 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -56,7 +56,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
resp_pf_to_vf->msg.resp_status = resp;
} else {
dev_warn(&hdev->pdev->dev,
- "failed to send response to VF, response status %d is out-of-bound\n",
+ "failed to send response to VF, response status %u is out-of-bound\n",
resp);
resp_pf_to_vf->msg.resp_status = EIO;
}
@@ -158,21 +158,31 @@ static int hclge_get_ring_chain_from_mbx(
struct hclge_vport *vport)
{
struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ struct hclge_dev *hdev = vport->back;
int ring_num;
- int i = 0;
+ int i;
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
return -ENOMEM;
+ for (i = 0; i < ring_num; i++) {
+ if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+ dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
+ req->msg.param[i].tqp_index,
+ vport->nic.kinfo.rss_size - 1);
+ return -EINVAL;
+ }
+ }
+
hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
- req->msg.param[i].ring_type);
+ req->msg.param[0].ring_type);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
- [req->msg.param[i].tqp_index]);
+ [req->msg.param[0].tqp_index]);
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
+ HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
cur_chain = ring_chain;
@@ -597,6 +607,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
index = mbx_req->msg.data[0];
+ /* Check the query index of rss_hash_key from VF, make sure no
+ * more than the size of rss_hash_key.
+ */
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
+ sizeof(vport[0].rss_hash_key)) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to get the rss hash key, the index(%u) invalid !\n",
+ index);
+ return;
+ }
+
memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
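
A worked bound for the new check, assuming the usual definitions (HCLGE_RSS_KEY_SIZE = 40, HCLGE_RSS_MBX_RESP_LEN = 8 — neither value is visible in this hunk):

	/* (index + 1) * 8 <= 40  =>  index <= 4, so a VF may fetch key
	 * chunks 0..4; any larger index would read past rss_hash_key[]
	 * and is now rejected with the warning above.
	 */
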
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 82742a64f3b7..151afd1f0688 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -41,8 +41,9 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
struct hclge_shaper_ir_para *ir_para,
u32 max_tm_rate)
{
+#define DEFAULT_SHAPER_IR_B 126
#define DIVISOR_CLK (1000 * 8)
-#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
+#define DEFAULT_DIVISOR_IR_B (DEFAULT_SHAPER_IR_B * DIVISOR_CLK)
static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
6 * 256, /* Priority level */
@@ -69,10 +70,10 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
* ir_calc = ---------------- * 1000
* tick * 1
*/
- ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
+ ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
if (ir_calc == ir) {
- ir_para->ir_b = 126;
+ ir_para->ir_b = DEFAULT_SHAPER_IR_B;
ir_para->ir_u = 0;
ir_para->ir_s = 0;
@@ -81,7 +82,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Increasing the denominator to select ir_s value */
while (ir_calc >= ir && ir) {
ir_s_calc++;
- ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
+ ir_calc = DEFAULT_DIVISOR_IR_B /
+ (tick * (1 << ir_s_calc));
}
ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
@@ -92,12 +94,12 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
while (ir_calc < ir) {
ir_u_calc++;
- numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
+ numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
ir_calc = (numerator + (tick >> 1)) / tick;
}
if (ir_calc == ir) {
- ir_para->ir_b = 126;
+ ir_para->ir_b = DEFAULT_SHAPER_IR_B;
} else {
u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
ir_para->ir_b = (ir * tick + (denominator >> 1)) /
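
A worked instance of the renamed constants, assuming ir is expressed in Mbit/s: at the priority level, tick = 6 * 256 = 1536, so the first-pass rate computed from the default ir_b is

	/* ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick
	 *	    = (126 * 8000 + 767) / 1536
	 *	    = 656
	 *
	 * so a request of exactly 656 takes the early path with
	 * ir_b = DEFAULT_SHAPER_IR_B (126), ir_u = 0, ir_s = 0.
	 */
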
@@ -640,13 +642,18 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
/* TC configuration is shared by PF/VF in one port, only allow
 * one tc for VF for simplicity. VF's vport_id is non-zero.
*/
- kinfo->tc_info.num_tc = vport->vport_id ? 1 :
+ if (vport->vport_id) {
+ kinfo->tc_info.num_tc = 1;
+ vport->qs_offset = HNAE3_MAX_TC +
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM;
+ vport_max_rss_size = hdev->vf_rss_size_max;
+ } else {
+ kinfo->tc_info.num_tc =
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
- vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
- (vport->vport_id ? (vport->vport_id - 1) : 0);
+ vport->qs_offset = 0;
+ vport_max_rss_size = hdev->pf_rss_size_max;
+ }
- vport_max_rss_size = vport->vport_id ? hdev->vf_rss_size_max :
- hdev->pf_rss_size_max;
max_rss_size = min_t(u16, vport_max_rss_size,
hclge_vport_get_max_rss_size(vport));
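
With HNAE3_MAX_TC = 8 and HCLGE_VF_VPORT_START_NUM = 1 (the latter is added in hclge_main.h below; the former is assumed here), the rewritten branch yields the same qset layout as the old conditional expressions, just more readably:

	/* PF  (vport_id == 0): qs_offset = 0, num_tc = min(alloc_tqps,
	 *			tm_info.num_tc)
	 * VF1 (vport_id == 1): qs_offset = 8 + 1 - 1 = 8, single TC
	 * VF2 (vport_id == 2): qs_offset = 8 + 2 - 1 = 9, single TC
	 * ...
	 */
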
@@ -1616,3 +1623,189 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
return hclge_tm_bp_setup(hdev);
}
+
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+ /* Each PF has 8 qsets and each VF has 1 qset */
+ *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset num, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+ *qset_num = le16_to_cpu(nodes->qset_num);
+ return 0;
+}
+
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+ *pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pri num, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+ *pri_num = nodes->pri_num;
+ return 0;
+}
+
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+ u8 *link_vld)
+{
+ struct hclge_qs_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
+ map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ map->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset map priority, ret = %d\n", ret);
+ return ret;
+ }
+
+ *priority = map->priority;
+ *link_vld = map->link_vld;
+ return 0;
+}
+
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
+{
+ struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
+ qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
+ qs_sch_mode->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = qs_sch_mode->sch_mode;
+ return 0;
+}
+
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
+{
+ struct hclge_qs_weight_cmd *qs_weight;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
+ qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+ qs_weight->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = qs_weight->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
+{
+ struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
+ pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
+ pri_sch_mode->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = pri_sch_mode->sch_mode;
+ return 0;
+}
+
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
+{
+ struct hclge_priority_weight_cmd *priority_weight;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
+ priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+ priority_weight->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = priority_weight->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_pri_shaper_para *para)
+{
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
+ cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ shap_cfg_cmd->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority shaper(%#x), ret = %d\n",
+ cmd, ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
+ return 0;
+}
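
A minimal sketch of how the new getters compose — a hypothetical dump helper, not part of this patch, assuming only the prototypes exported via hclge_tm.h below:

	static int hclge_tm_dump_qsets(struct hclge_dev *hdev)
	{
		u8 priority, link_vld, mode, weight;
		u16 qset_num, qset_id;
		int ret;

		ret = hclge_tm_get_qset_num(hdev, &qset_num);
		if (ret)
			return ret;

		for (qset_id = 0; qset_id < qset_num; qset_id++) {
			ret = hclge_tm_get_qset_map_pri(hdev, qset_id,
							&priority, &link_vld);
			if (ret)
				return ret;

			ret = hclge_tm_get_qset_sch_mode(hdev, qset_id, &mode);
			if (ret)
				return ret;

			ret = hclge_tm_get_qset_weight(hdev, qset_id, &weight);
			if (ret)
				return ret;

			dev_info(&hdev->pdev->dev,
				 "qset %u: pri %u, link_vld %u, mode %u, dwrr %u\n",
				 qset_id, priority, link_vld, mode, weight);
		}

		return 0;
	}
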
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 5498d73ed34b..b25d76023af0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -17,10 +17,13 @@
/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
-#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
+#define HCLGE_TM_TX_SCHD_SP_MSK 0xFE
#define HCLGE_ETHER_MAX_RATE 100000
+#define HCLGE_TM_PF_MAX_PRI_NUM 8
+#define HCLGE_TM_PF_MAX_QSET_NUM 8
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -65,6 +68,18 @@ struct hclge_priority_weight_cmd {
u8 dwrr;
};
+struct hclge_pri_sch_mode_cfg_cmd {
+ u8 pri_id;
+ u8 rsvd[3];
+ u8 sch_mode;
+};
+
+struct hclge_qs_sch_mode_cfg_cmd {
+ __le16 qs_id;
+ u8 rsvd[2];
+ u8 sch_mode;
+};
+
struct hclge_qs_weight_cmd {
__le16 qs_id;
u8 dwrr;
@@ -173,13 +188,34 @@ struct hclge_shaper_ir_para {
u8 ir_s; /* IR_S parameter of IR shaper */
};
+struct hclge_tm_nodes_cmd {
+ u8 pg_base_id;
+ u8 pri_base_id;
+ __le16 qset_base_id;
+ __le16 queue_base_id;
+ u8 pg_num;
+ u8 pri_num;
+ __le16 qset_num;
+ __le16 queue_num;
+};
+
+struct hclge_pri_shaper_para {
+ u8 ir_b;
+ u8 ir_u;
+ u8 ir_s;
+ u8 bs_b;
+ u8 bs_s;
+ u8 flag;
+ u32 rate;
+};
+
#define hclge_tm_set_field(dest, string, val) \
hnae3_set_field((dest), \
(HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
- hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
- (HCLGE_TM_SHAP_##string##_LSH))
+ hnae3_get_field((src), HCLGE_TM_SHAP_##string##_MSK, \
+ HCLGE_TM_SHAP_##string##_LSH)
int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_tm_vport_map_update(struct hclge_dev *hdev);
@@ -195,5 +231,15 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
-
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+ u8 *link_vld);
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode);
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight);
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode);
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight);
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_pri_shaper_para *para);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index e04c0cfeb95c..46700c427849 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -176,36 +176,111 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}
+struct vf_errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num)
+{
+ struct hclgevf_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+}
+
static int hclgevf_cmd_convert_err_code(u16 desc_ret)
{
- switch (desc_ret) {
- case HCLGEVF_CMD_EXEC_SUCCESS:
- return 0;
- case HCLGEVF_CMD_NO_AUTH:
- return -EPERM;
- case HCLGEVF_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case HCLGEVF_CMD_QUEUE_FULL:
- return -EXFULL;
- case HCLGEVF_CMD_NEXT_ERR:
- return -ENOSR;
- case HCLGEVF_CMD_UNEXE_ERR:
- return -ENOTBLK;
- case HCLGEVF_CMD_PARA_ERR:
- return -EINVAL;
- case HCLGEVF_CMD_RESULT_ERR:
- return -ERANGE;
- case HCLGEVF_CMD_TIMEOUT:
- return -ETIME;
- case HCLGEVF_CMD_HILINK_ERR:
- return -ENOLINK;
- case HCLGEVF_CMD_QUEUE_ILLEGAL:
- return -ENXIO;
- case HCLGEVF_CMD_INVALID:
- return -EBADR;
- default:
- return -EIO;
+ struct vf_errcode hclgevf_cmd_errcode[] = {
+ {HCLGEVF_CMD_EXEC_SUCCESS, 0},
+ {HCLGEVF_CMD_NO_AUTH, -EPERM},
+ {HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+ {HCLGEVF_CMD_QUEUE_FULL, -EXFULL},
+ {HCLGEVF_CMD_NEXT_ERR, -ENOSR},
+ {HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK},
+ {HCLGEVF_CMD_PARA_ERR, -EINVAL},
+ {HCLGEVF_CMD_RESULT_ERR, -ERANGE},
+ {HCLGEVF_CMD_TIMEOUT, -ETIME},
+ {HCLGEVF_CMD_HILINK_ERR, -ENOLINK},
+ {HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO},
+ {HCLGEVF_CMD_INVALID, -EBADR},
+ };
+ u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclgevf_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
+static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num, int ntc)
+{
+ u16 opcode, desc_ret;
+ int handle;
+
+ opcode = le16_to_cpu(desc[0].opcode);
+ for (handle = 0; handle < num; handle++) {
+ /* Get the result of hardware write back */
+ desc[handle] = hw->cmq.csq.desc[ntc];
+ ntc++;
+ if (ntc == hw->cmq.csq.desc_num)
+ ntc = 0;
}
+ if (likely(!hclgevf_is_special_opcode(opcode)))
+ desc_ret = le16_to_cpu(desc[num - 1].retval);
+ else
+ desc_ret = le16_to_cpu(desc[0].retval);
+ hw->cmq.last_status = desc_ret;
+
+ return hclgevf_cmd_convert_err_code(desc_ret);
+}
+
+static int hclgevf_cmd_check_result(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num, int ntc)
+{
+ struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ bool is_completed = false;
+ u32 timeout = 0;
+ int handle, ret;
+
+ /* If the command is sync, wait for the firmware to write back;
+ * if multiple descriptors are to be sent, use the first one to check.
+ */
+ if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclgevf_cmd_csq_done(hw)) {
+ is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclgevf_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclgevf_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
}
/* hclgevf_cmd_send - send command to command queue
@@ -220,13 +295,7 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- struct hclgevf_desc *desc_to_use;
- bool complete = false;
- u32 timeout = 0;
- int handle = 0;
- int status = 0;
- u16 retval;
- u16 opcode;
+ int ret;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -250,67 +319,18 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 * which will be used for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- opcode = le16_to_cpu(desc[0].opcode);
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
+
+ hclgevf_cmd_copy_desc(hw, desc, num);
/* Write to hardware */
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
hw->cmq.csq.next_to_use);
- /* If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclgevf_cmd_csq_done(hw))
- break;
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (hclgevf_cmd_csq_done(hw)) {
- complete = true;
- handle = 0;
-
- while (handle < num) {
- /* Get the result of hardware write back */
- desc_to_use = &hw->cmq.csq.desc[ntc];
- desc[handle] = *desc_to_use;
-
- if (likely(!hclgevf_is_special_opcode(opcode)))
- retval = le16_to_cpu(desc[handle].retval);
- else
- retval = le16_to_cpu(desc[0].retval);
-
- status = hclgevf_cmd_convert_err_code(retval);
- hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
- ntc++;
- handle++;
- if (ntc == hw->cmq.csq.desc_num)
- ntc = 0;
- }
- }
-
- if (!complete)
- status = -EBADE;
-
- /* Clean the command send queue */
- handle = hclgevf_cmd_csq_clean(hw);
- if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
+ ret = hclgevf_cmd_check_result(hw, desc, num, ntc);
spin_unlock_bh(&hw->cmq.csq.lock);
- return status;
+ return ret;
}
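
For context, the calling convention of the refactored hclgevf_cmd_send() is unchanged. A sketch of a typical synchronous single-descriptor use, mirroring the firmware-version query later in this file (assumes a struct hclgevf_dev *hdev in scope):

	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);	/* one descriptor, sync */
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to query fw version, ret = %d\n", ret);
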
static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
@@ -342,6 +362,15 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
}
+static __le32 hclgevf_build_api_caps(void)
+{
+ u32 api_caps = 0;
+
+ hnae3_set_bit(api_caps, HCLGEVF_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return cpu_to_le32(api_caps);
+}
+
static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
@@ -352,6 +381,7 @@ static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
resp = (struct hclgevf_query_version_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
+ resp->api_caps = hclgevf_build_api_caps();
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
return status;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 82eed258e8c1..8a37a22a176b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -161,11 +161,15 @@ enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
};
+enum HCLGEVF_API_CAP_BITS {
+ HCLGEVF_API_CAP_FLEX_RSS_TBL_B,
+};
+
#define HCLGEVF_QUERY_CAP_LENGTH 3
struct hclgevf_query_version_cmd {
__le32 firmware;
__le32 hardware;
- __le32 rsv;
+ __le32 api_caps;
__le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
};
@@ -212,8 +216,8 @@ struct hclgevf_rss_input_tuple_cmd {
#define HCLGEVF_RSS_CFG_TBL_SIZE 16
struct hclgevf_rss_indirection_table_cmd {
- u16 start_table_index;
- u16 rss_set_bitmap;
+ __le16 start_table_index;
+ __le16 rss_set_bitmap;
u8 rsv[4];
u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
};
@@ -225,7 +229,7 @@ struct hclgevf_rss_indirection_table_cmd {
#define HCLGEVF_RSS_TC_VALID_B 15
#define HCLGEVF_MAX_TC_NUM 8
struct hclgevf_rss_tc_mode_cmd {
- u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+ __le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
u8 rsv[8];
};
@@ -274,7 +278,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
-#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
@@ -292,7 +295,8 @@ struct hclgevf_dev_specs_0_cmd {
#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U
struct hclgevf_dev_specs_1_cmd {
- __le32 rsv0;
+ __le16 max_frm_size;
+ __le16 rsv0;
__le16 max_int_gl;
u8 rsv1[18];
};
@@ -310,9 +314,9 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
}
#define hclgevf_write_dev(a, reg, value) \
- hclgevf_write_reg((a)->io_base, (reg), (value))
+ hclgevf_write_reg((a)->io_base, reg, value)
#define hclgevf_read_dev(a, reg) \
- hclgevf_read_reg((a)->io_base, (reg))
+ hclgevf_read_reg((a)->io_base, reg)
#define HCLGEVF_SEND_SYNC(flag) \
((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 674b3a22e91f..700e068764c8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -180,7 +180,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -188,7 +188,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -642,26 +642,25 @@ static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
return HCLGEVF_RSS_KEY_SIZE;
}
-static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
-{
- return HCLGEVF_RSS_IND_TBL_SIZE;
-}
-
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
struct hclgevf_rss_indirection_table_cmd *req;
struct hclgevf_desc desc;
+ int rss_cfg_tbl_num;
int status;
int i, j;
req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
+ rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGEVF_RSS_CFG_TBL_SIZE;
- for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
false);
- req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
- req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
+ req->start_table_index =
+ cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
+ req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
req->rss_result[j] =
indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
@@ -702,12 +701,16 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ u16 mode = 0;
+
+ hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
(tc_valid[i] & 0x1));
- hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+
+ req->rss_tc_mode[i] = cpu_to_le16(mode);
}
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
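
The local-variable dance above exists because rss_tc_mode[] becomes __le16 (see the hclgevf_cmd.h hunk earlier in this diff): the hnae3 bit helpers operate on CPU-order values, so each entry is assembled in a plain u16 and converted exactly once on store. The pattern in isolation:

	u16 mode = 0;					/* CPU byte order */

	hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B, 1);
	req->rss_tc_mode[i] = cpu_to_le16(mode);	/* single LE conversion */
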
@@ -795,7 +798,7 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
}
if (indir)
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
return 0;
@@ -838,7 +841,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
/* update the shadow RSS table with user specified qids */
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = indir[i];
/* update the hardware */
@@ -870,25 +873,13 @@ static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
return hash_sets;
}
-static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc,
+ struct hclgevf_rss_input_tuple_cmd *req)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- struct hclgevf_rss_input_tuple_cmd *req;
- struct hclgevf_desc desc;
u8 tuple_sets;
- int ret;
-
- if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
- return -EOPNOTSUPP;
-
- if (nfc->data &
- ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
@@ -933,6 +924,35 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
return -EINVAL;
}
+ return 0;
+}
+
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init rss tuple cmd, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -951,56 +971,73 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
return 0;
}
-static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
+ int flow_type, u8 *tuple_sets)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- u8 tuple_sets;
-
- if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
- return -EOPNOTSUPP;
-
- nfc->data = 0;
-
- switch (nfc->flow_type) {
+ switch (flow_type) {
case TCP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
- tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
+ *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
break;
default:
return -EINVAL;
}
- if (!tuple_sets)
- return 0;
+ return 0;
+}
+
+static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
if (tuple_sets & HCLGEVF_D_PORT_BIT)
- nfc->data |= RXH_L4_B_2_3;
+ tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGEVF_S_PORT_BIT)
- nfc->data |= RXH_L4_B_0_1;
+ tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGEVF_D_IP_BIT)
- nfc->data |= RXH_IP_DST;
+ tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGEVF_S_IP_BIT)
- nfc->data |= RXH_IP_SRC;
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
+
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 tuple_sets;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ nfc->data = 0;
+
+ ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
+ &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -2482,8 +2519,9 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
return ret;
}
-static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
+static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
+ u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
struct hclgevf_rss_tuple_cfg *tuple_sets;
u32 i;
@@ -2492,7 +2530,16 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
tuple_sets = &rss_cfg->rss_tuple_sets;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ u8 *rss_ind_tbl;
+
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+
+ rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ rss_cfg->rss_indirection_tbl = rss_ind_tbl;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE);
@@ -2510,8 +2557,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
}
/* Initialize RSS indirect table */
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+
+ return 0;
}
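
Why devm_kcalloc() keeps the error handling above this simple — a generic sketch of the pattern (the size name tbl_size is illustrative):

	/* devm-managed memory is released automatically when the device is
	 * unbound, so a failing caller only returns -ENOMEM; no explicit
	 * kfree() path is needed in hclgevf_rss_init_cfg().
	 */
	u8 *tbl = devm_kcalloc(&pdev->dev, tbl_size, sizeof(*tbl), GFP_KERNEL);

	if (!tbl)
		return -ENOMEM;
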
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
@@ -3048,6 +3097,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
+ ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
@@ -3066,6 +3116,7 @@ static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
+ ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
@@ -3080,6 +3131,8 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
+ if (!dev_specs->max_frm_size)
+ dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
@@ -3266,7 +3319,12 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
/* Initialize RSS for this VF */
- hclgevf_rss_init_cfg(hdev);
+ ret = hclgevf_rss_init_cfg(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
+ goto err_config;
+ }
+
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3444,11 +3502,12 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
goto out;
/* Reinitializes the rss indirect table according to the new RSS size */
- rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
+ sizeof(u32), GFP_KERNEL);
if (!rss_indir)
return -ENOMEM;
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_indir[i] = i % kinfo->rss_size;
hdev->rss_cfg.rss_size = kinfo->rss_size;
@@ -3687,7 +3746,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_strings = hclgevf_get_strings,
.get_sset_count = hclgevf_get_sset_count,
.get_rss_key_size = hclgevf_get_rss_key_size,
- .get_rss_indir_size = hclgevf_get_rss_indir_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
.get_rss_tuple = hclgevf_get_rss_tuple,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index f6d817a3edcb..8c27ecd819af 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -113,8 +113,7 @@
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
-#define HCLGEVF_RSS_CFG_TBL_NUM \
- (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+
#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGEVF_D_PORT_BIT BIT(0)
@@ -125,6 +124,8 @@
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
+#define HCLGEVF_MAC_MAX_FRAME 9728
+
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
enum hclgevf_evt_cause {
@@ -217,7 +218,8 @@ struct hclgevf_rss_cfg {
u32 hash_algo;
u32 rss_size;
u8 hw_tc_map;
- u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+ /* shadow table */
+ u8 *rss_indirection_tbl;
struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};