Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3')
21 files changed, 4362 insertions, 1497 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index be9dc08ccf67..038326cfda93 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -46,9 +46,6 @@ enum hclge_mbx_mac_vlan_subcode { HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ - HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */ - HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */ - HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */ }; /* below are per-VF vlan cfg subcodes */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index fff5be8078ac..781e5dee3c70 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -29,8 +29,8 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, return false; } -static void hnae3_set_client_init_flag(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev, int inited) +void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, int inited) { switch (client->type) { case HNAE3_CLIENT_KNIC: @@ -46,6 +46,7 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client, break; } } +EXPORT_SYMBOL(hnae3_set_client_init_flag); static int hnae3_get_client_init_flag(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) @@ -86,14 +87,11 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, /* now, (un-)instantiate client by calling lower layer */ if (is_reg) { ret = ae_dev->ops->init_client_instance(client, ae_dev); - if (ret) { + if (ret) dev_err(&ae_dev->pdev->dev, "fail to instantiate client, ret = %d\n", ret); - return ret; - } - hnae3_set_client_init_flag(client, ae_dev, 1); - return 0; + return ret; } if (hnae3_get_client_init_flag(client, ae_dev)) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 67befff0bfc5..e82e4ca20620 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -51,6 +51,7 @@ #define HNAE3_KNIC_CLIENT_INITED_B 0x3 #define HNAE3_UNIC_CLIENT_INITED_B 0x4 #define HNAE3_ROCE_CLIENT_INITED_B 0x5 +#define HNAE3_DEV_SUPPORT_FD_B 0x6 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) @@ -61,6 +62,9 @@ #define hnae3_dev_dcb_supported(hdev) \ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) +#define hnae3_dev_fd_supported(hdev) \ + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B) + #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) #define ring_ptr_move_bw(ring, p) \ @@ -84,10 +88,11 @@ struct hnae3_queue { /*hnae3 loop mode*/ enum hnae3_loop { - HNAE3_MAC_INTER_LOOP_MAC, - HNAE3_MAC_INTER_LOOP_SERDES, - HNAE3_MAC_INTER_LOOP_PHY, - HNAE3_MAC_LOOP_NONE, + HNAE3_LOOP_APP, + HNAE3_LOOP_SERIAL_SERDES, + HNAE3_LOOP_PARALLEL_SERDES, + HNAE3_LOOP_PHY, + HNAE3_LOOP_NONE, }; enum hnae3_client_type { @@ -107,6 +112,7 @@ enum hnae3_media_type { HNAE3_MEDIA_TYPE_FIBER, HNAE3_MEDIA_TYPE_COPPER, HNAE3_MEDIA_TYPE_BACKPLANE, + HNAE3_MEDIA_TYPE_NONE, }; enum hnae3_reset_notify_type { @@ -173,6 +179,7 @@ struct hnae3_ae_dev { struct list_head node; u32 flag; enum hnae3_dev_type dev_type; + enum hnae3_reset_type reset_type; void *priv; }; @@ 
-337,6 +344,8 @@ struct hnae3_ae_ops { void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); int (*set_mac_addr)(struct hnae3_handle *handle, void *p, bool is_first); + int (*do_ioctl)(struct hnae3_handle *handle, + struct ifreq *ifr, int cmd); int (*add_uc_addr)(struct hnae3_handle *handle, const unsigned char *addr); int (*rm_uc_addr)(struct hnae3_handle *handle, @@ -346,8 +355,6 @@ struct hnae3_ae_ops { const unsigned char *addr); int (*rm_mc_addr)(struct hnae3_handle *handle, const unsigned char *addr); - int (*update_mta_status)(struct hnae3_handle *handle); - void (*set_tso_stats)(struct hnae3_handle *handle, int enable); void (*update_stats)(struct hnae3_handle *handle, struct net_device_stats *net_stats); @@ -395,11 +402,11 @@ struct hnae3_ae_ops { int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, u16 vlan, u8 qos, __be16 proto); int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable); - void (*reset_event)(struct hnae3_handle *handle); + void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle); void (*get_channels)(struct hnae3_handle *handle, struct ethtool_channels *ch); void (*get_tqps_and_rss_info)(struct hnae3_handle *h, - u16 *free_tqps, u16 *max_rss_size); + u16 *alloc_tqps, u16 *max_rss_size); int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num); void (*get_flowctrl_adv)(struct hnae3_handle *handle, u32 *flowctrl_adv); @@ -408,7 +415,21 @@ struct hnae3_ae_ops { void (*get_link_mode)(struct hnae3_handle *handle, unsigned long *supported, unsigned long *advertising); - void (*get_port_type)(struct hnae3_handle *handle, u8 *port_type); + int (*add_fd_entry)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*del_fd_entry)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + void (*del_all_fd_entries)(struct hnae3_handle *handle, + bool clear_list); + int (*get_fd_rule_cnt)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_rule_info)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_all_rules)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs); + int (*restore_fd_rules)(struct hnae3_handle *handle); + void (*enable_fd)(struct hnae3_handle *handle, bool enable); + pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev); }; struct hnae3_dcb_ops { @@ -459,6 +480,7 @@ struct hnae3_knic_private_info { const struct hnae3_dcb_ops *dcb_ops; u16 int_rl_setting; + enum pkt_hash_types rss_type; }; struct hnae3_roce_private_info { @@ -476,10 +498,20 @@ struct hnae3_unic_private_info { struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ }; -#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0) +#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0) #define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1) -#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2) +#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2) #define HNAE3_SUPPORT_VF BIT(3) +#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4) + +#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */ +#define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */ +#define HNAE3_BPE BIT(2) /* broadcast promisc enable */ +#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */ +#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */ +#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */ +#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE) +#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE) struct hnae3_handle { struct hnae3_client *client; @@ -499,6 
+531,8 @@ struct hnae3_handle { }; u32 numa_node_mask; /* for multi-chip support */ + + u8 netdev_flags; }; #define hnae3_set_field(origin, mask, shift, val) \ @@ -521,4 +555,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); void hnae3_unregister_client(struct hnae3_client *client); int hnae3_register_client(struct hnae3_client *client); + +void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, int inited); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 955c4ab18b03..32f3aca814e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -9,6 +9,7 @@ #include <linux/ipv6.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/aer.h> #include <linux/skbuff.h> #include <linux/sctp.h> #include <linux/vermagic.h> @@ -21,6 +22,7 @@ static void hns3_clear_all_ring(struct hnae3_handle *h); static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); +static void hns3_remove_hw_addr(struct net_device *netdev); static const char hns3_driver_name[] = "hns3"; const char hns3_driver_version[] = VERMAGIC_STRING; @@ -66,6 +68,23 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector) return IRQ_HANDLED; } +/* This callback function is used to set affinity changes to the irq affinity + * masks when the irq_set_affinity_notifier function is used. + */ +static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct hns3_enet_tqp_vector *tqp_vectors = + container_of(notify, struct hns3_enet_tqp_vector, + affinity_notify); + + tqp_vectors->affinity_mask = *mask; +} + +static void hns3_nic_irq_affinity_release(struct kref *ref) +{ +} + static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) { struct hns3_enet_tqp_vector *tqp_vectors; @@ -77,6 +96,10 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) continue; + /* clear the affinity notifier and affinity mask */ + irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL); + irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); + /* release the irq resource */ free_irq(tqp_vectors->vector_irq, tqp_vectors); tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; @@ -127,6 +150,15 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv) return ret; } + tqp_vectors->affinity_notify.notify = + hns3_nic_irq_affinity_notify; + tqp_vectors->affinity_notify.release = + hns3_nic_irq_affinity_release; + irq_set_affinity_notifier(tqp_vectors->vector_irq, + &tqp_vectors->affinity_notify); + irq_set_affinity_hint(tqp_vectors->vector_irq, + &tqp_vectors->affinity_mask); + tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; } @@ -195,8 +227,6 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, struct hns3_nic_priv *priv) { - struct hnae3_handle *h = priv->ae_handle; - /* initialize the configuration for interrupt coalescing. * 1. GL (Interrupt Gap Limiter) * 2. 
RL (Interrupt Rate Limiter) @@ -209,9 +239,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; - /* Default: disable RL */ - h->kinfo.int_rl_setting = 0; - tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; @@ -277,12 +304,12 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) static u16 hns3_get_max_available_channels(struct hnae3_handle *h) { - u16 free_tqps, max_rss_size, max_tqps; + u16 alloc_tqps, max_rss_size, rss_size; - h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); - max_tqps = h->kinfo.num_tc * max_rss_size; + h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); + rss_size = alloc_tqps / h->kinfo.num_tc; - return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); + return min_t(u16, rss_size, max_rss_size); } static int hns3_nic_net_up(struct net_device *netdev) @@ -433,26 +460,81 @@ static int hns3_nic_mc_unsync(struct net_device *netdev, return 0; } +static u8 hns3_get_netdev_flags(struct net_device *netdev) +{ + u8 flags = 0; + + if (netdev->flags & IFF_PROMISC) { + flags = HNAE3_USER_UPE | HNAE3_USER_MPE; + } else { + flags |= HNAE3_VLAN_FLTR; + if (netdev->flags & IFF_ALLMULTI) + flags |= HNAE3_USER_MPE; + } + + return flags; +} + static void hns3_nic_set_rx_mode(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); + u8 new_flags; + int ret; - if (h->ae_algo->ops->set_promisc_mode) { - if (netdev->flags & IFF_PROMISC) - h->ae_algo->ops->set_promisc_mode(h, true, true); - else if (netdev->flags & IFF_ALLMULTI) - h->ae_algo->ops->set_promisc_mode(h, false, true); - else - h->ae_algo->ops->set_promisc_mode(h, false, false); - } - if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) + new_flags = hns3_get_netdev_flags(netdev); + + ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); + if (ret) { netdev_err(netdev, "sync uc address fail\n"); + if (ret == -ENOSPC) + new_flags |= HNAE3_OVERFLOW_UPE; + } + if (netdev->flags & IFF_MULTICAST) { - if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) + ret = __dev_mc_sync(netdev, hns3_nic_mc_sync, + hns3_nic_mc_unsync); + if (ret) { netdev_err(netdev, "sync mc address fail\n"); + if (ret == -ENOSPC) + new_flags |= HNAE3_OVERFLOW_MPE; + } + } - if (h->ae_algo->ops->update_mta_status) - h->ae_algo->ops->update_mta_status(h); + hns3_update_promisc_mode(netdev, new_flags); + /* User mode Promisc mode enable and vlan filtering is disabled to + * let all packets in. MAC-VLAN Table overflow Promisc enabled and + * vlan fitering is enabled + */ + hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR); + h->netdev_flags = new_flags; +} + +void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->set_promisc_mode) { + h->ae_algo->ops->set_promisc_mode(h, + promisc_flags & HNAE3_UPE, + promisc_flags & HNAE3_MPE); + } +} + +void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + bool last_state; + + if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) { + last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? 
true : false; + if (enable != last_state) { + netdev_info(netdev, + "%s vlan filter\n", + enable ? "enable" : "disable"); + h->ae_algo->ops->enable_vlan_filter(h, enable); + } } } @@ -896,35 +978,28 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, } static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type) + int size, int frag_end, enum hns_desc_type type) { struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + struct device *dev = ring_to_dev(ring); u32 ol_type_vlan_len_msec = 0; u16 bdtp_fe_sc_vld_ra_ri = 0; + struct skb_frag_struct *frag; + unsigned int frag_buf_num; u32 type_cs_vlan_tso = 0; struct sk_buff *skb; u16 inner_vtag = 0; u16 out_vtag = 0; + unsigned int k; + int sizeoflast; u32 paylen = 0; + dma_addr_t dma; u16 mss = 0; u8 ol4_proto; u8 il4_proto; int ret; - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ - desc_cb->priv = priv; - desc_cb->length = size; - desc_cb->dma = dma; - desc_cb->type = type; - - /* now, fill the descriptor */ - desc->addr = cpu_to_le64(dma); - desc->tx.send_size = cpu_to_le16((u16)size); - hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); - desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); - if (type == DESC_TYPE_SKB) { skb = (struct sk_buff *)priv; paylen = skb->len; @@ -965,38 +1040,47 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, desc->tx.mss = cpu_to_le16(mss); desc->tx.vlan_tag = cpu_to_le16(inner_vtag); desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); - } - /* move ring pointer to next.*/ - ring_ptr_move_fw(ring, next_to_use); + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + } else { + frag = (struct skb_frag_struct *)priv; + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + } - return 0; -} + if (dma_mapping_error(ring->dev, dma)) { + ring->stats.sw_err_cnt++; + return -ENOMEM; + } -static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type) -{ - unsigned int frag_buf_num; - unsigned int k; - int sizeoflast; - int ret; + desc_cb->length = size; frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; sizeoflast = size % HNS3_MAX_BD_SIZE; sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; - /* When the frag size is bigger than hardware, split this frag */ + /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { - ret = hns3_fill_desc(ring, priv, - (k == frag_buf_num - 1) ? - sizeoflast : HNS3_MAX_BD_SIZE, - dma + HNS3_MAX_BD_SIZE * k, - frag_end && (k == frag_buf_num - 1) ? 1 : 0, - (type == DESC_TYPE_SKB && !k) ? - DESC_TYPE_SKB : DESC_TYPE_PAGE); - if (ret) - return ret; + /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ + desc_cb->priv = priv; + desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; + desc_cb->type = (type == DESC_TYPE_SKB && !k) ? + DESC_TYPE_SKB : DESC_TYPE_PAGE; + + /* now, fill the descriptor */ + desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); + desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? + (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); + hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, + frag_end && (k == frag_buf_num - 1) ? 
+ 1 : 0); + desc->tx.bdtp_fe_sc_vld_ra_ri = + cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + + /* move ring pointer to next.*/ + ring_ptr_move_fw(ring, next_to_use); + + desc_cb = &ring->desc_cb[ring->next_to_use]; + desc = &ring->desc[ring->next_to_use]; } return 0; @@ -1044,7 +1128,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, /* No. of segments (plus a header) */ buf_num = skb_shinfo(skb)->nr_frags + 1; - if (buf_num > ring_space(ring)) + if (unlikely(ring_space(ring) < buf_num)) return -EBUSY; *bnum = buf_num; @@ -1052,7 +1136,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, return 0; } -static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) +static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) { struct device *dev = ring_to_dev(ring); unsigned int i; @@ -1068,12 +1152,14 @@ static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) ring->desc_cb[ring->next_to_use].dma, ring->desc_cb[ring->next_to_use].length, DMA_TO_DEVICE); - else + else if (ring->desc_cb[ring->next_to_use].length) dma_unmap_page(dev, ring->desc_cb[ring->next_to_use].dma, ring->desc_cb[ring->next_to_use].length, DMA_TO_DEVICE); + ring->desc_cb[ring->next_to_use].length = 0; + /* rollback one */ ring_ptr_move_bw(ring, next_to_use); } @@ -1085,12 +1171,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) struct hns3_nic_ring_data *ring_data = &tx_ring_data(priv, skb->queue_mapping); struct hns3_enet_ring *ring = ring_data->ring; - struct device *dev = priv->dev; struct netdev_queue *dev_queue; struct skb_frag_struct *frag; int next_to_use_head; int next_to_use_frag; - dma_addr_t dma; int buf_num; int seg_num; int size; @@ -1125,35 +1209,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) next_to_use_head = ring->next_to_use; - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) { - netdev_err(netdev, "TX head DMA map failed\n"); - ring->stats.sw_err_cnt++; - goto out_err_tx_ok; - } - - ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, - DESC_TYPE_SKB); + ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, + DESC_TYPE_SKB); if (ret) - goto head_dma_map_err; + goto head_fill_err; next_to_use_frag = ring->next_to_use; /* Fill the fragments */ for (i = 1; i < seg_num; i++) { frag = &skb_shinfo(skb)->frags[i - 1]; size = skb_frag_size(frag); - dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) { - netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); - ring->stats.sw_err_cnt++; - goto frag_dma_map_err; - } - ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, - seg_num - 1 == i ? 1 : 0, - DESC_TYPE_PAGE); + + ret = priv->ops.fill_desc(ring, frag, size, + seg_num - 1 == i ? 
1 : 0, + DESC_TYPE_PAGE); if (ret) - goto frag_dma_map_err; + goto frag_fill_err; } /* Complete translate all packets */ @@ -1166,11 +1238,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; -frag_dma_map_err: - hns_nic_dma_unmap(ring, next_to_use_frag); +frag_fill_err: + hns3_clear_desc(ring, next_to_use_frag); -head_dma_map_err: - hns_nic_dma_unmap(ring, next_to_use_head); +head_fill_err: + hns3_clear_desc(ring, next_to_use_head); out_err_tx_ok: dev_kfree_skb_any(skb); @@ -1209,6 +1281,20 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return 0; } +static int hns3_nic_do_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->do_ioctl) + return -EOPNOTSUPP; + + return h->ae_algo->ops->do_ioctl(h, ifr, cmd); +} + static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t features) { @@ -1218,13 +1304,10 @@ static int hns3_nic_set_features(struct net_device *netdev, int ret; if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; + else priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } } if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && @@ -1246,6 +1329,13 @@ static int hns3_nic_set_features(struct net_device *netdev, return ret; } + if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { + if (features & NETIF_F_NTUPLE) + h->ae_algo->ops->enable_fd(h, true); + else + h->ae_algo->ops->enable_fd(h, false); + } + netdev->features = features; return 0; } @@ -1447,13 +1537,11 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) } ret = h->ae_algo->ops->set_mtu(h, new_mtu); - if (ret) { + if (ret) netdev_err(netdev, "failed to change MTU in hardware %d\n", ret); - return ret; - } - - netdev->mtu = new_mtu; + else + netdev->mtu = new_mtu; /* if the netdev was running earlier, bring it up again */ if (if_running && hns3_nic_net_open(netdev)) @@ -1526,7 +1614,7 @@ static void hns3_nic_net_timeout(struct net_device *ndev) /* request the reset */ if (h->ae_algo->ops->reset_event) - h->ae_algo->ops->reset_event(h); + h->ae_algo->ops->reset_event(h->pdev, h); } static const struct net_device_ops hns3_nic_netdev_ops = { @@ -1535,6 +1623,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_start_xmit = hns3_nic_net_xmit, .ndo_tx_timeout = hns3_nic_net_timeout, .ndo_set_mac_address = hns3_nic_net_set_mac_address, + .ndo_do_ioctl = hns3_nic_do_ioctl, .ndo_change_mtu = hns3_nic_change_mtu, .ndo_set_features = hns3_nic_set_features, .ndo_get_stats64 = hns3_nic_get_stats64, @@ -1584,6 +1673,13 @@ static void hns3_disable_sriov(struct pci_dev *pdev) pci_disable_sriov(pdev); } +static void hns3_get_dev_capability(struct pci_dev *pdev, + struct hnae3_ae_dev *ae_dev) +{ + if (pdev->revision >= 0x21) + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); +} + /* hns3_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in hns3_pci_tbl @@ -1609,6 +1705,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ae_dev->pdev = pdev; ae_dev->flag = ent->driver_data; ae_dev->dev_type = HNAE3_DEV_KNIC; + ae_dev->reset_type = HNAE3_NONE_RESET; + 
hns3_get_dev_capability(pdev, ae_dev); pci_set_drvdata(pdev, ae_dev); hnae3_register_ae_dev(ae_dev); @@ -1662,12 +1760,72 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) return 0; } +static void hns3_shutdown(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + devm_kfree(&pdev->dev, ae_dev); + pci_set_drvdata(pdev, NULL); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + +static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + pci_ers_result_t ret; + + dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (!ae_dev) { + dev_err(&pdev->dev, + "Can't recover - error happened during device init\n"); + return PCI_ERS_RESULT_NONE; + } + + if (ae_dev->ops->process_hw_error) + ret = ae_dev->ops->process_hw_error(ae_dev); + else + return PCI_ERS_RESULT_NONE; + + return ret; +} + +static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + dev_info(dev, "requesting reset due to PCI error\n"); + + /* request the reset */ + if (ae_dev->ops->reset_event) { + ae_dev->ops->reset_event(pdev, NULL); + return PCI_ERS_RESULT_RECOVERED; + } + + return PCI_ERS_RESULT_DISCONNECT; +} + +static const struct pci_error_handlers hns3_err_handler = { + .error_detected = hns3_error_detected, + .slot_reset = hns3_slot_reset, +}; + static struct pci_driver hns3_driver = { .name = hns3_driver_name, .id_table = hns3_pci_tbl, .probe = hns3_probe, .remove = hns3_remove, + .shutdown = hns3_shutdown, .sriov_configure = hns3_pci_sriov_configure, + .err_handler = &hns3_err_handler, }; /* set default feature to hns3 */ @@ -1682,7 +1840,7 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; @@ -1694,24 +1852,30 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; - if (pdev->revision != 0x20) + if (pdev->revision >= 0x21) { netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + if (!(h->flags & HNAE3_SUPPORT_VF)) { + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->features |= NETIF_F_NTUPLE; + } + } } static int hns3_alloc_buffer(struct 
hns3_enet_ring *ring, @@ -1749,7 +1913,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, cb->length, ring_to_dma_dir(ring)); - if (dma_mapping_error(ring_to_dev(ring), cb->dma)) + if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) return -EIO; return 0; @@ -1761,7 +1925,7 @@ static void hns3_unmap_buffer(struct hns3_enet_ring *ring, if (cb->type == DESC_TYPE_SKB) dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, ring_to_dma_dir(ring)); - else + else if (cb->length) dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, ring_to_dma_dir(ring)); } @@ -1912,9 +2076,10 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) return u > c ? (h > c && h <= u) : (h > c || h <= u); } -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) +void hns3_clean_tx_ring(struct hns3_enet_ring *ring) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); struct netdev_queue *dev_queue; int bytes, pkts; int head; @@ -1923,7 +2088,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) rmb(); /* Make sure head is ready before touch any data */ if (is_ring_empty(ring) || head == ring->next_to_clean) - return true; /* no data to poll */ + return; /* no data to poll */ if (unlikely(!is_valid_clean_head(ring, head))) { netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, @@ -1932,16 +2097,15 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) u64_stats_update_begin(&ring->syncp); ring->stats.io_err_cnt++; u64_stats_update_end(&ring->syncp); - return true; + return; } bytes = 0; pkts = 0; - while (head != ring->next_to_clean && budget) { + while (head != ring->next_to_clean) { hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); /* Issue prefetch for next Tx descriptor */ prefetch(&ring->desc_cb[ring->next_to_clean]); - budget--; } ring->tqp_vector->tx_group.total_bytes += bytes; @@ -1961,13 +2125,12 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) * sees the new next_to_clean. 
*/ smp_mb(); - if (netif_tx_queue_stopped(dev_queue)) { + if (netif_tx_queue_stopped(dev_queue) && + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { netif_tx_wake_queue(dev_queue); ring->stats.restart_queue++; } } - - return !!budget; } static int hns3_desc_unused(struct hns3_enet_ring *ring) @@ -2092,7 +2255,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { - netdev_err(netdev, "L3/L4 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; u64_stats_update_end(&ring->syncp); @@ -2121,6 +2283,8 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, l4_type == HNS3_L4_TYPE_SCTP)) skb->ip_summed = CHECKSUM_UNNECESSARY; break; + default: + break; } } @@ -2129,18 +2293,18 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) napi_gro_receive(&ring->tqp_vector->napi, skb); } -static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, - struct hns3_desc *desc, u32 l234info) +static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, u32 l234info, + u16 *vlan_tag) { struct pci_dev *pdev = ring->tqp->handle->pdev; - u16 vlan_tag; if (pdev->revision == 0x20) { - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - if (!(vlan_tag & VLAN_VID_MASK)) - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(*vlan_tag & VLAN_VID_MASK)) + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - return vlan_tag; + return (*vlan_tag != 0); } #define HNS3_STRP_OUTER_VLAN 0x1 @@ -2149,17 +2313,29 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - break; + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + return true; case HNS3_STRP_INNER_VLAN: - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - break; + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + return true; default: - vlan_tag = 0; - break; + return false; } +} + +static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + struct hnae3_handle *handle = ring->tqp->handle; + enum pkt_hash_types rss_type; - return vlan_tag; + if (le32_to_cpu(desc->rx.rss_hash)) + rss_type = handle->kinfo.rss_type; + else + rss_type = PKT_HASH_TYPE_NONE; + + skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type); } static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, @@ -2261,16 +2437,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { u16 vlan_tag; - vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info); - if (vlan_tag & VLAN_VID_MASK) + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { - netdev_err(netdev, "no valid bd,%016llx,%016llx\n", - ((u64 *)desc)[0], ((u64 *)desc)[1]); u64_stats_update_begin(&ring->syncp); ring->stats.non_vld_descs++; u64_stats_update_end(&ring->syncp); @@ -2281,7 +2454,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, if (unlikely((!desc->rx.pkt_len) || hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { - netdev_err(netdev, "truncated pkt\n"); u64_stats_update_begin(&ring->syncp); 
ring->stats.err_pkt_len++; u64_stats_update_end(&ring->syncp); @@ -2291,7 +2463,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { - netdev_err(netdev, "L2 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l2_err++; u64_stats_update_end(&ring->syncp); @@ -2308,6 +2479,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ring->tqp_vector->rx_group.total_bytes += skb->len; hns3_rx_checksum(ring, skb, desc); + hns3_set_rx_skb_rss_type(ring, skb); + return 0; } @@ -2501,10 +2674,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ - hns3_for_each_ring(ring, tqp_vector->tx_group) { - if (!hns3_clean_tx_ring(ring, budget)) - clean_complete = false; - } + hns3_for_each_ring(ring, tqp_vector->tx_group) + hns3_clean_tx_ring(ring); /* make sure rx ring budget not smaller than 1 */ rx_budget = max(budget / tqp_vector->num_tqps, 1); @@ -2627,6 +2798,23 @@ static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, group->count++; } +static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) +{ + struct pci_dev *pdev = priv->ae_handle->pdev; + struct hns3_enet_tqp_vector *tqp_vector; + int num_vectors = priv->vector_num; + int numa_node; + int vector_i; + + numa_node = dev_to_node(&pdev->dev); + + for (vector_i = 0; vector_i < num_vectors; vector_i++) { + tqp_vector = &priv->tqp_vector[vector_i]; + cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), + &tqp_vector->affinity_mask); + } +} + static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) { struct hnae3_ring_chain_node vector_ring_chain; @@ -2635,6 +2823,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) int ret = 0; u16 i; + hns3_nic_set_cpumask(priv); + for (i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; hns3_vector_gl_rl_init_hw(tqp_vector, priv); @@ -3069,38 +3259,48 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init) } -static void hns3_uninit_mac_addr(struct net_device *netdev) +static int hns3_restore_fd_rules(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = 0; - if (h->ae_algo->ops->rm_uc_addr) - h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr); + if (h->ae_algo->ops->restore_fd_rules) + ret = h->ae_algo->ops->restore_fd_rules(h); + + return ret; +} + +static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->del_all_fd_entries) + h->ae_algo->ops->del_all_fd_entries(h, clear_list); } static void hns3_nic_set_priv_ops(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + priv->ops.fill_desc = hns3_fill_desc; if ((netdev->features & NETIF_F_TSO) || - (netdev->features & NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; + (netdev->features & NETIF_F_TSO6)) priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; + else priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } } static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; + u16 alloc_tqps, max_rss_size; struct hns3_nic_priv *priv; struct net_device *netdev; int ret; - netdev = 
alloc_etherdev_mq(sizeof(struct hns3_nic_priv), - hns3_get_max_available_channels(handle)); + handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, + &max_rss_size); + netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); if (!netdev) return -ENOMEM; @@ -3189,9 +3389,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; + hns3_remove_hw_addr(netdev); + if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); + hns3_del_all_fd_rules(netdev, true); + hns3_force_clear_all_rx_ring(handle); ret = hns3_nic_uninit_vector_data(priv); @@ -3210,8 +3414,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) priv->ring_data = NULL; - hns3_uninit_mac_addr(netdev); - free_netdev(netdev); } @@ -3283,6 +3485,25 @@ static void hns3_recover_hw_addr(struct net_device *ndev) hns3_nic_mc_sync(ndev, ha->addr); } +static void hns3_remove_hw_addr(struct net_device *netdev) +{ + struct netdev_hw_addr_list *list; + struct netdev_hw_addr *ha, *tmp; + + hns3_nic_uc_unsync(netdev, netdev->dev_addr); + + /* go through and unsync uc_addr entries to the device */ + list = &netdev->uc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + hns3_nic_uc_unsync(netdev, ha->addr); + + /* go through and unsync mc_addr entries to the device */ + list = &netdev->mc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + if (ha->refcount > 1) + hns3_nic_mc_unsync(netdev, ha->addr); +} + static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_clean != ring->next_to_use) { @@ -3419,6 +3640,31 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) return 0; } +static void hns3_store_coal(struct hns3_nic_priv *priv) +{ + /* ethtool only support setting and querying one coal + * configuation for now, so save the vector 0' coal + * configuation here in order to restore it. + */ + memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, + sizeof(struct hns3_enet_coalesce)); + memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, + sizeof(struct hns3_enet_coalesce)); +} + +static void hns3_restore_coal(struct hns3_nic_priv *priv) +{ + u16 vector_num = priv->vector_num; + int i; + + for (i = 0; i < vector_num; i++) { + memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, + sizeof(struct hns3_enet_coalesce)); + memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, + sizeof(struct hns3_enet_coalesce)); + } +} + static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -3452,19 +3698,27 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); + bool vlan_filter_enable; int ret; hns3_init_mac_addr(netdev, false); - hns3_nic_set_rx_mode(netdev); hns3_recover_hw_addr(netdev); + hns3_update_promisc_mode(netdev, handle->netdev_flags); + vlan_filter_enable = netdev->flags & IFF_PROMISC ? 
false : true; + hns3_enable_vlan_filter(netdev, vlan_filter_enable); + /* Hardware table is only clear when pf resets */ if (!(handle->flags & HNAE3_SUPPORT_VF)) hns3_restore_vlan(netdev); + hns3_restore_fd_rules(netdev); + /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + hns3_restore_coal(priv); + ret = hns3_nic_init_vector_data(priv); if (ret) return ret; @@ -3480,6 +3734,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; @@ -3492,11 +3747,20 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) return ret; } + hns3_store_coal(priv); + ret = hns3_uninit_all_ring(priv); if (ret) netdev_err(netdev, "uninit ring error\n"); - hns3_uninit_mac_addr(netdev); + /* it is cumbersome for hardware to pick-and-choose entries for deletion + * from table space. Hence, for function reset software intervention is + * required to delete the entries + */ + if (hns3_dev_ongoing_func_reset(ae_dev)) { + hns3_remove_hw_addr(netdev); + hns3_del_all_fd_rules(netdev, false); + } return ret; } @@ -3526,24 +3790,7 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } -static void hns3_restore_coal(struct hns3_nic_priv *priv, - struct hns3_enet_coalesce *tx, - struct hns3_enet_coalesce *rx) -{ - u16 vector_num = priv->vector_num; - int i; - - for (i = 0; i < vector_num; i++) { - memcpy(&priv->tqp_vector[i].tx_group.coal, tx, - sizeof(struct hns3_enet_coalesce)); - memcpy(&priv->tqp_vector[i].rx_group.coal, rx, - sizeof(struct hns3_enet_coalesce)); - } -} - -static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, - struct hns3_enet_coalesce *tx, - struct hns3_enet_coalesce *rx) +static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); @@ -3561,7 +3808,7 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, if (ret) goto err_alloc_vector; - hns3_restore_coal(priv, tx, rx); + hns3_restore_coal(priv); ret = hns3_nic_init_vector_data(priv); if (ret) @@ -3593,7 +3840,6 @@ int hns3_set_channels(struct net_device *netdev, struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; - struct hns3_enet_coalesce tx_coal, rx_coal; bool if_running = netif_running(netdev); u32 new_tqp_num = ch->combined_count; u16 org_tqp_num; @@ -3625,15 +3871,7 @@ int hns3_set_channels(struct net_device *netdev, goto open_netdev; } - /* Changing the tqp num may also change the vector num, - * ethtool only support setting and querying one coal - * configuation for now, so save the vector 0' coal - * configuation here in order to restore it. 
- */ - memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal, - sizeof(struct hns3_enet_coalesce)); - memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal, - sizeof(struct hns3_enet_coalesce)); + hns3_store_coal(priv); hns3_nic_dealloc_vector_data(priv); @@ -3641,10 +3879,9 @@ int hns3_set_channels(struct net_device *netdev, hns3_put_ring_config(priv); org_tqp_num = h->kinfo.num_tqps; - ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal); + ret = hns3_modify_tqp_num(netdev, new_tqp_num); if (ret) { - ret = hns3_modify_tqp_num(netdev, org_tqp_num, - &tx_coal, &rx_coal); + ret = hns3_modify_tqp_num(netdev, org_tqp_num); if (ret) { /* If revert to old tqp failed, fatal error occurred */ dev_err(&netdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index cb450d7ec8c1..71cfca132d0b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -419,8 +419,7 @@ struct hns3_nic_ring_data { struct hns3_nic_ops { int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type); + int size, int frag_end, enum hns_desc_type type); int (*maybe_stop_tx)(struct sk_buff **out_skb, int *bnum, struct hns3_enet_ring *ring); void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); @@ -491,7 +490,9 @@ struct hns3_enet_tqp_vector { struct hns3_enet_ring_group rx_group; struct hns3_enet_ring_group tx_group; + cpumask_t affinity_mask; u16 num_tqps; /* total number of tqps in TQP vector */ + struct irq_affinity_notify affinity_notify; char name[HNAE3_INT_NAME_LEN]; @@ -541,6 +542,8 @@ struct hns3_nic_priv { /* Vxlan/Geneve information */ struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct hns3_enet_coalesce tx_coal; + struct hns3_enet_coalesce rx_coal; }; union l3_hdr_info { @@ -581,6 +584,11 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) writel(value, reg_addr + reg); } +static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev) +{ + return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET)); +} + #define hns3_write_dev(a, reg, value) \ hns3_write_reg((a)->io_base, (reg), (value)) @@ -615,7 +623,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev); int hns3_set_channels(struct net_device *netdev, struct ethtool_channels *ch); -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +void hns3_clean_tx_ring(struct hns3_enet_ring *ring); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); int hns3_nic_reset_all_ring(struct hnae3_handle *h); @@ -631,6 +639,9 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, u32 rl_value); +void hns3_enable_vlan_filter(struct net_device *netdev, bool enable); +void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags); + #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); #else diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index f70ee6910ee2..a4762c2b8ba1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -22,13 +22,13 @@ struct hns3_stats { static const struct hns3_stats hns3_txq_stats[] = { /* Tx per-queue statistics */ 
HNS3_TQP_STAT("io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("tx_dropped", sw_err_cnt), + HNS3_TQP_STAT("dropped", sw_err_cnt), HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", tx_pkts), HNS3_TQP_STAT("bytes", tx_bytes), HNS3_TQP_STAT("errors", tx_err_cnt), - HNS3_TQP_STAT("tx_wake", restart_queue), - HNS3_TQP_STAT("tx_busy", tx_busy), + HNS3_TQP_STAT("wake", restart_queue), + HNS3_TQP_STAT("busy", tx_busy), }; #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) @@ -36,7 +36,7 @@ static const struct hns3_stats hns3_txq_stats[] = { static const struct hns3_stats hns3_rxq_stats[] = { /* Rx per-queue statistics */ HNS3_TQP_STAT("io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("rx_dropped", sw_err_cnt), + HNS3_TQP_STAT("dropped", sw_err_cnt), HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", rx_pkts), HNS3_TQP_STAT("bytes", rx_bytes), @@ -53,7 +53,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) -#define HNS3_SELF_TEST_TYPE_NUM 2 +#define HNS3_SELF_TEST_TYPE_NUM 3 #define HNS3_NIC_LB_TEST_PKT_NUM 1 #define HNS3_NIC_LB_TEST_RING_ID 0 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128 @@ -71,6 +71,7 @@ struct hns3_link_mode_mapping { static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) { struct hnae3_handle *h = hns3_get_handle(ndev); + bool vlan_filter_enable; int ret; if (!h->ae_algo->ops->set_loopback || @@ -78,8 +79,9 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) return -EOPNOTSUPP; switch (loop) { - case HNAE3_MAC_INTER_LOOP_SERDES: - case HNAE3_MAC_INTER_LOOP_MAC: + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + case HNAE3_LOOP_APP: ret = h->ae_algo->ops->set_loopback(h, loop, en); break; default: @@ -90,7 +92,14 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) if (ret) return ret; - h->ae_algo->ops->set_promisc_mode(h, en, en); + if (en) { + h->ae_algo->ops->set_promisc_mode(h, true, true); + } else { + /* recover promisc mode before loopback test */ + hns3_update_promisc_mode(ndev, h->netdev_flags); + vlan_filter_enable = ndev->flags & IFF_PROMISC ? 
false : true; + hns3_enable_vlan_filter(ndev, vlan_filter_enable); + } return ret; } @@ -100,41 +109,26 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) struct hnae3_handle *h = hns3_get_handle(ndev); int ret; - if (!h->ae_algo->ops->start) - return -EOPNOTSUPP; - ret = hns3_nic_reset_all_ring(h); if (ret) return ret; - ret = h->ae_algo->ops->start(h); - if (ret) { - netdev_err(ndev, - "hns3_lb_up ae start return error: %d\n", ret); - return ret; - } - ret = hns3_lp_setup(ndev, loop_mode, true); usleep_range(10000, 20000); - return ret; + return 0; } static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) { - struct hnae3_handle *h = hns3_get_handle(ndev); int ret; - if (!h->ae_algo->ops->stop) - return -EOPNOTSUPP; - ret = hns3_lp_setup(ndev, loop_mode, false); if (ret) { netdev_err(ndev, "lb_setup return error: %d\n", ret); return ret; } - h->ae_algo->ops->stop(h); usleep_range(10000, 20000); return 0; @@ -152,6 +146,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb) packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + ethh->h_dest[5] += 0x1f; eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_ARP); skb_reset_mac_header(skb); @@ -214,7 +209,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, for (i = start_ringid; i <= end_ringid; i++) { struct hns3_enet_ring *ring = priv->ring_data[i].ring; - hns3_clean_tx_ring(ring, budget); + hns3_clean_tx_ring(ring); } } @@ -300,16 +295,21 @@ static void hns3_self_test(struct net_device *ndev, if (eth_test->flags != ETH_TEST_FL_OFFLINE) return; - st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC; - st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = - h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; + st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; + st_param[HNAE3_LOOP_APP][1] = + h->flags & HNAE3_SUPPORT_APP_LOOPBACK; - st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES; - st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] = - h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK; + st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES; + st_param[HNAE3_LOOP_SERIAL_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + + st_param[HNAE3_LOOP_PARALLEL_SERDES][0] = + HNAE3_LOOP_PARALLEL_SERDES; + st_param[HNAE3_LOOP_PARALLEL_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; if (if_running) - dev_close(ndev); + ndev->netdev_ops->ndo_stop(ndev); #if IS_ENABLED(CONFIG_VLAN_8021Q) /* Disable the vlan filter for selftest does not support it */ @@ -347,7 +347,7 @@ static void hns3_self_test(struct net_device *ndev, #endif if (if_running) - dev_open(ndev); + ndev->netdev_ops->ndo_open(ndev); } static int hns3_get_sset_count(struct net_device *netdev, int stringset) @@ -365,9 +365,10 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset) case ETH_SS_TEST: return ops->get_sset_count(h, stringset); - } - return 0; + default: + return -EOPNOTSUPP; + } } static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, @@ -383,7 +384,7 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, data[ETH_GSTRING_LEN - 1] = '\0'; /* first, prepend the prefix string */ - n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_", + n1 = snprintf(data, MAX_PREFIX_SIZE, "%s%d_", prefix, i); n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); size_left = (ETH_GSTRING_LEN - 1) - n1; @@ -431,6 +432,8 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 
*data) case ETH_SS_TEST: ops->get_strings(h, stringset, data); break; + default: + break; } } @@ -556,30 +559,72 @@ static int hns3_set_pauseparam(struct net_device *netdev, return -EOPNOTSUPP; } +static void hns3_get_ksettings(struct hnae3_handle *h, + struct ethtool_link_ksettings *cmd) +{ + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + + /* 1.auto_neg & speed & duplex from cmd */ + if (ops->get_ksettings_an_result) + ops->get_ksettings_an_result(h, + &cmd->base.autoneg, + &cmd->base.speed, + &cmd->base.duplex); + + /* 2.get link mode*/ + if (ops->get_link_mode) + ops->get_link_mode(h, + cmd->link_modes.supported, + cmd->link_modes.advertising); + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (ops->get_mdix_mode) + ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); +} + static int hns3_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); - u32 flowctrl_adv = 0; + const struct hnae3_ae_ops *ops; + u8 media_type; u8 link_stat; if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; - /* 1.auto_neg & speed & duplex from cmd */ - if (netdev->phydev) { + ops = h->ae_algo->ops; + if (ops->get_media_type) + ops->get_media_type(h, &media_type); + else + return -EOPNOTSUPP; + + switch (media_type) { + case HNAE3_MEDIA_TYPE_NONE: + cmd->base.port = PORT_NONE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_FIBER: + cmd->base.port = PORT_FIBRE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_COPPER: + if (!netdev->phydev) + return -EOPNOTSUPP; + + cmd->base.port = PORT_TP; phy_ethtool_ksettings_get(netdev->phydev, cmd); + break; + default: + + netdev_warn(netdev, "Unknown media type"); return 0; } - if (h->ae_algo->ops->get_ksettings_an_result) - h->ae_algo->ops->get_ksettings_an_result(h, - &cmd->base.autoneg, - &cmd->base.speed, - &cmd->base.duplex); - else - return -EOPNOTSUPP; + /* mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; link_stat = hns3_get_link(netdev); if (!link_stat) { @@ -587,36 +632,6 @@ static int hns3_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - /* 2.get link mode and port type*/ - if (h->ae_algo->ops->get_link_mode) - h->ae_algo->ops->get_link_mode(h, - cmd->link_modes.supported, - cmd->link_modes.advertising); - - cmd->base.port = PORT_NONE; - if (h->ae_algo->ops->get_port_type) - h->ae_algo->ops->get_port_type(h, - &cmd->base.port); - - /* 3.mdix_ctrl&mdix get from phy reg */ - if (h->ae_algo->ops->get_mdix_mode) - h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, - &cmd->base.eth_tp_mdix); - /* 4.mdio_support */ - cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; - - /* 5.get flow control setttings */ - if (h->ae_algo->ops->get_flowctrl_adv) - h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv); - - if (flowctrl_adv & ADVERTISED_Pause) - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Pause); - - if (flowctrl_adv & ADVERTISED_Asym_Pause) - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Asym_Pause); - return 0; } @@ -671,12 +686,13 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir, if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) return -EOPNOTSUPP; - /* currently we only support Toeplitz hash */ - if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { - netdev_err(netdev, - "hash func not supported (only Toeplitz hash)\n"); + if ((h->pdev->revision == 0x20 && + hfunc != ETH_RSS_HASH_TOP) || 
(hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) { + netdev_err(netdev, "hash func not supported\n"); return -EOPNOTSUPP; } + if (!indir) { netdev_err(netdev, "set rss failed for indir is empty\n"); @@ -692,20 +708,33 @@ static int hns3_get_rxnfc(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple) + if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = h->kinfo.rss_size; - break; + cmd->data = h->kinfo.num_tqps; + return 0; case ETHTOOL_GRXFH: - return h->ae_algo->ops->get_rss_tuple(h, cmd); + if (h->ae_algo->ops->get_rss_tuple) + return h->ae_algo->ops->get_rss_tuple(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRLCNT: + if (h->ae_algo->ops->get_fd_rule_cnt) + return h->ae_algo->ops->get_fd_rule_cnt(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRULE: + if (h->ae_algo->ops->get_fd_rule_info) + return h->ae_algo->ops->get_fd_rule_info(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRLALL: + if (h->ae_algo->ops->get_fd_all_rules) + return h->ae_algo->ops->get_fd_all_rules(h, cmd, + rule_locs); + return -EOPNOTSUPP; default: return -EOPNOTSUPP; } - - return 0; } static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, @@ -788,12 +817,22 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple) + if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_SRXFH: - return h->ae_algo->ops->set_rss_tuple(h, cmd); + if (h->ae_algo->ops->set_rss_tuple) + return h->ae_algo->ops->set_rss_tuple(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_SRXCLSRLINS: + if (h->ae_algo->ops->add_fd_entry) + return h->ae_algo->ops->add_fd_entry(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_SRXCLSRLDEL: + if (h->ae_algo->ops->del_fd_entry) + return h->ae_algo->ops->del_fd_entry(h, cmd); + return -EOPNOTSUPP; default: return -EOPNOTSUPP; } @@ -1047,6 +1086,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_ethtool_stats = hns3_get_stats, .get_sset_count = hns3_get_sset_count, .get_rxnfc = hns3_get_rxnfc, + .set_rxnfc = hns3_set_rxnfc, .get_rxfh_key_size = hns3_get_rss_key_size, .get_rxfh_indir_size = hns3_get_rss_indir_size, .get_rxfh = hns3_get_rss, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index cb8ddd043476..580e81743681 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -6,6 +6,6 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 821d4c2f84bd..872cd4bdd70d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -175,21 +175,22 @@ enum hclge_opcode_type { HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, + HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004, 
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, - HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012, - - /* Multicast linear table commands */ - HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, - HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021, - HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022, - HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023, /* VLAN commands */ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, + /* Flow Director commands */ + HCLGE_OPC_FD_MODE_CTRL = 0x1200, + HCLGE_OPC_FD_GET_ALLOCATION = 0x1201, + HCLGE_OPC_FD_KEY_CONFIG = 0x1202, + HCLGE_OPC_FD_TCAM_OP = 0x1203, + HCLGE_OPC_FD_AD_OP = 0x1204, + /* MDIO command */ HCLGE_OPC_MDIO_CONFIG = 0x1900, @@ -208,6 +209,28 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + + /* Error INT commands */ + HCLGE_TM_SCH_ECC_INT_EN = 0x0829, + HCLGE_TM_SCH_ECC_ERR_RINT_CMD = 0x082d, + HCLGE_TM_SCH_ECC_ERR_RINT_CE = 0x082f, + HCLGE_TM_SCH_ECC_ERR_RINT_NFE = 0x0830, + HCLGE_TM_SCH_ECC_ERR_RINT_FE = 0x0831, + HCLGE_TM_SCH_MBIT_ECC_INFO_CMD = 0x0833, + HCLGE_COMMON_ECC_INT_CFG = 0x1505, + HCLGE_IGU_EGU_TNL_INT_QUERY = 0x1802, + HCLGE_IGU_EGU_TNL_INT_EN = 0x1803, + HCLGE_IGU_EGU_TNL_INT_CLR = 0x1804, + HCLGE_IGU_COMMON_INT_QUERY = 0x1805, + HCLGE_IGU_COMMON_INT_EN = 0x1806, + HCLGE_IGU_COMMON_INT_CLR = 0x1807, + HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14, + HCLGE_TM_QCN_MEM_INT_INFO_CMD = 0x1A17, + HCLGE_PPP_CMD0_INT_CMD = 0x2100, + HCLGE_PPP_CMD1_INT_CMD = 0x2101, + HCLGE_NCSI_INT_QUERY = 0x2400, + HCLGE_NCSI_INT_EN = 0x2401, + HCLGE_NCSI_INT_CLR = 0x2402, }; #define HCLGE_TQP_REG_OFFSET 0x80000 @@ -395,6 +418,8 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) #define HCLGE_CFG_SPEED_ABILITY_S 0 #define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0) +#define HCLGE_CFG_UMV_TBL_SPACE_S 16 +#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) struct hclge_cfg_param_cmd { __le32 offset; @@ -584,13 +609,12 @@ struct hclge_mac_vlan_tbl_entry_cmd { u8 rsv2[6]; }; -#define HCLGE_VLAN_MASK_EN_B 0 -struct hclge_mac_vlan_mask_entry_cmd { - u8 rsv0[2]; - u8 vlan_mask; - u8 rsv1; - u8 mac_mask[6]; - u8 rsv2[14]; +#define HCLGE_UMV_SPC_ALC_B 0 +struct hclge_umv_spc_alc_cmd { + u8 allocate; + u8 rsv1[3]; + __le32 space_size; + u8 rsv2[16]; }; #define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0) @@ -615,30 +639,6 @@ struct hclge_mac_mgr_tbl_entry_cmd { u8 rsv3[2]; }; -#define HCLGE_CFG_MTA_MAC_SEL_S 0 -#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) -#define HCLGE_CFG_MTA_MAC_EN_B 7 -struct hclge_mta_filter_mode_cmd { - u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ - u8 rsv[23]; -}; - -#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0 -struct hclge_cfg_func_mta_filter_cmd { - u8 accept; /* Only used lowest 1 bit */ - u8 function_id; - u8 rsv[22]; -}; - -#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0 -#define HCLGE_CFG_MTA_ITEM_IDX_S 0 -#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0) -struct hclge_cfg_func_mta_item_cmd { - __le16 item_idx; /* Only used lowest 12 bit */ - u8 accept; /* Only used lowest 1 bit */ - u8 rsv[21]; -}; - struct hclge_mac_vlan_add_cmd { __le16 flags; __le16 mac_addr_hi16; @@ -778,6 +778,7 @@ struct hclge_reset_cmd { }; #define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) +#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2) #define HCLGE_CMD_SERDES_DONE_B BIT(0) #define HCLGE_CMD_SERDES_SUCCESS_B BIT(1) struct hclge_serdes_lb_cmd { @@ -818,6 +819,76 @@ struct hclge_set_led_state_cmd { u8 rsv2[20]; }; +struct hclge_get_fd_mode_cmd { + u8 mode; + u8 enable; + u8 rsv[22]; +}; 
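As context for the flow-director command layouts added here: hclge_get_fd_mode_cmd above, and the allocation/key/TCAM/action structures that follow, are each sized to fill the command descriptor's 24-byte data area, so a handler can cast desc.data to the layout and send a single descriptor, as other commands in this driver do. A minimal sketch of that pattern for HCLGE_OPC_FD_MODE_CTRL follows; the function name is hypothetical and not part of the patch, and error handling is trimmed.

/* Sketch only: mirrors the driver's existing command pattern and
 * assumes the usual hclge_main.h/hclge_cmd.h context.  The helper
 * name is hypothetical; struct hclge_get_fd_mode_cmd (24 bytes)
 * overlays the descriptor data area.
 */
static int hclge_get_fd_mode_sketch(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	/* build a read (query) descriptor for the FD mode command */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret = %d\n", ret);
		return ret;
	}

	/* firmware fills the descriptor data area on a successful read */
	*fd_mode = req->mode;

	return 0;
}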
+ +struct hclge_get_fd_allocation_cmd { + __le32 stage1_entry_num; + __le32 stage2_entry_num; + __le16 stage1_counter_num; + __le16 stage2_counter_num; + u8 rsv[12]; +}; + +struct hclge_set_fd_key_config_cmd { + u8 stage; + u8 key_select; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u8 rsv1[2]; + __le32 tuple_mask; + __le32 meta_data_mask; + u8 rsv2[8]; +}; + +#define HCLGE_FD_EPORT_SW_EN_B 0 +struct hclge_fd_tcam_config_1_cmd { + u8 stage; + u8 xy_sel; + u8 port_info; + u8 rsv1[1]; + __le32 index; + u8 entry_vld; + u8 rsv2[7]; + u8 tcam_data[8]; +}; + +struct hclge_fd_tcam_config_2_cmd { + u8 tcam_data[24]; +}; + +struct hclge_fd_tcam_config_3_cmd { + u8 tcam_data[20]; + u8 rsv[4]; +}; + +#define HCLGE_FD_AD_DROP_B 0 +#define HCLGE_FD_AD_DIRECT_QID_B 1 +#define HCLGE_FD_AD_QID_S 2 +#define HCLGE_FD_AD_QID_M GENMASK(12, 2) +#define HCLGE_FD_AD_USE_COUNTER_B 12 +#define HCLGE_FD_AD_COUNTER_NUM_S 13 +#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13) +#define HCLGE_FD_AD_NXT_STEP_B 20 +#define HCLGE_FD_AD_NXT_KEY_S 21 +#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21) +#define HCLGE_FD_AD_WR_RULE_ID_B 0 +#define HCLGE_FD_AD_RULE_ID_S 1 +#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1) + +struct hclge_fd_ad_config_cmd { + u8 stage; + u8 rsv1[3]; + __le32 index; + __le64 ad_data; + u8 rsv2[8]; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index f08ebb7caaaf..e72f724123d7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -73,6 +73,7 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, u8 *tc, bool *changed) { + bool has_ets_tc = false; u32 total_ets_bw = 0; u8 max_tc = 0; u8 i; @@ -100,13 +101,14 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, *changed = true; total_ets_bw += ets->tc_tx_bw[i]; - break; + has_ets_tc = true; + break; default: return -EINVAL; } } - if (total_ets_bw != BW_PERCENT) + if (has_ets_tc && total_ets_bw != BW_PERCENT) return -EINVAL; *tc = max_tc + 1; @@ -182,7 +184,9 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) return ret; - hclge_tm_schd_info_update(hdev, num_tc); + ret = hclge_tm_schd_info_update(hdev, num_tc); + if (ret) + return ret; ret = hclge_ieee_ets_to_tm_info(hdev, ets); if (ret) @@ -308,7 +312,9 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) return -EINVAL; } - hclge_tm_schd_info_update(hdev, tc); + ret = hclge_tm_schd_info_update(hdev, tc); + if (ret) + return ret; ret = hclge_tm_prio_tc_info_update(hdev, prio_tc); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c new file mode 100644 index 000000000000..f7e363b90fe0 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -0,0 +1,1088 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2016-2017 Hisilicon Limited. 
*/ + +#include "hclge_err.h" + +static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { + { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" }, + { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" }, + { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" }, + { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" }, + { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = { + { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { + { .int_msk = BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" }, + { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" }, + { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" }, + { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" }, + { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = { + { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" }, + { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" }, + { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" }, + { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" }, + { .int_msk = 
BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { + { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" }, + { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" }, + { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" }, + { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" }, + { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" }, + { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" }, + { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_igu_com_err_int[] = { + { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" }, + { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" }, + { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = { + { .int_msk = BIT(0), .msg = "rx_buf_overflow" }, + { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" }, + { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" }, + { .int_msk = BIT(3), .msg = "tx_buf_overflow" }, + { .int_msk = BIT(4), .msg = "tx_buf_underrun" }, + { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ncsi_err_int[] = { + { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_int0[] = { + { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" }, + { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" }, + { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" }, + { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" }, + { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" }, + { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" }, + { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" }, + { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" }, + { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" }, + { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" }, + { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" }, + { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" }, + { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" }, + { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" }, + { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" }, + { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" }, + { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" }, + { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" }, + { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" }, + { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" }, + { .int_msk = BIT(25), .msg = 
"mcast_linear_table_mem_ecc_1bit_err" }, + { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" }, + { .int_msk = BIT(27), + .msg = "flow_director_ad_mem0_ecc_1bit_err" }, + { .int_msk = BIT(28), + .msg = "flow_director_ad_mem1_ecc_1bit_err" }, + { .int_msk = BIT(29), + .msg = "rx_vlan_tag_memory_ecc_1bit_err" }, + { .int_msk = BIT(30), + .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_int1[] = { + { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" }, + { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" }, + { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" }, + { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" }, + { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" }, + { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" }, + { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" }, + { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" }, + { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" }, + { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" }, + { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" }, + { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" }, + { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" }, + { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" }, + { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" }, + { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" }, + { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" }, + { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" }, + { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" }, + { .int_msk = BIT(27), + .msg = "flow_director_ad_mem0_ecc_mbit_err" }, + { .int_msk = BIT(28), + .msg = "flow_director_ad_mem1_ecc_mbit_err" }, + { .int_msk = BIT(29), + .msg = "rx_vlan_tag_memory_ecc_mbit_err" }, + { .int_msk = BIT(30), + .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppp_pf_int[] = { + { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" }, + { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_int2[] = { + { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" }, + { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" }, + { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_int3[] = { + { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" }, + { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" }, + 
{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" }, + { /* sentinel */ } +}; + +struct hclge_tm_sch_ecc_info { + const char *name; +}; + +static const struct hclge_tm_sch_ecc_info hclge_tm_sch_ecc_err[7][15] = { + { + { .name = "QSET_QUEUE_CTRL:PRI_LEN TAB" }, + { .name = "QSET_QUEUE_CTRL:SPA_LEN TAB" }, + { .name = "QSET_QUEUE_CTRL:SPB_LEN TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRA_LEN TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRB_LEN TAB" }, + { .name = "QSET_QUEUE_CTRL:SPA_HPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:SPB_HPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRA_HPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRB_HPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:QS_LINKLIST TAB" }, + { .name = "QSET_QUEUE_CTRL:SPA_TPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:SPB_TPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRA_TPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRB_TPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:QS_DEFICITCNT TAB" }, + }, + { + { .name = "ROCE_QUEUE_CTRL:QS_LEN TAB" }, + { .name = "ROCE_QUEUE_CTRL:QS_TPTR TAB" }, + { .name = "ROCE_QUEUE_CTRL:QS_HPTR TAB" }, + { .name = "ROCE_QUEUE_CTRL:QLINKLIST TAB" }, + { .name = "ROCE_QUEUE_CTRL:QCLEN TAB" }, + }, + { + { .name = "NIC_QUEUE_CTRL:QS_LEN TAB" }, + { .name = "NIC_QUEUE_CTRL:QS_TPTR TAB" }, + { .name = "NIC_QUEUE_CTRL:QS_HPTR TAB" }, + { .name = "NIC_QUEUE_CTRL:QLINKLIST TAB" }, + { .name = "NIC_QUEUE_CTRL:QCLEN TAB" }, + }, + { + { .name = "RAM_CFG_CTRL:CSHAP TAB" }, + { .name = "RAM_CFG_CTRL:PSHAP TAB" }, + }, + { + { .name = "SHAPER_CTRL:PSHAP TAB" }, + }, + { + { .name = "MSCH_CTRL" }, + }, + { + { .name = "TOP_CTRL" }, + }, +}; + +static const struct hclge_hw_error hclge_tm_sch_err_int[] = { + { .int_msk = BIT(0), .msg = "tm_sch_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_full_err" }, + { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_full_err" }, + { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_full_err" }, + { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_full_err" }, + { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_full_err" }, + { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(12), + .msg = "tm_sch_port_shap_offset_fifo_wr_full_err" }, + { .int_msk = BIT(13), + .msg = "tm_sch_port_shap_offset_fifo_rd_empty_err" }, + { .int_msk = BIT(14), + .msg = "tm_sch_pg_pshap_offset_fifo_wr_full_err" }, + { .int_msk = BIT(15), + .msg = "tm_sch_pg_pshap_offset_fifo_rd_empty_err" }, + { .int_msk = BIT(16), + .msg = "tm_sch_pg_cshap_offset_fifo_wr_full_err" }, + { .int_msk = BIT(17), + .msg = "tm_sch_pg_cshap_offset_fifo_rd_empty_err" }, + { .int_msk = BIT(18), + .msg = "tm_sch_pri_pshap_offset_fifo_wr_full_err" }, + { .int_msk = BIT(19), + .msg = "tm_sch_pri_pshap_offset_fifo_rd_empty_err" }, + { .int_msk = BIT(20), + .msg = "tm_sch_pri_cshap_offset_fifo_wr_full_err" }, + { .int_msk = BIT(21), + .msg = "tm_sch_pri_cshap_offset_fifo_rd_empty_err" }, + { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_full_err" }, + { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_empty_err" }, + { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_full_err" }, + { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_empty_err" }, 
+ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_full_err" }, + { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_empty_err" }, + { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_full_err" }, + { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_empty_err" }, + { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_full_err" }, + { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_empty_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_qcn_ecc_err_int[] = { + { .int_msk = BIT(0), .msg = "qcn_byte_mem_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "qcn_time_mem_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "qcn_fb_mem_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "qcn_link_mem_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "qcn_rate_mem_ecc_1bit_err" }, + { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "qcn_tmplt_mem_ecc_1bit_err" }, + { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "qcn_shap_cfg_mem_ecc_1bit_err" }, + { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "qcn_gp0_barrel_mem_ecc_1bit_err" }, + { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" }, + { .int_msk = BIT(16), .msg = "qcn_gp1_barrel_mem_ecc_1bit_err" }, + { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" }, + { .int_msk = BIT(18), .msg = "qcn_gp2_barrel_mem_ecc_1bit_err" }, + { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" }, + { .int_msk = BIT(20), .msg = "qcn_gp3_barral_mem_ecc_1bit_err" }, + { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static void hclge_log_error(struct device *dev, + const struct hclge_hw_error *err_list, + u32 err_sts) +{ + const struct hclge_hw_error *err; + int i = 0; + + while (err_list[i].msg) { + err = &err_list[i]; + if (!(err->int_msk & err_sts)) { + i++; + continue; + } + dev_warn(dev, "%s [error status=0x%x] found\n", + err->msg, err_sts); + i++; + } +} + +/* hclge_cmd_query_error: read the error information + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @cmd: command opcode + * @flag: flag for extended command structure + * @w_num: offset for setting the read interrupt type. + * @int_type: select which type of the interrupt for which the error + * info will be read(RAS-CE/RAS-NFE/RAS-FE etc). 
+ * + * This function query the error info from hw register/s using command + */ +static int hclge_cmd_query_error(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 cmd, + u16 flag, u8 w_num, + enum hclge_err_int_type int_type) +{ + struct device *dev = &hdev->pdev->dev; + int num = 1; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], cmd, true); + if (flag) { + desc[0].flag |= cpu_to_le16(flag); + hclge_cmd_setup_basic_desc(&desc[1], cmd, true); + num = 2; + } + if (w_num) + desc[0].data[w_num] = cpu_to_le32(int_type); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "query error cmd failed (%d)\n", ret); + + return ret; +} + +/* hclge_cmd_clear_error: clear the error status + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @desc_src: prefilled descriptor from the previous command for reusing + * @cmd: command opcode + * @flag: flag for extended command structure + * + * This function clear the error status in the hw register/s using command + */ +static int hclge_cmd_clear_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + struct hclge_desc *desc_src, + u32 cmd, u16 flag) +{ + struct device *dev = &hdev->pdev->dev; + int num = 1; + int ret, i; + + if (cmd) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + if (flag) { + desc[0].flag |= cpu_to_le16(flag); + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + num = 2; + } + if (desc_src) { + for (i = 0; i < 6; i++) { + desc[0].data[i] = desc_src[0].data[i]; + if (flag) + desc[1].data[i] = desc_src[1].data[i]; + } + } + } else { + hclge_cmd_reuse_desc(&desc[0], false); + if (flag) { + desc[0].flag |= cpu_to_le16(flag); + hclge_cmd_reuse_desc(&desc[1], false); + num = 2; + } + } + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear error cmd failed (%d)\n", ret); + + return ret; +} + +static int hclge_enable_common_error(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false); + + if (en) { + /* enable COMMON error interrupts */ + desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN); + desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN | + HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN); + desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN); + desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN); + } else { + /* disable COMMON error interrupts */ + desc[0].data[0] = 0; + desc[0].data[2] = 0; + desc[0].data[3] = 0; + desc[0].data[4] = 0; + desc[0].data[5] = 0; + } + desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK); + desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK | + HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK); + desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK); + desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK); + desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "failed(%d) to enable/disable COMMON err interrupts\n", + ret); + + return ret; +} + +static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + if (hdev->pdev->revision < 0x21) + return 
0; + + /* enable/disable NCSI error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN); + else + desc.data[0] = 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "failed(%d) to enable/disable NCSI error interrupts\n", + ret); + + return ret; +} + +static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* enable/disable error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN); + else + desc.data[0] = 0; + desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, + "failed(%d) to enable/disable IGU common interrupts\n", + ret); + return ret; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN); + else + desc.data[0] = 0; + desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, + "failed(%d) to enable/disable IGU-EGU TNL interrupts\n", + ret); + return ret; + } + + ret = hclge_enable_ncsi_error(hdev, en); + if (ret) + dev_err(dev, "fail(%d) to en/disable err int\n", ret); + + return ret; +} + +static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, + bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* enable/disable PPP error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + + if (cmd == HCLGE_PPP_CMD0_INT_CMD) { + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN); + } else { + desc[0].data[0] = 0; + desc[0].data[1] = 0; + } + desc[1].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK); + } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN); + } else { + desc[0].data[0] = 0; + desc[0].data[1] = 0; + } + desc[1].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK); + } + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "failed(%d) to enable/disable PPP error interrupts\n", + ret); + + return ret; +} + +static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + int ret; + + ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD, + en); + if (ret) { + dev_err(dev, + "failed(%d) to enable/disable PPP error intr 0,1\n", + ret); + return ret; + } + + ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD, + en); + if (ret) + dev_err(dev, + "failed(%d) to enable/disable PPP error intr 2,3\n", + ret); + + return ret; +} + +int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* enable TM SCH hw errors */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false); + if (en) + desc.data[0] = 
cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN); + else + desc.data[0] = 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, "failed(%d) to configure TM SCH errors\n", ret); + return ret; + } + + /* enable TM QCN hw errors */ + ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, + 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read TM QCN CFG status\n", ret); + return ret; + } + + hclge_cmd_reuse_desc(&desc, false); + if (en) + desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN); + else + desc.data[1] = 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "failed(%d) to configure TM QCN mem errors\n", ret); + + return ret; +} + +static void hclge_process_common_error(struct hclge_dev *hdev, + enum hclge_err_int_type type) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + u32 err_sts; + int ret; + + /* read err sts */ + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_COMMON_ECC_INT_CFG, + HCLGE_CMD_FLAG_NEXT, 0, 0); + if (ret) { + dev_err(dev, + "failed(=%d) to query COMMON error interrupt status\n", + ret); + return; + } + + /* log err */ + err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK; + hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts); + + err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK; + hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts); + + err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT) + & HCLGE_CMDQ_ECC_INT_MASK; + hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts); + + if ((le32_to_cpu(desc[0].data[3])) & BIT(0)) + dev_warn(dev, "imp_rd_data_poison_err found\n"); + + err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) & + HCLGE_TQP_ECC_INT_MASK; + hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts); + + err_sts = (le32_to_cpu(desc[0].data[5])) & + HCLGE_IMP_ITCM4_ECC_INT_MASK; + hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts); + + /* clear error interrupts */ + desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK); + desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK | + HCLGE_CMDQ_ROCEE_ECC_CLR_MASK); + desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK); + desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK); + + ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0, + HCLGE_CMD_FLAG_NEXT); + if (ret) + dev_err(dev, + "failed(%d) to clear COMMON error interrupt status\n", + ret); +} + +static void hclge_process_ncsi_error(struct hclge_dev *hdev, + enum hclge_err_int_type type) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc_rd; + struct hclge_desc desc_wr; + u32 err_sts; + int ret; + + if (hdev->pdev->revision < 0x21) + return; + + /* read NCSI error status */ + ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY, + 0, 1, HCLGE_NCSI_ERR_INT_TYPE); + if (ret) { + dev_err(dev, + "failed(=%d) to query NCSI error interrupt status\n", + ret); + return; + } + + /* log err */ + err_sts = le32_to_cpu(desc_rd.data[0]); + hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts); + + /* clear err int */ + ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd, + HCLGE_NCSI_INT_CLR, 0); + if (ret) + dev_err(dev, "failed(=%d) to clear NCSI intrerrupt status\n", + ret); +} + +static void hclge_process_igu_egu_error(struct hclge_dev *hdev, + enum hclge_err_int_type int_type) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc_rd; + struct hclge_desc desc_wr; + u32 err_sts; + int ret; + + /* read IGU 
common err sts */ + ret = hclge_cmd_query_error(hdev, &desc_rd, + HCLGE_IGU_COMMON_INT_QUERY, + 0, 1, int_type); + if (ret) { + dev_err(dev, "failed(=%d) to query IGU common int status\n", + ret); + return; + } + + /* log err */ + err_sts = le32_to_cpu(desc_rd.data[0]) & + HCLGE_IGU_COM_INT_MASK; + hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts); + + /* clear err int */ + ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd, + HCLGE_IGU_COMMON_INT_CLR, 0); + if (ret) { + dev_err(dev, "failed(=%d) to clear IGU common int status\n", + ret); + return; + } + + /* read IGU-EGU TNL err sts */ + ret = hclge_cmd_query_error(hdev, &desc_rd, + HCLGE_IGU_EGU_TNL_INT_QUERY, + 0, 1, int_type); + if (ret) { + dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n", + ret); + return; + } + + /* log err */ + err_sts = le32_to_cpu(desc_rd.data[0]) & + HCLGE_IGU_EGU_TNL_INT_MASK; + hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts); + + /* clear err int */ + ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd, + HCLGE_IGU_EGU_TNL_INT_CLR, 0); + if (ret) { + dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n", + ret); + return; + } + + hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE); +} + +static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd, + enum hclge_err_int_type int_type) +{ + enum hnae3_reset_type reset_level = HNAE3_NONE_RESET; + struct device *dev = &hdev->pdev->dev; + const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3; + struct hclge_desc desc[2]; + u32 err_sts; + int ret; + + /* read PPP INT sts */ + ret = hclge_cmd_query_error(hdev, &desc[0], cmd, + HCLGE_CMD_FLAG_NEXT, 5, int_type); + if (ret) { + dev_err(dev, "failed(=%d) to query PPP interrupt status\n", + ret); + return -EIO; + } + + /* log error */ + if (cmd == HCLGE_PPP_CMD0_INT_CMD) { + hw_err_lst1 = &hclge_ppp_mpf_int0[0]; + hw_err_lst2 = &hclge_ppp_mpf_int1[0]; + hw_err_lst3 = &hclge_ppp_pf_int[0]; + } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { + hw_err_lst1 = &hclge_ppp_mpf_int2[0]; + hw_err_lst2 = &hclge_ppp_mpf_int3[0]; + } else { + dev_err(dev, "invalid command(=%d)\n", cmd); + return -EINVAL; + } + + err_sts = le32_to_cpu(desc[0].data[2]); + if (err_sts) { + hclge_log_error(dev, hw_err_lst1, err_sts); + reset_level = HNAE3_FUNC_RESET; + } + + err_sts = le32_to_cpu(desc[0].data[3]); + if (err_sts) { + hclge_log_error(dev, hw_err_lst2, err_sts); + reset_level = HNAE3_FUNC_RESET; + } + + err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3; + if (err_sts) { + hclge_log_error(dev, hw_err_lst3, err_sts); + reset_level = HNAE3_FUNC_RESET; + } + + /* clear PPP INT */ + ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0, + HCLGE_CMD_FLAG_NEXT); + if (ret) { + dev_err(dev, "failed(=%d) to clear PPP interrupt status\n", + ret); + return -EIO; + } + + return 0; +} + +static void hclge_process_ppp_error(struct hclge_dev *hdev, + enum hclge_err_int_type int_type) +{ + struct device *dev = &hdev->pdev->dev; + int ret; + + /* read PPP INT0,1 sts */ + ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD, + int_type); + if (ret < 0) { + dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n", + ret); + return; + } + + /* read err PPP INT2,3 sts */ + ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD, + int_type); + if (ret < 0) + dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n", + ret); +} + +static void hclge_process_tm_sch_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + const struct 
hclge_tm_sch_ecc_info *tm_sch_ecc_info; + struct hclge_desc desc; + u32 ecc_info; + u8 module_no; + u8 ram_no; + int ret; + + /* read TM scheduler errors */ + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_SCH_MBIT_ECC_INFO_CMD, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read SCH mbit ECC err info\n", ret); + return; + } + ecc_info = le32_to_cpu(desc.data[0]); + + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_SCH_ECC_ERR_RINT_CMD, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read SCH ECC err status\n", ret); + return; + } + + /* log TM scheduler errors */ + if (le32_to_cpu(desc.data[0])) { + hclge_log_error(dev, &hclge_tm_sch_err_int[0], + le32_to_cpu(desc.data[0])); + if (le32_to_cpu(desc.data[0]) & 0x2) { + module_no = (ecc_info >> 20) & 0xF; + ram_no = (ecc_info >> 16) & 0xF; + tm_sch_ecc_info = + &hclge_tm_sch_ecc_err[module_no][ram_no]; + dev_warn(dev, "ecc err module:ram=%s\n", + tm_sch_ecc_info->name); + dev_warn(dev, "ecc memory address = 0x%x\n", + ecc_info & 0xFFFF); + } + } + + /* clear TM scheduler errors */ + ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to clear TM SCH error status\n", ret); + return; + } + + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_SCH_ECC_ERR_RINT_CE, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read SCH CE status\n", ret); + return; + } + + ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to clear TM SCH CE status\n", ret); + return; + } + + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_SCH_ECC_ERR_RINT_NFE, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read SCH NFE status\n", ret); + return; + } + + ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to clear TM SCH NFE status\n", ret); + return; + } + + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_SCH_ECC_ERR_RINT_FE, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read SCH FE status\n", ret); + return; + } + + ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + if (ret) + dev_err(dev, "failed(%d) to clear TM SCH FE status\n", ret); +} + +static void hclge_process_tm_qcn_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* read QCN errors */ + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_QCN_MEM_INT_INFO_CMD, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read QCN ECC err status\n", ret); + return; + } + + /* log QCN errors */ + if (le32_to_cpu(desc.data[0])) + hclge_log_error(dev, &hclge_qcn_ecc_err_int[0], + le32_to_cpu(desc.data[0])); + + /* clear QCN errors */ + ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + if (ret) + dev_err(dev, "failed(%d) to clear QCN error status\n", ret); +} + +static void hclge_process_tm_error(struct hclge_dev *hdev, + enum hclge_err_int_type type) +{ + hclge_process_tm_sch_error(hdev); + hclge_process_tm_qcn_error(hdev); +} + +static const struct hclge_hw_blk hw_blk[] = { + { .msk = BIT(0), .name = "IGU_EGU", + .enable_error = hclge_enable_igu_egu_error, + .process_error = hclge_process_igu_egu_error, }, + { .msk = BIT(5), .name = "COMMON", + .enable_error = hclge_enable_common_error, + .process_error = hclge_process_common_error, }, + { .msk = BIT(4), .name = "TM", + .enable_error = hclge_enable_tm_hw_error, + .process_error = hclge_process_tm_error, }, + { .msk = BIT(1), .name = "PPP", + .enable_error = hclge_enable_ppp_error, + .process_error = hclge_process_ppp_error, }, + { 
/* sentinel */ } +}; + +int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state) +{ + struct device *dev = &hdev->pdev->dev; + int ret = 0; + int i = 0; + + while (hw_blk[i].name) { + if (!hw_blk[i].enable_error) { + i++; + continue; + } + ret = hw_blk[i].enable_error(hdev, state); + if (ret) { + dev_err(dev, "fail(%d) to en/disable err int\n", ret); + return ret; + } + i++; + } + + return ret; +} + +pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct device *dev = &hdev->pdev->dev; + u32 sts, val; + int i = 0; + + sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG); + + /* Processing Non-fatal errors */ + if (sts & HCLGE_RAS_REG_NFE_MASK) { + val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF; + i = 0; + while (hw_blk[i].name) { + if (!(hw_blk[i].msk & val)) { + i++; + continue; + } + dev_warn(dev, "%s ras non-fatal error identified\n", + hw_blk[i].name); + if (hw_blk[i].process_error) + hw_blk[i].process_error(hdev, + HCLGE_ERR_INT_RAS_NFE); + i++; + } + } + + return PCI_ERS_RESULT_NEED_RESET; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h new file mode 100644 index 000000000000..e0e3b5861495 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#ifndef __HCLGE_ERR_H +#define __HCLGE_ERR_H + +#include "hclge_main.h" + +#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00 +#define HCLGE_RAS_REG_FE_MASK 0xFF +#define HCLGE_RAS_REG_NFE_MASK 0xFF00 +#define HCLGE_RAS_REG_NFE_SHIFT 8 + +#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000 +#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000 +#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN 0x300 +#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK 0x300 +#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN 0xFFFF +#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK 0xFFFF +#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN 0xFFFF0000 +#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK 0xFFFF0000 +#define HCLGE_IMP_RD_POISON_ERR_INT_EN 0x0100 +#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100 +#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF +#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF +#define HCLGE_IGU_ERR_INT_EN 0x0000066F +#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F +#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF +#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF +#define HCLGE_PPP_PF_ERR_INT_EN 0x0003 +#define HCLGE_PPP_PF_ERR_INT_EN_MASK 0x0003 +#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F +#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3 +#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF +#define HCLGE_NCSI_ERR_INT_EN 0x3 +#define HCLGE_NCSI_ERR_INT_TYPE 0x9 + +#define HCLGE_IMP_TCM_ECC_INT_MASK 0xFFFF +#define HCLGE_IMP_ITCM4_ECC_INT_MASK 0x3 +#define HCLGE_CMDQ_ECC_INT_MASK 0xFFFF +#define HCLGE_CMDQ_ROC_ECC_INT_SHIFT 16 +#define HCLGE_TQP_ECC_INT_MASK 0xFFF +#define HCLGE_TQP_ECC_INT_SHIFT 16 +#define HCLGE_IMP_TCM_ECC_CLR_MASK 0xFFFF +#define HCLGE_IMP_ITCM4_ECC_CLR_MASK 0x3 +#define HCLGE_CMDQ_NIC_ECC_CLR_MASK 0xFFFF +#define HCLGE_CMDQ_ROCEE_ECC_CLR_MASK 0xFFFF0000 +#define 
HCLGE_TQP_IMP_ERR_CLR_MASK 0x0FFF0001 +#define HCLGE_IGU_COM_INT_MASK 0xF +#define HCLGE_IGU_EGU_TNL_INT_MASK 0x3F +#define HCLGE_PPP_PF_INT_MASK 0x100 + +enum hclge_err_int_type { + HCLGE_ERR_INT_MSIX = 0, + HCLGE_ERR_INT_RAS_CE = 1, + HCLGE_ERR_INT_RAS_NFE = 2, + HCLGE_ERR_INT_RAS_FE = 3, +}; + +struct hclge_hw_blk { + u32 msk; + const char *name; + int (*enable_error)(struct hclge_dev *hdev, bool en); + void (*process_error)(struct hclge_dev *hdev, + enum hclge_err_int_type type); +}; + +struct hclge_hw_error { + u32 int_msk; + const char *msg; +}; + +int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state); +int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en); +pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8577dfc799ad..5234b5373ed3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -19,20 +19,18 @@ #include "hclge_mbx.h" #include "hclge_mdio.h" #include "hclge_tm.h" +#include "hclge_err.h" #include "hnae3.h" #define HCLGE_NAME "hclge" #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) -#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) -#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) -static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, - enum hclge_mta_dmac_sel_type mta_mac_sel, - bool enable); static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size, bool is_alloc); static struct hnae3_ae_algo ae_algo; @@ -51,175 +49,12 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { - "Mac Loopback test", - "Serdes Loopback test", + "App Loopback test", + "Serdes serial Loopback test", + "Serdes parallel Loopback test", "Phy Loopback test" }; -static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { - {"igu_rx_oversize_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, - {"igu_rx_undersize_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, - {"igu_rx_out_all_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, - {"igu_rx_uni_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, - {"igu_rx_multi_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, - {"igu_rx_broad_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, - {"egu_tx_out_all_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, - {"egu_tx_uni_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, - {"egu_tx_multi_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, - {"egu_tx_broad_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, - {"ssu_ppp_mac_key_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, - {"ssu_ppp_host_key_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, - {"ppp_ssu_mac_rlt_num", - HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, - {"ppp_ssu_host_rlt_num", - HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, - {"ssu_tx_in_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, - 
{"ssu_tx_out_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, - {"ssu_rx_in_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, - {"ssu_rx_out_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} -}; - -static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { - {"igu_rx_err_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, - {"igu_rx_no_eof_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, - {"igu_rx_no_sof_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, - {"egu_tx_1588_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, - {"ssu_full_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, - {"ssu_part_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, - {"ppp_key_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, - {"ppp_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, - {"ssu_key_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, - {"pkt_curr_buf_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, - {"qcn_fb_rcv_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, - {"qcn_fb_drop_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, - {"qcn_fb_invaild_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, - {"rx_packet_tc0_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, - {"rx_packet_tc1_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, - {"rx_packet_tc2_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, - {"rx_packet_tc3_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, - {"rx_packet_tc4_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, - {"rx_packet_tc5_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, - {"rx_packet_tc6_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, - {"rx_packet_tc7_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, - {"rx_packet_tc0_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, - {"rx_packet_tc1_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, - {"rx_packet_tc2_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, - {"rx_packet_tc3_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, - {"rx_packet_tc4_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, - {"rx_packet_tc5_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, - {"rx_packet_tc6_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, - {"rx_packet_tc7_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, - {"tx_packet_tc0_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, - {"tx_packet_tc1_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, - {"tx_packet_tc2_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, - {"tx_packet_tc3_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, - {"tx_packet_tc4_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, - {"tx_packet_tc5_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, - {"tx_packet_tc6_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, - {"tx_packet_tc7_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, - {"tx_packet_tc0_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, - {"tx_packet_tc1_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, - {"tx_packet_tc2_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, - {"tx_packet_tc3_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, - {"tx_packet_tc4_out_cnt", - 
HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, - {"tx_packet_tc5_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, - {"tx_packet_tc6_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, - {"tx_packet_tc7_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, - {"pkt_curr_buf_tc0_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, - {"pkt_curr_buf_tc1_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, - {"pkt_curr_buf_tc2_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, - {"pkt_curr_buf_tc3_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, - {"pkt_curr_buf_tc4_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, - {"pkt_curr_buf_tc5_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, - {"pkt_curr_buf_tc6_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, - {"pkt_curr_buf_tc7_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, - {"mb_uncopy_num", - HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, - {"lo_pri_unicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, - {"hi_pri_multicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, - {"lo_pri_multicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, - {"rx_oq_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, - {"tx_oq_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, - {"nic_l2_err_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, - {"roc_l2_err_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} -}; - static const struct hclge_comm_stats_str g_mac_stats_string[] = { {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, @@ -394,109 +229,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { }, }; -static int hclge_64_bit_update_stats(struct hclge_dev *hdev) -{ -#define HCLGE_64_BIT_CMD_NUM 5 -#define HCLGE_64_BIT_RTN_DATANUM 4 - u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); - struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; - __le64 *desc_data; - int i, k, n; - int ret; - - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); - ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get 64 bit pkt stats fail, status = %d.\n", ret); - return ret; - } - - for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { - if (unlikely(i == 0)) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_64_BIT_RTN_DATANUM - 1; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_64_BIT_RTN_DATANUM; - } - for (k = 0; k < n; k++) { - *data++ += le64_to_cpu(*desc_data); - desc_data++; - } - } - - return 0; -} - -static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) -{ - stats->pkt_curr_buf_cnt = 0; - stats->pkt_curr_buf_tc0_cnt = 0; - stats->pkt_curr_buf_tc1_cnt = 0; - stats->pkt_curr_buf_tc2_cnt = 0; - stats->pkt_curr_buf_tc3_cnt = 0; - stats->pkt_curr_buf_tc4_cnt = 0; - stats->pkt_curr_buf_tc5_cnt = 0; - stats->pkt_curr_buf_tc6_cnt = 0; - stats->pkt_curr_buf_tc7_cnt = 0; -} - -static int hclge_32_bit_update_stats(struct hclge_dev *hdev) -{ -#define HCLGE_32_BIT_CMD_NUM 8 -#define HCLGE_32_BIT_RTN_DATANUM 8 - - struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; - struct hclge_32_bit_stats *all_32_bit_stats; - __le32 *desc_data; - int i, k, n; - u64 *data; - int ret; - - all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; - data = (u64 
*)(&all_32_bit_stats->egu_tx_1588_pkt); - - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); - ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get 32 bit pkt stats fail, status = %d.\n", ret); - - return ret; - } - - hclge_reset_partial_32bit_counter(all_32_bit_stats); - for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { - if (unlikely(i == 0)) { - __le16 *desc_data_16bit; - - all_32_bit_stats->igu_rx_err_pkt += - le32_to_cpu(desc[i].data[0]); - - desc_data_16bit = (__le16 *)&desc[i].data[1]; - all_32_bit_stats->igu_rx_no_eof_pkt += - le16_to_cpu(*desc_data_16bit); - - desc_data_16bit++; - all_32_bit_stats->igu_rx_no_sof_pkt += - le16_to_cpu(*desc_data_16bit); - - desc_data = &desc[i].data[2]; - n = HCLGE_32_BIT_RTN_DATANUM - 4; - } else { - desc_data = (__le32 *)&desc[i]; - n = HCLGE_32_BIT_RTN_DATANUM; - } - for (k = 0; k < n; k++) { - *data++ += le32_to_cpu(*desc_data); - desc_data++; - } - } - - return 0; -} - static int hclge_mac_update_stats(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 @@ -623,7 +355,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -631,7 +363,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -675,14 +407,8 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, struct net_device_stats *net_stats) { net_stats->tx_dropped = 0; - net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; - net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; - net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; - net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; - net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; @@ -717,12 +443,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev) dev_err(&hdev->pdev->dev, "Update MAC stats fail, status = %d.\n", status); - status = hclge_32_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 32 bit stats fail, status = %d.\n", - status); - hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); } @@ -743,18 +463,6 @@ static void hclge_update_stats(struct hnae3_handle *handle, "Update MAC stats fail, status = %d.\n", status); - status = hclge_32_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 32 bit stats fail, status = %d.\n", - status); - - status = hclge_64_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 64 bit stats fail, status = %d.\n", - status); - status = hclge_tqps_update_stats(handle); if (status) dev_err(&hdev->pdev->dev, @@ -768,7 +476,10 @@ static void hclge_update_stats(struct hnae3_handle 
*handle, static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) { -#define HCLGE_LOOPBACK_TEST_FLAGS 0x7 +#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\ + HNAE3_SUPPORT_PHY_LOOPBACK |\ + HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\ + HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -782,19 +493,19 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) if (stringset == ETH_SS_TEST) { /* clear loopback bit flags at first */ handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); - if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || + if (hdev->pdev->revision >= 0x21 || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { count += 1; - handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; + handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; } - count++; - handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK; + count += 2; + handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; } else if (stringset == ETH_SS_STATS) { count = ARRAY_SIZE(g_mac_stats_string) + - ARRAY_SIZE(g_all_32bit_stats_string) + - ARRAY_SIZE(g_all_64bit_stats_string) + hclge_tqps_get_sset_count(handle, stringset); } @@ -814,33 +525,29 @@ static void hclge_get_strings(struct hnae3_handle *handle, g_mac_stats_string, size, p); - size = ARRAY_SIZE(g_all_32bit_stats_string); - p = hclge_comm_get_strings(stringset, - g_all_32bit_stats_string, - size, - p); - size = ARRAY_SIZE(g_all_64bit_stats_string); - p = hclge_comm_get_strings(stringset, - g_all_64bit_stats_string, - size, - p); p = hclge_tqps_get_strings(handle, p); } else if (stringset == ETH_SS_TEST) { - if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { + if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_LOOP_APP], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], + hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { + if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], + hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], + hns3_nic_test_strs[HNAE3_LOOP_PHY], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } @@ -857,14 +564,6 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) g_mac_stats_string, ARRAY_SIZE(g_mac_stats_string), data); - p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, - g_all_32bit_stats_string, - ARRAY_SIZE(g_all_32bit_stats_string), - p); - p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, - g_all_64bit_stats_string, - ARRAY_SIZE(g_all_64bit_stats_string), - p); p = hclge_tqps_get_stats(handle, p); } @@ -1079,6 +778,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), HCLGE_CFG_SPEED_ABILITY_M, HCLGE_CFG_SPEED_ABILITY_S); + cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_UMV_TBL_SPACE_M, + HCLGE_CFG_UMV_TBL_SPACE_S); + if (!cfg->umv_space) + cfg->umv_space = 
HCLGE_DEFAULT_UMV_SPACE_PER_PF; } /* hclge_get_cfg: query the static parameter from flash @@ -1157,6 +861,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; + hdev->wanted_umv_size = cfg.umv_space; ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); if (ret) { @@ -1657,11 +1362,13 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, static int hclge_rx_buffer_calc(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { - u32 rx_all = hdev->pkt_buf_size; +#define HCLGE_BUF_SIZE_UNIT 128 + u32 rx_all = hdev->pkt_buf_size, aligned_mps; int no_pfc_priv_num, pfc_priv_num; struct hclge_priv_buf *priv; int i; + aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); rx_all -= hclge_get_tx_buff_alloced(buf_alloc); /* When DCB is not supported, rx private @@ -1680,13 +1387,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, if (hdev->hw_tc_map & BIT(i)) { priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = hdev->mps; - priv->wl.high = priv->wl.low + hdev->mps; + priv->wl.low = aligned_mps; + priv->wl.high = priv->wl.low + aligned_mps; priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; } else { priv->wl.low = 0; - priv->wl.high = 2 * hdev->mps; + priv->wl.high = 2 * aligned_mps; priv->buf_size = priv->wl.high; } } else { @@ -1718,11 +1425,11 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, if (hdev->tm_info.hw_pfc_map & BIT(i)) { priv->wl.low = 128; - priv->wl.high = priv->wl.low + hdev->mps; + priv->wl.high = priv->wl.low + aligned_mps; priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; } else { priv->wl.low = 0; - priv->wl.high = hdev->mps; + priv->wl.high = aligned_mps; priv->buf_size = priv->wl.high; } } @@ -2066,19 +1773,17 @@ static int hclge_init_msi(struct hclge_dev *hdev) return 0; } -static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) +static u8 hclge_check_speed_dup(u8 duplex, int speed) { - struct hclge_mac *mac = &hdev->hw.mac; - if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) - mac->duplex = (u8)duplex; - else - mac->duplex = HCLGE_MAC_FULL; + if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) + duplex = HCLGE_MAC_FULL; - mac->speed = speed; + return duplex; } -int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, + u8 duplex) { struct hclge_config_mac_speed_dup_cmd *req; struct hclge_desc desc; @@ -2138,7 +1843,23 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) return ret; } - hclge_check_speed_dup(hdev, duplex, speed); + return 0; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +{ + int ret; + + duplex = hclge_check_speed_dup(duplex, speed); + if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) + return 0; + + ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); + if (ret) + return ret; + + hdev->hw.mac.speed = speed; + hdev->hw.mac.duplex = duplex; return 0; } @@ -2224,42 +1945,17 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) return hdev->hw.mac.autoneg; } -static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, - bool mask_vlan, - u8 *mac_mask) -{ - struct hclge_mac_vlan_mask_entry_cmd *req; - struct hclge_desc desc; - int status; - - req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); 
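
The speed/duplex rework a little above splits the old hclge_cfg_mac_speed_dup() into a pure duplex check, a hardware-write helper (hclge_cfg_mac_speed_dup_hw) and a caching wrapper, so the firmware command is skipped when the requested speed and duplex already match the cached values and the cache is only refreshed once the hardware accepts the change. A minimal sketch of that cache-then-configure pattern follows; the struct and the hw_write callback are simplified stand-ins, not the driver's real types.

    #include <linux/types.h>

    struct mac_cache {              /* simplified stand-in for the MAC state */
            int speed;
            u8 duplex;
    };

    static int cfg_speed_dup(struct mac_cache *mac, int speed, u8 duplex,
                             int (*hw_write)(int speed, u8 duplex))
    {
            int ret;

            if (mac->speed == speed && mac->duplex == duplex)
                    return 0;       /* nothing changed, skip the firmware command */

            ret = hw_write(speed, duplex);
            if (ret)
                    return ret;     /* keep the old cache if the write fails */

            mac->speed = speed;     /* cache is updated only after HW success */
            mac->duplex = duplex;
            return 0;
    }
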
- - hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, - mask_vlan ? 1 : 0); - ether_addr_copy(req->mac_mask, mac_mask); - - status = hclge_cmd_send(&hdev->hw, &desc, 1); - if (status) - dev_err(&hdev->pdev->dev, - "Config mac_vlan_mask failed for cmd_send, ret =%d\n", - status); - - return status; -} - static int hclge_mac_init(struct hclge_dev *hdev) { struct hnae3_handle *handle = &hdev->vport[0].nic; struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; - u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - struct hclge_vport *vport; int mtu; int ret; - int i; - ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); + hdev->hw.mac.duplex = HCLGE_MAC_FULL; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, + hdev->hw.mac.duplex); if (ret) { dev_err(&hdev->pdev->dev, "Config mac speed dup fail ret=%d\n", ret); @@ -2268,39 +1964,6 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; - /* Initialize the MTA table work mode */ - hdev->enable_mta = true; - hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; - - ret = hclge_set_mta_filter_mode(hdev, - hdev->mta_mac_sel_type, - hdev->enable_mta); - if (ret) { - dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", - ret); - return ret; - } - - for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - vport->accept_mta_mc = false; - - memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow)); - ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false); - if (ret) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", ret); - return ret; - } - } - - ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); - if (ret) { - dev_err(&hdev->pdev->dev, - "set default mac_vlan_mask fail ret=%d\n", ret); - return ret; - } - if (netdev) mtu = netdev->mtu; else @@ -2360,10 +2023,13 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev) int mac_state; int link_stat; + if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) + return 0; + mac_state = hclge_get_mac_link_status(hdev); if (hdev->hw.mac.phydev) { - if (!genphy_read_status(hdev->hw.mac.phydev)) + if (hdev->hw.mac.phydev->state == PHY_RUNNING) link_stat = mac_state & hdev->hw.mac.phydev->link; else @@ -2415,13 +2081,11 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev) return ret; } - if ((mac.speed != speed) || (mac.duplex != duplex)) { - ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac speed/duplex config failed %d\n", ret); - return ret; - } + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config failed %d\n", ret); + return ret; } return 0; @@ -2520,6 +2184,8 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, case HCLGE_VECTOR0_EVENT_MBX: hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); break; + default: + break; } } @@ -2793,8 +2459,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) static void hclge_reset(struct hclge_dev *hdev) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hnae3_handle *handle; + /* Initialize ae_dev reset status as well, in case enet layer wants to + * know if device is undergoing reset + */ + ae_dev->reset_type = hdev->reset_type; /* perform reset of the stack & ae device for a client */ handle = &hdev->vport[0].nic; rtnl_lock(); @@ -2815,14 +2486,21 @@ static void hclge_reset(struct hclge_dev *hdev) hclge_notify_client(hdev, 
HNAE3_UP_CLIENT); handle->last_reset_time = jiffies; rtnl_unlock(); + ae_dev->reset_type = HNAE3_NONE_RESET; } -static void hclge_reset_event(struct hnae3_handle *handle) +static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclge_dev *hdev = ae_dev->priv; - /* check if this is a new reset request and we are not here just because + /* We might end up getting called broadly because of 2 below cases: + * 1. Recoverable error was conveyed through APEI and only way to bring + * normalcy is to reset. + * 2. A new reset request from the stack due to timeout + * + * For the first case,error event might not have ae handle available. + * check if this is a new reset request and we are not here just because * last reset attempt did not succeed and watchdog hit us again. We will * know this if last reset request did not occur very recently (watchdog * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) @@ -2831,6 +2509,9 @@ static void hclge_reset_event(struct hnae3_handle *handle) * want to make sure we throttle the reset request. Therefore, we will * not allow it again before 3*HZ times. */ + if (!handle) + handle = &hdev->vport[0].nic; + if (time_before(jiffies, (handle->last_reset_time + 3 * HZ))) return; else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) @@ -3102,6 +2783,22 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, return ret; } +static void hclge_get_rss_type(struct hclge_vport *vport) +{ + if (vport->rss_tuple_sets.ipv4_tcp_en || + vport->rss_tuple_sets.ipv4_udp_en || + vport->rss_tuple_sets.ipv4_sctp_en || + vport->rss_tuple_sets.ipv6_tcp_en || + vport->rss_tuple_sets.ipv6_udp_en || + vport->rss_tuple_sets.ipv6_sctp_en) + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; + else if (vport->rss_tuple_sets.ipv4_fragment_en || + vport->rss_tuple_sets.ipv6_fragment_en) + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; + else + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; +} + static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) { struct hclge_rss_input_tuple_cmd *req; @@ -3121,6 +2818,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; + hclge_get_rss_type(&hdev->vport[0]); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, @@ -3135,8 +2833,19 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, int i; /* Get hash algorithm */ - if (hfunc) - *hfunc = vport->rss_algo; + if (hfunc) { + switch (vport->rss_algo) { + case HCLGE_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGE_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } + } /* Get the RSS Key required by the user */ if (key) @@ -3160,12 +2869,20 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, /* Set the RSS Hash Key if specififed by the user */ if (key) { - - if (hfunc == ETH_RSS_HASH_TOP || - hfunc == ETH_RSS_HASH_NO_CHANGE) + switch (hfunc) { + case ETH_RSS_HASH_TOP: hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; - else + break; + case ETH_RSS_HASH_XOR: + hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; + break; + case 
ETH_RSS_HASH_NO_CHANGE: + hash_algo = vport->rss_algo; + break; + default: return -EINVAL; + } + ret = hclge_set_rss_algo_key(hdev, hash_algo, key); if (ret) return ret; @@ -3283,6 +3000,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + hclge_get_rss_type(vport); return 0; } @@ -3608,6 +3326,1281 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, hclge_cmd_set_promisc_mode(hdev, ¶m); } +static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) +{ + struct hclge_get_fd_mode_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); + + req = (struct hclge_get_fd_mode_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); + return ret; + } + + *fd_mode = req->mode; + + return ret; +} + +static int hclge_get_fd_allocation(struct hclge_dev *hdev, + u32 *stage1_entry_num, + u32 *stage2_entry_num, + u16 *stage1_counter_num, + u16 *stage2_counter_num) +{ + struct hclge_get_fd_allocation_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); + + req = (struct hclge_get_fd_allocation_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", + ret); + return ret; + } + + *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); + *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); + *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); + *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); + + return ret; +} + +static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) +{ + struct hclge_set_fd_key_config_cmd *req; + struct hclge_fd_key_cfg *stage; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); + + req = (struct hclge_set_fd_key_config_cmd *)desc.data; + stage = &hdev->fd_cfg.key_cfg[stage_num]; + req->stage = stage_num; + req->key_select = stage->key_sel; + req->inner_sipv6_word_en = stage->inner_sipv6_word_en; + req->inner_dipv6_word_en = stage->inner_dipv6_word_en; + req->outer_sipv6_word_en = stage->outer_sipv6_word_en; + req->outer_dipv6_word_en = stage->outer_dipv6_word_en; + req->tuple_mask = cpu_to_le32(~stage->tuple_active); + req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); + + return ret; +} + +static int hclge_init_fd_config(struct hclge_dev *hdev) +{ +#define LOW_2_WORDS 0x03 + struct hclge_fd_key_cfg *key_cfg; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return 0; + + ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); + if (ret) + return ret; + + switch (hdev->fd_cfg.fd_mode) { + case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; + break; + case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; + break; + default: + dev_err(&hdev->pdev->dev, + "Unsupported flow director mode %d\n", + hdev->fd_cfg.fd_mode); + return -EOPNOTSUPP; + } + + hdev->fd_cfg.fd_en = true; + hdev->fd_cfg.proto_support = + TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | + 
UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; + key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; + key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE, + key_cfg->inner_sipv6_word_en = LOW_2_WORDS; + key_cfg->inner_dipv6_word_en = LOW_2_WORDS; + key_cfg->outer_sipv6_word_en = 0; + key_cfg->outer_dipv6_word_en = 0; + + key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | + BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | + BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + /* If use max 400bit key, we can support tuples for ether type */ + if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { + hdev->fd_cfg.proto_support |= ETHER_FLOW; + key_cfg->tuple_active |= + BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); + } + + /* roce_type is used to filter roce frames + * dst_vport is used to specify the rule + */ + key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); + + ret = hclge_get_fd_allocation(hdev, + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); + if (ret) + return ret; + + return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); +} + +static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, + int loc, u8 *key, bool is_add) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = stage; + req1->xy_sel = sel_x ? 1 : 0; + hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); + req1->index = cpu_to_le32(loc); + req1->entry_vld = sel_x ? 
is_add : 0; + + if (key) { + memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); + memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], + sizeof(req2->tcam_data)); + memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + + sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); + } + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + dev_err(&hdev->pdev->dev, + "config tcam key fail, ret=%d\n", + ret); + + return ret; +} + +static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, + struct hclge_fd_ad_data *action) +{ + struct hclge_fd_ad_config_cmd *req; + struct hclge_desc desc; + u64 ad_data = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); + + req = (struct hclge_fd_ad_config_cmd *)desc.data; + req->index = cpu_to_le32(loc); + req->stage = stage; + + hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, + action->write_rule_id_to_bd); + hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, + action->rule_id); + ad_data <<= 32; + hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); + hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, + action->forward_to_direct_queue); + hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, + action->queue_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); + hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, + HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); + hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, + action->counter_id); + + req->ad_data = cpu_to_le64(ad_data); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); + + return ret; +} + +static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, + struct hclge_fd_rule *rule) +{ + u16 tmp_x_s, tmp_y_s; + u32 tmp_x_l, tmp_y_l; + int i; + + if (rule->unused_tuple & tuple_bit) + return true; + + switch (tuple_bit) { + case 0: + return false; + case BIT(INNER_DST_MAC): + for (i = 0; i < 6; i++) { + calc_x(key_x[5 - i], rule->tuples.dst_mac[i], + rule->tuples_mask.dst_mac[i]); + calc_y(key_y[5 - i], rule->tuples.dst_mac[i], + rule->tuples_mask.dst_mac[i]); + } + + return true; + case BIT(INNER_SRC_MAC): + for (i = 0; i < 6; i++) { + calc_x(key_x[5 - i], rule->tuples.src_mac[i], + rule->tuples.src_mac[i]); + calc_y(key_y[5 - i], rule->tuples.src_mac[i], + rule->tuples.src_mac[i]); + } + + return true; + case BIT(INNER_VLAN_TAG_FST): + calc_x(tmp_x_s, rule->tuples.vlan_tag1, + rule->tuples_mask.vlan_tag1); + calc_y(tmp_y_s, rule->tuples.vlan_tag1, + rule->tuples_mask.vlan_tag1); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_ETH_TYPE): + calc_x(tmp_x_s, rule->tuples.ether_proto, + rule->tuples_mask.ether_proto); + calc_y(tmp_y_s, rule->tuples.ether_proto, + rule->tuples_mask.ether_proto); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_IP_TOS): + calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); + calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); + + return true; + case BIT(INNER_IP_PROTO): + calc_x(*key_x, rule->tuples.ip_proto, + rule->tuples_mask.ip_proto); + calc_y(*key_y, rule->tuples.ip_proto, + rule->tuples_mask.ip_proto); + + return true; + case BIT(INNER_SRC_IP): + calc_x(tmp_x_l, rule->tuples.src_ip[3], + rule->tuples_mask.src_ip[3]); + 
calc_y(tmp_y_l, rule->tuples.src_ip[3], + rule->tuples_mask.src_ip[3]); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + case BIT(INNER_DST_IP): + calc_x(tmp_x_l, rule->tuples.dst_ip[3], + rule->tuples_mask.dst_ip[3]); + calc_y(tmp_y_l, rule->tuples.dst_ip[3], + rule->tuples_mask.dst_ip[3]); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + case BIT(INNER_SRC_PORT): + calc_x(tmp_x_s, rule->tuples.src_port, + rule->tuples_mask.src_port); + calc_y(tmp_y_s, rule->tuples.src_port, + rule->tuples_mask.src_port); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_DST_PORT): + calc_x(tmp_x_s, rule->tuples.dst_port, + rule->tuples_mask.dst_port); + calc_y(tmp_y_s, rule->tuples.dst_port, + rule->tuples_mask.dst_port); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + default: + return false; + } +} + +static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, + u8 vf_id, u8 network_port_id) +{ + u32 port_number = 0; + + if (port_type == HOST_PORT) { + hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, + pf_id); + hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, + vf_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); + } else { + hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, + HCLGE_NETWORK_PORT_ID_S, network_port_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); + } + + return port_number; +} + +static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, + __le32 *key_x, __le32 *key_y, + struct hclge_fd_rule *rule) +{ + u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; + u8 cur_pos = 0, tuple_size, shift_bits; + int i; + + for (i = 0; i < MAX_META_DATA; i++) { + tuple_size = meta_data_key_info[i].key_length; + tuple_bit = key_cfg->meta_data_active & BIT(i); + + switch (tuple_bit) { + case BIT(ROCE_TYPE): + hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); + cur_pos += tuple_size; + break; + case BIT(DST_VPORT): + port_number = hclge_get_port_number(HOST_PORT, 0, + rule->vf_id, 0); + hnae3_set_field(meta_data, + GENMASK(cur_pos + tuple_size, cur_pos), + cur_pos, port_number); + cur_pos += tuple_size; + break; + default: + break; + } + } + + calc_x(tmp_x, meta_data, 0xFFFFFFFF); + calc_y(tmp_y, meta_data, 0xFFFFFFFF); + shift_bits = sizeof(meta_data) * 8 - cur_pos; + + *key_x = cpu_to_le32(tmp_x << shift_bits); + *key_y = cpu_to_le32(tmp_y << shift_bits); +} + +/* A complete key is combined with meta data key and tuple key. + * Meta data key is stored at the MSB region, and tuple key is stored at + * the LSB region, unused bits will be filled 0. 
+ */ +static int hclge_config_key(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; + u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; + u8 *cur_key_x, *cur_key_y; + int i, ret, tuple_size; + u8 meta_data_region; + + memset(key_x, 0, sizeof(key_x)); + memset(key_y, 0, sizeof(key_y)); + cur_key_x = key_x; + cur_key_y = key_y; + + for (i = 0 ; i < MAX_TUPLE; i++) { + bool tuple_valid; + u32 check_tuple; + + tuple_size = tuple_key_info[i].key_length / 8; + check_tuple = key_cfg->tuple_active & BIT(i); + + tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, + cur_key_y, rule); + if (tuple_valid) { + cur_key_x += tuple_size; + cur_key_y += tuple_size; + } + } + + meta_data_region = hdev->fd_cfg.max_key_length / 8 - + MAX_META_DATA_LENGTH / 8; + + hclge_fd_convert_meta_data(key_cfg, + (__le32 *)(key_x + meta_data_region), + (__le32 *)(key_y + meta_data_region), + rule); + + ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, + true); + if (ret) { + dev_err(&hdev->pdev->dev, + "fd key_y config fail, loc=%d, ret=%d\n", + rule->queue_id, ret); + return ret; + } + + ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, + true); + if (ret) + dev_err(&hdev->pdev->dev, + "fd key_x config fail, loc=%d, ret=%d\n", + rule->queue_id, ret); + return ret; +} + +static int hclge_config_action(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_ad_data ad_data; + + ad_data.ad_id = rule->location; + + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + ad_data.drop_packet = true; + ad_data.forward_to_direct_queue = false; + ad_data.queue_id = 0; + } else { + ad_data.drop_packet = false; + ad_data.forward_to_direct_queue = true; + ad_data.queue_id = rule->queue_id; + } + + ad_data.use_counter = false; + ad_data.counter_id = 0; + + ad_data.use_next_stage = false; + ad_data.next_input_key = 0; + + ad_data.write_rule_id_to_bd = true; + ad_data.rule_id = rule->location; + + return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); +} + +static int hclge_fd_check_spec(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, u32 *unused) +{ + struct ethtool_tcpip4_spec *tcp_ip4_spec; + struct ethtool_usrip4_spec *usr_ip4_spec; + struct ethtool_tcpip6_spec *tcp_ip6_spec; + struct ethtool_usrip6_spec *usr_ip6_spec; + struct ethhdr *ether_spec; + + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + return -EINVAL; + + if (!(fs->flow_type & hdev->fd_cfg.proto_support)) + return -EOPNOTSUPP; + + if ((fs->flow_type & FLOW_EXT) && + (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { + dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); + return -EOPNOTSUPP; + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); + + if (!tcp_ip4_spec->ip4src) + *unused |= BIT(INNER_SRC_IP); + + if (!tcp_ip4_spec->ip4dst) + *unused |= BIT(INNER_DST_IP); + + if (!tcp_ip4_spec->psrc) + *unused |= BIT(INNER_SRC_PORT); + + if (!tcp_ip4_spec->pdst) + *unused |= BIT(INNER_DST_PORT); + + if (!tcp_ip4_spec->tos) + *unused |= BIT(INNER_IP_TOS); + + break; + case IP_USER_FLOW: + usr_ip4_spec = &fs->h_u.usr_ip4_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + if (!usr_ip4_spec->ip4src) + *unused |= BIT(INNER_SRC_IP); + + if 
(!usr_ip4_spec->ip4dst) + *unused |= BIT(INNER_DST_IP); + + if (!usr_ip4_spec->tos) + *unused |= BIT(INNER_IP_TOS); + + if (!usr_ip4_spec->proto) + *unused |= BIT(INNER_IP_PROTO); + + if (usr_ip4_spec->l4_4_bytes) + return -EOPNOTSUPP; + + if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) + return -EOPNOTSUPP; + + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_IP_TOS); + + if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && + !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) + *unused |= BIT(INNER_SRC_IP); + + if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && + !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) + *unused |= BIT(INNER_DST_IP); + + if (!tcp_ip6_spec->psrc) + *unused |= BIT(INNER_SRC_PORT); + + if (!tcp_ip6_spec->pdst) + *unused |= BIT(INNER_DST_PORT); + + if (tcp_ip6_spec->tclass) + return -EOPNOTSUPP; + + break; + case IPV6_USER_FLOW: + usr_ip6_spec = &fs->h_u.usr_ip6_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | + BIT(INNER_DST_PORT); + + if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && + !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) + *unused |= BIT(INNER_SRC_IP); + + if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && + !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) + *unused |= BIT(INNER_DST_IP); + + if (!usr_ip6_spec->l4_proto) + *unused |= BIT(INNER_IP_PROTO); + + if (usr_ip6_spec->tclass) + return -EOPNOTSUPP; + + if (usr_ip6_spec->l4_4_bytes) + return -EOPNOTSUPP; + + break; + case ETHER_FLOW: + ether_spec = &fs->h_u.ether_spec; + *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | + BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); + + if (is_zero_ether_addr(ether_spec->h_source)) + *unused |= BIT(INNER_SRC_MAC); + + if (is_zero_ether_addr(ether_spec->h_dest)) + *unused |= BIT(INNER_DST_MAC); + + if (!ether_spec->h_proto) + *unused |= BIT(INNER_ETH_TYPE); + + break; + default: + return -EOPNOTSUPP; + } + + if ((fs->flow_type & FLOW_EXT)) { + if (fs->h_ext.vlan_etype) + return -EOPNOTSUPP; + if (!fs->h_ext.vlan_tci) + *unused |= BIT(INNER_VLAN_TAG_FST); + + if (fs->m_ext.vlan_tci) { + if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; + } + } else { + *unused |= BIT(INNER_VLAN_TAG_FST); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) + return -EOPNOTSUPP; + + if (is_zero_ether_addr(fs->h_ext.h_dest)) + *unused |= BIT(INNER_DST_MAC); + else + *unused &= ~(BIT(INNER_DST_MAC)); + } + + return 0; +} + +static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location >= location) + break; + } + + return rule && rule->location == location; +} + +static int hclge_fd_update_rule_list(struct hclge_dev *hdev, + struct hclge_fd_rule *new_rule, + u16 location, + bool is_add) +{ + struct hclge_fd_rule *rule = NULL, *parent = NULL; + struct hlist_node *node2; + + if (is_add && !new_rule) + return -EINVAL; + + hlist_for_each_entry_safe(rule, node2, + &hdev->fd_rule_list, rule_node) { + if (rule->location >= location) + break; + parent = rule; + } + + if (rule && rule->location == location) { + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; + + if 
(!is_add) + return 0; + + } else if (!is_add) { + dev_err(&hdev->pdev->dev, + "delete fail, rule %d is inexistent\n", + location); + return -EINVAL; + } + + INIT_HLIST_NODE(&new_rule->rule_node); + + if (parent) + hlist_add_behind(&new_rule->rule_node, &parent->rule_node); + else + hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); + + hdev->hclge_fd_rule_num++; + + return 0; +} + +static int hclge_fd_get_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + + switch (flow_type) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + rule->tuples.src_ip[3] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); + rule->tuples_mask.src_ip[3] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); + + rule->tuples.dst_ip[3] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[3] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); + rule->tuples_mask.src_port = + be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); + rule->tuples_mask.dst_port = + be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); + + rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; + + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case IP_USER_FLOW: + rule->tuples.src_ip[3] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); + rule->tuples_mask.src_ip[3] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); + + rule->tuples.dst_ip[3] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[3] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); + + rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; + + rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; + + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + be32_to_cpu_array(rule->tuples.src_ip, + fs->h_u.tcp_ip6_spec.ip6src, 4); + be32_to_cpu_array(rule->tuples_mask.src_ip, + fs->m_u.tcp_ip6_spec.ip6src, 4); + + be32_to_cpu_array(rule->tuples.dst_ip, + fs->h_u.tcp_ip6_spec.ip6dst, 4); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + fs->m_u.tcp_ip6_spec.ip6dst, 4); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); + rule->tuples_mask.src_port = + be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); + rule->tuples_mask.dst_port = + be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case IPV6_USER_FLOW: + be32_to_cpu_array(rule->tuples.src_ip, + fs->h_u.usr_ip6_spec.ip6src, 4); + be32_to_cpu_array(rule->tuples_mask.src_ip, + fs->m_u.usr_ip6_spec.ip6src, 4); + + be32_to_cpu_array(rule->tuples.dst_ip, + fs->h_u.usr_ip6_spec.ip6dst, 4); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + fs->m_u.usr_ip6_spec.ip6dst, 4); + + rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case ETHER_FLOW: + ether_addr_copy(rule->tuples.src_mac, + fs->h_u.ether_spec.h_source); + ether_addr_copy(rule->tuples_mask.src_mac, + fs->m_u.ether_spec.h_source); 
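
hclge_fd_update_rule_list() above keeps the flow director rules in an hlist sorted by location: the walk stops at the first entry whose location is not below the target, any existing entry at exactly that location is unlinked and freed, and the new rule is then linked behind its predecessor or at the list head, with hclge_fd_rule_num tracking the count. A rough, illustrative sketch of that ordered-insert bookkeeping is below; struct fd_rule and fd_rule_insert_sorted() are hypothetical simplifications, not driver symbols.

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct fd_rule {
            struct hlist_node node;
            u16 location;
    };

    /* Insert new_rule keeping the list sorted by location; an old entry
     * at the same location is replaced.
     */
    static void fd_rule_insert_sorted(struct hlist_head *head,
                                      struct fd_rule *new_rule)
    {
            struct fd_rule *rule, *parent = NULL;
            struct hlist_node *tmp;

            hlist_for_each_entry_safe(rule, tmp, head, node) {
                    if (rule->location >= new_rule->location)
                            break;
                    parent = rule;
            }

            if (rule && rule->location == new_rule->location) {
                    hlist_del(&rule->node);
                    kfree(rule);
            }

            if (parent)
                    hlist_add_behind(&new_rule->node, &parent->node);
            else
                    hlist_add_head(&new_rule->node, head);
    }
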
+ + ether_addr_copy(rule->tuples.dst_mac, + fs->h_u.ether_spec.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, + fs->m_u.ether_spec.h_dest); + + rule->tuples.ether_proto = + be16_to_cpu(fs->h_u.ether_spec.h_proto); + rule->tuples_mask.ether_proto = + be16_to_cpu(fs->m_u.ether_spec.h_proto); + + break; + default: + return -EOPNOTSUPP; + } + + switch (flow_type) { + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_SCTP; + rule->tuples_mask.ip_proto = 0xFF; + break; + case TCP_V4_FLOW: + case TCP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_TCP; + rule->tuples_mask.ip_proto = 0xFF; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_UDP; + rule->tuples_mask.ip_proto = 0xFF; + break; + default: + break; + } + + if ((fs->flow_type & FLOW_EXT)) { + rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); + rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); + } + + return 0; +} + +static int hclge_add_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u16 dst_vport_id = 0, q_index = 0; + struct ethtool_rx_flow_spec *fs; + struct hclge_fd_rule *rule; + u32 unused = 0; + u8 action; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + if (!hdev->fd_cfg.fd_en) { + dev_warn(&hdev->pdev->dev, + "Please enable flow director first\n"); + return -EOPNOTSUPP; + } + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + ret = hclge_fd_check_spec(hdev, fs, &unused); + if (ret) { + dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); + return ret; + } + + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + action = HCLGE_FD_ACTION_DROP_PACKET; + } else { + u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + u16 tqps; + + dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; + tqps = vf ? 
hdev->vport[vf].alloc_tqps : vport->alloc_tqps; + + if (ring >= tqps) { + dev_err(&hdev->pdev->dev, + "Error: queue id (%d) > max tqp num (%d)\n", + ring, tqps - 1); + return -EINVAL; + } + + if (vf > hdev->num_req_vfs) { + dev_err(&hdev->pdev->dev, + "Error: vf id (%d) > max vf num (%d)\n", + vf, hdev->num_req_vfs); + return -EINVAL; + } + + action = HCLGE_FD_ACTION_ACCEPT_PACKET; + q_index = ring; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = hclge_fd_get_tuple(hdev, fs, rule); + if (ret) + goto free_rule; + + rule->flow_type = fs->flow_type; + + rule->location = fs->location; + rule->unused_tuple = unused; + rule->vf_id = dst_vport_id; + rule->queue_id = q_index; + rule->action = action; + + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + goto free_rule; + + ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + goto free_rule; + + ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true); + if (ret) + goto free_rule; + + return ret; + +free_rule: + kfree(rule); + return ret; +} + +static int hclge_del_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + return -EINVAL; + + if (!hclge_fd_rule_exist(hdev, fs->location)) { + dev_err(&hdev->pdev->dev, + "Delete fail, rule %d is inexistent\n", + fs->location); + return -ENOENT; + } + + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + fs->location, NULL, false); + if (ret) + return ret; + + return hclge_fd_update_rule_list(hdev, NULL, fs->location, + false); +} + +static void hclge_del_all_fd_entries(struct hnae3_handle *handle, + bool clear_list) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + + if (!hnae3_dev_fd_supported(hdev)) + return; + + if (clear_list) { + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, + rule_node) { + hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; + } + } else { + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, + rule_node) + hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + } +} + +static int hclge_restore_fd_entries(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (!ret) + ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); + + if (ret) { + dev_warn(&hdev->pdev->dev, + "Restore rule %d failed, remove it\n", + rule->location); + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; + } + } + return 0; +} + +static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + 
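
In hclge_add_fd_entry() above, the ethtool ring_cookie either requests a drop (RX_CLS_FLOW_DISC) or encodes the destination: the low 32 bits select the queue and the byte at ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF selects the function, with zero meaning the PF itself and a non-zero value indexing a VF vport (bounds-checked against num_req_vfs and the vport's tqp count). A small illustrative helper showing how such a cookie is composed is below; fd_ring_cookie() is a made-up name, while the ETHTOOL_* constant is the standard ethtool definition.

    #include <linux/ethtool.h>

    /* Build a ring_cookie that steers matching packets to 'queue' of the
     * function selected by 'vf' (0 = PF). Purely illustrative.
     */
    static u64 fd_ring_cookie(u8 vf, u32 queue)
    {
            return ((u64)vf << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | queue;
    }
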
cmd->rule_cnt = hdev->hclge_fd_rule_num; + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + + return 0; +} + +static int hclge_get_fd_rule_info(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_fd_rule *rule = NULL; + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + struct hlist_node *node2; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location >= fs->location) + break; + } + + if (!rule || fs->location != rule->location) + return -ENOENT; + + fs->flow_type = rule->flow_type; + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + fs->h_u.tcp_ip4_spec.ip4src = + cpu_to_be32(rule->tuples.src_ip[3]); + fs->m_u.tcp_ip4_spec.ip4src = + rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); + + fs->h_u.tcp_ip4_spec.ip4dst = + cpu_to_be32(rule->tuples.dst_ip[3]); + fs->m_u.tcp_ip4_spec.ip4dst = + rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); + + fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); + fs->m_u.tcp_ip4_spec.psrc = + rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); + fs->m_u.tcp_ip4_spec.pdst = + rule->unused_tuple & BIT(INNER_DST_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.dst_port); + + fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; + fs->m_u.tcp_ip4_spec.tos = + rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + break; + case IP_USER_FLOW: + fs->h_u.usr_ip4_spec.ip4src = + cpu_to_be32(rule->tuples.src_ip[3]); + fs->m_u.tcp_ip4_spec.ip4src = + rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); + + fs->h_u.usr_ip4_spec.ip4dst = + cpu_to_be32(rule->tuples.dst_ip[3]); + fs->m_u.usr_ip4_spec.ip4dst = + rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); + + fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; + fs->m_u.usr_ip4_spec.tos = + rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; + fs->m_u.usr_ip4_spec.proto = + rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; + + fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, + rule->tuples.src_ip, 4); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, + rule->tuples_mask.src_ip, 4); + + cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, + rule->tuples.dst_ip, 4); + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, + rule->tuples_mask.dst_ip, 4); + + fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); + fs->m_u.tcp_ip6_spec.psrc = + rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); + fs->m_u.tcp_ip6_spec.pdst = + rule->unused_tuple & BIT(INNER_DST_PORT) ? 
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port); + + break; + case IPV6_USER_FLOW: + cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, + rule->tuples.src_ip, 4); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, + rule->tuples_mask.src_ip, 4); + + cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, + rule->tuples.dst_ip, 4); + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, + rule->tuples_mask.dst_ip, 4); + + fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; + fs->m_u.usr_ip6_spec.l4_proto = + rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; + + break; + case ETHER_FLOW: + ether_addr_copy(fs->h_u.ether_spec.h_source, + rule->tuples.src_mac); + if (rule->unused_tuple & BIT(INNER_SRC_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_source); + else + ether_addr_copy(fs->m_u.ether_spec.h_source, + rule->tuples_mask.src_mac); + + ether_addr_copy(fs->h_u.ether_spec.h_dest, + rule->tuples.dst_mac); + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_dest); + else + ether_addr_copy(fs->m_u.ether_spec.h_dest, + rule->tuples_mask.dst_mac); + + fs->h_u.ether_spec.h_proto = + cpu_to_be16(rule->tuples.ether_proto); + fs->m_u.ether_spec.h_proto = + rule->unused_tuple & BIT(INNER_ETH_TYPE) ? + 0 : cpu_to_be16(rule->tuples_mask.ether_proto); + + break; + default: + return -EOPNOTSUPP; + } + + if (fs->flow_type & FLOW_EXT) { + fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); + fs->m_ext.vlan_tci = + rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? + cpu_to_be16(VLAN_VID_MASK) : + cpu_to_be16(rule->tuples_mask.vlan_tag1); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_dest); + else + ether_addr_copy(fs->m_u.ether_spec.h_dest, + rule->tuples_mask.dst_mac); + } + + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + fs->ring_cookie = RX_CLS_FLOW_DISC; + } else { + u64 vf_id; + + fs->ring_cookie = rule->queue_id; + vf_id = rule->vf_id; + vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fs->ring_cookie |= vf_id; + } + + return 0; +} + +static int hclge_get_all_rules(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node2; + int cnt = 0; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + + hlist_for_each_entry_safe(rule, node2, + &hdev->fd_rule_list, rule_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[cnt] = rule->location; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hdev->fd_cfg.fd_en = enable; + if (!enable) + hclge_del_all_fd_entries(handle, false); + else + hclge_restore_fd_entries(handle); +} + static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) { struct hclge_desc desc; @@ -3639,7 +4632,7 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) "mac enable fail, ret =%d.\n", ret); } -static int hclge_set_mac_loopback(struct 
hclge_dev *hdev, bool en) +static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) { struct hclge_config_mac_mode_cmd *req; struct hclge_desc desc; @@ -3659,6 +4652,8 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) /* 2 Then setup the loopback flag */ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); @@ -3673,22 +4668,37 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en) +static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) { #define HCLGE_SERDES_RETRY_MS 10 #define HCLGE_SERDES_RETRY_NUM 100 struct hclge_serdes_lb_cmd *req; struct hclge_desc desc; int ret, i = 0; + u8 loop_mode_b; - req = (struct hclge_serdes_lb_cmd *)&desc.data[0]; + req = (struct hclge_serdes_lb_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); + switch (loop_mode) { + case HNAE3_LOOP_SERIAL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + break; + case HNAE3_LOOP_PARALLEL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; + break; + default: + dev_err(&hdev->pdev->dev, + "unsupported serdes loopback mode %d\n", loop_mode); + return -ENOTSUPP; + } + if (en) { - req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; - req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + req->enable = loop_mode_b; + req->mask = loop_mode_b; } else { - req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + req->mask = loop_mode_b; } ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -3719,33 +4729,10 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en) return -EIO; } + hclge_cfg_mac_mode(hdev, en); return 0; } -static int hclge_set_loopback(struct hnae3_handle *handle, - enum hnae3_loop loop_mode, bool en) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - int ret; - - switch (loop_mode) { - case HNAE3_MAC_INTER_LOOP_MAC: - ret = hclge_set_mac_loopback(hdev, en); - break; - case HNAE3_MAC_INTER_LOOP_SERDES: - ret = hclge_set_serdes_loopback(hdev, en); - break; - default: - ret = -ENOTSUPP; - dev_err(&hdev->pdev->dev, - "loop_mode %d is not supported\n", loop_mode); - break; - } - - return ret; -} - static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, int stream_id, bool enable) { @@ -3766,6 +4753,37 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, return ret; } +static int hclge_set_loopback(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i, ret; + + switch (loop_mode) { + case HNAE3_LOOP_APP: + ret = hclge_set_app_loopback(hdev, en); + break; + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + ret = hclge_set_serdes_loopback(hdev, en, loop_mode); + break; + default: + ret = -ENOTSUPP; + dev_err(&hdev->pdev->dev, + "loop_mode %d is not supported\n", loop_mode); + break; + } + + for (i = 0; i < vport->alloc_tqps; i++) { + ret = hclge_tqp_enable(hdev, i, 0, en); + if (ret) + return ret; + } + + return 0; +} + static void hclge_reset_tqp_stats(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -3809,6 +4827,8 @@ static void 
hclge_ae_stop(struct hnae3_handle *handle) struct hclge_dev *hdev = vport->back; int i; + set_bit(HCLGE_STATE_DOWN, &hdev->state); + del_timer_sync(&hdev->service_timer); cancel_work_sync(&hdev->service_task); clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); @@ -3950,174 +4970,6 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); } -static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, - const u8 *addr) -{ - u16 high_val = addr[1] | (addr[0] << 8); - struct hclge_dev *hdev = vport->back; - u32 rsh = 4 - hdev->mta_mac_sel_type; - u16 ret_val = (high_val >> rsh) & 0xfff; - - return ret_val; -} - -static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, - enum hclge_mta_dmac_sel_type mta_mac_sel, - bool enable) -{ - struct hclge_mta_filter_mode_cmd *req; - struct hclge_desc desc; - int ret; - - req = (struct hclge_mta_filter_mode_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); - - hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, - enable); - hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, - HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Config mat filter mode failed for cmd_send, ret =%d.\n", - ret); - - return ret; -} - -int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, - u8 func_id, - bool enable) -{ - struct hclge_cfg_func_mta_filter_cmd *req; - struct hclge_desc desc; - int ret; - - req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); - - hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, - enable); - req->function_id = func_id; - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Config func_id enable failed for cmd_send, ret =%d.\n", - ret); - - return ret; -} - -static int hclge_set_mta_table_item(struct hclge_vport *vport, - u16 idx, - bool enable) -{ - struct hclge_dev *hdev = vport->back; - struct hclge_cfg_func_mta_item_cmd *req; - struct hclge_desc desc; - u16 item_idx = 0; - int ret; - - req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); - hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); - - hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, - HCLGE_CFG_MTA_ITEM_IDX_S, idx); - req->item_idx = cpu_to_le16(item_idx); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Config mta table item failed for cmd_send, ret =%d.\n", - ret); - return ret; - } - - if (enable) - set_bit(idx, vport->mta_shadow); - else - clear_bit(idx, vport->mta_shadow); - - return 0; -} - -static int hclge_update_mta_status(struct hnae3_handle *handle) -{ - unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; - struct hclge_vport *vport = hclge_get_vport(handle); - struct net_device *netdev = handle->kinfo.netdev; - struct netdev_hw_addr *ha; - u16 tbl_idx; - - memset(mta_status, 0, sizeof(mta_status)); - - /* update mta_status from mc addr list */ - netdev_for_each_mc_addr(ha, netdev) { - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr); - set_bit(tbl_idx, mta_status); - } - - return hclge_update_mta_status_common(vport, mta_status, - 0, HCLGE_MTA_TBL_SIZE, true); -} - -int hclge_update_mta_status_common(struct hclge_vport *vport, - unsigned long *status, - u16 idx, - u16 count, - bool 
update_filter) -{ - struct hclge_dev *hdev = vport->back; - u16 update_max = idx + count; - u16 check_max; - int ret = 0; - bool used; - u16 i; - - /* setup mta check range */ - if (update_filter) { - i = 0; - check_max = HCLGE_MTA_TBL_SIZE; - } else { - i = idx; - check_max = update_max; - } - - used = false; - /* check and update all mta item */ - for (; i < check_max; i++) { - /* ignore unused item */ - if (!test_bit(i, vport->mta_shadow)) - continue; - - /* if i in update range then update it */ - if (i >= idx && i < update_max) - if (!test_bit(i - idx, status)) - hclge_set_mta_table_item(vport, i, false); - - if (!used && test_bit(i, vport->mta_shadow)) - used = true; - } - - /* no longer use mta, disable it */ - if (vport->accept_mta_mc && update_filter && !used) { - ret = hclge_cfg_func_mta_filter(hdev, - vport->vport_id, - false); - if (ret) - dev_err(&hdev->pdev->dev, - "disable func mta filter fail ret=%d\n", - ret); - else - vport->accept_mta_mc = false; - } - - return ret; -} - static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, struct hclge_mac_vlan_tbl_entry_cmd *req) { @@ -4241,6 +5093,118 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, return cfg_status; } +static int hclge_init_umv_space(struct hclge_dev *hdev) +{ + u16 allocated_size = 0; + int ret; + + ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, + true); + if (ret) + return ret; + + if (allocated_size < hdev->wanted_umv_size) + dev_warn(&hdev->pdev->dev, + "Alloc umv space failed, want %d, get %d\n", + hdev->wanted_umv_size, allocated_size); + + mutex_init(&hdev->umv_mutex); + hdev->max_umv_size = allocated_size; + hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_req_vfs + 2); + + return 0; +} + +static int hclge_uninit_umv_space(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->max_umv_size > 0) { + ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, + false); + if (ret) + return ret; + hdev->max_umv_size = 0; + } + mutex_destroy(&hdev->umv_mutex); + + return 0; +} + +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size, bool is_alloc) +{ + struct hclge_umv_spc_alc_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_umv_spc_alc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); + hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); + req->space_size = cpu_to_le32(space_size); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "%s umv space failed for cmd_send, ret =%d\n", + is_alloc ? 
"allocate" : "free", ret); + return ret; + } + + if (is_alloc && allocated_size) + *allocated_size = le32_to_cpu(desc.data[1]); + + return 0; +} + +static void hclge_reset_umv_space(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->used_umv_num = 0; + } + + mutex_lock(&hdev->umv_mutex); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_req_vfs + 2); + mutex_unlock(&hdev->umv_mutex); +} + +static bool hclge_is_umv_space_full(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + bool is_full; + + mutex_lock(&hdev->umv_mutex); + is_full = (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size == 0); + mutex_unlock(&hdev->umv_mutex); + + return is_full; +} + +static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) +{ + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->umv_mutex); + if (is_free) { + if (vport->used_umv_num > hdev->priv_umv_size) + hdev->share_umv_size++; + vport->used_umv_num--; + } else { + if (vport->used_umv_num >= hdev->priv_umv_size) + hdev->share_umv_size--; + vport->used_umv_num++; + } + mutex_unlock(&hdev->umv_mutex); +} + static int hclge_add_uc_addr(struct hnae3_handle *handle, const unsigned char *addr) { @@ -4286,8 +5250,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, * is not allowed in the mac vlan table. */ ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); - if (ret == -ENOENT) - return hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (ret == -ENOENT) { + if (!hclge_is_umv_space_full(vport)) { + ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (!ret) + hclge_update_umv_space(vport, false); + return ret; + } + + dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", + hdev->priv_umv_size); + + return -ENOSPC; + } /* check if we just hit the duplicate */ if (!ret) @@ -4330,6 +5305,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); ret = hclge_remove_mac_vlan_tbl(vport, &req); + if (!ret) + hclge_update_umv_space(vport, true); return ret; } @@ -4348,7 +5325,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; - u16 tbl_idx; int status; /* mac addr check */ @@ -4362,7 +5338,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4378,25 +5354,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, status = hclge_add_mac_vlan_tbl(vport, &req, desc); } - /* If mc mac vlan table is full, use MTA table */ - if (status == -ENOSPC) { - if (!vport->accept_mta_mc) { - status = hclge_cfg_func_mta_filter(hdev, - vport->vport_id, - true); - if (status) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", - status); - return status; - } - vport->accept_mta_mc = true; - } - - /* Set MTA table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, 
tbl_idx, true); - } + if (status == -ENOSPC) + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); return status; } @@ -4429,7 +5388,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4598,8 +5557,20 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, return 0; } +static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, + int cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hdev->hw.mac.phydev) + return -EOPNOTSUPP; + + return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); +} + static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, - bool filter_en) + u8 fe_type, bool filter_en) { struct hclge_vlan_filter_ctrl_cmd *req; struct hclge_desc desc; @@ -4609,7 +5580,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req->vlan_type = vlan_type; - req->vlan_fe = filter_en; + req->vlan_fe = filter_en ? fe_type : 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -4621,13 +5592,34 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, #define HCLGE_FILTER_TYPE_VF 0 #define HCLGE_FILTER_TYPE_PORT 1 +#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) +#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) +#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) +#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) +#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) +#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ + | HCLGE_FILTER_FE_ROCE_EGRESS_B) +#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ + | HCLGE_FILTER_FE_ROCE_INGRESS_B) static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); + if (hdev->pdev->revision >= 0x21) { + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, enable); + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, enable); + } else { + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, enable); + } + if (enable) + handle->netdev_flags |= HNAE3_VLAN_FLTR; + else + handle->netdev_flags &= ~HNAE3_VLAN_FLTR; } static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, @@ -4686,9 +5678,17 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, "Add vf vlan filter fail, ret =%d.\n", req0->resp_code); } else { +#define HCLGE_VF_VLAN_DEL_NO_FOUND 1 if (!req0->resp_code) return 0; + if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { + dev_warn(&hdev->pdev->dev, + "vlan %d filter is not in vf vlan table\n", + vlan); + return 0; + } + dev_err(&hdev->pdev->dev, "Kill vf vlan filter fail, ret =%d.\n", req0->resp_code); @@ -4732,6 +5732,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, u16 vport_idx, vport_num = 0; int ret; + if (is_kill && !vlan_id) + return 0; + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, 
vlan_id, 0, proto); if (ret) { @@ -4761,7 +5764,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, return -EINVAL; } - for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) + for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) vport_num++; if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) @@ -4896,7 +5899,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); - tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; + tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); @@ -4913,18 +5916,30 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) { #define HCLGE_DEF_VLAN_TYPE 0x8100 - struct hnae3_handle *handle; + struct hnae3_handle *handle = &hdev->vport[0].nic; struct hclge_vport *vport; int ret; int i; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); - if (ret) - return ret; + if (hdev->pdev->revision >= 0x21) { + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, true); + if (ret) + return ret; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); - if (ret) - return ret; + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, true); + if (ret) + return ret; + } else { + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + true); + if (ret) + return ret; + } + + handle->netdev_flags |= HNAE3_VLAN_FLTR; hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; @@ -4970,7 +5985,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return ret; } - handle = &hdev->vport[0].nic; return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } @@ -5187,20 +6201,6 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle) return hdev->fw_version; } -static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, - u32 *flowctrl_adv) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; - - if (!phydev) - return; - - *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) | - (phydev->advertising & ADVERTISED_Asym_Pause); -} - static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) { struct phy_device *phydev = hdev->hw.mac.phydev; @@ -5208,13 +6208,7 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) if (!phydev) return; - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - - if (rx_en) - phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - - if (tx_en) - phydev->advertising ^= ADVERTISED_Asym_Pause; + phy_set_asym_pause(phydev, rx_en, tx_en); } static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) @@ -5256,11 +6250,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev) if (!phydev->link || !phydev->autoneg) return 0; - if (phydev->advertising & ADVERTISED_Pause) - local_advertising = ADVERTISE_PAUSE_CAP; - - if (phydev->advertising & ADVERTISED_Asym_Pause) - local_advertising |= ADVERTISE_PAUSE_ASYM; + local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising); if (phydev->pause) remote_advertising = LPA_PAUSE_CAP; @@ -5444,26 +6434,31 
@@ static int hclge_init_client_instance(struct hnae3_client *client, vport->nic.client = client; ret = client->ops->init_instance(&vport->nic); if (ret) - return ret; + goto clear_nic; ret = hclge_init_instance_hw(hdev); if (ret) { client->ops->uninit_instance(&vport->nic, 0); - return ret; + goto clear_nic; } + hnae3_set_client_init_flag(client, ae_dev, 1); + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; ret = hclge_init_roce_base_info(vport); if (ret) - return ret; + goto clear_roce; ret = rc->ops->init_instance(&vport->roce); if (ret) - return ret; + goto clear_roce; + + hnae3_set_client_init_flag(hdev->roce_client, + ae_dev, 1); } break; @@ -5473,7 +6468,9 @@ static int hclge_init_client_instance(struct hnae3_client *client, ret = client->ops->init_instance(&vport->nic); if (ret) - return ret; + goto clear_nic; + + hnae3_set_client_init_flag(client, ae_dev, 1); break; case HNAE3_CLIENT_ROCE: @@ -5485,16 +6482,31 @@ static int hclge_init_client_instance(struct hnae3_client *client, if (hdev->roce_client && hdev->nic_client) { ret = hclge_init_roce_base_info(vport); if (ret) - return ret; + goto clear_roce; ret = client->ops->init_instance(&vport->roce); if (ret) - return ret; + goto clear_roce; + + hnae3_set_client_init_flag(client, ae_dev, 1); } + + break; + default: + return -EINVAL; } } return 0; + +clear_nic: + hdev->nic_client = NULL; + vport->nic.client = NULL; + return ret; +clear_roce: + hdev->roce_client = NULL; + vport->roce.client = NULL; + return ret; } static void hclge_uninit_client_instance(struct hnae3_client *client, @@ -5514,7 +6526,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, } if (client->type == HNAE3_CLIENT_ROCE) return; - if (client->ops->uninit_instance) { + if (hdev->nic_client && client->ops->uninit_instance) { hclge_uninit_instance_hw(hdev); client->ops->uninit_instance(&vport->nic, 0); hdev->nic_client = NULL; @@ -5697,6 +6709,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) } } + ret = hclge_init_umv_space(hdev); + if (ret) { + dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); + goto err_msi_irq_uninit; + } + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -5734,6 +6752,20 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, + "fd table init fail, ret=%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_hw_error_set_state(hdev, true); + if (ret) { + dev_err(&pdev->dev, + "hw error interrupts enable failed, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + hclge_dcb_ops_set(hdev); timer_setup(&hdev->service_timer, hclge_service_timer, 0); @@ -5810,6 +6842,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + hclge_reset_umv_space(hdev); + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -5840,6 +6874,19 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, + "fd table init fail, ret=%d\n", ret); + return ret; + } + + /* Re-enable the TM hw error interrupts because + * they get disabled on core/global reset. 
+ */ + if (hclge_enable_tm_hw_error(hdev, true)) + dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n"); + dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -5856,10 +6903,13 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) if (mac->phydev) mdiobus_unregister(mac->mdio_bus); + hclge_uninit_umv_space(hdev); + /* Disable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, false); synchronize_irq(hdev->misc_vector.vector_irq); + hclge_hw_error_set_state(hdev, false); hclge_destroy_cmd_queue(&hdev->hw); hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); @@ -5887,18 +6937,12 @@ static void hclge_get_channels(struct hnae3_handle *handle, } static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, - u16 *free_tqps, u16 *max_rss_size) + u16 *alloc_tqps, u16 *max_rss_size) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - u16 temp_tqps = 0; - int i; - for (i = 0; i < hdev->num_tqps; i++) { - if (!hdev->htqp[i].alloced) - temp_tqps++; - } - *free_tqps = temp_tqps; + *alloc_tqps = vport->alloc_tqps; *max_rss_size = hdev->rss_size_max; } @@ -6228,27 +7272,6 @@ static void hclge_get_link_mode(struct hnae3_handle *handle, } } -static void hclge_get_port_type(struct hnae3_handle *handle, - u8 *port_type) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - u8 media_type = hdev->hw.mac.media_type; - - switch (media_type) { - case HNAE3_MEDIA_TYPE_FIBER: - *port_type = PORT_FIBRE; - break; - case HNAE3_MEDIA_TYPE_COPPER: - *port_type = PORT_TP; - break; - case HNAE3_MEDIA_TYPE_UNKNOWN: - default: - *port_type = PORT_OTHER; - break; - } -} - static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -6276,11 +7299,11 @@ static const struct hnae3_ae_ops hclge_ops = { .get_tc_size = hclge_get_tc_size, .get_mac_addr = hclge_get_mac_addr, .set_mac_addr = hclge_set_mac_addr, + .do_ioctl = hclge_do_ioctl, .add_uc_addr = hclge_add_uc_addr, .rm_uc_addr = hclge_rm_uc_addr, .add_mc_addr = hclge_add_mc_addr, .rm_mc_addr = hclge_rm_mc_addr, - .update_mta_status = hclge_update_mta_status, .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, .get_pauseparam = hclge_get_pauseparam, @@ -6301,12 +7324,19 @@ static const struct hnae3_ae_ops hclge_ops = { .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, .set_channels = hclge_set_channels, .get_channels = hclge_get_channels, - .get_flowctrl_adv = hclge_get_flowctrl_adv, .get_regs_len = hclge_get_regs_len, .get_regs = hclge_get_regs, .set_led_id = hclge_set_led_id, .get_link_mode = hclge_get_link_mode, - .get_port_type = hclge_get_port_type, + .add_fd_entry = hclge_add_fd_entry, + .del_fd_entry = hclge_del_fd_entry, + .del_all_fd_entries = hclge_del_all_fd_entries, + .get_fd_rule_cnt = hclge_get_fd_rule_cnt, + .get_fd_rule_info = hclge_get_fd_rule_info, + .get_fd_all_rules = hclge_get_all_rules, + .restore_fd_rules = hclge_restore_fd_entries, + .enable_fd = hclge_enable_fd, + .process_hw_error = hclge_process_ras_hw_error, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 1528fb3fa6be..e3dfd654eca9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -14,6 +14,8 @@ #define HCLGE_MOD_VERSION "1.0" #define 
HCLGE_DRIVER_NAME "hclge" +#define HCLGE_MAX_PF_NUM 8 + #define HCLGE_INVALID_VPORT 0xffff #define HCLGE_PF_CFG_BLOCK_SIZE 32 @@ -53,7 +55,9 @@ #define HCLGE_RSS_TC_SIZE_6 64 #define HCLGE_RSS_TC_SIZE_7 128 -#define HCLGE_MTA_TBL_SIZE 4096 +#define HCLGE_UMV_TBL_SIZE 3072 +#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ + (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) #define HCLGE_TQP_RESET_TRY_TIMES 10 @@ -79,6 +83,19 @@ #define HCLGE_VF_NUM_PER_CMD 64 #define HCLGE_VF_NUM_PER_BYTE 8 +enum HLCGE_PORT_TYPE { + HOST_PORT, + NETWORK_PORT +}; + +#define HCLGE_PF_ID_S 0 +#define HCLGE_PF_ID_M GENMASK(2, 0) +#define HCLGE_VF_ID_S 3 +#define HCLGE_VF_ID_M GENMASK(10, 3) +#define HCLGE_PORT_TYPE_B 11 +#define HCLGE_NETWORK_PORT_ID_S 0 +#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0) + /* Reset related Registers */ #define HCLGE_MISC_RESET_STS_REG 0x20700 #define HCLGE_MISC_VECTOR_INT_STS 0x20800 @@ -149,13 +166,6 @@ enum HCLGE_MAC_DUPLEX { HCLGE_MAC_FULL }; -enum hclge_mta_dmac_sel_type { - HCLGE_MAC_ADDR_47_36, - HCLGE_MAC_ADDR_46_35, - HCLGE_MAC_ADDR_45_34, - HCLGE_MAC_ADDR_44_33, -}; - struct hclge_mac { u8 phy_addr; u8 flag; @@ -238,6 +248,7 @@ struct hclge_cfg { u8 default_speed; u32 numa_node_map; u8 speed_ability; + u16 umv_space; }; struct hclge_tm_info { @@ -256,109 +267,6 @@ struct hclge_comm_stats_str { unsigned long offset; }; -/* all 64bit stats, opcode id: 0x0030 */ -struct hclge_64_bit_stats { - /* query_igu_stat */ - u64 igu_rx_oversize_pkt; - u64 igu_rx_undersize_pkt; - u64 igu_rx_out_all_pkt; - u64 igu_rx_uni_pkt; - u64 igu_rx_multi_pkt; - u64 igu_rx_broad_pkt; - u64 rsv0; - - /* query_egu_stat */ - u64 egu_tx_out_all_pkt; - u64 egu_tx_uni_pkt; - u64 egu_tx_multi_pkt; - u64 egu_tx_broad_pkt; - - /* ssu_ppp packet stats */ - u64 ssu_ppp_mac_key_num; - u64 ssu_ppp_host_key_num; - u64 ppp_ssu_mac_rlt_num; - u64 ppp_ssu_host_rlt_num; - - /* ssu_tx_in_out_dfx_stats */ - u64 ssu_tx_in_num; - u64 ssu_tx_out_num; - /* ssu_rx_in_out_dfx_stats */ - u64 ssu_rx_in_num; - u64 ssu_rx_out_num; -}; - -/* all 32bit stats, opcode id: 0x0031 */ -struct hclge_32_bit_stats { - u64 igu_rx_err_pkt; - u64 igu_rx_no_eof_pkt; - u64 igu_rx_no_sof_pkt; - u64 egu_tx_1588_pkt; - u64 egu_tx_err_pkt; - u64 ssu_full_drop_num; - u64 ssu_part_drop_num; - u64 ppp_key_drop_num; - u64 ppp_rlt_drop_num; - u64 ssu_key_drop_num; - u64 pkt_curr_buf_cnt; - u64 qcn_fb_rcv_cnt; - u64 qcn_fb_drop_cnt; - u64 qcn_fb_invaild_cnt; - u64 rsv0; - u64 rx_packet_tc0_in_cnt; - u64 rx_packet_tc1_in_cnt; - u64 rx_packet_tc2_in_cnt; - u64 rx_packet_tc3_in_cnt; - u64 rx_packet_tc4_in_cnt; - u64 rx_packet_tc5_in_cnt; - u64 rx_packet_tc6_in_cnt; - u64 rx_packet_tc7_in_cnt; - u64 rx_packet_tc0_out_cnt; - u64 rx_packet_tc1_out_cnt; - u64 rx_packet_tc2_out_cnt; - u64 rx_packet_tc3_out_cnt; - u64 rx_packet_tc4_out_cnt; - u64 rx_packet_tc5_out_cnt; - u64 rx_packet_tc6_out_cnt; - u64 rx_packet_tc7_out_cnt; - - /* Tx packet level statistics */ - u64 tx_packet_tc0_in_cnt; - u64 tx_packet_tc1_in_cnt; - u64 tx_packet_tc2_in_cnt; - u64 tx_packet_tc3_in_cnt; - u64 tx_packet_tc4_in_cnt; - u64 tx_packet_tc5_in_cnt; - u64 tx_packet_tc6_in_cnt; - u64 tx_packet_tc7_in_cnt; - u64 tx_packet_tc0_out_cnt; - u64 tx_packet_tc1_out_cnt; - u64 tx_packet_tc2_out_cnt; - u64 tx_packet_tc3_out_cnt; - u64 tx_packet_tc4_out_cnt; - u64 tx_packet_tc5_out_cnt; - u64 tx_packet_tc6_out_cnt; - u64 tx_packet_tc7_out_cnt; - - /* packet buffer statistics */ - u64 pkt_curr_buf_tc0_cnt; - u64 pkt_curr_buf_tc1_cnt; - u64 pkt_curr_buf_tc2_cnt; - u64 pkt_curr_buf_tc3_cnt; - u64 
pkt_curr_buf_tc4_cnt; - u64 pkt_curr_buf_tc5_cnt; - u64 pkt_curr_buf_tc6_cnt; - u64 pkt_curr_buf_tc7_cnt; - - u64 mb_uncopy_num; - u64 lo_pri_unicast_rlt_drop_num; - u64 hi_pri_multicast_rlt_drop_num; - u64 lo_pri_multicast_rlt_drop_num; - u64 rx_oq_drop_pkt_cnt; - u64 tx_oq_drop_pkt_cnt; - u64 nic_l2_err_drop_pkt_cnt; - u64 roc_l2_err_drop_pkt_cnt; -}; - /* mac stats ,opcode id: 0x0032 */ struct hclge_mac_stats { u64 mac_tx_mac_pause_num; @@ -450,8 +358,6 @@ struct hclge_mac_stats { #define HCLGE_STATS_TIMER_INTERVAL (60 * 5) struct hclge_hw_stats { struct hclge_mac_stats mac_stats; - struct hclge_64_bit_stats all_64_bit_stats; - struct hclge_32_bit_stats all_32_bit_stats; u32 stats_timer; }; @@ -464,6 +370,221 @@ struct hclge_vlan_type_cfg { u16 tx_in_vlan_type; }; +enum HCLGE_FD_MODE { + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1, + HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2, + HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1, + HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2, +}; + +enum HCLGE_FD_KEY_TYPE { + HCLGE_FD_KEY_BASE_ON_PTYPE, + HCLGE_FD_KEY_BASE_ON_TUPLE, +}; + +enum HCLGE_FD_STAGE { + HCLGE_FD_STAGE_1, + HCLGE_FD_STAGE_2, +}; + +/* OUTER_XXX indicates tuples in tunnel header of tunnel packet + * INNER_XXX indicate tuples in tunneled header of tunnel packet or + * tuples of non-tunnel packet + */ +enum HCLGE_FD_TUPLE { + OUTER_DST_MAC, + OUTER_SRC_MAC, + OUTER_VLAN_TAG_FST, + OUTER_VLAN_TAG_SEC, + OUTER_ETH_TYPE, + OUTER_L2_RSV, + OUTER_IP_TOS, + OUTER_IP_PROTO, + OUTER_SRC_IP, + OUTER_DST_IP, + OUTER_L3_RSV, + OUTER_SRC_PORT, + OUTER_DST_PORT, + OUTER_L4_RSV, + OUTER_TUN_VNI, + OUTER_TUN_FLOW_ID, + INNER_DST_MAC, + INNER_SRC_MAC, + INNER_VLAN_TAG_FST, + INNER_VLAN_TAG_SEC, + INNER_ETH_TYPE, + INNER_L2_RSV, + INNER_IP_TOS, + INNER_IP_PROTO, + INNER_SRC_IP, + INNER_DST_IP, + INNER_L3_RSV, + INNER_SRC_PORT, + INNER_DST_PORT, + INNER_L4_RSV, + MAX_TUPLE, +}; + +enum HCLGE_FD_META_DATA { + PACKET_TYPE_ID, + IP_FRAGEMENT, + ROCE_TYPE, + NEXT_KEY, + VLAN_NUMBER, + SRC_VPORT, + DST_VPORT, + TUNNEL_PACKET, + MAX_META_DATA, +}; + +struct key_info { + u8 key_type; + u8 key_length; +}; + +static const struct key_info meta_data_key_info[] = { + { PACKET_TYPE_ID, 6}, + { IP_FRAGEMENT, 1}, + { ROCE_TYPE, 1}, + { NEXT_KEY, 5}, + { VLAN_NUMBER, 2}, + { SRC_VPORT, 12}, + { DST_VPORT, 12}, + { TUNNEL_PACKET, 1}, +}; + +static const struct key_info tuple_key_info[] = { + { OUTER_DST_MAC, 48}, + { OUTER_SRC_MAC, 48}, + { OUTER_VLAN_TAG_FST, 16}, + { OUTER_VLAN_TAG_SEC, 16}, + { OUTER_ETH_TYPE, 16}, + { OUTER_L2_RSV, 16}, + { OUTER_IP_TOS, 8}, + { OUTER_IP_PROTO, 8}, + { OUTER_SRC_IP, 32}, + { OUTER_DST_IP, 32}, + { OUTER_L3_RSV, 16}, + { OUTER_SRC_PORT, 16}, + { OUTER_DST_PORT, 16}, + { OUTER_L4_RSV, 32}, + { OUTER_TUN_VNI, 24}, + { OUTER_TUN_FLOW_ID, 8}, + { INNER_DST_MAC, 48}, + { INNER_SRC_MAC, 48}, + { INNER_VLAN_TAG_FST, 16}, + { INNER_VLAN_TAG_SEC, 16}, + { INNER_ETH_TYPE, 16}, + { INNER_L2_RSV, 16}, + { INNER_IP_TOS, 8}, + { INNER_IP_PROTO, 8}, + { INNER_SRC_IP, 32}, + { INNER_DST_IP, 32}, + { INNER_L3_RSV, 16}, + { INNER_SRC_PORT, 16}, + { INNER_DST_PORT, 16}, + { INNER_L4_RSV, 32}, +}; + +#define MAX_KEY_LENGTH 400 +#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4) +#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4) +#define MAX_META_DATA_LENGTH 32 + +enum HCLGE_FD_PACKET_TYPE { + NIC_PACKET, + ROCE_PACKET, +}; + +enum HCLGE_FD_ACTION { + HCLGE_FD_ACTION_ACCEPT_PACKET, + HCLGE_FD_ACTION_DROP_PACKET, +}; + +struct hclge_fd_key_cfg { + u8 key_sel; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + 
u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u32 tuple_active; + u32 meta_data_active; +}; + +struct hclge_fd_cfg { + u8 fd_mode; + u8 fd_en; + u16 max_key_length; + u32 proto_support; + u32 rule_num[2]; /* rule entry number */ + u16 cnt_num[2]; /* rule hit counter number */ + struct hclge_fd_key_cfg key_cfg[2]; +}; + +struct hclge_fd_rule_tuples { + u8 src_mac[6]; + u8 dst_mac[6]; + u32 src_ip[4]; + u32 dst_ip[4]; + u16 src_port; + u16 dst_port; + u16 vlan_tag1; + u16 ether_proto; + u8 ip_tos; + u8 ip_proto; +}; + +struct hclge_fd_rule { + struct hlist_node rule_node; + struct hclge_fd_rule_tuples tuples; + struct hclge_fd_rule_tuples tuples_mask; + u32 unused_tuple; + u32 flow_type; + u8 action; + u16 vf_id; + u16 queue_id; + u16 location; +}; + +struct hclge_fd_ad_data { + u16 ad_id; + u8 drop_packet; + u8 forward_to_direct_queue; + u16 queue_id; + u8 use_counter; + u8 counter_id; + u8 use_next_stage; + u8 write_rule_id_to_bd; + u8 next_input_key; + u16 rule_id; +}; + +/* For each bit of TCAM entry, it uses a pair of 'x' and + * 'y' to indicate which value to match, like below: + * ---------------------------------- + * | bit x | bit y | search value | + * ---------------------------------- + * | 0 | 0 | always hit | + * ---------------------------------- + * | 1 | 0 | match '0' | + * ---------------------------------- + * | 0 | 1 | match '1' | + * ---------------------------------- + * | 1 | 1 | invalid | + * ---------------------------------- + * Then for input key(k) and mask(v), we can calculate the value by + * the formulae: + * x = (~k) & v + * y = (k ^ ~v) & k + */ +#define calc_x(x, k, v) ((x) = (~(k) & (v))) +#define calc_y(y, k, v) \ + do { \ + const typeof(k) _k_ = (k); \ + const typeof(v) _v_ = (v); \ + (y) = (_k_ ^ ~_v_) & (_k_); \ + } while (0) + #define HCLGE_VPORT_NUM 256 struct hclge_dev { struct pci_dev *pdev; @@ -547,12 +668,22 @@ struct hclge_dev { u32 pkt_buf_size; /* Total pf buf size for tx/rx */ u32 mps; /* Max packet size */ - enum hclge_mta_dmac_sel_type mta_mac_sel_type; - bool enable_mta; /* Multicast filter enable */ - struct hclge_vlan_type_cfg vlan_type_cfg; unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + + struct hclge_fd_cfg fd_cfg; + struct hlist_head fd_rule_list; + u16 hclge_fd_rule_num; + + u16 wanted_umv_size; + /* max available unicast mac vlan space */ + u16 max_umv_size; + /* private unicast mac vlan space, it's same for PF and its VFs */ + u16 priv_umv_size; + /* unicast mac vlan space shared by PF and its VFs */ + u16 share_umv_size; + struct mutex umv_mutex; /* protect share_umv_size */ }; /* VPort level vlan tag configuration for TX direction */ @@ -605,13 +736,12 @@ struct hclge_vport { struct hclge_tx_vtag_cfg txvlan_cfg; struct hclge_rx_vtag_cfg rxvlan_cfg; + u16 used_umv_num; + int vport_id; struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; struct hnae3_handle roce; - - bool accept_mta_mc; /* whether to accept mta filter multicast */ - unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -626,15 +756,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, int hclge_rm_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr); -int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, - u8 func_id, - bool enable); -int hclge_update_mta_status_common(struct hclge_vport *vport, - unsigned long *status, - u16 idx, - u16 count, - bool update_filter); - struct hclge_vport 
*hclge_get_vport(struct hnae3_handle *handle); int hclge_bind_ring_with_vector(struct hclge_vport *vport, int vector_id, bool en, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f34851c91eb3..04462a347a94 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -233,43 +233,6 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, return 0; } -static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport, - u8 *msg, u8 idx, bool is_end) -{ -#define HCLGE_MTA_STATUS_MSG_SIZE 13 -#define HCLGE_MTA_STATUS_MSG_BITS \ - (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) -#define HCLGE_MTA_STATUS_MSG_END_BITS \ - (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS) - unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)]; - u16 tbl_cnt; - u16 tbl_idx; - u8 msg_ofs; - u8 msg_bit; - - tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS : - HCLGE_MTA_STATUS_MSG_BITS; - - /* set msg field */ - msg_ofs = 0; - msg_bit = 0; - memset(status, 0, sizeof(status)); - for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) { - if (msg[msg_ofs] & BIT(msg_bit)) - set_bit(tbl_idx, status); - - msg_bit++; - if (msg_bit == BITS_PER_BYTE) { - msg_bit = 0; - msg_ofs++; - } - } - - return hclge_update_mta_status_common(vport, - status, idx * HCLGE_MTA_STATUS_MSG_BITS, - tbl_cnt, is_end); -} - static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) @@ -284,27 +247,6 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, status = hclge_add_mc_addr_common(vport, mac_addr); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { status = hclge_rm_mc_addr_common(vport, mac_addr); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) { - u8 func_id = vport->vport_id; - bool enable = mbx_req->msg[2]; - - status = hclge_cfg_func_mta_filter(hdev, func_id, enable); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) { - resp_data = hdev->mta_mac_sel_type; - resp_len = sizeof(u8); - gen_resp = true; - status = 0; - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) { - /* mta status update msg format - * msg[2.6 : 2.0] msg index - * msg[2.7] msg is end - * msg[15 : 3] mta status bits[103 : 0] - */ - bool is_end = (mbx_req->msg[2] & 0x80) ? 
true : false; - - status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3], - mbx_req->msg[2] & 0x7F, - is_end); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 398971a062f4..24b1f2a0c32a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -10,8 +10,6 @@ #define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ SUPPORTED_TP | \ - SUPPORTED_Pause | \ - SUPPORTED_Asym_Pause | \ PHY_10BT_FEATURES | \ PHY_100BT_FEATURES | \ PHY_1000BT_FEATURES) @@ -213,7 +211,7 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) } phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES; - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 5db70a1451c5..aa5cb9834d73 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -172,7 +172,7 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, u8 pfc_bitmap) { struct hclge_desc desc; - struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data; + struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false); @@ -188,11 +188,12 @@ static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, struct hclge_cfg_pause_param_cmd *pause_param; struct hclge_desc desc; - pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false); ether_addr_copy(pause_param->mac_addr, addr); + ether_addr_copy(pause_param->mac_addr_extra, addr); pause_param->pause_trans_gap = pause_trans_gap; pause_param->pause_trans_time = cpu_to_le16(pause_trans_time); @@ -207,7 +208,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) u8 trans_gap; int ret; - pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); @@ -297,7 +298,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, } static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev, - u8 q_id, u16 qs_id) + u16 q_id, u16 qs_id) { struct hclge_nq_to_qs_link_cmd *map; struct hclge_desc desc; @@ -1279,10 +1280,15 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) return 0; } -void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) +int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) { u8 i, bit_map = 0; + for (i = 0; i < hdev->num_alloc_vport; i++) { + if (num_tc > hdev->vport[i].alloc_tqps) + return -EINVAL; + } + hdev->tm_info.num_tc = num_tc; for (i = 0; i < hdev->tm_info.num_tc; i++) @@ -1296,6 +1302,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) hdev->hw_tc_map = bit_map; hclge_tm_schd_info_init(hdev); + + return 0; } int hclge_tm_init_hw(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index dd4c194747c1..25eef13a3e14 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h 
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -106,6 +106,10 @@ struct hclge_cfg_pause_param_cmd { u8 pause_trans_gap; u8 rsvd; __le16 pause_trans_time; + u8 rsvd1[6]; + /* extra mac address to do double check for pause frame */ + u8 mac_addr_extra[ETH_ALEN]; + u16 rsvd2; }; struct hclge_pfc_stats_cmd { @@ -128,7 +132,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev); int hclge_pause_setup_hw(struct hclge_dev *hdev); int hclge_tm_schd_mode_hw(struct hclge_dev *hdev); int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); -void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); +int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_map_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index fb471fe2c494..0d3b445f6799 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -132,9 +132,9 @@ static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, reg_val |= HCLGEVF_NIC_CMQ_ENABLE; hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); - break; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + return 0; case HCLGEVF_TYPE_CRQ: reg_val = (u32)ring->desc_dma_addr; hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); @@ -145,12 +145,12 @@ static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, reg_val |= HCLGEVF_NIC_CMQ_ENABLE; hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); - break; + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); + return 0; + default: + return -EINVAL; } - - return 0; } void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 19b32860309c..bc294b0c8b62 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -89,6 +89,7 @@ enum hclgevf_opcode_type { HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20, /* RSS cmd */ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02, HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07, HCLGEVF_OPC_RSS_TC_MODE = 0x0D08, /* Mailbox cmd */ @@ -148,7 +149,8 @@ struct hclgevf_query_res_cmd { __le16 rsv[7]; }; -#define HCLGEVF_RSS_HASH_KEY_OFFSET 4 +#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4 +#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4 #define HCLGEVF_RSS_HASH_KEY_NUM 16 struct hclgevf_rss_config_cmd { u8 hash_config; @@ -159,11 +161,11 @@ struct hclgevf_rss_config_cmd { struct hclgevf_rss_input_tuple_cmd { u8 ipv4_tcp_en; u8 ipv4_udp_en; - u8 ipv4_stcp_en; + u8 ipv4_sctp_en; u8 ipv4_fragment_en; u8 ipv6_tcp_en; u8 ipv6_udp_en; - u8 ipv6_stcp_en; + u8 ipv6_sctp_en; u8 ipv6_fragment_en; u8 rsv[16]; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 9c0091f2addf..e0a86a58342c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -31,16 +31,15 @@ static inline struct hclgevf_dev 
*hclgevf_ae_get_hdev( static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hnae3_queue *queue; struct hclgevf_desc desc; struct hclgevf_tqp *tqp; int status; int i; - for (i = 0; i < hdev->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_RX_STATUS, true); @@ -77,17 +76,16 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_tqp *tqp; u64 *buff = data; int i; - for (i = 0; i < hdev->num_tqps; i++) { - tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; } for (i = 0; i < kinfo->num_tqps; i++) { - tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; } @@ -96,29 +94,29 @@ static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_knic_private_info *kinfo = &handle->kinfo; - return hdev->num_tqps * 2; + return kinfo->num_tqps * 2; } static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_knic_private_info *kinfo = &handle->kinfo; u8 *buff = data; int i = 0; - for (i = 0; i < hdev->num_tqps; i++) { - struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], - struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], + struct hclgevf_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", tqp->index); buff += ETH_GSTRING_LEN; } - for (i = 0; i < hdev->num_tqps; i++) { - struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], - struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], + struct hclgevf_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", tqp->index); buff += ETH_GSTRING_LEN; } @@ -182,7 +180,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) return 0; } -static int hclge_get_queue_info(struct hclgevf_dev *hdev) +static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_RSS_INFO_LEN 8 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; @@ -299,6 +297,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) client = handle->client; + link_state = + test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; + if (link_state != hdev->hw.mac.link) { client->ops->link_status_change(handle, !!link_state); hdev->hw.mac.link = link_state; @@ -385,6 +386,47 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) return -EINVAL; } +static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, + const u8 hfunc, const u8 *key) +{ + struct hclgevf_rss_config_cmd *req; + struct hclgevf_desc desc; + int key_offset; + int key_size; + int ret; + + req = (struct hclgevf_rss_config_cmd *)desc.data; + + for (key_offset = 0; key_offset < 3; key_offset++) { + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); + req->hash_config |= + (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); + + if (key_offset == 2) + key_size = + HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; + else + key_size = HCLGEVF_RSS_HASH_KEY_NUM; + + memcpy(req->hash_key, + key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure RSS config fail, status = %d\n", + ret); + return ret; + } + } + + return 0; +} + static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) { return HCLGEVF_RSS_KEY_SIZE; @@ -465,68 +507,40 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) return status; } -static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash, - u8 *key) +static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_config_cmd *req; - int lkup_times = key ? 3 : 1; - struct hclgevf_desc desc; - int key_offset; - int key_size; - int status; - - req = (struct hclgevf_rss_config_cmd *)desc.data; - lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 
1 : 0); - - for (key_offset = 0; key_offset < lkup_times; key_offset++) { - hclgevf_cmd_setup_basic_desc(&desc, - HCLGEVF_OPC_RSS_GENERIC_CONFIG, - true); - req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + int i; - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { - dev_err(&hdev->pdev->dev, - "failed to get hardware RSS cfg, status = %d\n", - status); - return status; + if (handle->pdev->revision >= 0x21) { + /* Get hash algorithm */ + if (hfunc) { + switch (rss_cfg->hash_algo) { + case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGEVF_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } } - if (key_offset == 2) - key_size = - HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; - else - key_size = HCLGEVF_RSS_HASH_KEY_NUM; - + /* Get the RSS Key required by the user */ if (key) - memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, - req->hash_key, - key_size); - } - - if (hash) { - if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ) - *hash = ETH_RSS_HASH_TOP; - else - *hash = ETH_RSS_HASH_UNKNOWN; + memcpy(key, rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); } - return 0; -} - -static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; - if (indir) for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) indir[i] = rss_cfg->rss_indirection_tbl[i]; - return hclgevf_get_rss_hw_cfg(handle, hfunc, key); + return 0; } static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, @@ -534,7 +548,36 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; + int ret, i; + + if (handle->pdev->revision >= 0x21) { + /* Set the RSS Hash Key if specififed by the user */ + if (key) { + switch (hfunc) { + case ETH_RSS_HASH_TOP: + rss_cfg->hash_algo = + HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; + break; + case ETH_RSS_HASH_XOR: + rss_cfg->hash_algo = + HCLGEVF_RSS_HASH_ALGO_SIMPLE; + break; + case ETH_RSS_HASH_NO_CHANGE: + break; + default: + return -EINVAL; + } + + ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, + key); + if (ret) + return ret; + + /* Update the shadow RSS key with user specified qids */ + memcpy(rss_cfg->rss_hash_key, key, + HCLGEVF_RSS_KEY_SIZE); + } + } /* update the shadow RSS table with user specified qids */ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) @@ -544,6 +587,193 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, return hclgevf_set_rss_indir_table(hdev); } +static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) +{ + u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGEVF_S_PORT_BIT : 0; + + if (nfc->data & RXH_L4_B_2_3) + hash_sets |= HCLGEVF_D_PORT_BIT; + else + hash_sets &= ~HCLGEVF_D_PORT_BIT; + + if (nfc->data & RXH_IP_SRC) + hash_sets |= HCLGEVF_S_IP_BIT; + else + hash_sets &= ~HCLGEVF_S_IP_BIT; + + if (nfc->data & RXH_IP_DST) + hash_sets |= HCLGEVF_D_IP_BIT; + else + hash_sets &= ~HCLGEVF_D_IP_BIT; + + if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) + hash_sets |= HCLGEVF_V_TAG_BIT; + + return hash_sets; +} + +static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + struct hclgevf_rss_input_tuple_cmd *req; + struct hclgevf_desc desc; + u8 tuple_sets; + int ret; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + tuple_sets = hclgevf_get_rss_hash_bits(nfc); + switch (nfc->flow_type) { + case TCP_V4_FLOW: + req->ipv4_tcp_en = tuple_sets; + break; + case TCP_V6_FLOW: + req->ipv6_tcp_en = tuple_sets; + break; + case UDP_V4_FLOW: + req->ipv4_udp_en = tuple_sets; + break; + case UDP_V6_FLOW: + req->ipv6_udp_en = tuple_sets; + break; + case SCTP_V4_FLOW: + req->ipv4_sctp_en = tuple_sets; + break; + case SCTP_V6_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + + req->ipv6_sctp_en = tuple_sets; + break; + case IPV4_FLOW: + req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + break; + case IPV6_FLOW: + req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + break; + default: + return -EINVAL; + } + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set rss tuple fail, status = %d\n", ret); + return ret; + } + + rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; + rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; + rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; + rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; + rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; + rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; + rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; + rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + return 0; +} + +static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + u8 tuple_sets; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + nfc->data = 0; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + break; + case UDP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; + break; + case TCP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + break; + case 
UDP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; + break; + case SCTP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + break; + case SCTP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + break; + case IPV4_FLOW: + case IPV6_FLOW: + tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; + break; + default: + return -EINVAL; + } + + if (!tuple_sets) + return 0; + + if (tuple_sets & HCLGEVF_D_PORT_BIT) + nfc->data |= RXH_L4_B_2_3; + if (tuple_sets & HCLGEVF_S_PORT_BIT) + nfc->data |= RXH_L4_B_0_1; + if (tuple_sets & HCLGEVF_D_IP_BIT) + nfc->data |= RXH_IP_DST; + if (tuple_sets & HCLGEVF_S_IP_BIT) + nfc->data |= RXH_IP_SRC; + + return 0; +} + +static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, + struct hclgevf_rss_cfg *rss_cfg) +{ + struct hclgevf_rss_input_tuple_cmd *req; + struct hclgevf_desc desc; + int ret; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Configure rss input fail, status = %d\n", ret); + return ret; +} + static int hclgevf_get_tc_size(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -735,138 +965,16 @@ static int hclgevf_get_queue_id(struct hnae3_queue *queue) static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hnae3_queue *queue; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_tqp *tqp; int i; - for (i = 0; i < hdev->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); } } -static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev) -{ - u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX; - int ret; - - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, - NULL, 0, true, &resp_msg, sizeof(u8)); - - if (ret) { - dev_err(&hdev->pdev->dev, - "Read mta type fail, ret=%d.\n", ret); - return ret; - } - - if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) { - dev_err(&hdev->pdev->dev, - "Read mta type invalid, resp=%d.\n", resp_msg); - return -EINVAL; - } - - hdev->mta_mac_sel_type = resp_msg; - - return 0; -} - -static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev, - const u8 *addr) -{ - u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type; - u16 high_val = addr[1] | (addr[0] << 8); - - return (high_val >> rsh) & 0xfff; -} - -static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev, - unsigned long *status) -{ -#define HCLGEVF_MTA_STATUS_MSG_SIZE 13 -#define HCLGEVF_MTA_STATUS_MSG_BITS \ - (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) -#define HCLGEVF_MTA_STATUS_MSG_END_BITS \ - (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS) - u16 tbl_cnt; - u16 tbl_idx; - u8 
msg_cnt;
-	u8 msg_idx;
-	int ret;
-
-	msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
-			       HCLGEVF_MTA_STATUS_MSG_BITS);
-	tbl_idx = 0;
-	msg_idx = 0;
-	while (msg_cnt--) {
-		u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
-		u8 *p = &msg[1];
-		u8 msg_ofs;
-		u8 msg_bit;
-
-		memset(msg, 0, sizeof(msg));
-
-		/* set index field */
-		msg[0] = 0x7F & msg_idx;
-
-		/* set end flag field */
-		if (msg_cnt == 0) {
-			msg[0] |= 0x80;
-			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
-		} else {
-			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
-		}
-
-		/* set status field */
-		msg_ofs = 0;
-		msg_bit = 0;
-		while (tbl_cnt--) {
-			if (test_bit(tbl_idx, status))
-				p[msg_ofs] |= BIT(msg_bit);
-
-			tbl_idx++;
-
-			msg_bit++;
-			if (msg_bit == BITS_PER_BYTE) {
-				msg_bit = 0;
-				msg_ofs++;
-			}
-		}
-
-		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
-					   HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
-					   msg, sizeof(msg), false, NULL, 0);
-		if (ret)
-			break;
-
-		msg_idx++;
-	}
-
-	return ret;
-}
-
-static int hclgevf_update_mta_status(struct hnae3_handle *handle)
-{
-	unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct net_device *netdev = hdev->nic.kinfo.netdev;
-	struct netdev_hw_addr *ha;
-	u16 tbl_idx;
-
-	/* clear status */
-	memset(mta_status, 0, sizeof(mta_status));
-
-	/* update status from mc addr list */
-	netdev_for_each_mc_addr(ha, netdev) {
-		tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
-		set_bit(tbl_idx, mta_status);
-	}
-
-	return hclgevf_do_update_mta_status(hdev, mta_status);
-}
-
 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1106,7 +1214,8 @@ static int hclgevf_do_reset(struct hclgevf_dev *hdev)
 	return status;
 }
 
-static void hclgevf_reset_event(struct hnae3_handle *handle)
+static void hclgevf_reset_event(struct pci_dev *pdev,
+				struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
@@ -1341,8 +1450,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 {
 	int ret;
 
+	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;
+
 	/* get queue configuration from PF */
-	ret = hclge_get_queue_info(hdev);
+	ret = hclgevf_get_queue_info(hdev);
 	if (ret)
 		return ret;
 	/* get tc configuration from PF */
@@ -1395,6 +1506,39 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
 
 	rss_cfg->rss_size = hdev->rss_size_max;
 
+	if (hdev->pdev->revision >= 0x21) {
+		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+		netdev_rss_key_fill(rss_cfg->rss_hash_key,
+				    HCLGEVF_RSS_KEY_SIZE);
+
+		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
+					       rss_cfg->rss_hash_key);
+		if (ret)
+			return ret;
+
+		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
+					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		rss_cfg->rss_tuple_sets.ipv4_udp_en =
+					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
+					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
+					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
+					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		rss_cfg->rss_tuple_sets.ipv6_udp_en =
+					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
+					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
+					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+
+		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
+		if (ret)
+			return ret;
+
+	}
+
 	/* Initialize RSS indirect table for each vport */
 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
 		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
@@ -1417,12 +1561,13 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
 
 static int hclgevf_ae_start(struct hnae3_handle *handle)
 {
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	int i, queue_id;
 
-	for (i = 0; i < handle->kinfo.num_tqps; i++) {
+	for (i = 0; i < kinfo->num_tqps; i++) {
 		/* ring enable */
-		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+		queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
 		if (queue_id < 0) {
 			dev_warn(&hdev->pdev->dev,
 				 "Get invalid queue id, ignore it\n");
@@ -1445,12 +1590,15 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 
 static void hclgevf_ae_stop(struct hnae3_handle *handle)
 {
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	int i, queue_id;
 
-	for (i = 0; i < hdev->num_tqps; i++) {
+	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
 		/* Ring disable */
-		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+		queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
 		if (queue_id < 0) {
 			dev_warn(&hdev->pdev->dev,
 				 "Get invalid queue id, ignore it\n");
@@ -1619,17 +1767,22 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
 
 		ret = client->ops->init_instance(&hdev->nic);
 		if (ret)
-			return ret;
+			goto clear_nic;
+
+		hnae3_set_client_init_flag(client, ae_dev, 1);
 
 		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
 			struct hnae3_client *rc = hdev->roce_client;
 
 			ret = hclgevf_init_roce_base_info(hdev);
 			if (ret)
-				return ret;
+				goto clear_roce;
 			ret = rc->ops->init_instance(&hdev->roce);
 			if (ret)
-				return ret;
+				goto clear_roce;
+
+			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
+						   1);
 		}
 		break;
 	case HNAE3_CLIENT_UNIC:
@@ -1638,7 +1791,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
 
 		ret = client->ops->init_instance(&hdev->nic);
 		if (ret)
-			return ret;
+			goto clear_nic;
+
+		hnae3_set_client_init_flag(client, ae_dev, 1);
 		break;
 	case HNAE3_CLIENT_ROCE:
 		if (hnae3_dev_roce_supported(hdev)) {
@@ -1649,15 +1804,29 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
 		if (hdev->roce_client && hdev->nic_client) {
 			ret = hclgevf_init_roce_base_info(hdev);
 			if (ret)
-				return ret;
+				goto clear_roce;
 
 			ret = client->ops->init_instance(&hdev->roce);
 			if (ret)
-				return ret;
+				goto clear_roce;
 		}
+
+		hnae3_set_client_init_flag(client, ae_dev, 1);
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	return 0;
+
+clear_nic:
+	hdev->nic_client = NULL;
+	hdev->nic.client = NULL;
+	return ret;
+clear_roce:
+	hdev->roce_client = NULL;
+	hdev->roce.client = NULL;
+	return ret;
 }
 
 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
@@ -1666,13 +1835,19 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
 	struct hclgevf_dev *hdev = ae_dev->priv;
 
 	/* un-init roce, if it exists */
-	if (hdev->roce_client)
+	if (hdev->roce_client) {
 		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+		hdev->roce_client = NULL;
+		hdev->roce.client = NULL;
+	}
 
 	/* un-init nic/unic, if this was not called by roce client */
-	if ((client->ops->uninit_instance) &&
-	    (client->type != HNAE3_CLIENT_ROCE))
+	if (client->ops->uninit_instance && hdev->nic_client &&
+	    client->type != HNAE3_CLIENT_ROCE) {
 		client->ops->uninit_instance(&hdev->nic, 0);
+		hdev->nic_client = NULL;
+		hdev->nic.client = NULL;
+	}
 }
 
 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
@@ -1839,14 +2014,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		goto err_config;
 	}
 
-	/* Initialize mta type for this VF */
-	ret = hclgevf_cfg_func_mta_type(hdev);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"failed(%d) to initialize MTA type\n", ret);
-		goto err_config;
-	}
-
 	/* Initialize RSS for this VF */
 	ret = hclgevf_rss_init_hw(hdev);
 	if (ret) {
@@ -1943,11 +2110,11 @@ static void hclgevf_get_channels(struct hnae3_handle *handle,
 }
 
 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
-					  u16 *free_tqps, u16 *max_rss_size)
+					  u16 *alloc_tqps, u16 *max_rss_size)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
-	*free_tqps = 0;
+	*alloc_tqps = hdev->num_tqps;
 	*max_rss_size = hdev->rss_size_max;
 }
 
@@ -1979,6 +2146,14 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 	hdev->hw.mac.duplex = duplex;
 }
 
+static void hclgevf_get_media_type(struct hnae3_handle *handle,
+				   u8 *media_type)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	if (media_type)
+		*media_type = hdev->hw.mac.media_type;
+}
+
 static const struct hnae3_ae_ops hclgevf_ops = {
 	.init_ae_dev = hclgevf_init_ae_dev,
 	.uninit_ae_dev = hclgevf_uninit_ae_dev,
@@ -1998,7 +2173,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.rm_uc_addr = hclgevf_rm_uc_addr,
 	.add_mc_addr = hclgevf_add_mc_addr,
 	.rm_mc_addr = hclgevf_rm_mc_addr,
-	.update_mta_status = hclgevf_update_mta_status,
 	.get_stats = hclgevf_get_stats,
 	.update_stats = hclgevf_update_stats,
 	.get_strings = hclgevf_get_strings,
@@ -2007,6 +2181,8 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.get_rss_indir_size = hclgevf_get_rss_indir_size,
 	.get_rss = hclgevf_get_rss,
 	.set_rss = hclgevf_set_rss,
+	.get_rss_tuple = hclgevf_get_rss_tuple,
+	.set_rss_tuple = hclgevf_set_rss_tuple,
 	.get_tc_size = hclgevf_get_tc_size,
 	.get_fw_version = hclgevf_get_fw_version,
 	.set_vlan_filter = hclgevf_set_vlan_filter,
@@ -2016,6 +2192,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
 	.get_status = hclgevf_get_status,
 	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
+	.get_media_type = hclgevf_get_media_type,
 };
 
 static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index b23ba171473c..aed241e8ffab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -46,9 +46,13 @@
 #define HCLGEVF_RSS_HASH_ALGO_MASK	0xf
 #define HCLGEVF_RSS_CFG_TBL_NUM \
 	(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
-
-#define HCLGEVF_MTA_TBL_SIZE		4096
-#define HCLGEVF_MTA_TYPE_SEL_MAX	4
+#define HCLGEVF_RSS_INPUT_TUPLE_OTHER	GENMASK(3, 0)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP	GENMASK(4, 0)
+#define HCLGEVF_D_PORT_BIT		BIT(0)
+#define HCLGEVF_S_PORT_BIT		BIT(1)
+#define HCLGEVF_D_IP_BIT		BIT(2)
+#define HCLGEVF_S_IP_BIT		BIT(3)
+#define HCLGEVF_V_TAG_BIT		BIT(4)
 
 /* states of hclgevf device & tasks */
 enum hclgevf_states {
@@ -66,6 +70,7 @@ enum hclgevf_states {
 #define HCLGEVF_MPF_ENBALE	1
 
 struct hclgevf_mac {
+	u8 media_type;
 	u8 mac_addr[ETH_ALEN];
 	int link;
 	u8 duplex;
@@ -108,12 +113,24 @@ struct hclgevf_cfg {
 	u32 numa_node_map;
 };
 
+struct hclgevf_rss_tuple_cfg {
+	u8 ipv4_tcp_en;
+	u8 ipv4_udp_en;
+	u8 ipv4_sctp_en;
+	u8 ipv4_fragment_en;
+	u8 ipv6_tcp_en;
+	u8 ipv6_udp_en;
+	u8 ipv6_sctp_en;
+	u8 ipv6_fragment_en;
+};
+
 struct hclgevf_rss_cfg {
 	u8  rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
 	u32 hash_algo;
 	u32 rss_size;
 	u8 hw_tc_map;
 	u8  rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+	struct hclgevf_rss_tuple_cfg rss_tuple_sets;
 };
 
 struct hclgevf_misc_vector {
@@ -156,8 +173,6 @@ struct hclgevf_dev {
 	u16 *vector_status;
 	int *vector_irq;
 
-	bool accept_mta_mc; /* whether to accept mta filter multicast */
-	u8 mta_mac_sel_type;
 	bool mbx_event_pending;
 	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
 	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */