From bd7fc6e1957c2102866f9e464c1f2302e891b7e9 Mon Sep 17 00:00:00 2001 From: Shradha Gupta Date: Wed, 15 Mar 2023 04:55:13 -0700 Subject: net: mana: Add new MANA VF performance counters for easier troubleshooting Extended performance counter stats in 'ethtool -S ' output for MANA VF to facilitate troubleshooting. Tested-on: Ubuntu22 Signed-off-by: Shradha Gupta Signed-off-by: David S. Miller --- drivers/net/ethernet/microsoft/mana/mana_en.c | 62 +++++++++++++++++++++++++-- 1 file changed, 59 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/microsoft/mana/mana_en.c') diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 6120f2b6684f..492474b4d8aa 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -156,6 +156,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct mana_txq *txq; struct mana_cq *cq; int err, len; + u16 ihs; if (unlikely(!apc->port_is_up)) goto tx_drop; @@ -166,6 +167,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) txq = &apc->tx_qp[txq_idx].txq; gdma_sq = txq->gdma_sq; cq = &apc->tx_qp[txq_idx].tx_cq; + tx_stats = &txq->stats; pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; @@ -179,10 +181,17 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt; - if (pkt_fmt == MANA_SHORT_PKT_FMT) + if (pkt_fmt == MANA_SHORT_PKT_FMT) { pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob); - else + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->short_pkt_fmt++; + u64_stats_update_end(&tx_stats->syncp); + } else { pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob); + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->long_pkt_fmt++; + u64_stats_update_end(&tx_stats->syncp); + } pkg.wqe_req.inline_oob_data = &pkg.tx_oob; pkg.wqe_req.flags = 0; @@ -232,9 +241,35 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } + + if (skb->encapsulation) { + ihs = skb_inner_tcp_all_headers(skb); + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->tso_inner_packets++; + tx_stats->tso_inner_bytes += skb->len - ihs; + u64_stats_update_end(&tx_stats->syncp); + } else { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + ihs = skb_transport_offset(skb) + sizeof(struct udphdr); + } else { + ihs = skb_tcp_all_headers(skb); + if (ipv6_has_hopopt_jumbo(skb)) + ihs -= sizeof(struct hop_jumbo_hdr); + } + + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->tso_packets++; + tx_stats->tso_bytes += skb->len - ihs; + u64_stats_update_end(&tx_stats->syncp); + } + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { csum_type = mana_checksum_info(skb); + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->csum_partial++; + u64_stats_update_end(&tx_stats->syncp); + if (csum_type == IPPROTO_TCP) { pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; @@ -254,8 +289,12 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) } } - if (mana_map_skb(skb, apc, &pkg)) + if (mana_map_skb(skb, apc, &pkg)) { + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->mana_map_err++; + u64_stats_update_end(&tx_stats->syncp); goto free_sgl_ptr; + } skb_queue_tail(&txq->pending_skbs, skb); @@ -1038,6 +1077,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq) if (comp_read < 1) return; + apc->eth_stats.tx_cqes 
= comp_read; + for (i = 0; i < comp_read; i++) { struct mana_tx_comp_oob *cqe_oob; @@ -1064,6 +1105,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq) case CQE_TX_VLAN_TAGGING_VIOLATION: WARN_ONCE(1, "TX: CQE error %d: ignored.\n", cqe_oob->cqe_hdr.cqe_type); + apc->eth_stats.tx_cqe_err++; break; default: @@ -1072,6 +1114,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq) */ WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n", cqe_oob->cqe_hdr.cqe_type); + apc->eth_stats.tx_cqe_unknown_type++; return; } @@ -1118,6 +1161,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq) WARN_ON_ONCE(1); cq->work_done = pkt_transmitted; + + apc->eth_stats.tx_cqes -= pkt_transmitted; } static void mana_post_pkt_rxq(struct mana_rxq *rxq) @@ -1252,12 +1297,15 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; struct net_device *ndev = rxq->ndev; struct mana_recv_buf_oob *rxbuf_oob; + struct mana_port_context *apc; struct device *dev = gc->dev; void *new_buf, *old_buf; struct page *new_page; u32 curr, pktlen; dma_addr_t da; + apc = netdev_priv(ndev); + switch (oob->cqe_hdr.cqe_type) { case CQE_RX_OKAY: break; @@ -1270,6 +1318,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, case CQE_RX_COALESCED_4: netdev_err(ndev, "RX coalescing is unsupported\n"); + apc->eth_stats.rx_coalesced_err++; return; case CQE_RX_OBJECT_FENCE: @@ -1279,6 +1328,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, default: netdev_err(ndev, "Unknown RX CQE type = %d\n", oob->cqe_hdr.cqe_type); + apc->eth_stats.rx_cqe_unknown_type++; return; } @@ -1341,11 +1391,15 @@ static void mana_poll_rx_cq(struct mana_cq *cq) { struct gdma_comp *comp = cq->gdma_comp_buf; struct mana_rxq *rxq = cq->rxq; + struct mana_port_context *apc; int comp_read, i; + apc = netdev_priv(rxq->ndev); + comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER); + apc->eth_stats.rx_cqes = comp_read; rxq->xdp_flush = false; for (i = 0; i < comp_read; i++) { @@ -1357,6 +1411,8 @@ static void mana_poll_rx_cq(struct mana_cq *cq) return; mana_process_rx_cqe(rxq, cq, &comp[i]); + + apc->eth_stats.rx_cqes--; } if (rxq->xdp_flush) -- cgit v1.2.3 From ce518bc3e9ca342309995c9270c3ec4892963695 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Wed, 12 Apr 2023 14:16:00 -0700 Subject: net: mana: Use napi_build_skb in RX path Use napi_build_skb() instead of build_skb() to take advantage of the NAPI percpu caches to obtain skbuff_head. Signed-off-by: Haiyang Zhang Reviewed-by: Jesse Brandeburg Signed-off-by: David S. 
Miller --- drivers/net/ethernet/microsoft/mana/mana_en.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/microsoft/mana/mana_en.c') diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 492474b4d8aa..112c642dc89b 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1188,7 +1188,7 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq) static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len, struct xdp_buff *xdp) { - struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE); + struct sk_buff *skb = napi_build_skb(buf_va, PAGE_SIZE); if (!skb) return NULL; -- cgit v1.2.3 From a2917b23497e4205db32271e4e06e142a9f8a6aa Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Wed, 12 Apr 2023 14:16:01 -0700 Subject: net: mana: Refactor RX buffer allocation code to prepare for various MTU Move out common buffer allocation code from mana_process_rx_cqe() and mana_alloc_rx_wqe() to helper functions. Refactor related variables so they can be changed in one place, and buffer sizes are in sync. Signed-off-by: Haiyang Zhang Reviewed-by: Jesse Brandeburg Signed-off-by: David S. Miller --- drivers/net/ethernet/microsoft/mana/mana_en.c | 154 +++++++++++++++----------- include/net/mana/mana.h | 6 +- 2 files changed, 91 insertions(+), 69 deletions(-) (limited to 'drivers/net/ethernet/microsoft/mana/mana_en.c') diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 112c642dc89b..911954ff84ee 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1282,14 +1282,64 @@ drop_xdp: u64_stats_update_end(&rx_stats->syncp); drop: - WARN_ON_ONCE(rxq->xdp_save_page); - rxq->xdp_save_page = virt_to_page(buf_va); + WARN_ON_ONCE(rxq->xdp_save_va); + /* Save for reuse */ + rxq->xdp_save_va = buf_va; ++ndev->stats.rx_dropped; return; } +static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, + dma_addr_t *da, bool is_napi) +{ + struct page *page; + void *va; + + /* Reuse XDP dropped page if available */ + if (rxq->xdp_save_va) { + va = rxq->xdp_save_va; + rxq->xdp_save_va = NULL; + } else { + page = dev_alloc_page(); + if (!page) + return NULL; + + va = page_to_virt(page); + } + + *da = dma_map_single(dev, va + XDP_PACKET_HEADROOM, rxq->datasize, + DMA_FROM_DEVICE); + + if (dma_mapping_error(dev, *da)) { + put_page(virt_to_head_page(va)); + return NULL; + } + + return va; +} + +/* Allocate frag for rx buffer, and save the old buf */ +static void mana_refill_rxoob(struct device *dev, struct mana_rxq *rxq, + struct mana_recv_buf_oob *rxoob, void **old_buf) +{ + dma_addr_t da; + void *va; + + va = mana_get_rxfrag(rxq, dev, &da, true); + + if (!va) + return; + + dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize, + DMA_FROM_DEVICE); + *old_buf = rxoob->buf_va; + + rxoob->buf_va = va; + rxoob->sgl[0].address = da; +} + static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, struct gdma_comp *cqe) { @@ -1299,10 +1349,8 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, struct mana_recv_buf_oob *rxbuf_oob; struct mana_port_context *apc; struct device *dev = gc->dev; - void *new_buf, *old_buf; - struct page *new_page; + void *old_buf = NULL; u32 curr, pktlen; - dma_addr_t da; apc = netdev_priv(ndev); @@ -1345,40 +1393,11 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, 
rxbuf_oob = &rxq->rx_oobs[curr]; WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); - /* Reuse XDP dropped page if available */ - if (rxq->xdp_save_page) { - new_page = rxq->xdp_save_page; - rxq->xdp_save_page = NULL; - } else { - new_page = alloc_page(GFP_ATOMIC); - } - - if (new_page) { - da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize, - DMA_FROM_DEVICE); - - if (dma_mapping_error(dev, da)) { - __free_page(new_page); - new_page = NULL; - } - } - - new_buf = new_page ? page_to_virt(new_page) : NULL; - - if (new_buf) { - dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize, - DMA_FROM_DEVICE); - - old_buf = rxbuf_oob->buf_va; - - /* refresh the rxbuf_oob with the new page */ - rxbuf_oob->buf_va = new_buf; - rxbuf_oob->buf_dma_addr = da; - rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr; - } else { - old_buf = NULL; /* drop the packet if no memory */ - } + mana_refill_rxoob(dev, rxq, rxbuf_oob, &old_buf); + /* Unsuccessful refill will have old_buf == NULL. + * In this case, mana_rx_skb() will drop the packet. + */ mana_rx_skb(old_buf, oob, rxq); drop: @@ -1659,8 +1678,8 @@ static void mana_destroy_rxq(struct mana_port_context *apc, mana_deinit_cq(apc, &rxq->rx_cq); - if (rxq->xdp_save_page) - __free_page(rxq->xdp_save_page); + if (rxq->xdp_save_va) + put_page(virt_to_head_page(rxq->xdp_save_va)); for (i = 0; i < rxq->num_rx_buf; i++) { rx_oob = &rxq->rx_oobs[i]; @@ -1668,10 +1687,10 @@ static void mana_destroy_rxq(struct mana_port_context *apc, if (!rx_oob->buf_va) continue; - dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize, - DMA_FROM_DEVICE); + dma_unmap_single(dev, rx_oob->sgl[0].address, + rx_oob->sgl[0].size, DMA_FROM_DEVICE); - free_page((unsigned long)rx_oob->buf_va); + put_page(virt_to_head_page(rx_oob->buf_va)); rx_oob->buf_va = NULL; } @@ -1681,6 +1700,26 @@ static void mana_destroy_rxq(struct mana_port_context *apc, kfree(rxq); } +static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key, + struct mana_rxq *rxq, struct device *dev) +{ + dma_addr_t da; + void *va; + + va = mana_get_rxfrag(rxq, dev, &da, false); + + if (!va) + return -ENOMEM; + + rx_oob->buf_va = va; + + rx_oob->sgl[0].address = da; + rx_oob->sgl[0].size = rxq->datasize; + rx_oob->sgl[0].mem_key = mem_key; + + return 0; +} + #define MANA_WQE_HEADER_SIZE 16 #define MANA_WQE_SGE_SIZE 16 @@ -1690,9 +1729,8 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc, struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; struct mana_recv_buf_oob *rx_oob; struct device *dev = gc->dev; - struct page *page; - dma_addr_t da; u32 buf_idx; + int ret; WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE); @@ -1703,25 +1741,12 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc, rx_oob = &rxq->rx_oobs[buf_idx]; memset(rx_oob, 0, sizeof(*rx_oob)); - page = alloc_page(GFP_KERNEL); - if (!page) - return -ENOMEM; - - da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize, - DMA_FROM_DEVICE); - - if (dma_mapping_error(dev, da)) { - __free_page(page); - return -ENOMEM; - } - - rx_oob->buf_va = page_to_virt(page); - rx_oob->buf_dma_addr = da; - rx_oob->num_sge = 1; - rx_oob->sgl[0].address = rx_oob->buf_dma_addr; - rx_oob->sgl[0].size = rxq->datasize; - rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey; + + ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq, + dev); + if (ret) + return ret; rx_oob->wqe_req.sgl = rx_oob->sgl; rx_oob->wqe_req.num_sge = rx_oob->num_sge; @@ -1780,9 +1805,10 @@ static struct mana_rxq 
*mana_create_rxq(struct mana_port_context *apc, rxq->ndev = ndev; rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE; rxq->rxq_idx = rxq_idx; - rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64); rxq->rxobj = INVALID_MANA_HANDLE; + rxq->datasize = ALIGN(ETH_FRAME_LEN, 64); + err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); if (err) goto out; diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index bb11a6535d80..037bcabf6b98 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -36,9 +36,6 @@ enum TRI_STATE { #define COMP_ENTRY_SIZE 64 -#define ADAPTER_MTU_SIZE 1500 -#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14) - #define RX_BUFFERS_PER_QUEUE 512 #define MAX_SEND_BUFFERS_PER_QUEUE 256 @@ -282,7 +279,6 @@ struct mana_recv_buf_oob { struct gdma_wqe_request wqe_req; void *buf_va; - dma_addr_t buf_dma_addr; /* SGL of the buffer going to be sent has part of the work request. */ u32 num_sge; @@ -322,7 +318,7 @@ struct mana_rxq { struct bpf_prog __rcu *bpf_prog; struct xdp_rxq_info xdp_rxq; - struct page *xdp_save_page; + void *xdp_save_va; /* for reusing */ bool xdp_flush; int xdp_rc; /* XDP redirect return code */ -- cgit v1.2.3 From 2fbbd712baf1c60996554326728bbdbef5616e12 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Wed, 12 Apr 2023 14:16:02 -0700 Subject: net: mana: Enable RX path to handle various MTU sizes Update RX data path to allocate and use RX queue DMA buffers with proper size based on potentially various MTU sizes. Signed-off-by: Haiyang Zhang Reviewed-by: Jesse Brandeburg Signed-off-by: David S. Miller --- drivers/net/ethernet/microsoft/mana/mana_en.c | 38 ++++++++++++++++++++------- include/net/mana/mana.h | 7 +++++ 2 files changed, 35 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet/microsoft/mana/mana_en.c') diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 911954ff84ee..8e7fa6e9c3b5 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1185,10 +1185,10 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq) WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); } -static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len, - struct xdp_buff *xdp) +static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va, + uint pkt_len, struct xdp_buff *xdp) { - struct sk_buff *skb = napi_build_skb(buf_va, PAGE_SIZE); + struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size); if (!skb) return NULL; @@ -1196,11 +1196,12 @@ static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len, if (xdp->data_hard_start) { skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); - } else { - skb_reserve(skb, XDP_PACKET_HEADROOM); - skb_put(skb, pkt_len); + return skb; } + skb_reserve(skb, rxq->headroom); + skb_put(skb, pkt_len); + return skb; } @@ -1233,7 +1234,7 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, if (act != XDP_PASS && act != XDP_TX) goto drop_xdp; - skb = mana_build_skb(buf_va, pkt_len, &xdp); + skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp); if (!skb) goto drop; @@ -1301,6 +1302,14 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, if (rxq->xdp_save_va) { va = rxq->xdp_save_va; rxq->xdp_save_va = NULL; + } else if (rxq->alloc_size > PAGE_SIZE) { + if (is_napi) + va = napi_alloc_frag(rxq->alloc_size); + else + va = netdev_alloc_frag(rxq->alloc_size); + + if (!va) + return NULL; } else { page = dev_alloc_page(); if (!page) 
@@ -1309,7 +1318,7 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, va = page_to_virt(page); } - *da = dma_map_single(dev, va + XDP_PACKET_HEADROOM, rxq->datasize, + *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, DMA_FROM_DEVICE); if (dma_mapping_error(dev, *da)) { @@ -1732,7 +1741,7 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc, u32 buf_idx; int ret; - WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE); + WARN_ON(rxq->datasize == 0); *rxq_size = 0; *cq_size = 0; @@ -1788,6 +1797,7 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, struct gdma_dev *gd = apc->ac->gdma_dev; struct mana_obj_spec wq_spec; struct mana_obj_spec cq_spec; + unsigned int mtu = ndev->mtu; struct gdma_queue_spec spec; struct mana_cq *cq = NULL; struct gdma_context *gc; @@ -1807,7 +1817,15 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, rxq->rxq_idx = rxq_idx; rxq->rxobj = INVALID_MANA_HANDLE; - rxq->datasize = ALIGN(ETH_FRAME_LEN, 64); + rxq->datasize = ALIGN(mtu + ETH_HLEN, 64); + + if (mtu > MANA_XDP_MTU_MAX) { + rxq->alloc_size = mtu + MANA_RXBUF_PAD; + rxq->headroom = 0; + } else { + rxq->alloc_size = mtu + MANA_RXBUF_PAD + XDP_PACKET_HEADROOM; + rxq->headroom = XDP_PACKET_HEADROOM; + } err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); if (err) goto out; diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 037bcabf6b98..fee99d704281 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -291,6 +291,11 @@ struct mana_recv_buf_oob { struct gdma_posted_wqe_info wqe_inf; }; +#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \ + + ETH_HLEN) + +#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM) + struct mana_rxq { struct gdma_queue *gdma_rq; /* Cache the gdma receive queue id */ @@ -300,6 +305,8 @@ struct mana_rxq { u32 rxq_idx; u32 datasize; + u32 alloc_size; + u32 headroom; mana_handle_t rxobj; -- cgit v1.2.3 From 80f6215b450eb8e92d8b1f117abf5ecf867f963e Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Wed, 12 Apr 2023 14:16:03 -0700 Subject: net: mana: Add support for jumbo frame During probe, get the hardware-allowed max MTU by querying the device configuration. Users can select an MTU up to the device limit. When XDP is in use, limit MTU settings so the buffer size is within one page. And, when the MTU is set to a value that is too large, XDP is not allowed to run. Also, to prevent an MTU change from failing and leaving the NIC in a bad state, pre-allocate all buffers before starting the change. So, in a low-memory condition, the change returns an error without affecting the NIC. Signed-off-by: Haiyang Zhang Reviewed-by: Jesse Brandeburg Signed-off-by: David S. 
Miller --- drivers/net/ethernet/microsoft/mana/mana_bpf.c | 22 +-- drivers/net/ethernet/microsoft/mana/mana_en.c | 217 +++++++++++++++++++++++-- include/net/mana/gdma.h | 4 + include/net/mana/mana.h | 14 ++ 4 files changed, 233 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet/microsoft/mana/mana_en.c') diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c index 3caea631229c..23b1521c0df9 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c +++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c @@ -133,12 +133,6 @@ out: return act; } -static unsigned int mana_xdp_fraglen(unsigned int len) -{ - return SKB_DATA_ALIGN(len) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); -} - struct bpf_prog *mana_xdp_get(struct mana_port_context *apc) { ASSERT_RTNL(); @@ -179,17 +173,18 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog, { struct mana_port_context *apc = netdev_priv(ndev); struct bpf_prog *old_prog; - int buf_max; + struct gdma_context *gc; + + gc = apc->ac->gdma_dev->gdma_context; old_prog = mana_xdp_get(apc); if (!old_prog && !prog) return 0; - buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN); - if (prog && buf_max > PAGE_SIZE) { - netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n", - ndev->mtu, buf_max); + if (prog && ndev->mtu > MANA_XDP_MTU_MAX) { + netdev_err(ndev, "XDP: mtu:%u too large, mtu_max:%lu\n", + ndev->mtu, MANA_XDP_MTU_MAX); NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large"); return -EOPNOTSUPP; @@ -206,6 +201,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog, if (apc->port_is_up) mana_chn_setxdp(apc, prog); + if (prog) + ndev->max_mtu = MANA_XDP_MTU_MAX; + else + ndev->max_mtu = gc->adapter_mtu - ETH_HLEN; + return 0; } diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 8e7fa6e9c3b5..cabecbfa1102 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -427,6 +427,192 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb, return txq; } +/* Release pre-allocated RX buffers */ +static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc) +{ + struct device *dev; + int i; + + dev = mpc->ac->gdma_dev->gdma_context->dev; + + if (!mpc->rxbufs_pre) + goto out1; + + if (!mpc->das_pre) + goto out2; + + while (mpc->rxbpre_total) { + i = --mpc->rxbpre_total; + dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize, + DMA_FROM_DEVICE); + put_page(virt_to_head_page(mpc->rxbufs_pre[i])); + } + + kfree(mpc->das_pre); + mpc->das_pre = NULL; + +out2: + kfree(mpc->rxbufs_pre); + mpc->rxbufs_pre = NULL; + +out1: + mpc->rxbpre_datasize = 0; + mpc->rxbpre_alloc_size = 0; + mpc->rxbpre_headroom = 0; +} + +/* Get a buffer from the pre-allocated RX buffers */ +static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da) +{ + struct net_device *ndev = rxq->ndev; + struct mana_port_context *mpc; + void *va; + + mpc = netdev_priv(ndev); + + if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) { + netdev_err(ndev, "No RX pre-allocated bufs\n"); + return NULL; + } + + /* Check sizes to catch unexpected coding error */ + if (mpc->rxbpre_datasize != rxq->datasize) { + netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n", + mpc->rxbpre_datasize, rxq->datasize); + return NULL; + } + + if (mpc->rxbpre_alloc_size != rxq->alloc_size) { + netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: 
%u\n", + mpc->rxbpre_alloc_size, rxq->alloc_size); + return NULL; + } + + if (mpc->rxbpre_headroom != rxq->headroom) { + netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n", + mpc->rxbpre_headroom, rxq->headroom); + return NULL; + } + + mpc->rxbpre_total--; + + *da = mpc->das_pre[mpc->rxbpre_total]; + va = mpc->rxbufs_pre[mpc->rxbpre_total]; + mpc->rxbufs_pre[mpc->rxbpre_total] = NULL; + + /* Deallocate the array after all buffers are gone */ + if (!mpc->rxbpre_total) + mana_pre_dealloc_rxbufs(mpc); + + return va; +} + +/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */ +static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size, + u32 *headroom) +{ + if (mtu > MANA_XDP_MTU_MAX) + *headroom = 0; /* no support for XDP */ + else + *headroom = XDP_PACKET_HEADROOM; + + *alloc_size = mtu + MANA_RXBUF_PAD + *headroom; + + *datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN); +} + +static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu) +{ + struct device *dev; + struct page *page; + dma_addr_t da; + int num_rxb; + void *va; + int i; + + mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize, + &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom); + + dev = mpc->ac->gdma_dev->gdma_context->dev; + + num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE; + + WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n"); + mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL); + if (!mpc->rxbufs_pre) + goto error; + + mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL); + if (!mpc->das_pre) + goto error; + + mpc->rxbpre_total = 0; + + for (i = 0; i < num_rxb; i++) { + if (mpc->rxbpre_alloc_size > PAGE_SIZE) { + va = netdev_alloc_frag(mpc->rxbpre_alloc_size); + if (!va) + goto error; + } else { + page = dev_alloc_page(); + if (!page) + goto error; + + va = page_to_virt(page); + } + + da = dma_map_single(dev, va + mpc->rxbpre_headroom, + mpc->rxbpre_datasize, DMA_FROM_DEVICE); + + if (dma_mapping_error(dev, da)) { + put_page(virt_to_head_page(va)); + goto error; + } + + mpc->rxbufs_pre[i] = va; + mpc->das_pre[i] = da; + mpc->rxbpre_total = i + 1; + } + + return 0; + +error: + mana_pre_dealloc_rxbufs(mpc); + return -ENOMEM; +} + +static int mana_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct mana_port_context *mpc = netdev_priv(ndev); + unsigned int old_mtu = ndev->mtu; + int err; + + /* Pre-allocate buffers to prevent failure in mana_attach later */ + err = mana_pre_alloc_rxbufs(mpc, new_mtu); + if (err) { + netdev_err(ndev, "Insufficient memory for new MTU\n"); + return err; + } + + err = mana_detach(ndev, false); + if (err) { + netdev_err(ndev, "mana_detach failed: %d\n", err); + goto out; + } + + ndev->mtu = new_mtu; + + err = mana_attach(ndev); + if (err) { + netdev_err(ndev, "mana_attach failed: %d\n", err); + ndev->mtu = old_mtu; + } + +out: + mana_pre_dealloc_rxbufs(mpc); + return err; +} + static const struct net_device_ops mana_devops = { .ndo_open = mana_open, .ndo_stop = mana_close, @@ -436,6 +622,7 @@ static const struct net_device_ops mana_devops = { .ndo_get_stats64 = mana_get_stats64, .ndo_bpf = mana_bpf, .ndo_xdp_xmit = mana_xdp_xmit, + .ndo_change_mtu = mana_change_mtu, }; static void mana_cleanup_port_context(struct mana_port_context *apc) @@ -625,6 +812,9 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG, sizeof(req), sizeof(resp)); + + req.hdr.resp.msg_version = GDMA_MESSAGE_V2; + req.proto_major_ver = proto_major_ver; 
req.proto_minor_ver = proto_minor_ver; req.proto_micro_ver = proto_micro_ver; @@ -647,6 +837,11 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, *max_num_vports = resp.max_num_vports; + if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2) + gc->adapter_mtu = resp.adapter_mtu; + else + gc->adapter_mtu = ETH_FRAME_LEN; + return 0; } @@ -1712,10 +1907,14 @@ static void mana_destroy_rxq(struct mana_port_context *apc, static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key, struct mana_rxq *rxq, struct device *dev) { + struct mana_port_context *mpc = netdev_priv(rxq->ndev); dma_addr_t da; void *va; - va = mana_get_rxfrag(rxq, dev, &da, false); + if (mpc->rxbufs_pre) + va = mana_get_rxbuf_pre(rxq, &da); + else + va = mana_get_rxfrag(rxq, dev, &da, false); if (!va) return -ENOMEM; @@ -1797,7 +1996,6 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, struct gdma_dev *gd = apc->ac->gdma_dev; struct mana_obj_spec wq_spec; struct mana_obj_spec cq_spec; - unsigned int mtu = ndev->mtu; struct gdma_queue_spec spec; struct mana_cq *cq = NULL; struct gdma_context *gc; @@ -1817,15 +2015,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, rxq->rxq_idx = rxq_idx; rxq->rxobj = INVALID_MANA_HANDLE; - rxq->datasize = ALIGN(mtu + ETH_HLEN, 64); - - if (mtu > MANA_XDP_MTU_MAX) { - rxq->alloc_size = mtu + MANA_RXBUF_PAD; - rxq->headroom = 0; - } else { - rxq->alloc_size = mtu + MANA_RXBUF_PAD + XDP_PACKET_HEADROOM; - rxq->headroom = XDP_PACKET_HEADROOM; - } + mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size, + &rxq->headroom); err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); if (err) @@ -2238,8 +2429,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, ndev->netdev_ops = &mana_devops; ndev->ethtool_ops = &mana_ethtool_ops; ndev->mtu = ETH_DATA_LEN; - ndev->max_mtu = ndev->mtu; - ndev->min_mtu = ndev->mtu; + ndev->max_mtu = gc->adapter_mtu - ETH_HLEN; + ndev->min_mtu = ETH_MIN_MTU; ndev->needed_headroom = MANA_HEADROOM; ndev->dev_port = port_idx; SET_NETDEV_DEV(ndev, gc->dev); diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index 56189e4252da..96c120160f15 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -145,6 +145,7 @@ struct gdma_general_req { }; /* HW DATA */ #define GDMA_MESSAGE_V1 1 +#define GDMA_MESSAGE_V2 2 struct gdma_general_resp { struct gdma_resp_hdr hdr; @@ -354,6 +355,9 @@ struct gdma_context { struct gdma_resource msix_resource; struct gdma_irq_context *irq_contexts; + /* L2 MTU */ + u16 adapter_mtu; + /* This maps a CQ index to the queue structure. */ unsigned int max_num_cqs; struct gdma_queue **cq_table; diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index fee99d704281..cd386aa7c7cc 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -37,6 +37,7 @@ enum TRI_STATE { #define COMP_ENTRY_SIZE 64 #define RX_BUFFERS_PER_QUEUE 512 +#define MANA_RX_DATA_ALIGN 64 #define MAX_SEND_BUFFERS_PER_QUEUE 256 @@ -390,6 +391,14 @@ struct mana_port_context { /* This points to an array of num_queues of RQ pointers. */ struct mana_rxq **rxqs; + /* pre-allocated rx buffer array */ + void **rxbufs_pre; + dma_addr_t *das_pre; + int rxbpre_total; + u32 rxbpre_datasize; + u32 rxbpre_alloc_size; + u32 rxbpre_headroom; + struct bpf_prog *bpf_prog; /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. 
*/ @@ -489,6 +498,11 @@ struct mana_query_device_cfg_resp { u16 max_num_vports; u16 reserved; u32 max_num_eqs; + + /* response v2: */ + u16 adapter_mtu; + u16 reserved2; + u32 reserved3; }; /* HW DATA */ /* Query vPort Configuration */ -- cgit v1.2.3 From 5c74064f43c291d9add2b436a2d70205b71a7cc7 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Fri, 21 Apr 2023 10:06:57 -0700 Subject: net: mana: Rename mana_refill_rxoob and remove some empty lines Rename mana_refill_rxoob to mana_refill_rx_oob for naming consistency, and remove some empty lines between function calls and their error checking. Signed-off-by: Haiyang Zhang Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/microsoft/mana/mana_en.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/microsoft/mana/mana_en.c') diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index cabecbfa1102..db2887e25714 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -563,7 +563,6 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu) da = dma_map_single(dev, va + mpc->rxbpre_headroom, mpc->rxbpre_datasize, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, da)) { put_page(virt_to_head_page(va)); goto error; @@ -1515,7 +1514,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, *da)) { put_page(virt_to_head_page(va)); return NULL; @@ -1525,14 +1523,13 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, } /* Allocate frag for rx buffer, and save the old buf */ -static void mana_refill_rxoob(struct device *dev, struct mana_rxq *rxq, - struct mana_recv_buf_oob *rxoob, void **old_buf) +static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, + struct mana_recv_buf_oob *rxoob, void **old_buf) { dma_addr_t da; void *va; va = mana_get_rxfrag(rxq, dev, &da, true); - if (!va) return; @@ -1597,7 +1594,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, rxbuf_oob = &rxq->rx_oobs[curr]; WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); - mana_refill_rxoob(dev, rxq, rxbuf_oob, &old_buf); + mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf); /* Unsuccessful refill will have old_buf == NULL. * In this case, mana_rx_skb() will drop the packet. -- cgit v1.2.3 From df18f2da302f169e1a29098c6ca3b474f1b0269e Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Fri, 21 Apr 2023 10:06:58 -0700 Subject: net: mana: Check if netdev/napi_alloc_frag returns single page netdev/napi_alloc_frag() may fall back to a single page that is smaller than the requested size. Add error checking to avoid memory being overwritten. 
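To make the fix concrete, here is a minimal standalone sketch of the check this patch adds; alloc_frag_checked is a hypothetical name, not a driver function, and the diff below applies the same pattern inline in both the pre-allocation and refill paths:

#include <linux/skbuff.h>
#include <linux/mm.h>

/* Hypothetical helper: allocate a frag of alloc_size bytes and verify
 * that the frag allocator did not silently fall back to a smaller
 * (single) page. Writing alloc_size bytes into such a fallback buffer
 * would corrupt adjacent memory.
 */
static void *alloc_frag_checked(unsigned int alloc_size)
{
	struct page *page;
	void *va;

	va = netdev_alloc_frag(alloc_size);
	if (!va)
		return NULL;

	page = virt_to_head_page(va);
	/* Reject the buffer if its backing page order is too small */
	if (compound_order(page) < get_order(alloc_size)) {
		put_page(page);
		return NULL;
	}

	return va;
}
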
Signed-off-by: Haiyang Zhang Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/microsoft/mana/mana_en.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers/net/ethernet/microsoft/mana/mana_en.c') diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index db2887e25714..06d6292e09b3 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -553,6 +553,14 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu) va = netdev_alloc_frag(mpc->rxbpre_alloc_size); if (!va) goto error; + + page = virt_to_head_page(va); + /* Check if the frag falls back to single page */ + if (compound_order(page) < + get_order(mpc->rxbpre_alloc_size)) { + put_page(page); + goto error; + } } else { page = dev_alloc_page(); if (!page) goto error; @@ -1504,6 +1512,13 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, if (!va) return NULL; + + page = virt_to_head_page(va); + /* Check if the frag falls back to single page */ + if (compound_order(page) < get_order(rxq->alloc_size)) { + put_page(page); + return NULL; + } } else { page = dev_alloc_page(); if (!page) -- cgit v1.2.3 From 1919b39fc6eabb9a6f9a51706ff6d03865f5df29 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Fri, 26 May 2023 08:38:57 -0700 Subject: net: mana: Fix perf regression: remove rx_cqes, tx_cqes counters The apc->eth_stats.rx_cqes counter is one per NIC (vport), and it sits on the frequent, parallel code path of all queues. So, r/w into this single shared variable by many threads on different CPUs creates a lot of caching and memory overhead, hence the perf regression. And, it's not accurate due to the high volume of concurrent r/w. For example, with a workload of iperf with 128 threads and RPS enabled, we saw a perf regression of 25% with the previous patch that added the counters. This patch eliminates the regression. Since the error path of mana_poll_rx_cq() already has warnings, keeping the counter and converting it to a per-queue variable is not necessary. So, just remove this counter from this high-frequency code path. Also, remove the tx_cqes counter for the same reason. We have warnings and other counters for errors on that path, and don't need to count every normal CQE that is processed. 
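For contrast, here is a sketch of the per-queue alternative the message mentions and rejects as unnecessary here. The names are hypothetical (the driver's existing per-queue tx/rx stats already follow this shape): each queue's counter is written only by the CPU driving that queue, and the values are folded together only when user space reads the stats.

#include <linux/cache.h>
#include <linux/u64_stats_sync.h>

/* Sketch only, not driver code: one stats instance per queue, each
 * written by a single CPU, so the hot path never bounces a shared
 * cache line between CPUs.
 */
struct queue_stats {
	u64 cqes;			/* written by one CPU only */
	struct u64_stats_sync syncp;
} ____cacheline_aligned;

/* Reader side: snapshot each queue under its seqcount, then sum. */
static u64 queue_stats_total_cqes(const struct queue_stats *qs, int nqueues)
{
	unsigned int start;
	u64 total = 0;
	u64 val;
	int i;

	for (i = 0; i < nqueues; i++) {
		do {
			start = u64_stats_fetch_begin(&qs[i].syncp);
			val = qs[i].cqes;
		} while (u64_stats_fetch_retry(&qs[i].syncp, start));
		total += val;
	}

	return total;
}
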
Cc: stable@vger.kernel.org Fixes: bd7fc6e1957c ("net: mana: Add new MANA VF performance counters for easier troubleshooting") Signed-off-by: Haiyang Zhang Reviewed-by: Horatiu Vultur Reviewed-by: Jiri Pirko Link: https://lore.kernel.org/r/1685115537-31675-1-git-send-email-haiyangz@microsoft.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/microsoft/mana/mana_en.c | 10 ---------- drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 2 -- include/net/mana/mana.h | 2 -- 3 files changed, 14 deletions(-) (limited to 'drivers/net/ethernet/microsoft/mana/mana_en.c') diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 06d6292e09b3..d907727c7b7a 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1279,8 +1279,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq) if (comp_read < 1) return; - apc->eth_stats.tx_cqes = comp_read; - for (i = 0; i < comp_read; i++) { struct mana_tx_comp_oob *cqe_oob; @@ -1363,8 +1361,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq) WARN_ON_ONCE(1); cq->work_done = pkt_transmitted; - - apc->eth_stats.tx_cqes -= pkt_transmitted; } static void mana_post_pkt_rxq(struct mana_rxq *rxq) @@ -1626,15 +1622,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq) { struct gdma_comp *comp = cq->gdma_comp_buf; struct mana_rxq *rxq = cq->rxq; - struct mana_port_context *apc; int comp_read, i; - apc = netdev_priv(rxq->ndev); - comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER); - apc->eth_stats.rx_cqes = comp_read; rxq->xdp_flush = false; for (i = 0; i < comp_read; i++) { @@ -1646,8 +1638,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq) return; mana_process_rx_cqe(rxq, cq, &comp[i]); - - apc->eth_stats.rx_cqes--; } if (rxq->xdp_flush) diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index a64c81410dc1..0dc78679f620 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -13,11 +13,9 @@ static const struct { } mana_eth_stats[] = { {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)}, {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)}, - {"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)}, {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)}, {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, tx_cqe_unknown_type)}, - {"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)}, {"rx_coalesced_err", offsetof(struct mana_ethtool_stats, rx_coalesced_err)}, {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index cd386aa7c7cc..9eef19972845 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -347,10 +347,8 @@ struct mana_tx_qp { struct mana_ethtool_stats { u64 stop_queue; u64 wake_queue; - u64 tx_cqes; u64 tx_cqe_err; u64 tx_cqe_unknown_type; - u64 rx_cqes; u64 rx_coalesced_err; u64 rx_cqe_unknown_type; }; -- cgit v1.2.3
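As a closing note on the pattern this series settles on: the counters that survive are either rare error counters (tx_cqe_err, rx_cqe_unknown_type, and so on) or per-queue counters updated under u64_stats_sync, as the first patch does for tx_stats->short_pkt_fmt and friends. The writer side of the hypothetical queue_stats sketch above would look like this (again illustrative only, not driver code):

/* Writer side, called only from the queue's own NAPI/xmit context.
 * On 64-bit kernels u64_stats_update_begin()/end() compile away to
 * essentially nothing, so the hot path stays cheap and no cache line
 * is shared between CPUs.
 */
static void queue_stats_inc_cqes(struct queue_stats *qs)
{
	u64_stats_update_begin(&qs->syncp);
	qs->cqes++;
	u64_stats_update_end(&qs->syncp);
}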