author		David S. Miller <davem@davemloft.net>	2025-06-21 16:26:25 +0300
committer	David S. Miller <davem@davemloft.net>	2025-06-21 16:26:25 +0300
commit		46d1816f1bda7eb6362ade75c45874202a4bb468 (patch)
tree		3262cf20adfdd8ea02126b2d70963df5a3065fc4
parent		0289c51f889e435a7626931688023a80090d9c5e (diff)
parent		d8a8ca14c93739250bc770f9aa33eb38e23b1f37 (diff)
download	linux-46d1816f1bda7eb6362ade75c45874202a4bb468.tar.xz
Merge branch 'gve-xdp-tx-redirect' into main
Joshua Washington says:
====================
gve: XDP TX and redirect support for DQ RDA
A previous patch series[1] introduced the ability to process XDP buffers
in the DQ RDA queue format. This follow-up patch series introduces XDP_TX
and XDP_REDIRECT support and exposes XDP support for this queue format to
the kernel.
Link: https://git.kernel.org/netdev/net-next/c/e2ac75a8a967 [1]
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
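For context, a minimal sketch (not taken from these patches) of the kind of XDP program whose XDP_TX and XDP_REDIRECT verdicts become usable on DQ RDA queues with this series; the map, section, and program names are illustrative assumptions:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative only, not part of this series. Returns XDP_TX so the
 * driver's new XDP_TX path bounces each frame back out the paired TX
 * queue; swap in the commented bpf_redirect_map() call to exercise the
 * XDP_REDIRECT path instead.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} redirect_map SEC(".maps");	/* hypothetical map name */

SEC("xdp")
int gve_xdp_demo(struct xdp_md *ctx)	/* hypothetical program name */
{
	/* return bpf_redirect_map(&redirect_map, 0, XDP_DROP); */
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";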
-rw-r--r--	drivers/net/ethernet/google/gve/gve.h		27
-rw-r--r--	drivers/net/ethernet/google/gve/gve_dqo.h	2
-rw-r--r--	drivers/net/ethernet/google/gve/gve_main.c	42
-rw-r--r--	drivers/net/ethernet/google/gve/gve_rx_dqo.c	77
-rw-r--r--	drivers/net/ethernet/google/gve/gve_tx.c	4
-rw-r--r--	drivers/net/ethernet/google/gve/gve_tx_dqo.c	236
6 files changed, 314 insertions, 74 deletions
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 4469442d4940..cf91195d5f39 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -402,8 +402,16 @@ enum gve_packet_state {
 	GVE_PACKET_STATE_TIMED_OUT_COMPL,
 };
 
+enum gve_tx_pending_packet_dqo_type {
+	GVE_TX_PENDING_PACKET_DQO_SKB,
+	GVE_TX_PENDING_PACKET_DQO_XDP_FRAME
+};
+
 struct gve_tx_pending_packet_dqo {
-	struct sk_buff *skb; /* skb for this packet */
+	union {
+		struct sk_buff *skb;
+		struct xdp_frame *xdpf;
+	};
 
 	/* 0th element corresponds to the linear portion of `skb`, should be
 	 * unmapped with `dma_unmap_single`.
@@ -433,7 +441,10 @@ struct gve_tx_pending_packet_dqo {
 	/* Identifies the current state of the packet as defined in
 	 * `enum gve_packet_state`.
 	 */
-	u8 state;
+	u8 state : 2;
+
+	/* gve_tx_pending_packet_dqo_type */
+	u8 type : 1;
 
 	/* If packet is an outstanding miss completion, then the packet is
 	 * freed if the corresponding re-injection completion is not received
@@ -455,6 +466,9 @@ struct gve_tx_ring {
 
 	/* DQO fields. */
 	struct {
+		/* Spinlock for XDP tx traffic */
+		spinlock_t xdp_lock;
+
 		/* Linked list of gve_tx_pending_packet_dqo. Index into
 		 * pending_packets, or -1 if empty.
 		 *
@@ -1155,6 +1169,7 @@ static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
 {
 	switch (priv->queue_format) {
 	case GVE_GQI_QPL_FORMAT:
+	case GVE_DQO_RDA_FORMAT:
 		return true;
 	default:
 		return false;
@@ -1178,11 +1193,15 @@ void gve_free_queue_page_list(struct gve_priv *priv, u32 id);
 
 /* tx handling */
 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
-		 u32 flags);
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+		     u32 flags);
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+		     u32 flags);
 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
 		     void *data, int len, void *frame_p);
 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+			 struct xdp_frame *xdpf);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
 int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index e83773fb891f..bb278727f4d9 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -37,6 +37,7 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
 					 struct net_device *dev,
 					 netdev_features_t features);
 bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
+bool gve_xdp_poll_dqo(struct gve_notify_block *block);
 int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
 			   struct gve_tx_alloc_rings_cfg *cfg);
@@ -60,6 +61,7 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
 			  struct napi_struct *napi);
 void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
 void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid);
 
 static inline void
 gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 28e4795f5f40..27f97a1d2957 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -414,8 +414,12 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
 	bool reschedule = false;
 	int work_done = 0;
 
-	if (block->tx)
-		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+	if (block->tx) {
+		if (block->tx->q_num < priv->tx_cfg.num_queues)
+			reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+		else
+			reschedule |= gve_xdp_poll_dqo(block);
+	}
 
 	if (!budget)
 		return 0;
@@ -1516,6 +1520,19 @@ out:
 	return err;
 }
 
+static int gve_xdp_xmit(struct net_device *dev, int n,
+			struct xdp_frame **frames, u32 flags)
+{
+	struct gve_priv *priv = netdev_priv(dev);
+
+	if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+		return gve_xdp_xmit_gqi(dev, n, frames, flags);
+	else if (priv->queue_format == GVE_DQO_RDA_FORMAT)
+		return gve_xdp_xmit_dqo(dev, n, frames, flags);
+
+	return -EOPNOTSUPP;
+}
+
 static int gve_xsk_pool_enable(struct net_device *dev,
 			       struct xsk_buff_pool *pool,
 			       u16 qid)
@@ -1651,9 +1668,8 @@ static int verify_xdp_configuration(struct net_device *dev)
 		return -EOPNOTSUPP;
 	}
 
-	if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
-		netdev_warn(dev, "XDP is not supported in mode %d.\n",
-			    priv->queue_format);
+	if (priv->header_split_enabled) {
+		netdev_warn(dev, "XDP is not supported when header-data split is enabled.\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -1977,10 +1993,13 @@ u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
 	return GVE_DEFAULT_RX_BUFFER_SIZE;
 }
 
-/* header-split is not supported on non-DQO_RDA yet even if device advertises it */
+/* Header split is only supported on DQ RDA queue format. If XDP is enabled,
+ * header split is not allowed.
+ */
 bool gve_header_split_supported(const struct gve_priv *priv)
 {
-	return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+	return priv->header_buf_size &&
+		priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
 }
 
 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
@@ -2029,6 +2048,12 @@ static int gve_set_features(struct net_device *netdev,
 
 	if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
 		netdev->features ^= NETIF_F_LRO;
+		if (priv->xdp_prog && (netdev->features & NETIF_F_LRO)) {
+			netdev_warn(netdev,
+				    "XDP is not supported when LRO is on.\n");
+			err = -EOPNOTSUPP;
+			goto revert_features;
+		}
 		if (netif_running(netdev)) {
 			err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
 			if (err)
@@ -2230,6 +2255,9 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
 		xdp_features = NETDEV_XDP_ACT_BASIC;
 		xdp_features |= NETDEV_XDP_ACT_REDIRECT;
 		xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+	} else if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+		xdp_features = NETDEV_XDP_ACT_BASIC;
+		xdp_features |= NETDEV_XDP_ACT_REDIRECT;
 	} else {
 		xdp_features = 0;
 	}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 0be41a0cdd15..96743e1d80f3 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -8,6 +8,7 @@
 #include "gve_dqo.h"
 #include "gve_adminq.h"
 #include "gve_utils.h"
+#include <linux/bpf.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/skbuff.h>
@@ -570,27 +571,66 @@ static int gve_rx_append_frags(struct napi_struct *napi,
 	return 0;
 }
 
+static int gve_xdp_tx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+			  struct xdp_buff *xdp)
+{
+	struct gve_tx_ring *tx;
+	struct xdp_frame *xdpf;
+	u32 tx_qid;
+	int err;
+
+	xdpf = xdp_convert_buff_to_frame(xdp);
+	if (unlikely(!xdpf))
+		return -ENOSPC;
+
+	tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
+	tx = &priv->tx[tx_qid];
+	spin_lock(&tx->dqo_tx.xdp_lock);
+	err = gve_xdp_xmit_one_dqo(priv, tx, xdpf);
+	spin_unlock(&tx->dqo_tx.xdp_lock);
+
+	return err;
+}
+
 static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
 			     struct xdp_buff *xdp, struct bpf_prog *xprog,
 			     int xdp_act,
 			     struct gve_rx_buf_state_dqo *buf_state)
 {
-	u64_stats_update_begin(&rx->statss);
+	int err;
 	switch (xdp_act) {
 	case XDP_ABORTED:
 	case XDP_DROP:
 	default:
-		rx->xdp_actions[xdp_act]++;
+		gve_free_buffer(rx, buf_state);
 		break;
 	case XDP_TX:
-		rx->xdp_tx_errors++;
+		err = gve_xdp_tx_dqo(priv, rx, xdp);
+		if (unlikely(err))
+			goto err;
+		gve_reuse_buffer(rx, buf_state);
 		break;
 	case XDP_REDIRECT:
-		rx->xdp_redirect_errors++;
+		err = xdp_do_redirect(priv->dev, xdp, xprog);
+		if (unlikely(err))
+			goto err;
+		gve_reuse_buffer(rx, buf_state);
 		break;
 	}
+
+	u64_stats_update_begin(&rx->statss);
+	if ((u32)xdp_act < GVE_XDP_ACTIONS)
+		rx->xdp_actions[xdp_act]++;
+	u64_stats_update_end(&rx->statss);
+	return;
+err:
+	u64_stats_update_begin(&rx->statss);
+	if (xdp_act == XDP_TX)
+		rx->xdp_tx_errors++;
+	else if (xdp_act == XDP_REDIRECT)
+		rx->xdp_redirect_errors++;
 	u64_stats_update_end(&rx->statss);
 	gve_free_buffer(rx, buf_state);
+	return;
 }
 
 /* Returns 0 if descriptor is completed successfully.
@@ -812,16 +852,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
 
 int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
 {
-	struct napi_struct *napi = &block->napi;
-	netdev_features_t feat = napi->dev->features;
-
-	struct gve_rx_ring *rx = block->rx;
-	struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
-
+	struct gve_rx_compl_queue_dqo *complq;
+	struct napi_struct *napi;
+	netdev_features_t feat;
+	struct gve_rx_ring *rx;
+	struct gve_priv *priv;
+	u64 xdp_redirects;
 	u32 work_done = 0;
 	u64 bytes = 0;
+	u64 xdp_txs;
 	int err;
 
+	napi = &block->napi;
+	feat = napi->dev->features;
+
+	rx = block->rx;
+	priv = rx->gve;
+	complq = &rx->dqo.complq;
+
+	xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
+	xdp_txs = rx->xdp_actions[XDP_TX];
+
 	while (work_done < budget) {
 		struct gve_rx_compl_desc_dqo *compl_desc =
 			&complq->desc_ring[complq->head];
@@ -895,6 +946,12 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
 		rx->ctx.skb_tail = NULL;
 	}
 
+	if (xdp_txs != rx->xdp_actions[XDP_TX])
+		gve_xdp_tx_flush_dqo(priv, rx->q_num);
+
+	if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
+		xdp_do_flush();
+
 	gve_rx_post_buffers_dqo(rx);
 
 	u64_stats_update_begin(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 1b40bf0c811a..c6ff0968929d 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -823,8 +823,8 @@ static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
 	return ndescs;
 }
 
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
-		 u32 flags)
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+		     u32 flags)
 {
 	struct gve_priv *priv = netdev_priv(dev);
 	struct gve_tx_ring *tx;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 9d705d94b065..ce5370b741ec 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -9,6 +9,7 @@
 #include "gve_utils.h"
 #include "gve_dqo.h"
 #include <net/ip.h>
+#include <linux/bpf.h>
 #include <linux/tcp.h>
 #include <linux/slab.h>
 #include <linux/skbuff.h>
@@ -110,6 +111,14 @@ static bool gve_has_pending_packet(struct gve_tx_ring *tx)
 	return false;
 }
 
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid)
+{
+	u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
+	struct gve_tx_ring *tx = &priv->tx[tx_qid];
+
+	gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+}
+
 static struct gve_tx_pending_packet_dqo *
 gve_alloc_pending_packet(struct gve_tx_ring *tx)
 {
@@ -198,7 +207,8 @@ void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
 
 	gve_remove_napi(priv, ntfy_idx);
 	gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
-	netdev_tx_reset_queue(tx->netdev_txq);
+	if (tx->netdev_txq)
+		netdev_tx_reset_queue(tx->netdev_txq);
 	gve_tx_clean_pending_packets(tx);
 	gve_tx_remove_from_block(priv, idx);
 }
@@ -276,7 +286,8 @@ void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
 
 	gve_tx_add_to_block(priv, idx);
 
-	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+	if (idx < priv->tx_cfg.num_queues)
+		tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
 	gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
 }
 
@@ -295,6 +306,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
 	memset(tx, 0, sizeof(*tx));
 	tx->q_num = idx;
 	tx->dev = hdev;
+	spin_lock_init(&tx->dqo_tx.xdp_lock);
 	atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
 
 	/* Queue sizes must be a power of 2 */
@@ -439,12 +451,28 @@ static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
 	return tx->mask - num_used;
 }
 
+/* Checks if the requested number of slots are available in the ring */
+static bool gve_has_tx_slots_available(struct gve_tx_ring *tx, u32 slots_req)
+{
+	u32 num_avail = num_avail_tx_slots(tx);
+
+	slots_req += GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP;
+
+	if (num_avail >= slots_req)
+		return true;
+
+	/* Update cached TX head pointer */
+	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+	return num_avail_tx_slots(tx) >= slots_req;
+}
+
 static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
 				       int desc_count, int buf_count)
 {
 	return gve_has_pending_packet(tx) &&
-	       num_avail_tx_slots(tx) >= desc_count &&
-	       gve_has_free_tx_qpl_bufs(tx, buf_count);
+	       gve_has_tx_slots_available(tx, desc_count) &&
+	       gve_has_free_tx_qpl_bufs(tx, buf_count);
 }
 
 /* Stops the queue if available descriptors is less than 'count'.
@@ -456,12 +484,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
 	if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
 		return 0;
 
-	/* Update cached TX head pointer */
-	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
-	if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
-		return 0;
-
 	/* No space, so stop the queue */
 	tx->stop_queue++;
 	netif_tx_stop_queue(tx->netdev_txq);
@@ -472,8 +494,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
 	/* After stopping queue, check if we can transmit again in order to
 	 * avoid TOCTOU bug.
 	 */
-	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
 	if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
 		return -EBUSY;
 
@@ -500,11 +520,9 @@ static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
 }
 
 static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
-				     struct sk_buff *skb, u32 len, u64 addr,
+				     bool enable_csum, u32 len, u64 addr,
 				     s16 compl_tag, bool eop, bool is_gso)
 {
-	const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;
-
 	while (len > 0) {
 		struct gve_tx_pkt_desc_dqo *desc =
 			&tx->dqo.tx_ring[*desc_idx].pkt;
@@ -515,7 +533,7 @@ static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
 			.buf_addr = cpu_to_le64(addr),
 			.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
 			.end_of_packet = cur_eop,
-			.checksum_offload_enable = checksum_offload_en,
+			.checksum_offload_enable = enable_csum,
 			.compl_tag = cpu_to_le16(compl_tag),
 			.buf_size = cur_len,
 		};
@@ -612,6 +630,25 @@ gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
 	};
 }
 
+static void gve_tx_update_tail(struct gve_tx_ring *tx, u32 desc_idx)
+{
+	u32 last_desc_idx = (desc_idx - 1) & tx->mask;
+	u32 last_report_event_interval =
+		(last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
+
+	/* Commit the changes to our state */
+	tx->dqo_tx.tail = desc_idx;
+
+	/* Request a descriptor completion on the last descriptor of the
+	 * packet if we are allowed to by the HW enforced interval.
+	 */
+
+	if (unlikely(last_report_event_interval >= GVE_TX_MIN_RE_INTERVAL)) {
+		tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
+		tx->dqo_tx.last_re_idx = last_desc_idx;
+	}
+}
+
 static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 				      struct sk_buff *skb,
 				      struct gve_tx_pending_packet_dqo *pkt,
@@ -619,6 +656,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 				      u32 *desc_idx, bool is_gso)
 {
+	bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int i;
 
@@ -644,7 +682,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
 		++pkt->num_bufs;
 
-		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
 					 completion_tag,
 					 /*eop=*/shinfo->nr_frags == 0, is_gso);
 	}
@@ -664,7 +702,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 				   dma[pkt->num_bufs], addr);
 		++pkt->num_bufs;
 
-		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
 					 completion_tag, is_eop, is_gso);
 	}
 
@@ -709,6 +747,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
 				   u32 *desc_idx, bool is_gso)
 {
+	bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
 	u32 copy_offset = 0;
 	dma_addr_t dma_addr;
 	u32 copy_len;
@@ -730,7 +769,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
 		copy_offset += copy_len;
 		dma_sync_single_for_device(tx->dev, dma_addr,
 					   copy_len, DMA_TO_DEVICE);
-		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb,
+		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum,
 					 copy_len, dma_addr,
 					 completion_tag,
@@ -768,6 +807,7 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
 		return -ENOMEM;
 
 	pkt->skb = skb;
+	pkt->type = GVE_TX_PENDING_PACKET_DQO_SKB;
 	completion_tag = pkt - tx->dqo.pending_packets;
 
 	gve_extract_tx_metadata_dqo(skb, &metadata);
@@ -800,24 +840,7 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
 
 	tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
 
-	/* Commit the changes to our state */
-	tx->dqo_tx.tail = desc_idx;
-
-	/* Request a descriptor completion on the last descriptor of the
-	 * packet if we are allowed to by the HW enforced interval.
-	 */
-	{
-		u32 last_desc_idx = (desc_idx - 1) & tx->mask;
-		u32 last_report_event_interval =
-			(last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
-
-		if (unlikely(last_report_event_interval >=
-			     GVE_TX_MIN_RE_INTERVAL)) {
-			tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
-			tx->dqo_tx.last_re_idx = last_desc_idx;
-		}
-	}
-
+	gve_tx_update_tail(tx, desc_idx);
 	return 0;
 
 err:
@@ -951,9 +974,8 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
 
 	/* Metadata + (optional TSO) + data descriptors. */
 	total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
-	if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
-					   GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP,
-					   num_buffer_descs))) {
+	if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs,
+					   num_buffer_descs))) {
 		return -1;
 	}
 
@@ -1107,16 +1129,32 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
 		}
 	}
 	tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs;
-	if (tx->dqo.qpl)
-		gve_free_tx_qpl_bufs(tx, pending_packet);
-	else
+
+	switch (pending_packet->type) {
+	case GVE_TX_PENDING_PACKET_DQO_SKB:
+		if (tx->dqo.qpl)
+			gve_free_tx_qpl_bufs(tx, pending_packet);
+		else
+			gve_unmap_packet(tx->dev, pending_packet);
+		(*pkts)++;
+		*bytes += pending_packet->skb->len;
+
+		napi_consume_skb(pending_packet->skb, is_napi);
+		pending_packet->skb = NULL;
+		gve_free_pending_packet(tx, pending_packet);
+		break;
+	case GVE_TX_PENDING_PACKET_DQO_XDP_FRAME:
 		gve_unmap_packet(tx->dev, pending_packet);
+		(*pkts)++;
+		*bytes += pending_packet->xdpf->len;
 
-	*bytes += pending_packet->skb->len;
-	(*pkts)++;
-	napi_consume_skb(pending_packet->skb, is_napi);
-	pending_packet->skb = NULL;
-	gve_free_pending_packet(tx, pending_packet);
+		xdp_return_frame(pending_packet->xdpf);
+		pending_packet->xdpf = NULL;
+		gve_free_pending_packet(tx, pending_packet);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
 }
 
 static void gve_handle_miss_completion(struct gve_priv *priv,
@@ -1287,9 +1325,10 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
 		num_descs_cleaned++;
 	}
 
-	netdev_tx_completed_queue(tx->netdev_txq,
-				  pkt_compl_pkts + miss_compl_pkts,
-				  pkt_compl_bytes + miss_compl_bytes);
+	if (tx->netdev_txq)
+		netdev_tx_completed_queue(tx->netdev_txq,
+					  pkt_compl_pkts + miss_compl_pkts,
+					  pkt_compl_bytes + miss_compl_bytes);
 
 	remove_miss_completions(priv, tx);
 	remove_timed_out_completions(priv, tx);
@@ -1325,3 +1364,98 @@ bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
 	compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
 	return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
 }
+
+bool gve_xdp_poll_dqo(struct gve_notify_block *block)
+{
+	struct gve_tx_compl_desc *compl_desc;
+	struct gve_tx_ring *tx = block->tx;
+	struct gve_priv *priv = block->priv;
+
+	gve_clean_tx_done_dqo(priv, tx, &block->napi);
+
+	/* Return true if we still have work. */
+	compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
+	return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
+}
+
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+			 struct xdp_frame *xdpf)
+{
+	struct gve_tx_pending_packet_dqo *pkt;
+	u32 desc_idx = tx->dqo_tx.tail;
+	s16 completion_tag;
+	int num_descs = 1;
+	dma_addr_t addr;
+	int err;
+
+	if (unlikely(!gve_has_tx_slots_available(tx, num_descs)))
+		return -EBUSY;
+
+	pkt = gve_alloc_pending_packet(tx);
+	if (unlikely(!pkt))
+		return -EBUSY;
+
+	pkt->type = GVE_TX_PENDING_PACKET_DQO_XDP_FRAME;
+	pkt->num_bufs = 0;
+	pkt->xdpf = xdpf;
+	completion_tag = pkt - tx->dqo.pending_packets;
+
+	/* Generate Packet Descriptor */
+	addr = dma_map_single(tx->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+	err = dma_mapping_error(tx->dev, addr);
+	if (unlikely(err))
+		goto err;
+
+	dma_unmap_len_set(pkt, len[pkt->num_bufs], xdpf->len);
+	dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+	pkt->num_bufs++;
+
+	gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+				 false, xdpf->len,
+				 addr, completion_tag, true,
+				 false);
+
+	gve_tx_update_tail(tx, desc_idx);
+	return 0;
+
+err:
+	pkt->xdpf = NULL;
+	pkt->num_bufs = 0;
+	gve_free_pending_packet(tx, pkt);
+	return err;
+}
+
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+		     u32 flags)
+{
+	struct gve_priv *priv = netdev_priv(dev);
+	struct gve_tx_ring *tx;
+	int i, err = 0, qid;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	qid = gve_xdp_tx_queue_id(priv,
+				  smp_processor_id() % priv->tx_cfg.num_xdp_queues);
+
+	tx = &priv->tx[qid];
+
+	spin_lock(&tx->dqo_tx.xdp_lock);
+	for (i = 0; i < n; i++) {
+		err = gve_xdp_xmit_one_dqo(priv, tx, frames[i]);
+		if (err)
+			break;
+	}
+
+	if (flags & XDP_XMIT_FLUSH)
+		gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+
+	spin_unlock(&tx->dqo_tx.xdp_lock);
+
+	u64_stats_update_begin(&tx->statss);
+	tx->xdp_xmit += n;
+	tx->xdp_xmit_errors += n - i;
+	u64_stats_update_end(&tx->statss);
+
+	return i ? i : err;
+}
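As a usage sketch (not part of this merge), a minimal user-space loader that attaches an XDP object to a gve interface with libbpf; the object path, argument handling, and error reporting here are assumptions:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical loader: attaches an XDP program to a netdev in native
 * (driver) mode. With this series, a gve device in DQ RDA mode accepts
 * the attach and advertises NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
 * per the diff above, the driver rejects XDP if header-data split or LRO
 * is enabled (verify_xdp_configuration() / gve_set_features()).
 */
#include <stdio.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

int main(int argc, char **argv)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex, prog_fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <ifname> <xdp_obj.o>\n", argv[0]);
		return 1;
	}

	ifindex = if_nametoindex(argv[1]);
	if (!ifindex)
		return 1;

	obj = bpf_object__open_file(argv[2], NULL);	/* e.g. the sketch above */
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__next_program(obj, NULL);
	prog_fd = bpf_program__fd(prog);

	/* Request native-mode attach; the kernel falls back with an error
	 * rather than silently using generic XDP.
	 */
	return bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_DRV_MODE, NULL) ? 1 : 0;
}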
