 drivers/net/ethernet/google/gve/gve.h                 |  72
 drivers/net/ethernet/google/gve/gve_adminq.c          |   4
 drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c |  18
 drivers/net/ethernet/google/gve/gve_ethtool.c         |  30
 drivers/net/ethernet/google/gve/gve_main.c            | 288
 drivers/net/ethernet/google/gve/gve_rx.c              |  30
 drivers/net/ethernet/google/gve/gve_rx_dqo.c          |  81
 drivers/net/ethernet/google/gve/gve_tx.c              |  41
 drivers/net/ethernet/google/gve/gve_tx_dqo.c          |  31
 9 files changed, 250 insertions(+), 345 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 483c43bab3a9..2fab38c8ee78 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -59,6 +59,8 @@
 #define GVE_MAX_RX_BUFFER_SIZE 4096
 
+#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
+
 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
 
 #define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
@@ -227,6 +229,11 @@ struct gve_rx_cnts {
 /* Contains datapath state used to represent an RX queue. */
 struct gve_rx_ring {
 	struct gve_priv *gve;
+
+	u16 packet_buffer_size; /* Size of buffer posted to NIC */
+	u16 packet_buffer_truesize; /* Total size of RX buffer */
+	u16 rx_headroom;
+
 	union {
 		/* GQI fields */
 		struct {
@@ -235,7 +242,6 @@ struct gve_rx_ring {
 
 			/* threshold for posting new buffs and descs */
 			u32 db_threshold;
-			u16 packet_buffer_size;
 
 			u32 qpl_copy_pool_mask;
 			u32 qpl_copy_pool_head;
@@ -613,8 +619,6 @@ struct gve_tx_ring {
 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
 	struct xsk_buff_pool *xsk_pool;
-	u32 xdp_xsk_wakeup;
-	u32 xdp_xsk_done;
 	u64 xdp_xsk_sent;
 	u64 xdp_xmit;
 	u64 xdp_xmit_errors;
@@ -633,10 +637,18 @@ struct gve_notify_block {
 	u32 irq;
 };
 
-/* Tracks allowed and current queue settings */
-struct gve_queue_config {
+/* Tracks allowed and current rx queue settings */
+struct gve_rx_queue_config {
+	u16 max_queues;
+	u16 num_queues;
+	u16 packet_buffer_size;
+};
+
+/* Tracks allowed and current tx queue settings */
+struct gve_tx_queue_config {
 	u16 max_queues;
-	u16 num_queues; /* current */
+	u16 num_queues; /* number of TX queues, excluding XDP queues */
+	u16 num_xdp_queues;
 };
 
 /* Tracks the available and used qpl IDs */
@@ -660,11 +672,11 @@ struct gve_ptype_lut {
 
 /* Parameters for allocating resources for tx queues */
 struct gve_tx_alloc_rings_cfg {
-	struct gve_queue_config *qcfg;
+	struct gve_tx_queue_config *qcfg;
+
+	u16 num_xdp_rings;
 
 	u16 ring_size;
-	u16 start_idx;
-	u16 num_rings;
 	bool raw_addressing;
 
 	/* Allocated resources are returned here */
@@ -674,14 +686,15 @@ struct gve_tx_alloc_rings_cfg {
 /* Parameters for allocating resources for rx queues */
 struct gve_rx_alloc_rings_cfg {
 	/* tx config is also needed to determine QPL ids */
-	struct gve_queue_config *qcfg;
-	struct gve_queue_config *qcfg_tx;
+	struct gve_rx_queue_config *qcfg_rx;
+	struct gve_tx_queue_config *qcfg_tx;
 
 	u16 ring_size;
 	u16 packet_buffer_size;
 	bool raw_addressing;
 	bool enable_header_split;
 	bool reset_rss;
+	bool xdp;
 
 	/* Allocated resources are returned here */
 	struct gve_rx_ring *rx;
@@ -766,9 +779,8 @@ struct gve_priv {
 	u32 rx_copybreak; /* copy packets smaller than this */
 	u16 default_num_queues; /* default num queues to set up */
 
-	u16 num_xdp_queues;
-	struct gve_queue_config tx_cfg;
-	struct gve_queue_config rx_cfg;
+	struct gve_tx_queue_config tx_cfg;
+	struct gve_rx_queue_config rx_cfg;
 	u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
 
 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -838,7 +850,6 @@ struct gve_priv {
 	struct gve_ptype_lut *ptype_lut_dqo;
 
 	/* Must be a power of two. */
-	u16 data_buffer_size_dqo;
 	u16 max_rx_buffer_size; /* device limit */
 
 	enum gve_queue_format queue_format;
@@ -1041,27 +1052,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
 }
 
 /* Returns the number of tx queue page lists */
-static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
-				  int num_xdp_queues,
+static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
 				  bool is_qpl)
 {
 	if (!is_qpl)
 		return 0;
 
-	return tx_cfg->num_queues + num_xdp_queues;
-}
-
-/* Returns the number of XDP tx queue page lists
- */
-static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
-{
-	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
-		return 0;
-
-	return priv->num_xdp_queues;
+	return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
 }
 
 /* Returns the number of rx queue page lists */
-static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
 				  bool is_qpl)
 {
 	if (!is_qpl)
@@ -1079,7 +1079,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
 	return priv->tx_cfg.max_queues + rx_qid;
 }
 
-static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
+				    int rx_qid)
 {
 	return tx_cfg->max_queues + rx_qid;
 }
@@ -1089,7 +1090,7 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
 	return gve_tx_qpl_id(priv, 0);
 }
 
-static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
+static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
 {
 	return gve_get_rx_qpl_id(tx_cfg, 0);
 }
@@ -1120,7 +1121,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
 
 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
 {
-	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+	return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
 }
 
 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
@@ -1224,7 +1225,8 @@ void gve_free_buffer(struct gve_rx_ring *rx,
 		     struct gve_rx_buf_state_dqo *buf_state);
 int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
-					  struct gve_rx_ring *rx);
+					  struct gve_rx_ring *rx,
+					  bool xdp);
 
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
@@ -1236,8 +1238,8 @@ int gve_adjust_config(struct gve_priv *priv,
 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
 int gve_adjust_queues(struct gve_priv *priv,
-		      struct gve_queue_config new_rx_config,
-		      struct gve_queue_config new_tx_config,
+		      struct gve_rx_queue_config new_rx_config,
+		      struct gve_tx_queue_config new_tx_config,
 		      bool reset_rss);
 /* flow steering rule */
 int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index be7a423e5ab9..3e8fc33cc11f 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -731,6 +731,7 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
 		.ntfy_id = cpu_to_be32(rx->ntfy_id),
 		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
 		.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
+		.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size),
 	};
 
 	if (gve_is_gqi(priv)) {
@@ -743,7 +744,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
 			cpu_to_be64(rx->data.data_bus);
 		cmd->create_rx_queue.index = cpu_to_be32(queue_index);
 		cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
-		cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
 	} else {
 		u32 qpl_id = 0;
 
@@ -756,8 +756,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
 			cpu_to_be64(rx->dqo.complq.bus);
 		cmd->create_rx_queue.rx_data_ring_addr =
 			cpu_to_be64(rx->dqo.bufq.bus);
-		cmd->create_rx_queue.packet_buffer_size =
-			cpu_to_be16(priv->data_buffer_size_dqo);
 		cmd->create_rx_queue.rx_buff_ring_size =
 			cpu_to_be16(priv->rx_desc_cnt);
 		cmd->create_rx_queue.enable_rsc =
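The gve.h hunks above split the old gve_queue_config into RX and TX variants and fold the XDP ring count into the TX config, so queue-count and QPL-ID arithmetic no longer needs a separate priv->num_xdp_queues. A freestanding sketch of that arithmetic, mirroring the inline helpers (the struct and function names below are illustrative stand-ins, and the counts are made up):

	#include <stdio.h>

	/* Stand-in for the new gve_tx_queue_config (sketch, not driver code). */
	struct tx_queue_config {
		unsigned int max_queues;
		unsigned int num_queues;
		unsigned int num_xdp_queues;
	};

	/* TX QPLs cover regular and XDP queues, as in gve_num_tx_qpls(). */
	static unsigned int num_tx_qpls(const struct tx_queue_config *tx)
	{
		return tx->num_queues + tx->num_xdp_queues;
	}

	/* RX QPL IDs start past the TX maximum, as in gve_get_rx_qpl_id(). */
	static unsigned int rx_qpl_id(const struct tx_queue_config *tx,
				      unsigned int rx_qid)
	{
		return tx->max_queues + rx_qid;
	}

	int main(void)
	{
		struct tx_queue_config tx = { .max_queues = 16, .num_queues = 4,
					      .num_xdp_queues = 4 };

		printf("tx qpls: %u, rx qpl 0: %u\n",
		       num_tx_qpls(&tx), rx_qpl_id(&tx, 0));
		return 0;	/* prints "tx qpls: 8, rx qpl 0: 16" */
	}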
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index af84cb88f828..a71883e1d920 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -139,7 +139,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
 	buf_state->page_info.page_offset = 0;
 	buf_state->page_info.page_address =
 		page_address(buf_state->page_info.page);
-	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+	buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+	buf_state->page_info.pad = rx->rx_headroom;
 	buf_state->last_single_ref_offset = 0;
 
 	/* The page already has 1 ref. */
@@ -162,7 +163,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 			 struct gve_rx_buf_state_dqo *buf_state)
 {
-	const u16 data_buffer_size = priv->data_buffer_size_dqo;
+	const u16 data_buffer_size = rx->packet_buffer_truesize;
 	int pagecount;
 
 	/* Can't reuse if we only fit one buffer per page */
@@ -217,10 +218,9 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
 static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
 				    struct gve_rx_buf_state_dqo *buf_state)
 {
-	struct gve_priv *priv = rx->gve;
 	netmem_ref netmem;
 
-	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+	buf_state->page_info.buf_size = rx->packet_buffer_truesize;
 	netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
 					&buf_state->page_info.page_offset,
 					&buf_state->page_info.buf_size,
@@ -232,12 +232,14 @@ static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
 	buf_state->page_info.netmem = netmem;
 	buf_state->page_info.page_address = netmem_address(netmem);
 	buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
+	buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
 
 	return 0;
 }
 
 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
-					  struct gve_rx_ring *rx)
+					  struct gve_rx_ring *rx,
+					  bool xdp)
 {
 	u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
 	struct page_pool_params pp = {
@@ -248,7 +250,8 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
 		.netdev = priv->dev,
 		.napi = &priv->ntfy_blocks[ntfy_id].napi,
 		.max_len = PAGE_SIZE,
-		.dma_dir = DMA_FROM_DEVICE,
+		.dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+		.offset = xdp ? XDP_PACKET_HEADROOM : 0,
 	};
 
 	return page_pool_create(&pp);
@@ -302,7 +305,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
 	}
 	desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
 	desc->buf_addr = cpu_to_le64(buf_state->addr +
-				     buf_state->page_info.page_offset);
+				     buf_state->page_info.page_offset +
+				     buf_state->page_info.pad);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index a572f1e05934..31a21ccf4863 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -63,8 +63,8 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
 static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
 	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
 	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
-	"tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
-	"tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
+	"tx_dma_mapping_error[%u]",
+	"tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
 };
 
@@ -417,9 +417,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			data[i++] = value;
 		}
 	}
-	/* XDP xsk counters */
-	data[i++] = tx->xdp_xsk_wakeup;
-	data[i++] = tx->xdp_xsk_done;
+	/* XDP counters */
 	do {
 		start = u64_stats_fetch_begin(&priv->tx[ring].statss);
 		data[i] = tx->xdp_xsk_sent;
@@ -477,8 +475,8 @@ static int gve_set_channels(struct net_device *netdev,
 			    struct ethtool_channels *cmd)
 {
 	struct gve_priv *priv = netdev_priv(netdev);
-	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
-	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
+	struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
+	struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
 	struct ethtool_channels old_settings;
 	int new_tx = cmd->tx_count;
 	int new_rx = cmd->rx_count;
@@ -493,10 +491,17 @@ static int gve_set_channels(struct net_device *netdev,
 	if (!new_rx || !new_tx)
 		return -EINVAL;
 
-	if (priv->num_xdp_queues &&
-	    (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
-		dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
-		return -EINVAL;
+	if (priv->xdp_prog) {
+		if (new_tx != new_rx ||
+		    (2 * new_tx > priv->tx_cfg.max_queues)) {
+			dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues when XDP program is installed");
+			return -EINVAL;
+		}
+
+		/* One XDP TX queue per RX queue. */
+		new_tx_cfg.num_xdp_queues = new_rx;
+	} else {
+		new_tx_cfg.num_xdp_queues = 0;
 	}
 
 	if (new_rx != priv->rx_cfg.num_queues &&
@@ -642,8 +647,7 @@ static int gve_set_tunable(struct net_device *netdev,
 	switch (etuna->id) {
 	case ETHTOOL_RX_COPYBREAK:
 	{
-		u32 max_copybreak = gve_is_gqi(priv) ?
-			GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
+		u32 max_copybreak = priv->rx_cfg.packet_buffer_size;
 
 		len = *(u32 *)value;
 		if (len > max_copybreak)
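With the gve_buffer_mgmt_dqo.c changes above, the address posted to the NIC becomes the buffer's DMA address plus its page offset plus a headroom pad, and page pools created for XDP reserve that headroom up front. A minimal userspace sketch of the address math (256 matches the kernel's usual XDP_PACKET_HEADROOM value; all other values are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	/* 256 matches XDP_PACKET_HEADROOM on common configurations. */
	#define SKETCH_XDP_HEADROOM 256

	/* NIC-visible address per gve_alloc_buffer() above: DMA base of the
	 * buffer plus its offset within the page plus the headroom pad.
	 */
	static uint64_t posted_addr(uint64_t dma, uint16_t page_offset,
				    uint16_t pad)
	{
		return dma + page_offset + pad;
	}

	int main(void)
	{
		/* Hypothetical XDP buffer at the start of a page. */
		printf("posted: 0x%llx\n", (unsigned long long)
		       posted_addr(0x100000, 0, SKETCH_XDP_HEADROOM));
		return 0;	/* prints "posted: 0x100100" */
	}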
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 6dcdcaf518f4..cb2f9978f45e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -795,30 +795,13 @@ static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx
 	return rx->dqo.qpl;
 }
 
-static int gve_register_xdp_qpls(struct gve_priv *priv)
-{
-	int start_id;
-	int err;
-	int i;
-
-	start_id = gve_xdp_tx_start_queue_id(priv);
-	for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
-		err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
-		/* This failure will trigger a reset - no need to clean up */
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
 static int gve_register_qpls(struct gve_priv *priv)
 {
 	int num_tx_qpls, num_rx_qpls;
 	int err;
 	int i;
 
-	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
-				      gve_is_qpl(priv));
+	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
 	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
 
 	for (i = 0; i < num_tx_qpls; i++) {
@@ -836,30 +819,13 @@ static int gve_register_qpls(struct gve_priv *priv)
 	return 0;
 }
 
-static int gve_unregister_xdp_qpls(struct gve_priv *priv)
-{
-	int start_id;
-	int err;
-	int i;
-
-	start_id = gve_xdp_tx_start_queue_id(priv);
-	for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
-		err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
-		/* This failure will trigger a reset - no need to clean */
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
 static int gve_unregister_qpls(struct gve_priv *priv)
 {
 	int num_tx_qpls, num_rx_qpls;
 	int err;
 	int i;
 
-	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
-				      gve_is_qpl(priv));
+	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
 	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
 
 	for (i = 0; i < num_tx_qpls; i++) {
@@ -878,27 +844,6 @@ static int gve_unregister_qpls(struct gve_priv *priv)
 	return 0;
 }
 
-static int gve_create_xdp_rings(struct gve_priv *priv)
-{
-	int err;
-
-	err = gve_adminq_create_tx_queues(priv,
-					  gve_xdp_tx_start_queue_id(priv),
-					  priv->num_xdp_queues);
-	if (err) {
-		netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
-			  priv->num_xdp_queues);
-		/* This failure will trigger a reset - no need to clean
-		 * up
-		 */
-		return err;
-	}
-	netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
-		  priv->num_xdp_queues);
-
-	return 0;
-}
-
 static int gve_create_rings(struct gve_priv *priv)
 {
 	int num_tx_queues = gve_num_tx_queues(priv);
@@ -954,7 +899,7 @@ static void init_xdp_sync_stats(struct gve_priv *priv)
 	int i;
 
 	/* Init stats */
-	for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+	for (i = start_id; i < start_id + priv->tx_cfg.num_xdp_queues; i++) {
 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
 
 		u64_stats_init(&priv->tx[i].statss);
@@ -979,24 +924,21 @@ static void gve_init_sync_stats(struct gve_priv *priv)
 static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
 				      struct gve_tx_alloc_rings_cfg *cfg)
 {
-	int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
-
 	cfg->qcfg = &priv->tx_cfg;
 	cfg->raw_addressing = !gve_is_qpl(priv);
 	cfg->ring_size = priv->tx_desc_cnt;
-	cfg->start_idx = 0;
-	cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
+	cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
 	cfg->tx = priv->tx;
 }
 
-static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
+static void gve_tx_stop_rings(struct gve_priv *priv, int num_rings)
 {
 	int i;
 
 	if (!priv->tx)
 		return;
 
-	for (i = start_id; i < start_id + num_rings; i++) {
+	for (i = 0; i < num_rings; i++) {
 		if (gve_is_gqi(priv))
 			gve_tx_stop_ring_gqi(priv, i);
 		else
@@ -1004,12 +946,11 @@ static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings
 	}
 }
 
-static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
-			       int num_rings)
+static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
 {
 	int i;
 
-	for (i = start_id; i < start_id + num_rings; i++) {
+	for (i = 0; i < num_rings; i++) {
 		if (gve_is_gqi(priv))
 			gve_tx_start_ring_gqi(priv, i);
 		else
@@ -1017,28 +958,6 @@ static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
 	}
 }
 
-static int gve_alloc_xdp_rings(struct gve_priv *priv)
-{
-	struct gve_tx_alloc_rings_cfg cfg = {0};
-	int err = 0;
-
-	if (!priv->num_xdp_queues)
-		return 0;
-
-	gve_tx_get_curr_alloc_cfg(priv, &cfg);
-	cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
-	cfg.num_rings = priv->num_xdp_queues;
-
-	err = gve_tx_alloc_rings_gqi(priv, &cfg);
-	if (err)
-		return err;
-
-	gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
-	init_xdp_sync_stats(priv);
-
-	return 0;
-}
-
 static int gve_queues_mem_alloc(struct gve_priv *priv,
 				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
 				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
@@ -1069,26 +988,6 @@ free_tx:
 	return err;
 }
 
-static int gve_destroy_xdp_rings(struct gve_priv *priv)
-{
-	int start_id;
-	int err;
-
-	start_id = gve_xdp_tx_start_queue_id(priv);
-	err = gve_adminq_destroy_tx_queues(priv,
-					   start_id,
-					   priv->num_xdp_queues);
-	if (err) {
-		netif_err(priv, drv, priv->dev,
-			  "failed to destroy XDP queues\n");
-		/* This failure will trigger a reset - no need to clean up */
-		return err;
-	}
-	netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
-
-	return 0;
-}
-
 static int gve_destroy_rings(struct gve_priv *priv)
 {
 	int num_tx_queues = gve_num_tx_queues(priv);
@@ -1113,20 +1012,6 @@ static int gve_destroy_rings(struct gve_priv *priv)
 	return 0;
 }
 
-static void gve_free_xdp_rings(struct gve_priv *priv)
-{
-	struct gve_tx_alloc_rings_cfg cfg = {0};
-
-	gve_tx_get_curr_alloc_cfg(priv, &cfg);
-	cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
-	cfg.num_rings = priv->num_xdp_queues;
-
-	if (priv->tx) {
-		gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
-		gve_tx_free_rings_gqi(priv, &cfg);
-	}
-}
-
 static void gve_queues_mem_free(struct gve_priv *priv,
 				struct gve_tx_alloc_rings_cfg *tx_cfg,
 				struct gve_rx_alloc_rings_cfg *rx_cfg)
@@ -1253,7 +1138,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
 	int i, j;
 	u32 tx_qid;
 
-	if (!priv->num_xdp_queues)
+	if (!priv->tx_cfg.num_xdp_queues)
 		return 0;
 
 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
@@ -1264,8 +1149,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
 				       napi->napi_id);
 		if (err)
 			goto err;
-		err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
-						 MEM_TYPE_PAGE_SHARED, NULL);
+		if (gve_is_qpl(priv))
+			err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+							 MEM_TYPE_PAGE_SHARED,
+							 NULL);
+		else
+			err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+							 MEM_TYPE_PAGE_POOL,
+							 rx->dqo.page_pool);
 		if (err)
 			goto err;
 		rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
@@ -1283,7 +1174,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
 		}
 	}
 
-	for (i = 0; i < priv->num_xdp_queues; i++) {
+	for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
 		tx_qid = gve_xdp_tx_queue_id(priv, i);
 		priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
 	}
@@ -1304,7 +1195,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
 {
 	int i, tx_qid;
 
-	if (!priv->num_xdp_queues)
+	if (!priv->tx_cfg.num_xdp_queues || !priv->rx || !priv->tx)
 		return;
 
 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
@@ -1317,7 +1208,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
 		}
 	}
 
-	for (i = 0; i < priv->num_xdp_queues; i++) {
+	for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
 		tx_qid = gve_xdp_tx_queue_id(priv, i);
 		priv->tx[tx_qid].xsk_pool = NULL;
 	}
@@ -1334,15 +1225,14 @@ static void gve_drain_page_cache(struct gve_priv *priv)
 static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
 				      struct gve_rx_alloc_rings_cfg *cfg)
 {
-	cfg->qcfg = &priv->rx_cfg;
+	cfg->qcfg_rx = &priv->rx_cfg;
 	cfg->qcfg_tx = &priv->tx_cfg;
 	cfg->raw_addressing = !gve_is_qpl(priv);
 	cfg->enable_header_split = priv->header_split_enabled;
 	cfg->ring_size = priv->rx_desc_cnt;
-	cfg->packet_buffer_size = gve_is_gqi(priv) ?
-		GVE_DEFAULT_RX_BUFFER_SIZE :
-		priv->data_buffer_size_dqo;
+	cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
 	cfg->rx = priv->rx;
+	cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
 }
 
 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
@@ -1415,17 +1305,13 @@ static int gve_queues_start(struct gve_priv *priv,
 
 	/* Record new configs into priv */
 	priv->tx_cfg = *tx_alloc_cfg->qcfg;
-	priv->rx_cfg = *rx_alloc_cfg->qcfg;
+	priv->tx_cfg.num_xdp_queues = tx_alloc_cfg->num_xdp_rings;
+	priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
 	priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
 	priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
 
-	if (priv->xdp_prog)
-		priv->num_xdp_queues = priv->rx_cfg.num_queues;
-	else
-		priv->num_xdp_queues = 0;
-
-	gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
-	gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
+	gve_tx_start_rings(priv, gve_num_tx_queues(priv));
+	gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
 	gve_init_sync_stats(priv);
 
 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
@@ -1450,7 +1336,7 @@ static int gve_queues_start(struct gve_priv *priv,
 		goto reset;
 
 	priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
-	priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+	priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size;
 
 	err = gve_create_rings(priv);
 	if (err)
@@ -1477,7 +1363,7 @@ reset:
 	/* return the original error */
 	return err;
 stop_and_free_rings:
-	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+	gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
 	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
 	gve_queues_mem_remove(priv);
 	return err;
@@ -1526,7 +1412,7 @@ static int gve_queues_stop(struct gve_priv *priv)
 
 	gve_unreg_xdp_info(priv);
 
-	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+	gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
 	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
 
 	priv->interface_down_cnt++;
@@ -1556,56 +1442,6 @@ static int gve_close(struct net_device *dev)
 	return 0;
 }
 
-static int gve_remove_xdp_queues(struct gve_priv *priv)
-{
-	int err;
-
-	err = gve_destroy_xdp_rings(priv);
-	if (err)
-		return err;
-
-	err = gve_unregister_xdp_qpls(priv);
-	if (err)
-		return err;
-
-	gve_unreg_xdp_info(priv);
-	gve_free_xdp_rings(priv);
-
-	priv->num_xdp_queues = 0;
-	return 0;
-}
-
-static int gve_add_xdp_queues(struct gve_priv *priv)
-{
-	int err;
-
-	priv->num_xdp_queues = priv->rx_cfg.num_queues;
-
-	err = gve_alloc_xdp_rings(priv);
-	if (err)
-		goto err;
-
-	err = gve_reg_xdp_info(priv, priv->dev);
-	if (err)
-		goto free_xdp_rings;
-
-	err = gve_register_xdp_qpls(priv);
-	if (err)
-		goto free_xdp_rings;
-
-	err = gve_create_xdp_rings(priv);
-	if (err)
-		goto free_xdp_rings;
-
-	return 0;
-
-free_xdp_rings:
-	gve_free_xdp_rings(priv);
-err:
-	priv->num_xdp_queues = 0;
-	return err;
-}
-
 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
 {
 	if (!gve_get_napi_enabled(priv))
@@ -1623,6 +1459,19 @@ static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
 	}
 }
 
+static int gve_configure_rings_xdp(struct gve_priv *priv,
+				   u16 num_xdp_rings)
+{
+	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+
+	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+	tx_alloc_cfg.num_xdp_rings = num_xdp_rings;
+
+	rx_alloc_cfg.xdp = !!num_xdp_rings;
+	return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+}
+
 static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
 		       struct netlink_ext_ack *extack)
 {
@@ -1635,29 +1484,26 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
 		WRITE_ONCE(priv->xdp_prog, prog);
 		if (old_prog)
 			bpf_prog_put(old_prog);
+
+		/* Update priv XDP queue configuration */
+		priv->tx_cfg.num_xdp_queues = priv->xdp_prog ?
+			priv->rx_cfg.num_queues : 0;
 		return 0;
 	}
 
-	gve_turndown(priv);
-	if (!old_prog && prog) {
-		// Allocate XDP TX queues if an XDP program is
-		// being installed
-		err = gve_add_xdp_queues(priv);
-		if (err)
-			goto out;
-	} else if (old_prog && !prog) {
-		// Remove XDP TX queues if an XDP program is
-		// being uninstalled
-		err = gve_remove_xdp_queues(priv);
-		if (err)
-			goto out;
-	}
+	if (!old_prog && prog)
+		err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+	else if (old_prog && !prog)
+		err = gve_configure_rings_xdp(priv, 0);
+
+	if (err)
+		goto out;
+
 	WRITE_ONCE(priv->xdp_prog, prog);
 	if (old_prog)
 		bpf_prog_put(old_prog);
 
 out:
-	gve_turnup(priv);
 	status = ioread32be(&priv->reg_bar0->device_status);
 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
 	return err;
@@ -1791,6 +1637,7 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 static int verify_xdp_configuration(struct net_device *dev)
 {
 	struct gve_priv *priv = netdev_priv(dev);
+	u16 max_xdp_mtu;
 
 	if (dev->features & NETIF_F_LRO) {
 		netdev_warn(dev, "XDP is not supported when LRO is on.\n");
@@ -1803,7 +1650,11 @@ static int verify_xdp_configuration(struct net_device *dev)
 		return -EOPNOTSUPP;
 	}
 
-	if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
+	max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
+	if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+		max_xdp_mtu -= GVE_RX_PAD;
+
+	if (dev->mtu > max_xdp_mtu) {
 		netdev_warn(dev, "XDP is not supported for mtu %d.\n",
 			    dev->mtu);
 		return -EOPNOTSUPP;
@@ -1908,13 +1759,12 @@ int gve_adjust_config(struct gve_priv *priv,
 }
 
 int gve_adjust_queues(struct gve_priv *priv,
-		      struct gve_queue_config new_rx_config,
-		      struct gve_queue_config new_tx_config,
+		      struct gve_rx_queue_config new_rx_config,
+		      struct gve_tx_queue_config new_tx_config,
 		      bool reset_rss)
 {
 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
-	int num_xdp_queues;
 	int err;
 
 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
@@ -1922,13 +1772,8 @@ int gve_adjust_queues(struct gve_priv *priv,
 	/* Relay the new config from ethtool */
 	tx_alloc_cfg.qcfg = &new_tx_config;
 	rx_alloc_cfg.qcfg_tx = &new_tx_config;
-	rx_alloc_cfg.qcfg = &new_rx_config;
+	rx_alloc_cfg.qcfg_rx = &new_rx_config;
 	rx_alloc_cfg.reset_rss = reset_rss;
-	tx_alloc_cfg.num_rings = new_tx_config.num_queues;
-
-	/* Add dedicated XDP TX queues if enabled. */
-	num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
-	tx_alloc_cfg.num_rings += num_xdp_queues;
 
 	if (netif_running(priv->dev)) {
 		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
@@ -2056,7 +1901,7 @@ static void gve_turnup(struct gve_priv *priv)
 			napi_schedule(&block->napi);
 	}
 
-	if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
+	if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
 		xdp_features_set_redirect_target(priv->dev, false);
 
 	gve_set_napi_enabled(priv);
@@ -2412,6 +2257,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
 						priv->rx_cfg.num_queues);
 	}
+	priv->tx_cfg.num_xdp_queues = 0;
 
 	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
 		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
@@ -2792,7 +2638,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	priv->service_task_flags = 0x0;
 	priv->state_flags = 0x0;
 	priv->ethtool_flags = 0x0;
-	priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+	priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 
 	gve_set_probe_in_progress(priv);
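The gve_main.c rework above replaces the hand-rolled add/remove-XDP-queue helpers with one path: gve_configure_rings_xdp() just changes the requested XDP ring count and lets gve_adjust_config() rebuild the queues. A compact sketch of that control flow, with stand-in types and a no-op reallocation step (assumption: the real driver tears down and reallocates queues inside the adjust step):

	#include <stdio.h>

	/* Stand-ins for priv->tx_cfg/rx_cfg; not the driver's structs. */
	struct cfg {
		unsigned int num_tx;
		unsigned int num_rx;
		unsigned int num_xdp;
	};

	/* Hypothetical stand-in for gve_adjust_config(). */
	static void adjust_config(struct cfg *cur, const struct cfg *want)
	{
		*cur = *want;
	}

	/* Mirrors the gve_configure_rings_xdp() idea: one XDP TX ring per
	 * RX ring when a program is installed, none otherwise.
	 */
	static void set_xdp(struct cfg *cur, int prog_installed)
	{
		struct cfg want = *cur;

		want.num_xdp = prog_installed ? cur->num_rx : 0;
		adjust_config(cur, &want);
	}

	int main(void)
	{
		struct cfg c = { .num_tx = 4, .num_rx = 4, .num_xdp = 0 };

		set_xdp(&c, 1);
		printf("total tx rings: %u\n", c.num_tx + c.num_xdp); /* 8 */
		set_xdp(&c, 0);
		printf("total tx rings: %u\n", c.num_tx + c.num_xdp); /* 4 */
		return 0;
	}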
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index acb73d4d0de6..90e875c1832f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -141,12 +141,15 @@ void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
 	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
 }
 
-static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
-				dma_addr_t addr, struct page *page, __be64 *slot_addr)
+static void gve_setup_rx_buffer(struct gve_rx_ring *rx,
+				struct gve_rx_slot_page_info *page_info,
+				dma_addr_t addr, struct page *page,
+				__be64 *slot_addr)
 {
 	page_info->page = page;
 	page_info->page_offset = 0;
 	page_info->page_address = page_address(page);
+	page_info->buf_size = rx->packet_buffer_size;
 	*slot_addr = cpu_to_be64(addr);
 	/* The page already has 1 ref */
 	page_ref_add(page, INT_MAX - 1);
@@ -171,7 +174,7 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
 		return err;
 	}
 
-	gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+	gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
 	return 0;
 }
 
@@ -199,7 +202,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
 			struct page *page = rx->data.qpl->pages[i];
 			dma_addr_t addr = i * PAGE_SIZE;
 
-			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+			gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
+					    page,
 					    &rx->data.data_ring[i].qpl_offset);
 			continue;
 		}
@@ -222,6 +226,7 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
 		rx->qpl_copy_pool[j].page = page;
 		rx->qpl_copy_pool[j].page_offset = 0;
 		rx->qpl_copy_pool[j].page_address = page_address(page);
+		rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
 
 		/* The page already has 1 ref. */
 		page_ref_add(page, INT_MAX - 1);
@@ -283,6 +288,7 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 
 	rx->gve = priv;
 	rx->q_num = idx;
+	rx->packet_buffer_size = cfg->packet_buffer_size;
 
 	rx->mask = slots - 1;
 	rx->data.raw_addressing = cfg->raw_addressing;
@@ -351,7 +357,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 	rx->db_threshold = slots / 2;
 	gve_rx_init_ring_state_gqi(rx);
 
-	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	gve_rx_ctx_clear(&rx->ctx);
 
 	return 0;
@@ -385,12 +390,12 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
 	int err = 0;
 	int i, j;
 
-	rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+	rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
 		      GFP_KERNEL);
 	if (!rx)
 		return -ENOMEM;
 
-	for (i = 0; i < cfg->qcfg->num_queues; i++) {
+	for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
 		err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
 		if (err) {
 			netif_err(priv, drv, priv->dev,
@@ -419,7 +424,7 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
 	if (!rx)
 		return;
 
-	for (i = 0; i < cfg->qcfg->num_queues; i++)
+	for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
 		gve_rx_free_ring_gqi(priv, &rx[i], cfg);
 
 	kvfree(rx);
@@ -590,7 +595,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
 		copy_page_info->pad = page_info->pad;
 
 		skb = gve_rx_add_frags(napi, copy_page_info,
-				       rx->packet_buffer_size, len, ctx);
+				       copy_page_info->buf_size, len, ctx);
 		if (unlikely(!skb))
 			return NULL;
 
@@ -630,7 +635,8 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
 	 * device.
 	 */
 	if (page_info->can_flip) {
-		skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
+		skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
+				       len, ctx);
 		/* No point in recycling if we didn't get the skb */
 		if (skb) {
 			/* Make sure that the page isn't freed. */
@@ -680,7 +686,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
 		skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
 					    page_info, len, napi,
 					    data_slot,
-					    rx->packet_buffer_size, ctx);
+					    page_info->buf_size, ctx);
 	} else {
 		skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
 				 page_info, len, napi, data_slot);
@@ -855,7 +861,7 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
 		void *old_data;
 		int xdp_act;
 
-		xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+		xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
 		xdp_prepare_buff(&xdp, page_info->page_address +
 				 page_info->page_offset, GVE_RX_PAD,
 				 len, false);
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 856ade0c209f..deb869eb38e0 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -223,6 +223,15 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 	memset(rx, 0, sizeof(*rx));
 	rx->gve = priv;
 	rx->q_num = idx;
+	rx->packet_buffer_size = cfg->packet_buffer_size;
+
+	if (cfg->xdp) {
+		rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO;
+		rx->rx_headroom = XDP_PACKET_HEADROOM;
+	} else {
+		rx->packet_buffer_truesize = rx->packet_buffer_size;
+		rx->rx_headroom = 0;
+	}
 
 	rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
 		gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
@@ -253,7 +262,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 		goto err;
 
 	if (cfg->raw_addressing) {
-		pool = gve_rx_create_page_pool(priv, rx);
+		pool = gve_rx_create_page_pool(priv, rx, cfg->xdp);
 		if (IS_ERR(pool))
 			goto err;
 
@@ -299,12 +308,12 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
 	int err;
 	int i;
 
-	rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+	rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
 		      GFP_KERNEL);
 	if (!rx)
 		return -ENOMEM;
 
-	for (i = 0; i < cfg->qcfg->num_queues; i++) {
+	for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
 		err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
 		if (err) {
 			netif_err(priv, drv, priv->dev,
@@ -333,7 +342,7 @@ void gve_rx_free_rings_dqo(struct gve_priv *priv,
 	if (!rx)
 		return;
 
-	for (i = 0; i < cfg->qcfg->num_queues; i++)
+	for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
 		gve_rx_free_ring_dqo(priv, &rx[i], cfg);
 
 	kvfree(rx);
@@ -483,14 +492,15 @@ static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
 
 	if (rx->dqo.page_pool) {
 		skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
 				       buf_state->page_info.netmem,
-				       buf_state->page_info.page_offset,
-				       buf_len,
+				       buf_state->page_info.page_offset +
+				       buf_state->page_info.pad, buf_len,
 				       buf_state->page_info.buf_size);
 	} else {
 		skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
 				buf_state->page_info.page,
-				buf_state->page_info.page_offset,
-				buf_len, buf_state->page_info.buf_size);
+				buf_state->page_info.page_offset +
+				buf_state->page_info.pad, buf_len,
+				buf_state->page_info.buf_size);
 	}
 }
@@ -536,6 +546,29 @@ static int gve_rx_append_frags(struct napi_struct *napi,
 	return 0;
 }
 
+static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+			     struct xdp_buff *xdp, struct bpf_prog *xprog,
+			     int xdp_act,
+			     struct gve_rx_buf_state_dqo *buf_state)
+{
+	u64_stats_update_begin(&rx->statss);
+	switch (xdp_act) {
+	case XDP_ABORTED:
+	case XDP_DROP:
+	default:
+		rx->xdp_actions[xdp_act]++;
+		break;
+	case XDP_TX:
+		rx->xdp_tx_errors++;
+		break;
+	case XDP_REDIRECT:
+		rx->xdp_redirect_errors++;
+		break;
+	}
+	u64_stats_update_end(&rx->statss);
+	gve_free_buffer(rx, buf_state);
+}
+
 /* Returns 0 if descriptor is completed successfully.
  * Returns -EINVAL if descriptor is invalid.
  * Returns -ENOMEM if data cannot be copied to skb.
@@ -550,6 +583,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	const bool hsplit = compl_desc->split_header;
 	struct gve_rx_buf_state_dqo *buf_state;
 	struct gve_priv *priv = rx->gve;
+	struct bpf_prog *xprog;
 	u16 buf_len;
 	u16 hdr_len;
 
@@ -610,7 +644,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 
 	/* Sync the portion of dma buffer for CPU to read. */
 	dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
-				      buf_state->page_info.page_offset,
+				      buf_state->page_info.page_offset +
+				      buf_state->page_info.pad,
 				      buf_len, DMA_FROM_DEVICE);
 
 	/* Append to current skb if one exists. */
@@ -622,6 +657,34 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		return 0;
 	}
 
+	xprog = READ_ONCE(priv->xdp_prog);
+	if (xprog) {
+		struct xdp_buff xdp;
+		void *old_data;
+		int xdp_act;
+
+		xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+			      &rx->xdp_rxq);
+		xdp_prepare_buff(&xdp,
+				 buf_state->page_info.page_address +
+				 buf_state->page_info.page_offset,
+				 buf_state->page_info.pad,
+				 buf_len, false);
+		old_data = xdp.data;
+		xdp_act = bpf_prog_run_xdp(xprog, &xdp);
+		buf_state->page_info.pad += xdp.data - old_data;
+		buf_len = xdp.data_end - xdp.data;
+		if (xdp_act != XDP_PASS) {
+			gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+					 buf_state);
+			return 0;
+		}
+
+		u64_stats_update_begin(&rx->statss);
+		rx->xdp_actions[XDP_PASS]++;
+		u64_stats_update_end(&rx->statss);
+	}
+
 	if (eop && buf_len <= priv->rx_copybreak) {
 		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
 					       &buf_state->page_info, buf_len);
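In the new gve_rx_dqo() XDP path above, a program that moves xdp.data (for example via bpf_xdp_adjust_head()) is accounted for by growing the buffer pad and shrinking buf_len by the pointer delta. The same arithmetic as a runnable sketch, with plain offsets in place of pointers and made-up sizes:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Made-up starting state: 256B of headroom, a 1500B frame. */
		uint16_t pad = 256, buf_len = 1500;
		unsigned int data = pad, data_end = pad + buf_len;
		unsigned int old_data = data;

		data += 14;	/* the program consumed the Ethernet header */

		/* The two lines gve_rx_dqo() runs after bpf_prog_run_xdp(). */
		pad += data - old_data;
		buf_len = data_end - data;

		printf("pad %u, len %u\n", pad, buf_len); /* pad 270, len 1486 */
		return 0;
	}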
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 4350ebd9c2bd..1b40bf0c811a 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -334,27 +334,23 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
 			   struct gve_tx_alloc_rings_cfg *cfg)
 {
 	struct gve_tx_ring *tx = cfg->tx;
+	int total_queues;
 	int err = 0;
 	int i, j;
 
-	if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+	total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+	if (total_queues > cfg->qcfg->max_queues) {
 		netif_err(priv, drv, priv->dev,
 			  "Cannot alloc more than the max num of Tx rings\n");
 		return -EINVAL;
 	}
 
-	if (cfg->start_idx == 0) {
-		tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
-			      GFP_KERNEL);
-		if (!tx)
-			return -ENOMEM;
-	} else if (!tx) {
-		netif_err(priv, drv, priv->dev,
-			  "Cannot alloc tx rings from a nonzero start idx without tx array\n");
-		return -EINVAL;
-	}
+	tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+		      GFP_KERNEL);
+	if (!tx)
+		return -ENOMEM;
 
-	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+	for (i = 0; i < total_queues; i++) {
 		err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
 		if (err) {
 			netif_err(priv, drv, priv->dev,
@@ -370,8 +366,7 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
 cleanup:
 	for (j = 0; j < i; j++)
 		gve_tx_free_ring_gqi(priv, &tx[j], cfg);
-	if (cfg->start_idx == 0)
-		kvfree(tx);
+	kvfree(tx);
 	return err;
 }
 
@@ -384,13 +379,11 @@ void gve_tx_free_rings_gqi(struct gve_priv *priv,
 	if (!tx)
 		return;
 
-	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+	for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
 		gve_tx_free_ring_gqi(priv, &tx[i], cfg);
 
-	if (cfg->start_idx == 0) {
-		kvfree(tx);
-		cfg->tx = NULL;
-	}
+	kvfree(tx);
+	cfg->tx = NULL;
 }
 
 /* gve_tx_avail - Calculates the number of slots available in the ring
@@ -844,7 +837,7 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		return -ENETDOWN;
 
 	qid = gve_xdp_tx_queue_id(priv,
-				  smp_processor_id() % priv->num_xdp_queues);
+				  smp_processor_id() % priv->tx_cfg.num_xdp_queues);
 
 	tx = &priv->tx[qid];
 
@@ -959,13 +952,9 @@ static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
 
 	spin_lock(&tx->xdp_lock);
 	while (sent < budget) {
-		if (!gve_can_tx(tx, GVE_TX_START_THRESH))
-			goto out;
-
-		if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
-			tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
+		if (!gve_can_tx(tx, GVE_TX_START_THRESH) ||
+		    !xsk_tx_peek_desc(tx->xsk_pool, &desc))
 			goto out;
-		}
 
 		data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
 		nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 394debc62268..2eba868d8037 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -379,27 +379,23 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
 			   struct gve_tx_alloc_rings_cfg *cfg)
 {
 	struct gve_tx_ring *tx = cfg->tx;
+	int total_queues;
 	int err = 0;
 	int i, j;
 
-	if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+	total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+	if (total_queues > cfg->qcfg->max_queues) {
 		netif_err(priv, drv, priv->dev,
 			  "Cannot alloc more than the max num of Tx rings\n");
 		return -EINVAL;
 	}
 
-	if (cfg->start_idx == 0) {
-		tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
-			      GFP_KERNEL);
-		if (!tx)
-			return -ENOMEM;
-	} else if (!tx) {
-		netif_err(priv, drv, priv->dev,
-			  "Cannot alloc tx rings from a nonzero start idx without tx array\n");
-		return -EINVAL;
-	}
+	tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+		      GFP_KERNEL);
+	if (!tx)
+		return -ENOMEM;
 
-	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+	for (i = 0; i < total_queues; i++) {
 		err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
 		if (err) {
 			netif_err(priv, drv, priv->dev,
@@ -415,8 +411,7 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
 err:
 	for (j = 0; j < i; j++)
 		gve_tx_free_ring_dqo(priv, &tx[j], cfg);
-	if (cfg->start_idx == 0)
-		kvfree(tx);
+	kvfree(tx);
 	return err;
 }
 
@@ -429,13 +424,11 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
 	if (!tx)
 		return;
 
-	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+	for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
 		gve_tx_free_ring_dqo(priv, &tx[i], cfg);
 
-	if (cfg->start_idx == 0) {
-		kvfree(tx);
-		cfg->tx = NULL;
-	}
+	kvfree(tx);
+	cfg->tx = NULL;
 }
 
 /* Returns the number of slots available in the ring */
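After this series the XDP TX rings live in the same array directly behind the regular TX rings, so gve_num_tx_queues() and gve_xdp_tx_queue_id() reduce to simple offset math, and gve_xdp_xmit() spreads redirected frames across the XDP rings by CPU. A small sketch of that indexing (function names here are stand-ins; the counts are examples):

	#include <stdio.h>

	/* Stand-ins for gve_num_tx_queues() / gve_xdp_tx_queue_id(). */
	static unsigned int num_tx_queues(unsigned int num_queues,
					  unsigned int num_xdp)
	{
		return num_queues + num_xdp;
	}

	static unsigned int xdp_tx_queue_id(unsigned int num_queues,
					    unsigned int queue_id)
	{
		return num_queues + queue_id;
	}

	int main(void)
	{
		unsigned int num_queues = 4, num_xdp = 4, cpu = 6;

		/* gve_xdp_xmit() picks a ring by CPU, modulo the XDP count. */
		unsigned int qid = xdp_tx_queue_id(num_queues, cpu % num_xdp);

		printf("rings: %u, xmit qid: %u\n",
		       num_tx_queues(num_queues, num_xdp), qid); /* 8, 6 */
		return 0;
	}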