-rw-r--r--  drivers/net/ethernet/google/gve/gve.h                  | 12
-rw-r--r--  drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c  | 17
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c             | 19
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c           | 22
4 files changed, 53 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 9895541eddae..2fab38c8ee78 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -59,6 +59,8 @@
 
 #define GVE_MAX_RX_BUFFER_SIZE 4096
 
+#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
+
 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
 
 #define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
@@ -227,7 +229,11 @@ struct gve_rx_cnts {
 /* Contains datapath state used to represent an RX queue. */
 struct gve_rx_ring {
 	struct gve_priv *gve;
-	u16 packet_buffer_size;
+
+	u16 packet_buffer_size; /* Size of buffer posted to NIC */
+	u16 packet_buffer_truesize; /* Total size of RX buffer */
+	u16 rx_headroom;
+
 	union {
 		/* GQI fields */
 		struct {
@@ -688,6 +694,7 @@ struct gve_rx_alloc_rings_cfg {
 	bool raw_addressing;
 	bool enable_header_split;
 	bool reset_rss;
+	bool xdp;
 
 	/* Allocated resources are returned here */
 	struct gve_rx_ring *rx;
@@ -1218,7 +1225,8 @@ void gve_free_buffer(struct gve_rx_ring *rx,
 		     struct gve_rx_buf_state_dqo *buf_state);
 int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
-					  struct gve_rx_ring *rx);
+					  struct gve_rx_ring *rx,
+					  bool xdp);
 
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index f9824664d04c..a71883e1d920 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -139,7 +139,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
 	buf_state->page_info.page_offset = 0;
 	buf_state->page_info.page_address =
 		page_address(buf_state->page_info.page);
-	buf_state->page_info.buf_size = rx->packet_buffer_size;
+	buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+	buf_state->page_info.pad = rx->rx_headroom;
 	buf_state->last_single_ref_offset = 0;
 
 	/* The page already has 1 ref. */
@@ -162,7 +163,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 			 struct gve_rx_buf_state_dqo *buf_state)
 {
-	const u16 data_buffer_size = rx->packet_buffer_size;
+	const u16 data_buffer_size = rx->packet_buffer_truesize;
 	int pagecount;
 
 	/* Can't reuse if we only fit one buffer per page */
@@ -219,7 +220,7 @@ static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
 {
 	netmem_ref netmem;
 
-	buf_state->page_info.buf_size = rx->packet_buffer_size;
+	buf_state->page_info.buf_size = rx->packet_buffer_truesize;
 	netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
 					&buf_state->page_info.page_offset,
 					&buf_state->page_info.buf_size,
@@ -231,12 +232,14 @@ static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
 	buf_state->page_info.netmem = netmem;
 	buf_state->page_info.page_address = netmem_address(netmem);
 	buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
+	buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
 
 	return 0;
 }
 
 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
-					  struct gve_rx_ring *rx)
+					  struct gve_rx_ring *rx,
+					  bool xdp)
 {
 	u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
 	struct page_pool_params pp = {
@@ -247,7 +250,8 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
 		.netdev = priv->dev,
 		.napi = &priv->ntfy_blocks[ntfy_id].napi,
 		.max_len = PAGE_SIZE,
-		.dma_dir = DMA_FROM_DEVICE,
+		.dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+		.offset = xdp ? XDP_PACKET_HEADROOM : 0,
 	};
 
 	return page_pool_create(&pp);
@@ -301,7 +305,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
 	}
 	desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
 	desc->buf_addr = cpu_to_le64(buf_state->addr +
-				     buf_state->page_info.page_offset);
+				     buf_state->page_info.page_offset +
+				     buf_state->page_info.pad);
 
 	return 0;
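
The .dma_dir and .offset knobs above come from the generic page_pool API: .offset is the headroom the pool is told to reserve in front of each buffer, and DMA_BIDIRECTIONAL is needed once an XDP program may rewrite packet bytes in place before XDP_TX. A minimal standalone sketch of the same pattern follows; the example_* names are illustrative, not part of this commit, and the PP_FLAG_DMA_MAP flag is an assumption (the commit's full params block is not visible in the hunk):

	#include <linux/netdevice.h>
	#include <linux/bpf.h>			/* XDP_PACKET_HEADROOM */
	#include <net/page_pool/helpers.h>

	/* Create an RX page pool that reserves XDP_PACKET_HEADROOM in
	 * front of each buffer and maps pages bidirectionally so an XDP
	 * program can grow headers and rewrite payload before XDP_TX.
	 * PP_FLAG_DMA_MAP makes the pool own the DMA mapping, so
	 * page_pool_get_dma_addr_netmem() returns a valid address.
	 */
	static struct page_pool *example_create_pool(struct device *dev,
						     struct napi_struct *napi,
						     u32 pool_size, bool xdp)
	{
		struct page_pool_params pp = {
			.flags = PP_FLAG_DMA_MAP,
			.pool_size = pool_size,
			.dev = dev,
			.napi = napi,
			.max_len = PAGE_SIZE,
			.dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
			.offset = xdp ? XDP_PACKET_HEADROOM : 0,
		};

		return page_pool_create(&pp);
	}

Note that the pool does not fold .offset into the offsets it hands out on this allocation path; as the gve_alloc_from_page_pool() hunk shows, the driver reads the configured headroom back via rx->dqo.page_pool->p.offset and applies it itself as the buffer's pad when posting addresses.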
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 20aabbe0e518..cb2f9978f45e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1149,8 +1149,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
 				       napi->napi_id);
 		if (err)
 			goto err;
-		err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
-						 MEM_TYPE_PAGE_SHARED, NULL);
+		if (gve_is_qpl(priv))
+			err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+							 MEM_TYPE_PAGE_SHARED,
+							 NULL);
+		else
+			err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+							 MEM_TYPE_PAGE_POOL,
+							 rx->dqo.page_pool);
 		if (err)
 			goto err;
 		rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
@@ -1226,6 +1232,7 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
 	cfg->ring_size = priv->rx_desc_cnt;
 	cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
 	cfg->rx = priv->rx;
+	cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
 }
 
 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
@@ -1461,6 +1468,7 @@ static int gve_configure_rings_xdp(struct gve_priv *priv,
 
 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
 	tx_alloc_cfg.num_xdp_rings = num_xdp_rings;
+	rx_alloc_cfg.xdp = !!num_xdp_rings;
 
 	return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
 }
@@ -1629,6 +1637,7 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 static int verify_xdp_configuration(struct net_device *dev)
 {
 	struct gve_priv *priv = netdev_priv(dev);
+	u16 max_xdp_mtu;
 
 	if (dev->features & NETIF_F_LRO) {
 		netdev_warn(dev, "XDP is not supported when LRO is on.\n");
@@ -1641,7 +1650,11 @@ static int verify_xdp_configuration(struct net_device *dev)
 		return -EOPNOTSUPP;
 	}
 
-	if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
+	max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
+	if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+		max_xdp_mtu -= GVE_RX_PAD;
+
+	if (dev->mtu > max_xdp_mtu) {
 		netdev_warn(dev, "XDP is not supported for mtu %d.\n",
 			    dev->mtu);
 		return -EOPNOTSUPP;
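
Two notes on the gve_main.c changes. The memory-model split matters for recycling: registering MEM_TYPE_PAGE_POOL lets XDP_DROP and xdp_return_frame() hand pages back to the pool rather than the page allocator. A minimal sketch of the registration sequence using the standard xdp_rxq_info API (the example_* wrapper is hypothetical):

	#include <net/xdp.h>
	#include <net/page_pool/types.h>

	/* Register an RX queue for XDP and tie its buffers to a
	 * page_pool so dropped or returned frames are recycled
	 * through the pool instead of the page allocator.
	 */
	static int example_reg_xdp_rxq(struct net_device *dev,
				       struct xdp_rxq_info *rxq,
				       struct page_pool *pool,
				       u32 qid, u32 napi_id)
	{
		int err;

		err = xdp_rxq_info_reg(rxq, dev, qid, napi_id);
		if (err)
			return err;

		err = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
		if (err)
			xdp_rxq_info_unreg(rxq);
		return err;
	}

The MTU check likewise stops hard-coding GVE_DEFAULT_RX_BUFFER_SIZE and derives the limit from the configured buffer size: with 2048-byte packet buffers, for example, max_xdp_mtu = 2048 - 14 (sizeof(struct ethhdr)) = 2034, and only the GQI queue format additionally subtracts its GVE_RX_PAD.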
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 5fbcf93a54e0..2edf3c632cbd 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -225,6 +225,14 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 	rx->q_num = idx;
 	rx->packet_buffer_size = cfg->packet_buffer_size;
 
+	if (cfg->xdp) {
+		rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO;
+		rx->rx_headroom = XDP_PACKET_HEADROOM;
+	} else {
+		rx->packet_buffer_truesize = rx->packet_buffer_size;
+		rx->rx_headroom = 0;
+	}
+
 	rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
 		gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
 	rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
@@ -254,7 +262,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 		goto err;
 
 	if (cfg->raw_addressing) {
-		pool = gve_rx_create_page_pool(priv, rx);
+		pool = gve_rx_create_page_pool(priv, rx, cfg->xdp);
 		if (IS_ERR(pool))
 			goto err;
 
@@ -484,14 +492,15 @@ static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
 	if (rx->dqo.page_pool) {
 		skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
 				       buf_state->page_info.netmem,
-				       buf_state->page_info.page_offset,
-				       buf_len,
+				       buf_state->page_info.page_offset +
+				       buf_state->page_info.pad, buf_len,
 				       buf_state->page_info.buf_size);
 	} else {
 		skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
 				buf_state->page_info.page,
-				buf_state->page_info.page_offset,
-				buf_len, buf_state->page_info.buf_size);
+				buf_state->page_info.page_offset +
+				buf_state->page_info.pad, buf_len,
+				buf_state->page_info.buf_size);
 	}
 }
 
@@ -611,7 +620,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 
 	/* Sync the portion of dma buffer for CPU to read. */
 	dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
-				      buf_state->page_info.page_offset,
+				      buf_state->page_info.page_offset +
+				      buf_state->page_info.pad,
 				      buf_len, DMA_FROM_DEVICE);
 
 	/* Append to current skb if one exists. */
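
The common thread in the gve_rx_dqo.c hunks is that every consumer of a buffer's address (descriptor posting, skb frag attachment, DMA sync) must skip the same headroom pad. A self-contained sketch of the arithmetic, with an illustrative struct rather than the driver's real gve_rx_buf_state_dqo:

	#include <linux/types.h>
	#include <linux/dma-mapping.h>

	/* Illustrative mirror of the fields this patch touches: both the
	 * address handed to the NIC and the range synced for the CPU
	 * start page_offset + pad bytes into the mapping, leaving
	 * [page_offset, page_offset + pad) free for XDP header growth.
	 */
	struct example_buf {
		dma_addr_t addr;	/* DMA address of the backing page */
		u32 page_offset;	/* where this buffer sits in the page */
		u16 pad;		/* XDP_PACKET_HEADROOM when XDP is on */
	};

	static __le64 example_posted_addr(const struct example_buf *buf)
	{
		return cpu_to_le64(buf->addr + buf->page_offset + buf->pad);
	}

	static void example_sync_for_cpu(struct device *dev,
					 const struct example_buf *buf,
					 u16 buf_len)
	{
		dma_sync_single_range_for_cpu(dev, buf->addr,
					      buf->page_offset + buf->pad,
					      buf_len, DMA_FROM_DEVICE);
	}

With XDP enabled, the truesize is pinned at GVE_XDP_RX_BUFFER_SIZE_DQO (4096 bytes), so on 4K-page systems each buffer owns a whole page and the 256-byte XDP_PACKET_HEADROOM still fits ahead of a default 2048-byte packet buffer within that page.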