Diffstat (limited to 'drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c')
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c | 70
1 file changed, 49 insertions, 21 deletions
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 403f0f335ba6..8f5021e59e0a 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2015-2024 Google, Inc.
  */
 
+#include <net/xdp_sock_drv.h>
 #include "gve.h"
 #include "gve_utils.h"
 
@@ -29,6 +30,10 @@ struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
 	/* Point buf_state to itself to mark it as allocated */
 	buf_state->next = buffer_id;
 
+	/* Clear the buffer pointers */
+	buf_state->page_info.page = NULL;
+	buf_state->xsk_buff = NULL;
+
 	return buf_state;
 }
 
@@ -139,7 +144,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
 	buf_state->page_info.page_offset = 0;
 	buf_state->page_info.page_address =
 		page_address(buf_state->page_info.page);
-	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+	buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+	buf_state->page_info.pad = rx->rx_headroom;
 	buf_state->last_single_ref_offset = 0;
 
 	/* The page already has 1 ref. */
@@ -162,7 +168,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 			 struct gve_rx_buf_state_dqo *buf_state)
 {
-	const u16 data_buffer_size = priv->data_buffer_size_dqo;
+	const u16 data_buffer_size = rx->packet_buffer_truesize;
 	int pagecount;
 
 	/* Can't reuse if we only fit one buffer per page */
@@ -205,49 +211,53 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
 			   struct gve_rx_buf_state_dqo *buf_state,
 			   bool allow_direct)
 {
-	struct page *page = buf_state->page_info.page;
+	netmem_ref netmem = buf_state->page_info.netmem;
 
-	if (!page)
+	if (!netmem)
 		return;
 
-	page_pool_put_full_page(page->pp, page, allow_direct);
-	buf_state->page_info.page = NULL;
+	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
+	buf_state->page_info.netmem = 0;
 }
 
 static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
 				    struct gve_rx_buf_state_dqo *buf_state)
 {
-	struct gve_priv *priv = rx->gve;
-	struct page *page;
+	netmem_ref netmem;
 
-	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
-	page = page_pool_alloc(rx->dqo.page_pool,
-			       &buf_state->page_info.page_offset,
-			       &buf_state->page_info.buf_size, GFP_ATOMIC);
+	buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+	netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
+					&buf_state->page_info.page_offset,
+					&buf_state->page_info.buf_size,
+					GFP_ATOMIC);
 
-	if (!page)
+	if (!netmem)
 		return -ENOMEM;
 
-	buf_state->page_info.page = page;
-	buf_state->page_info.page_address = page_address(page);
-	buf_state->addr = page_pool_get_dma_addr(page);
+	buf_state->page_info.netmem = netmem;
+	buf_state->page_info.page_address = netmem_address(netmem);
+	buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
+	buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
 
 	return 0;
 }
 
 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
-					  struct gve_rx_ring *rx)
+					  struct gve_rx_ring *rx,
+					  bool xdp)
 {
 	u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
 	struct page_pool_params pp = {
 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 		.order = 0,
 		.pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
+		.nid = priv->numa_node,
 		.dev = &priv->pdev->dev,
 		.netdev = priv->dev,
 		.napi = &priv->ntfy_blocks[ntfy_id].napi,
 		.max_len = PAGE_SIZE,
-		.dma_dir = DMA_FROM_DEVICE,
+		.dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+		.offset = xdp ? XDP_PACKET_HEADROOM : 0,
 	};
 
 	return page_pool_create(&pp);
@@ -269,7 +279,7 @@ void gve_reuse_buffer(struct gve_rx_ring *rx,
 		      struct gve_rx_buf_state_dqo *buf_state)
 {
 	if (rx->dqo.page_pool) {
-		buf_state->page_info.page = NULL;
+		buf_state->page_info.netmem = 0;
 		gve_free_buf_state(rx, buf_state);
 	} else {
 		gve_dec_pagecnt_bias(&buf_state->page_info);
@@ -281,7 +291,24 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
 {
 	struct gve_rx_buf_state_dqo *buf_state;
 
-	if (rx->dqo.page_pool) {
+	if (rx->xsk_pool) {
+		buf_state = gve_alloc_buf_state(rx);
+		if (unlikely(!buf_state))
+			return -ENOMEM;
+
+		buf_state->xsk_buff = xsk_buff_alloc(rx->xsk_pool);
+		if (unlikely(!buf_state->xsk_buff)) {
+			xsk_set_rx_need_wakeup(rx->xsk_pool);
+			gve_free_buf_state(rx, buf_state);
+			return -ENOMEM;
+		}
+		/* Allocated xsk buffer. Clear wakeup in case it was set. */
+		xsk_clear_rx_need_wakeup(rx->xsk_pool);
+		desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+		desc->buf_addr =
+			cpu_to_le64(xsk_buff_xdp_get_dma(buf_state->xsk_buff));
+		return 0;
+	} else if (rx->dqo.page_pool) {
 		buf_state = gve_alloc_buf_state(rx);
 		if (WARN_ON_ONCE(!buf_state))
 			return -ENOMEM;
@@ -301,7 +328,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
 	}
 	desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
 	desc->buf_addr = cpu_to_le64(buf_state->addr +
-				     buf_state->page_info.page_offset +
+				     buf_state->page_info.page_offset +
+				     buf_state->page_info.pad);
 
 	return 0;
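
Note: the page pool change above boils down to picking a DMA direction and a headroom offset at pool-creation time, depending on whether XDP is enabled on the ring. The following standalone sketch shows only that selection using the generic page pool API; the create_rx_page_pool() wrapper name and its argument list are illustrative and not part of the driver.

#include <linux/bpf.h>			/* XDP_PACKET_HEADROOM */
#include <linux/dma-direction.h>
#include <linux/netdevice.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

/* Illustrative helper mirroring the xdp-dependent page_pool_params chosen
 * by gve_rx_create_page_pool() in the diff above. With XDP enabled, an XDP
 * program may rewrite the frame, so pages are mapped bidirectionally and
 * the standard XDP headroom is reserved in front of the packet data.
 */
static struct page_pool *create_rx_page_pool(struct device *dev,
					     struct napi_struct *napi,
					     u32 pool_size, bool xdp)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= pool_size,
		.dev		= dev,
		.napi		= napi,
		.max_len	= PAGE_SIZE,
		.dma_dir	= xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset		= xdp ? XDP_PACKET_HEADROOM : 0,
	};

	return page_pool_create(&pp);
}

The reserved offset is what the buffer-fill path later adds back as page_info.pad when programming desc->buf_addr, so the device writes the packet after the headroom rather than over it.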