summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDragos Tatulea <dtatulea@nvidia.com>2026-02-23 23:41:50 +0300
committerPaolo Abeni <pabeni@redhat.com>2026-02-26 12:54:23 +0300
commit0285cc3dac1b4ceb3dacdfce43627d41d649cf47 (patch)
tree327aa43ed0eec6d62039efa599317ed24a4df5f0
parent3a145cf492a3a154afb288cd460adf6721614eab (diff)
downloadlinux-0285cc3dac1b4ceb3dacdfce43627d41d649cf47.tar.xz
net/mlx5e: Alloc rq drop page based on calculated page_shift
An upcoming patch will allow setting the page order for RX pages to be greater than 0. Make sure that the drop page will also be allocated with the right size when that happens.

Take extra care when calculating the drop page size to account for page_shift < PAGE_SHIFT, which can happen for xsk.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20260223204155.1783580-11-tariqt@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c27
1 file changed, 17 insertions, 10 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6344dbb6335e..2d3d89707246 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -636,14 +636,18 @@ static void mlx5e_rq_timeout_work(struct work_struct *timeout_work)
static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
- rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
+ /* xsk can have page_shift < PAGE_SHIFT */
+ u16 page_order = max_t(s16, rq->mpwqe.page_shift - PAGE_SHIFT, 0);
+ u32 page_size = BIT(PAGE_SHIFT + page_order);
+
+ rq->wqe_overflow.page = alloc_pages(GFP_KERNEL, page_order);
if (!rq->wqe_overflow.page)
return -ENOMEM;
rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
- PAGE_SIZE, rq->buff.map_dir);
+ page_size, rq->buff.map_dir);
if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
- __free_page(rq->wqe_overflow.page);
+ __free_pages(rq->wqe_overflow.page, page_order);
return -ENOMEM;
}
return 0;
@@ -651,9 +655,12 @@ static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
- dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
- rq->buff.map_dir);
- __free_page(rq->wqe_overflow.page);
+ u16 page_order = max_t(s16, rq->mpwqe.page_shift - PAGE_SHIFT, 0);
+ u32 page_size = BIT(PAGE_SHIFT + page_order);
+
+ dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, page_size,
+ rq->buff.map_dir);
+ __free_pages(rq->wqe_overflow.page, page_order);
}
static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
@@ -884,15 +891,15 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (err)
goto err_rq_xdp_prog;
- err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
- if (err)
- goto err_rq_wq_destroy;
-
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
+ err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
+ if (err)
+ goto err_rq_wq_destroy;
+
rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
rq->mpwqe.pages_per_wqe =
mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,