author		Maxim Mikityanskiy <maximmi@nvidia.com>		2022-09-30 19:29:00 +0300
committer	Jakub Kicinski <kuba@kernel.org>		2022-10-01 23:30:21 +0300
commit		ddb7afeee28bc37b4815470d00f0c8db3fcabd2e (patch)
tree		1b81ba3b8dbe0dfe952bfd92618d3b2315c72b48 /drivers/net/ethernet/mellanox
parent		96d37d861a09ba4b6ea08b87fa1c173c1af522b1 (diff)
download	linux-ddb7afeee28bc37b4815470d00f0c8db3fcabd2e.tar.xz
net/mlx5e: Optimize RQ page deallocation
mlx5e_free_rx_mpwqe loops over all pages of an MPWQE, calling
mlx5e_page_release for the ones that are not scheduled for XDP_TX or
XDP_REDIRECT; mlx5e_page_release then checks whether it's an XSK RQ or a
regular one for each page/XSK frame. This check can be moved outside the
loop to reduce the number of branches, as sketched below.
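In condensed form (an illustrative sketch only, taken from the en_rx.c
hunk at the end of this page; declarations and surrounding context are
elided). Before, the XSK check is buried inside mlx5e_page_release and
taken once per page:

	for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
		if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
			/* branches on rq->xsk_pool for every page */
			mlx5e_page_release(rq, &alloc_units[i], recycle);

After, the check is taken once per WQE, and each loop body calls its
deallocator directly:

	if (rq->xsk_pool) {
		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
			if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
				xsk_buff_free(alloc_units[i].xsk);
	} else {
		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
			if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
				mlx5e_page_release_dynamic(rq, alloc_units[i].page, recycle);
	}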
mlx5e_free_rx_wqe loops over all fragments, calling mlx5e_page_release
for the ones that are last in a page; mlx5e_page_release then checks
whether it's an XSK RQ or a regular one for each fragment. Using the
fact that XSK doesn't support multiple fragments, this can be optimized
for both the XSK and regular cases (a condensed view of the result
follows the list):
1. Make an early check for XSK and call its deallocator directly, saving
   three branches (the loop condition, frag->last_in_page, and the
   selection of the deallocator).
2. Call the regular deallocator directly in the non-XSK case, saving one
   branch per fragment, except for the first one.
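In condensed form (again an illustrative sketch based on the en_rx.c
hunk below; the signature continuation is inferred from the call sites,
and the comments are trimmed):

	static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
					     struct mlx5e_wqe_frag_info *wi,
					     bool recycle)
	{
		int i;

		if (rq->xsk_pool) {
			/* XSK WQEs carry exactly one frame, so free it
			 * directly; `recycle` is ignored, the frame always
			 * returns to the Reuse Ring.
			 */
			xsk_buff_free(wi->au->xsk);
			return;
		}

		/* Regular RQ: release each fragment that is the last one
		 * in its page.
		 */
		for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
			mlx5e_put_rx_frag(rq, wi, recycle);
	}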
After the changes, mlx5e_page_release is removed, as there are no
callers left.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c	|  2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx.c		| 41
2 files changed, 24 insertions(+), 19 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 7bd49f0b1271..661d2d5748f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -253,7 +253,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 		return NULL; /* page/packet was consumed by XDP */
 
 	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
-	 * will be handled by mlx5e_put_rx_frag.
+	 * will be handled by mlx5e_free_rx_wqe.
 	 * On SKB allocation failure, NULL is returned.
 	 */
 	return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d0db6a66cb46..36eda4c958a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -317,20 +317,6 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool rec
 	}
 }
 
-static inline void mlx5e_page_release(struct mlx5e_rq *rq,
-				      union mlx5e_alloc_unit *au,
-				      bool recycle)
-{
-	if (rq->xsk_pool)
-		/* The `recycle` parameter is ignored, and the page is always
-		 * put into the Reuse Ring, because there is no way to return
-		 * the page to the userspace when the interface goes down.
-		 */
-		xsk_buff_free(au->xsk);
-	else
-		mlx5e_page_release_dynamic(rq, au->page, recycle);
-}
-
 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
 				    struct mlx5e_wqe_frag_info *frag)
 {
@@ -352,7 +338,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
 				     bool recycle)
 {
 	if (frag->last_in_page)
-		mlx5e_page_release(rq, frag->au, recycle);
+		mlx5e_page_release_dynamic(rq, frag->au->page, recycle);
 }
 
 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -395,6 +381,15 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
 {
 	int i;
 
+	if (rq->xsk_pool) {
+		/* The `recycle` parameter is ignored, and the page is always
+		 * put into the Reuse Ring, because there is no way to return
+		 * the page to the userspace when the interface goes down.
+		 */
+		xsk_buff_free(wi->au->xsk);
+		return;
+	}
+
 	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
 		mlx5e_put_rx_frag(rq, wi, recycle);
 }
@@ -463,9 +458,19 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle
 
 	no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
 
-	for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
-		if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
-			mlx5e_page_release(rq, &alloc_units[i], recycle);
+	if (rq->xsk_pool) {
+		/* The `recycle` parameter is ignored, and the page is always
+		 * put into the Reuse Ring, because there is no way to return
+		 * the page to the userspace when the interface goes down.
+		 */
+		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
+			if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+				xsk_buff_free(alloc_units[i].xsk);
+	} else {
+		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
+			if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+				mlx5e_page_release_dynamic(rq, alloc_units[i].page, recycle);
+	}
 }
 
 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)