author		Magnus Karlsson <magnus.karlsson@intel.com>	2020-08-28 11:26:15 +0300
committer	Daniel Borkmann <daniel@iogearbox.net>		2020-08-31 22:15:03 +0300
commit		1742b3d528690ae7773cf7bf2f01a90ee1de2fe0 (patch)
tree		eeb0924a4023c43b42bbe2f552b3481c1fd19442 /net/xdp/xsk_buff_pool.c
parent		29523c5e6716521f6e2fb59d7785e2bc0b1a993a (diff)
download	linux-1742b3d528690ae7773cf7bf2f01a90ee1de2fe0.tar.xz
xsk: i40e: ice: ixgbe: mlx5: Pass buffer pool to driver instead of umem
Replace the explicit umem reference passed to the driver in AF_XDP
zero-copy mode with the buffer pool instead. This is in preparation for
extending the functionality of the zero-copy mode so that umems can be
shared between queues on the same netdev and also between netdevs. In
this commit, only a umem reference has been added to the buffer pool
struct (sketched below). Later commits will add further entities to it:
ones that differ between queue ids and netdevs even though the umem is
shared between them.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-2-git-send-email-magnus.karlsson@intel.com
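For orientation, here is a minimal sketch of the pool struct after this
patch. It is not the full kernel definition: only members that appear in
the diff below are shown, and their types, order, and the elided members
are illustrative.

	/* Abridged, illustrative view of struct xsk_buff_pool as touched
	 * by this patch. Only fields visible in the diff below are listed;
	 * the real definition in the kernel has more members.
	 */
	struct xsk_buff_pool {
		struct xdp_umem *umem;	/* added by this patch: the (possibly
					 * shared) umem backing this pool */
		u32 chunk_size;
		bool unaligned;
		u32 frame_len;		/* chunk_size - headroom - XDP_PACKET_HEADROOM */
		struct list_head free_list;
		u32 free_heads_cnt;
		/* ... free_heads array, heads array, DMA state, etc. elided ... */
	};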
Diffstat (limited to 'net/xdp/xsk_buff_pool.c')
-rw-r--r--	net/xdp/xsk_buff_pool.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index a2044c245215..f3df3cba3d73 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -29,7 +29,7 @@ void xp_destroy(struct xsk_buff_pool *pool)
 	kvfree(pool);
 }
 
-struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
+struct xsk_buff_pool *xp_create(struct xdp_umem *umem, u32 chunks,
 				u32 chunk_size, u32 headroom, u64 size,
 				bool unaligned)
 {
@@ -54,6 +54,7 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
 	pool->chunk_size = chunk_size;
 	pool->unaligned = unaligned;
 	pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
+	pool->umem = umem;
 	INIT_LIST_HEAD(&pool->free_list);
 
 	for (i = 0; i < pool->free_heads_cnt; i++) {
@@ -63,7 +64,7 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
 		pool->free_heads[i] = xskb;
 	}
 
-	err = xp_addr_map(pool, pages, nr_pages);
+	err = xp_addr_map(pool, umem->pgs, umem->npgs);
 	if (!err)
 		return pool;
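To see what the signature change means for callers, a hedged before/after
sketch of a pool-creation site follows; the surrounding variable names are
illustrative, only the xp_create() prototypes come from the diff above.

	/* Before this patch: the caller unpacked the umem's page array. */
	pool = xp_create(umem->pgs, umem->npgs, chunks, chunk_size,
			 headroom, size, unaligned);

	/* After this patch: the umem is passed whole. xp_create() reads
	 * umem->pgs and umem->npgs itself for xp_addr_map() and stores the
	 * back-reference in pool->umem, which later patches in the series
	 * build on for umem sharing.
	 */
	pool = xp_create(umem, chunks, chunk_size, headroom, size, unaligned);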