author     Magnus Karlsson <magnus.karlsson@intel.com>  2020-08-28 11:26:24 +0300
committer  Daniel Borkmann <daniel@iogearbox.net>       2020-08-31 22:15:04 +0300
commit     9647c57b11e563f5b33a49ef72b347753917c21c (patch)
tree       b0f10d39c70160b836327b848855499a0594931c /include
parent     8ef4e27eb3f03edfbfbe5657b8061f2a47757037 (diff)
download   linux-9647c57b11e563f5b33a49ef72b347753917c21c.tar.xz
xsk: i40e: ice: ixgbe: mlx5: Test for dma_need_sync earlier for better performance
Test for dma_need_sync earlier to increase performance.
xsk_buff_dma_sync_for_cpu() takes an xdp_buff as its parameter, and
the xsk_buff_pool reference is dug out from that. Perf shows that this
dereference causes a lot of cache misses. But as the buffer pool is
now sent down to the driver at zero-copy initialization time, we might
as well use that pointer directly instead of going via the xsk_buff,
and we can do the test already in xsk_buff_dma_sync_for_cpu() instead
of in xp_dma_sync_for_cpu(). This gets rid of these cache misses.

Throughput increases by 3% for the xdpsock l2fwd sample application
on my machine.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-11-git-send-email-magnus.karlsson@intel.com
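
To make the cache-miss argument concrete, below is a minimal user-space
model of the change; the struct definitions are simplified stand-ins for
the kernel types, and only the old/new pair of sync helpers mirrors the
diff that follows.

#include <stdbool.h>

struct xsk_buff_pool { bool dma_need_sync; };

struct xdp_buff { void *data; };

struct xdp_buff_xsk {
	struct xdp_buff xdp;		/* embedded first, as in the kernel */
	struct xsk_buff_pool *pool;
};

static void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb) { (void)xskb; }

/* Before: every call chases xdp -> xskb -> pool to reach dma_need_sync,
 * a dependent load through a likely-cold cache line. The cast stands in
 * for container_of() and works because xdp is the first member. */
static inline void sync_for_cpu_old(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = (struct xdp_buff_xsk *)xdp;

	if (!xskb->pool->dma_need_sync)
		return;
	xp_dma_sync_for_cpu_slow(xskb);
}

/* After: the caller passes the pool pointer it already holds, so the
 * common !dma_need_sync case returns without touching xskb->pool. */
static inline void sync_for_cpu_new(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb = (struct xdp_buff_xsk *)xdp;

	if (!pool->dma_need_sync)
		return;
	xp_dma_sync_for_cpu_slow(xskb);
}

int main(void)
{
	struct xsk_buff_pool pool = { .dma_need_sync = false };
	struct xdp_buff_xsk xskb = { .pool = &pool };

	sync_for_cpu_old(&xskb.xdp);		/* dereferences xskb.pool */
	sync_for_cpu_new(&xskb.xdp, &pool);	/* does not */
	return 0;
}

In the old variant the branch condition depends on two chained loads
(xskb->pool, then pool->dma_need_sync); in the new one it depends on a
single load through a pointer the driver already has hot.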
Diffstat (limited to 'include')
-rw-r--r--  include/net/xdp_sock_drv.h  | 7
-rw-r--r--  include/net/xsk_buff_pool.h | 3
2 files changed, 5 insertions, 5 deletions
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index a7c7d2eff860..5b1ee8a9976d 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -99,10 +99,13 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
 	return xp_raw_get_data(pool, addr);
 }
 
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
 {
 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
 
+	if (!pool->dma_need_sync)
+		return;
+
 	xp_dma_sync_for_cpu(xskb);
 }
 
@@ -222,7 +225,7 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
 	return NULL;
 }
 
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
 {
 }
 
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 38d03a64c9ea..907537dddcac 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -114,9 +114,6 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
 void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
 
 static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
 {
-	if (!xskb->pool->dma_need_sync)
-		return;
-
 	xp_dma_sync_for_cpu_slow(xskb);
 }
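
For completeness, a hedged sketch of the caller side this signature
change implies; the ring type and field name below are hypothetical
placeholders, not the actual i40e/ice/ixgbe/mlx5 call sites (those are
updated in the driver patches of this series).

/* Hypothetical zero-copy Rx fragment. The driver caches the pool
 * pointer when the XSK socket is bound, so passing it here lets
 * xsk_buff_dma_sync_for_cpu() test dma_need_sync without first
 * dereferencing xskb->pool, the cache-missing load this patch removes. */
struct my_rx_ring {
	struct xsk_buff_pool *xsk_pool;		/* saved at zero-copy init */
};

static void my_rx_handle_buff(struct my_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
	/* hand the synced buffer to the XDP program as usual */
}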