author     Maxim Mikityanskiy <maximmi@mellanox.com>    2019-06-26 17:35:34 +0300
committer  Daniel Borkmann <daniel@iogearbox.net>       2019-06-27 23:53:27 +0300
commit     84a0a2310d6de247335574649726cb000c7c0074
tree       f9e491f07725ec48bd03d8fbf595296f5871e757
parent     b9673cf5558c0ae1be787611884d4131633f31a8
download   linux-84a0a2310d6de247335574649726cb000c7c0074.tar.xz
net/mlx5e: XDP_TX from UMEM support
When an XDP program returns XDP_TX and the RQ is XSK-enabled, the frame
needs special handling: convert_to_xdp_frame allocates a new page and
copies the data into it, while our driver expects the xdp_frame to
point to the same memory as the xdp_buff. Handle this case separately:
map the new page here, and on completion unmap it and call
xdp_return_frame.
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
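
For orientation, here is a condensed sketch of the TX-side branch this patch
adds to mlx5e_xmit_xdp_buff(), written out without the diff markers so the two
paths are easier to compare. It uses the identifiers from the diff below, but
it is not the verbatim driver code: the "_sketch" name is illustrative, and
descriptor setup and unrelated checks are omitted.

/* Condensed sketch (not the verbatim driver code) of the XDP_TX decision:
 * a frame converted from a zero-copy (UMEM) xdp_buff lives in a freshly
 * allocated page that must be mapped here and released via xdp_return_frame
 * on completion, while a frame from the regular RQ stays in the already
 * mapped RX page and only needs a cache sync plus page recycling.
 */
static bool mlx5e_xmit_xdp_buff_sketch(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
                                       struct mlx5e_dma_info *di, struct xdp_buff *xdp)
{
        struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
        struct mlx5e_xdp_xmit_data xdptxd;
        struct mlx5e_xdp_info xdpi;
        dma_addr_t dma_addr;

        if (unlikely(!xdpf))
                return false;

        xdptxd.data = xdpf->data;
        xdptxd.len  = xdpf->len;

        if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) {
                /* XSK RQ: the UMEM buffer was already returned via the ZCA;
                 * don't recycle it a second time, map the new page, and
                 * release it with dma_unmap_single + xdp_return_frame on
                 * TX completion (MODE_FRAME).
                 */
                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */

                xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
                dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(sq->pdev, dma_addr)) {
                        xdp_return_frame(xdpf);
                        return false;
                }
                xdpi.frame.xdpf = xdpf;
                xdpi.frame.dma_addr = dma_addr;
        } else {
                /* Regular RQ: the xdp_frame points into the RX page, which
                 * is already mapped bidirectionally, so only a cache sync is
                 * needed; the page is recycled on TX completion (MODE_PAGE).
                 */
                xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
                dma_addr = di->addr + (xdpf->data - (void *)xdpf);
                dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
                                           DMA_TO_DEVICE);
                xdpi.page.rq = rq;
                xdpi.page.di = *di;
        }

        xdptxd.dma_addr = dma_addr;
        return sq->xmit_xdp_frame(sq, &xdptxd, &xdpi);
}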
 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 42 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index b3e118fc4521..1364bdff702c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -69,14 +69,48 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
         xdptxd.data = xdpf->data;
         xdptxd.len  = xdpf->len;
 
-        xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+        if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) {
+                /* The xdp_buff was in the UMEM and was copied into a newly
+                 * allocated page. The UMEM page was returned via the ZCA, and
+                 * this new page has to be mapped at this point and has to be
+                 * unmapped and returned via xdp_return_frame on completion.
+                 */
+
+                /* Prevent double recycling of the UMEM page. Even in case this
+                 * function returns false, the xdp_buff shouldn't be recycled,
+                 * as it was already done in xdp_convert_zc_to_xdp_frame.
+                 */
+                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
+
+                xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
 
-        dma_addr = di->addr + (xdpf->data - (void *)xdpf);
-        dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+                dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
+                                          DMA_TO_DEVICE);
+                if (dma_mapping_error(sq->pdev, dma_addr)) {
+                        xdp_return_frame(xdpf);
+                        return false;
+                }
 
-        xdptxd.dma_addr = dma_addr;
-        xdpi.page.rq = rq;
-        xdpi.page.di = *di;
+                xdptxd.dma_addr = dma_addr;
+                xdpi.frame.xdpf = xdpf;
+                xdpi.frame.dma_addr = dma_addr;
+        } else {
+                /* Driver assumes that convert_to_xdp_frame returns an xdp_frame
+                 * that points to the same memory region as the original
+                 * xdp_buff. It allows to map the memory only once and to use
+                 * the DMA_BIDIRECTIONAL mode.
+                 */
+
+                xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+
+                dma_addr = di->addr + (xdpf->data - (void *)xdpf);
+                dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
+                                           DMA_TO_DEVICE);
+
+                xdptxd.dma_addr = dma_addr;
+                xdpi.page.rq = rq;
+                xdpi.page.di = *di;
+        }
 
         return sq->xmit_xdp_frame(sq, &xdptxd, &xdpi);
 }
@@ -298,13 +332,13 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 
                 switch (xdpi.mode) {
                 case MLX5E_XDP_XMIT_MODE_FRAME:
-                        /* XDP_REDIRECT */
+                        /* XDP_TX from the XSK RQ and XDP_REDIRECT */
                         dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
                                          xdpi.frame.xdpf->len, DMA_TO_DEVICE);
                         xdp_return_frame(xdpi.frame.xdpf);
                         break;
                 case MLX5E_XDP_XMIT_MODE_PAGE:
-                        /* XDP_TX */
+                        /* XDP_TX from the regular RQ */
                         mlx5e_page_release(xdpi.page.rq, &xdpi.page.di, recycle);
                         break;
                 default: