| author | David S. Miller <davem@davemloft.net> | 2019-11-20 23:34:37 +0300 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2019-11-20 23:34:37 +0300 |
| commit | e07e75412ba4a8e68f47de345da53847f343f337 (patch) | |
| tree | 652f59998ff9dcb329f619c27fcae1aea50bd105 /include | |
| parent | cec2975f2b7058c42330a0f8164d94c6b7c8c446 (diff) | |
| parent | 07e13edbb6a6a9a407d6772a355836270e924aa8 (diff) | |
Merge branch 'page_pool-DMA-sync'
Lorenzo Bianconi says:
====================
add DMA-sync-for-device capability to page_pool API
Introduce the possibility to DMA-sync memory for the device in the page_pool API.
This feature allows syncing only the proper DMA size instead of always the full
buffer (dma_sync_single_for_device can be very costly); a configuration sketch
follows the sign-offs below.
Please note DMA-sync-for-CPU is still device driver responsibility.
Relying on the page_pool DMA sync, the mvneta driver improves its XDP_DROP rate
by about 170Kpps:
- XDP_DROP DMA sync managed by mvneta driver: ~420Kpps
- XDP_DROP DMA sync managed by page_pool API: ~585Kpps
Do not change the naming convention for the moment, since the changes will hit
other drivers as well; I will address it in another series.
Changes since v4:
- do not allow the driver to set max_len to 0
- convert PP_FLAG_DMA_MAP/PP_FLAG_DMA_SYNC_DEV to BIT() macro
Changes since v3:
- move dma_sync_for_device before putting the page in the ptr_ring in
  __page_pool_recycle_into_ring, since the ptr_ring can be consumed
  concurrently. Simplify the code by moving dma_sync_for_device
  before running __page_pool_recycle_direct/__page_pool_recycle_into_ring
  (sketched in the code after the diff below)
Changes since v2:
- rely on PP_FLAG_DMA_SYNC_DEV flag instead of dma_sync
Changes since v1:
- rename sync to dma_sync
- set dma_sync_size to 0xFFFFFFFF in page_pool_recycle_direct and
page_pool_put_page routines
- Improve documentation
====================
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
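
As a usage illustration (not part of this patch), here is a minimal sketch of how
an Rx driver might opt into the new capability. The flags, the page_pool_params
fields, and page_pool_create() come from this series and the existing API; the
function name and the concrete values (order, pool size, headroom) are hypothetical:

```c
#include <linux/bpf.h>		/* XDP_PACKET_HEADROOM */
#include <net/page_pool.h>

/* Hypothetical Rx-queue setup; sizes and headroom are illustrative. */
static struct page_pool *example_rxq_pool_create(struct device *dev)
{
	struct page_pool_params pp_params = {
		/* DMA-map the pages and let page_pool sync them for the
		 * device before they are handed back out.
		 */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		/* Sync only the region the device can actually write:
		 * skip the XDP headroom, bound the length (must not be 0,
		 * per the v4 change above).
		 */
		.offset		= XDP_PACKET_HEADROOM,
		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}
```

Setting max_len to the real Rx buffer size is what buys the ~170Kpps quoted above:
the core then never syncs more than the device could have touched.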
Diffstat (limited to 'include')
| -rw-r--r-- | include/net/page_pool.h | 24 |
1 file changed, 18 insertions(+), 6 deletions(-)
```diff
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index e2e1b7b1e8ba..cfbed00ba7ee 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -34,8 +34,18 @@
 #include <linux/ptr_ring.h>
 #include <linux/dma-direction.h>
 
-#define PP_FLAG_DMA_MAP		1 /* Should page_pool do the DMA map/unmap */
-#define PP_FLAG_ALL		PP_FLAG_DMA_MAP
+#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
+					* map/unmap
+					*/
+#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
+					* from page_pool will be
+					* DMA-synced-for-device according to
+					* the length provided by the device
+					* driver.
+					* Please note DMA-sync-for-CPU is still
+					* device driver responsibility
+					*/
+#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
 
 /*
  * Fast allocation side cache array/stack
@@ -65,6 +75,8 @@ struct page_pool_params {
 	int nid;  /* Numa node id to allocate from pages from */
 	struct device *dev; /* device, for DMA pre-mapping purposes */
 	enum dma_data_direction dma_dir; /* DMA mapping direction */
+	unsigned int	max_len; /* max DMA sync memory size */
+	unsigned int	offset;  /* DMA addr offset */
 };
 
 struct page_pool {
@@ -151,8 +163,8 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool,
 #endif
 
 /* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool,
-			  struct page *page, bool allow_direct);
+void __page_pool_put_page(struct page_pool *pool, struct page *page,
+			  unsigned int dma_sync_size, bool allow_direct);
 
 static inline void page_pool_put_page(struct page_pool *pool,
 				      struct page *page, bool allow_direct)
@@ -161,14 +173,14 @@ static inline void page_pool_put_page(struct page_pool *pool,
 	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
 	 */
 #ifdef CONFIG_PAGE_POOL
-	__page_pool_put_page(pool, page, allow_direct);
+	__page_pool_put_page(pool, page, -1, allow_direct);
 #endif
 }
 
 /* Very limited use-cases allow recycle direct */
 static inline void page_pool_recycle_direct(struct page_pool *pool,
 					    struct page *page)
 {
-	__page_pool_put_page(pool, page, true);
+	__page_pool_put_page(pool, page, -1, true);
 }
 
 /* Disconnects a page (from a page_pool). API users can have a need
```
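
For the recycle side, the sketch below paraphrases the behaviour this series
describes; it is not the kernel code verbatim, and recycle_into_cache()/
recycle_into_ring() are hypothetical stand-ins for the pool's internal recycle
paths:

```c
/* Sketch of the ordering from the v3 changelog: the sync-for-device
 * must run before the page is pushed into the ptr_ring, because the
 * ring can be consumed concurrently and the page may reach the device
 * right after the enqueue.
 */
static void sketch_put_page(struct page_pool *pool, struct page *page,
			    unsigned int dma_sync_size, bool allow_direct)
{
	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		dma_sync_single_range_for_device(pool->p.dev,
						 page->dma_addr,
						 pool->p.offset,
						 min(dma_sync_size,
						     pool->p.max_len),
						 pool->p.dma_dir);

	if (allow_direct)
		recycle_into_cache(pool, page);	/* per-CPU, NAPI context */
	else
		recycle_into_ring(pool, page);	/* ptr_ring, may race */
}
```

The min() against max_len is also why the inline helpers above can pass -1
(0xFFFFFFFF) as dma_sync_size: it simply means "sync up to max_len".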
