Diffstat (limited to 'include/net/xsk_buff_pool.h')
 include/net/xsk_buff_pool.h | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index bacb33f1e3e5..cac56e6b0869 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -28,10 +28,8 @@ struct xdp_buff_xsk {
dma_addr_t dma;
dma_addr_t frame_dma;
struct xsk_buff_pool *pool;
- u64 orig_addr;
- struct list_head free_list_node;
- struct list_head xskb_list_node;
-};
+ struct list_head list_node;
+} __aligned_largest;
#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
@@ -55,6 +53,8 @@ struct xsk_buff_pool {
refcount_t users;
struct xdp_umem *umem;
struct work_struct work;
+ /* Protects generic receive in shared and non-shared umem mode. */
+ spinlock_t rx_lock;
struct list_head free_list;
struct list_head xskb_list;
u32 heads_cnt;
@@ -78,6 +78,7 @@ struct xsk_buff_pool {
u32 chunk_size;
u32 chunk_shift;
u32 frame_len;
+ u32 xdp_zc_max_segs;
u8 tx_metadata_len; /* inherited from umem */
u8 cached_need_wakeup;
bool uses_need_wakeup;
@@ -120,7 +121,6 @@ void xp_free(struct xdp_buff_xsk *xskb);
static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
u64 addr)
{
- xskb->orig_addr = addr;
xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}
@@ -143,6 +143,14 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
+
+struct xdp_desc_ctx {
+ dma_addr_t dma;
+ struct xsk_tx_metadata *meta;
+};
+
+struct xdp_desc_ctx xp_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr);
+
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
return xskb->dma;
@@ -185,7 +193,7 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
!(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}
-static inline bool xp_mb_desc(struct xdp_desc *desc)
+static inline bool xp_mb_desc(const struct xdp_desc *desc)
{
return desc->options & XDP_PKT_CONTD;
}
@@ -222,14 +230,19 @@ static inline void xp_release(struct xdp_buff_xsk *xskb)
xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}
-static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb,
+ struct xsk_buff_pool *pool)
{
- u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+ u64 orig_addr = xskb->xdp.data - pool->addrs;
+ u64 offset;
+
+ if (!pool->unaligned)
+ return orig_addr;
- offset += xskb->pool->headroom;
- if (!xskb->pool->unaligned)
- return xskb->orig_addr + offset;
- return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+ offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+ offset += pool->headroom;
+ orig_addr -= offset;
+ return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
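
For reference, the reworked xp_get_handle() above derives the ring handle purely from pointers instead of the removed orig_addr field: in aligned mode the handle is simply xdp.data's position within the umem, while in unaligned mode the chunk base address is kept in the low bits and the data offset (pool headroom included) is packed into the upper bits via XSK_UNALIGNED_BUF_OFFSET_SHIFT. The stand-alone sketch below models that arithmetic in user space; the shift value (48, as defined in the if_xdp.h uAPI) and the example addresses are illustrative assumptions, not taken from this patch.

/* Illustrative user-space model of the handle encoding done by the new
 * xp_get_handle(); the types and constants are simplified stand-ins,
 * not the kernel definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48   /* assumed uAPI value from if_xdp.h */

static uint64_t encode_handle(uint64_t data, uint64_t pool_addrs,
                              uint64_t data_hard_start, uint32_t headroom,
                              int unaligned)
{
        uint64_t orig_addr = data - pool_addrs; /* position of xdp.data within the umem */
        uint64_t offset;

        if (!unaligned)
                return orig_addr;               /* aligned mode: offset is part of the address */

        offset = data - data_hard_start;        /* data offset within the frame, past the headroom */
        offset += headroom;                     /* total offset from the chunk base */
        orig_addr -= offset;                    /* back up to the chunk base address */
        return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

int main(void)
{
        /* Hypothetical layout: umem mapped at offset 0, chunk base 0x1000,
         * 256 bytes of pool headroom, data advanced 64 bytes into the frame.
         */
        uint64_t hard_start = 0x1000 + 256;
        uint64_t data = hard_start + 64;

        printf("aligned handle:   0x%llx\n",
               (unsigned long long)encode_handle(data, 0, hard_start, 256, 0));
        printf("unaligned handle: 0x%llx\n",
               (unsigned long long)encode_handle(data, 0, hard_start, 256, 1));
        return 0;
}

With these numbers the unaligned handle carries 0x1000 in the low bits and 0x140 in the bits above the shift, which the header's existing extraction helpers (xp_unaligned_extract_addr()/xp_unaligned_extract_offset()) later split back apart; carrying the offset out-of-band is what lets unaligned chunks start at arbitrary addresses.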