Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_txrx.h')
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_txrx.h | 288
1 files changed, 177 insertions, 111 deletions
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 36a0f828a6f8..75b977094741 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -7,8 +7,10 @@
 #include <linux/dim.h>
 
 #include <net/libeth/cache.h>
-#include <net/tcp.h>
+#include <net/libeth/types.h>
 #include <net/netdev_queues.h>
+#include <net/tcp.h>
+#include <net/xdp.h>
 
 #include "idpf_lan_txrx.h"
 #include "virtchnl2_lan_desc.h"
@@ -57,6 +59,9 @@
 /* Default vector sharing */
 #define IDPF_MBX_Q_VEC		1
 #define IDPF_MIN_Q_VEC		1
+#define IDPF_MIN_RDMA_VEC	2
+/* Data vector for NOIRQ queues */
+#define IDPF_RESERVED_VECS	1
 
 #define IDPF_DFLT_TX_Q_DESC_COUNT		512
 #define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512
@@ -107,8 +112,8 @@ do {				\
  */
 #define IDPF_TX_SPLITQ_RE_MIN_GAP	64
 
-#define IDPF_RX_BI_GEN_M		BIT(16)
-#define IDPF_RX_BI_BUFID_M		GENMASK(15, 0)
+#define IDPF_RFL_BI_GEN_M		BIT(16)
+#define IDPF_RFL_BI_BUFID_M		GENMASK(15, 0)
 
 #define IDPF_RXD_EOF_SPLITQ		VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
 #define IDPF_RXD_EOF_SINGLEQ		VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
@@ -117,10 +122,6 @@ do {				\
 	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
 	(txq)->next_to_clean - (txq)->next_to_use - 1)
 
-#define IDPF_TX_BUF_RSV_UNUSED(txq)	((txq)->stash->buf_stack.top)
-#define IDPF_TX_BUF_RSV_LOW(txq)	(IDPF_TX_BUF_RSV_UNUSED(txq) < \
-					 (txq)->desc_count >> 2)
-
 #define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 1)
 /* Determine the absolute number of completions pending, i.e. the number of
  * completions that are expected to arrive on the TX completion queue.
@@ -130,11 +131,7 @@ do {				\
 	0 : U32_MAX) + \
 	(txq)->num_completions_pending - (txq)->complq->num_completions)
 
-#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH	16
-/* Adjust the generation for the completion tag and wrap if necessary */
-#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
-	((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
-	0 : (txq)->compl_tag_cur_gen)
+#define IDPF_TXBUF_NULL	U32_MAX
 
 #define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
 
@@ -144,6 +141,8 @@ do {				\
 #define IDPF_TX_FLAGS_TUNNEL	BIT(3)
 #define IDPF_TX_FLAGS_TSYN	BIT(4)
 
+struct libeth_rq_napi_stats;
+
 union idpf_tx_flex_desc {
 	struct idpf_flex_tx_desc q; /* queue based scheduling */
 	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
@@ -152,18 +151,6 @@ union idpf_tx_flex_desc {
 #define idpf_tx_buf libeth_sqe
 
 /**
- * struct idpf_buf_lifo - LIFO for managing OOO completions
- * @top: Used to know how many buffers are left
- * @size: Total size of LIFO
- * @bufs: Backing array
- */
-struct idpf_buf_lifo {
-	u16 top;
-	u16 size;
-	struct idpf_tx_stash **bufs;
-};
-
-/**
  * struct idpf_tx_offload_params - Offload parameters for a given packet
  * @tx_flags: Feature flags enabled for this packet
  * @hdr_offsets: Offset parameter for single queue model
@@ -195,6 +182,9 @@ struct idpf_tx_offload_params {
  * @compl_tag: Associated tag for completion
  * @td_tag: Descriptor tunneling tag
  * @offload: Offload parameters
+ * @prev_ntu: stored TxQ next_to_use in case of rollback
+ * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
+ * @prev_refill_gen: stored refillq generation bit in case of packet rollback
  */
 struct idpf_tx_splitq_params {
 	enum idpf_tx_desc_dtype_value dtype;
@@ -205,6 +195,10 @@ struct idpf_tx_splitq_params {
 	};
 
 	struct idpf_tx_offload_params offload;
+
+	u16 prev_ntu;
+	u16 prev_refill_ntc;
+	bool prev_refill_gen;
 };
 
 enum idpf_tx_ctx_desc_eipt_offload {
@@ -287,11 +281,13 @@ struct idpf_ptype_state {
  *		      bit and Q_RFL_GEN is the SW bit.
  * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
  * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
- * @__IDPF_Q_POLL_MODE: Enable poll mode
 * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
  * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
  * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
  *		  queue
+ * @__IDPF_Q_NOIRQ: queue is polling-driven and has no interrupt
+ * @__IDPF_Q_XDP: this is an XDP queue
+ * @__IDPF_Q_XSK: the queue has an XSk pool installed
  * @__IDPF_Q_FLAGS_NBITS: Must be last
  */
 enum idpf_queue_flags_t {
@@ -299,10 +295,12 @@ enum idpf_queue_flags_t {
 	__IDPF_Q_RFL_GEN_CHK,
 	__IDPF_Q_FLOW_SCH_EN,
 	__IDPF_Q_SW_MARKER,
-	__IDPF_Q_POLL_MODE,
 	__IDPF_Q_CRC_EN,
 	__IDPF_Q_HSPLIT_EN,
 	__IDPF_Q_PTP,
+	__IDPF_Q_NOIRQ,
+	__IDPF_Q_XDP,
+	__IDPF_Q_XSK,
 	__IDPF_Q_FLAGS_NBITS,
 };
 
@@ -369,14 +367,17 @@ struct idpf_intr_reg {
  * @num_txq: Number of TX queues
  * @num_bufq: Number of buffer queues
  * @num_complq: number of completion queues
+ * @num_xsksq: number of XSk send queues
  * @rx: Array of RX queues to service
  * @tx: Array of TX queues to service
  * @bufq: Array of buffer queues to service
  * @complq: array of completion queues
+ * @xsksq: array of XSk send queues
  * @intr_reg: See struct idpf_intr_reg
- * @napi: napi handler
+ * @csd: XSk wakeup CSD
  * @total_events: Number of interrupts processed
  * @wb_on_itr: whether WB on ITR is enabled
+ * @napi: napi handler
  * @tx_dim: Data for TX net_dim algorithm
  * @tx_itr_value: TX interrupt throttling rate
  * @tx_intr_mode: Dynamic ITR or not
@@ -395,19 +396,24 @@ struct idpf_q_vector {
 	u16 num_txq;
 	u16 num_bufq;
 	u16 num_complq;
+	u16 num_xsksq;
 	struct idpf_rx_queue **rx;
 	struct idpf_tx_queue **tx;
 	struct idpf_buf_queue **bufq;
 	struct idpf_compl_queue **complq;
+	struct idpf_tx_queue **xsksq;
 
 	struct idpf_intr_reg intr_reg;
 	__cacheline_group_end_aligned(read_mostly);
 
 	__cacheline_group_begin_aligned(read_write);
-	struct napi_struct napi;
+	call_single_data_t csd;
+
 	u16 total_events;
 	bool wb_on_itr;
 
+	struct napi_struct napi;
+
 	struct dim tx_dim;
 	u16 tx_itr_value;
 	bool tx_intr_mode;
@@ -424,8 +430,8 @@ struct idpf_q_vector {
 
 	__cacheline_group_end_aligned(cold);
 };
-libeth_cacheline_set_assert(struct idpf_q_vector, 120,
-			    24 + sizeof(struct napi_struct) +
+libeth_cacheline_set_assert(struct idpf_q_vector, 136,
+			    56 + sizeof(struct napi_struct) +
 			    2 * sizeof(struct dim),
 			    8);
 
@@ -467,38 +473,32 @@ struct idpf_tx_queue_stats {
 #define IDPF_DIM_DEFAULT_PROFILE_IX		1
 
 /**
- * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
- * @buf_stack: Stack of empty buffers to store buffer info for out of order
- *	       buffer completions. See struct idpf_buf_lifo
- * @sched_buf_hash: Hash table to store buffers
- */
-struct idpf_txq_stash {
-	struct idpf_buf_lifo buf_stack;
-	DECLARE_HASHTABLE(sched_buf_hash, 12);
-} ____cacheline_aligned;
-
-/**
  * struct idpf_rx_queue - software structure representing a receive queue
  * @rx: universal receive descriptor array
  * @single_buf: buffer descriptor array in singleq
  * @desc_ring: virtual descriptor ring address
  * @bufq_sets: Pointer to the array of buffer queues in splitq mode
  * @napi: NAPI instance corresponding to this queue (splitq)
+ * @xdp_prog: attached XDP program
 * @rx_buf: See struct &libeth_fqe
 * @pp: Page pool pointer in singleq mode
- * @netdev: &net_device corresponding to this queue
 * @tail: Tail offset. Used for both queue models single and split.
 * @flags: See enum idpf_queue_flags_t
 * @idx: For RX queue, it is used to index to total RX queue across groups and
 *	 used for skb reporting.
 * @desc_count: Number of descriptors
+ * @num_xdp_txq: total number of XDP Tx queues
+ * @xdpsqs: shortcut for XDP Tx queues array
 * @rxdids: Supported RX descriptor ids
+ * @truesize: data buffer truesize in singleq
 * @rx_ptype_lkup: LUT of Rx ptypes
+ * @xdp_rxq: XDP queue info
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
- * @skb: Pointer to the skb
- * @truesize: data buffer truesize in singleq
+ * @xdp: XDP buffer with the current frame
+ * @xsk: current XDP buffer in XSk mode
+ * @pool: XSk pool if installed
 * @cached_phc_time: Cached PHC time for the Rx queue
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See union idpf_rx_queue_stats
@@ -523,30 +523,44 @@ struct idpf_rx_queue {
 		struct {
 			struct idpf_bufq_set *bufq_sets;
 			struct napi_struct *napi;
+			struct bpf_prog __rcu *xdp_prog;
 		};
 		struct {
 			struct libeth_fqe *rx_buf;
 			struct page_pool *pp;
+			void __iomem *tail;
 		};
 	};
-	struct net_device *netdev;
-	void __iomem *tail;
 
 	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 	u16 idx;
 	u16 desc_count;
 
-	u32 rxdids;
+	u32 num_xdp_txq;
+	union {
+		struct idpf_tx_queue **xdpsqs;
+		struct {
+			u32 rxdids;
+			u32 truesize;
+		};
+	};
 	const struct libeth_rx_pt *rx_ptype_lkup;
+
+	struct xdp_rxq_info xdp_rxq;
 	__cacheline_group_end_aligned(read_mostly);
 
 	__cacheline_group_begin_aligned(read_write);
-	u16 next_to_use;
-	u16 next_to_clean;
-	u16 next_to_alloc;
+	u32 next_to_use;
+	u32 next_to_clean;
+	u32 next_to_alloc;
 
-	struct sk_buff *skb;
-	u32 truesize;
+	union {
+		struct libeth_xdp_buff_stash xdp;
+		struct {
+			struct libeth_xdp_buff *xsk;
+			struct xsk_buff_pool *pool;
+		};
+	};
 	u64 cached_phc_time;
 
 	struct u64_stats_sync stats_sync;
@@ -566,8 +580,11 @@ struct idpf_rx_queue {
 	u16 rx_max_pkt_size;
 	__cacheline_group_end_aligned(cold);
 };
-libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
-			    88 + sizeof(struct u64_stats_sync),
+libeth_cacheline_set_assert(struct idpf_rx_queue,
+			    ALIGN(64, __alignof(struct xdp_rxq_info)) +
+			    sizeof(struct xdp_rxq_info),
+			    96 + offsetof(struct idpf_rx_queue, q_stats) -
+			    offsetofend(struct idpf_rx_queue, cached_phc_time),
 			    32);
 
 /**
@@ -579,36 +596,21 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
 * @desc_ring: virtual descriptor ring address
 * @tx_buf: See struct idpf_tx_buf
 * @txq_grp: See struct idpf_txq_group
+ * @complq: corresponding completion queue in XDP mode
 * @dev: Device back pointer for DMA mapping
+ * @pool: corresponding XSk pool if installed
 * @tail: Tail offset. Used for both queue models single and split
 * @flags: See enum idpf_queue_flags_t
 * @idx: For TX queue, it is used as index to map between TX queue group and
 *	 hot path TX pointers stored in vport. Used in both singleq/splitq.
 * @desc_count: Number of descriptors
 * @tx_min_pkt_len: Min supported packet length
- * @compl_tag_gen_s: Completion tag generation bit
- *	The format of the completion tag will change based on the TXQ
- *	descriptor ring size so that we can maintain roughly the same level
- *	of "uniqueness" across all descriptor sizes. For example, if the
- *	TXQ descriptor ring size is 64 (the minimum size supported), the
- *	completion tag will be formatted as below:
- *	15                 6 5         0
- *	--------------------------------
- *	|    GEN=0-1023     |IDX = 0-63|
- *	--------------------------------
- *
- *	This gives us 64*1024 = 65536 possible unique values. Similarly, if
- *	the TXQ descriptor ring size is 8160 (the maximum size supported),
- *	the completion tag will be formatted as below:
- *	15 13 12                      0
- *	--------------------------------
- *	|GEN |       IDX = 0-8159      |
- *	--------------------------------
- *
- *	This gives us 8*8160 = 65280 possible unique values.
+ * @thresh: XDP queue cleaning threshold
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
+ * @last_re: last descriptor index that RE bit was set
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 *		   the TX completion queue, it can be for any TXQ associated
 *		   with that completion queue. This means we can clean up to
@@ -619,11 +621,11 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
 *		    only once at the end of the cleaning routine.
 * @clean_budget: singleq only, queue cleaning budget
 * @cleaned_pkts: Number of packets cleaned for the above said case
- * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
- * @stash: Tx buffer stash for Flow-based scheduling mode
- * @compl_tag_bufid_m: Completion tag buffer id mask
- * @compl_tag_cur_gen: Used to keep track of current completion tag generation
- * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+ * @refillq: Pointer to refill queue
+ * @pending: number of pending descriptors to send in QB
+ * @xdp_tx: number of pending &xdp_buff or &xdp_frame buffers
+ * @timer: timer for XDP Tx queue cleanup
+ * @xdp_lock: lock for XDP Tx queues sharing
 * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
 * @tstamp_task: Work that handles Tx timestamp read
 * @stats_sync: See struct u64_stats_sync
@@ -632,6 +634,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
+ * @buf_pool_size: Total number of idpf_tx_buf
+ * @rel_q_id: relative virtchnl queue index
 */
 struct idpf_tx_queue {
 	__cacheline_group_begin_aligned(read_mostly);
@@ -644,36 +648,53 @@ struct idpf_tx_queue {
 		void *desc_ring;
 	};
 	struct libeth_sqe *tx_buf;
-	struct idpf_txq_group *txq_grp;
-	struct device *dev;
+	union {
+		struct idpf_txq_group *txq_grp;
+		struct idpf_compl_queue *complq;
+	};
+	union {
+		struct device *dev;
+		struct xsk_buff_pool *pool;
+	};
 	void __iomem *tail;
 
 	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 	u16 idx;
 	u16 desc_count;
 
-	u16 tx_min_pkt_len;
-	u16 compl_tag_gen_s;
+	union {
+		u16 tx_min_pkt_len;
+		u32 thresh;
+	};
 
 	struct net_device *netdev;
 	__cacheline_group_end_aligned(read_mostly);
 
 	__cacheline_group_begin_aligned(read_write);
-	u16 next_to_use;
-	u16 next_to_clean;
+	u32 next_to_use;
+	u32 next_to_clean;
 
 	union {
-		u32 cleaned_bytes;
-		u32 clean_budget;
-	};
-	u16 cleaned_pkts;
+		struct {
+			u16 last_re;
+			u16 tx_max_bufs;
 
-	u16 tx_max_bufs;
-	struct idpf_txq_stash *stash;
+			union {
+				u32 cleaned_bytes;
+				u32 clean_budget;
+			};
+			u16 cleaned_pkts;
 
-	u16 compl_tag_bufid_m;
-	u16 compl_tag_cur_gen;
-	u16 compl_tag_gen_max;
+			struct idpf_sw_queue *refillq;
+		};
+		struct {
+			u32 pending;
+			u32 xdp_tx;
+
+			struct libeth_xdpsq_timer *timer;
+			struct libeth_xdpsq_lock xdp_lock;
+		};
+	};
 
 	struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
 	struct work_struct *tstamp_task;
@@ -688,25 +709,36 @@ struct idpf_tx_queue {
 	dma_addr_t dma;
 
 	struct idpf_q_vector *q_vector;
+
+	u32 buf_pool_size;
+	u32 rel_q_id;
 	__cacheline_group_end_aligned(cold);
 };
 libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
-			    112 + sizeof(struct u64_stats_sync),
-			    24);
+			    104 +
+			    offsetof(struct idpf_tx_queue, cached_tstamp_caps) -
+			    offsetofend(struct idpf_tx_queue, timer) +
+			    offsetof(struct idpf_tx_queue, q_stats) -
+			    offsetofend(struct idpf_tx_queue, tstamp_task),
+			    32);
 
 /**
 * struct idpf_buf_queue - software structure representing a buffer queue
 * @split_buf: buffer descriptor array
- * @hdr_buf: &libeth_fqe for header buffers
- * @hdr_pp: &page_pool for header buffers
 * @buf: &libeth_fqe for data buffers
 * @pp: &page_pool for data buffers
+ * @xsk_buf: &xdp_buff for XSk Rx buffers
+ * @pool: &xsk_buff_pool on XSk queues
+ * @hdr_buf: &libeth_fqe for header buffers
+ * @hdr_pp: &page_pool for header buffers
 * @tail: Tail offset
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
+ * @thresh: refill threshold in XSk
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
+ * @pending: number of buffers to refill (Xsk)
 * @hdr_truesize: truesize for buffer headers
 * @truesize: truesize for data buffers
 * @q_id: Queue id
@@ -720,14 +752,24 @@ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
 struct idpf_buf_queue {
 	__cacheline_group_begin_aligned(read_mostly);
 	struct virtchnl2_splitq_rx_buf_desc *split_buf;
+	union {
+		struct {
+			struct libeth_fqe *buf;
+			struct page_pool *pp;
+		};
+		struct {
+			struct libeth_xdp_buff **xsk_buf;
+			struct xsk_buff_pool *pool;
+		};
+	};
 	struct libeth_fqe *hdr_buf;
 	struct page_pool *hdr_pp;
-	struct libeth_fqe *buf;
-	struct page_pool *pp;
 	void __iomem *tail;
 
 	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 	u32 desc_count;
+
+	u32 thresh;
 	__cacheline_group_end_aligned(read_mostly);
 
 	__cacheline_group_begin_aligned(read_write);
@@ -735,6 +777,7 @@ struct idpf_buf_queue {
 	u32 next_to_clean;
 	u32 next_to_alloc;
 
+	u32 pending;
 	u32 hdr_truesize;
 	u32 truesize;
 	__cacheline_group_end_aligned(read_write);
@@ -755,7 +798,9 @@ libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
 
 /**
 * struct idpf_compl_queue - software structure representing a completion queue
- * @comp: completion descriptor array
+ * @comp: 8-byte completion descriptor array
+ * @comp_4b: 4-byte completion descriptor array
+ * @desc_ring: virtual descriptor ring address
 * @txq_grp: See struct idpf_txq_group
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
@@ -775,7 +820,12 @@ libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
 */
 struct idpf_compl_queue {
 	__cacheline_group_begin_aligned(read_mostly);
-	struct idpf_splitq_tx_compl_desc *comp;
+	union {
+		struct idpf_splitq_tx_compl_desc *comp;
+		struct idpf_splitq_4b_tx_compl_desc *comp_4b;
+
+		void *desc_ring;
+	};
 	struct idpf_txq_group *txq_grp;
 
 	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
@@ -902,7 +952,6 @@ struct idpf_rxq_group {
 * @vport: Vport back pointer
 * @num_txq: Number of TX queues associated
 * @txqs: Array of TX queue pointers
- * @stashes: array of OOO stashes for the queues
 * @complq: Associated completion queue pointer, split queue only
 * @num_completions_pending: Total number of completions pending for the
 *			     completion queue, acculumated for all TX queues
@@ -917,7 +966,6 @@ struct idpf_txq_group {
 
 	u16 num_txq;
 	struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
-	struct idpf_txq_stash *stashes;
 
 	struct idpf_compl_queue *complq;
 
@@ -1010,6 +1058,17 @@ static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
 			     reg->dyn_ctl);
 }
 
+/**
+ * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
+ * @refillq: pointer to refillq containing buf_ids
+ */
+static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
+{
+	return (refillq->next_to_use > refillq->next_to_clean ?
+		0 : refillq->desc_count) +
+	       refillq->next_to_use - refillq->next_to_clean - 1;
+}
+
 int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
 void idpf_vport_init_num_qs(struct idpf_vport *vport,
 			    struct virtchnl2_create_vport *vport_msg);
@@ -1030,23 +1089,30 @@ int idpf_config_rss(struct idpf_vport *vport);
 int idpf_init_rss(struct idpf_vport *vport);
 void idpf_deinit_rss(struct idpf_vport *vport);
 int idpf_rx_bufs_init_all(struct idpf_vport *vport);
-void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
-		      unsigned int size);
-struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
+
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+					u32 q_num);
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+					u32 q_num);
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en);
+
 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
			   bool xmit_more);
 unsigned int idpf_size_to_txd_count(unsigned int size);
 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
-			   struct idpf_tx_buf *first, u16 ring_idx);
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
-					 struct sk_buff *skb);
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+					struct sk_buff *skb, u32 *buf_count);
 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
 netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
				  struct idpf_tx_queue *tx_q);
 netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
 bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
				      u16 cleaned_count);
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+				const struct libeth_xdp_buff *xdp,
+				struct libeth_rq_napi_stats *rs);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq);
+
 #endif /* !_IDPF_TXRX_H_ */
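The refill-queue helper idpf_tx_splitq_get_free_bufs() added near the end of the diff uses the same wrap-around ring arithmetic as the existing IDPF_DESC_UNUSED() macro (with the roles of the two indices swapped): count the entries between the consumer and producer indices, add desc_count when the producer has wrapped, and keep one slot in reserve so that equal indices always mean an empty ring. The standalone sketch below only illustrates that arithmetic with made-up values; the sw_ring struct, ring_entries_avail() name, and the numbers are hypothetical and not driver code, only the expression mirrors the helper above.

/* Illustration of the wrap-around math used by idpf_tx_splitq_get_free_bufs():
 * entries sitting between next_to_clean and next_to_use, minus one slot held
 * in reserve so that "use == clean" unambiguously means the ring is empty.
 */
#include <stdio.h>

struct sw_ring {
	unsigned int next_to_use;	/* producer index */
	unsigned int next_to_clean;	/* consumer index */
	unsigned int desc_count;	/* total ring entries */
};

static unsigned int ring_entries_avail(const struct sw_ring *r)
{
	/* Add desc_count only when the producer index has wrapped behind
	 * the consumer index, then subtract the one reserved slot.
	 */
	return (r->next_to_use > r->next_to_clean ? 0 : r->desc_count) +
	       r->next_to_use - r->next_to_clean - 1;
}

int main(void)
{
	struct sw_ring a = { .next_to_use = 5, .next_to_clean = 2, .desc_count = 8 };
	struct sw_ring b = { .next_to_use = 2, .next_to_clean = 5, .desc_count = 8 };

	printf("%u\n", ring_entries_avail(&a));	/* (5 - 2) - 1 = 2 */
	printf("%u\n", ring_entries_avail(&b));	/* 8 + 2 - 5 - 1 = 4 */
	return 0;
}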