Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c')
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 173 |
1 file changed, 108 insertions, 65 deletions
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 993c354aa27a..61e613066140 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1,8 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (C) 2023 Intel Corporation */
 
-#include <net/libeth/rx.h>
-#include <net/libeth/tx.h>
+#include <net/libeth/xdp.h>
 
 #include "idpf.h"
 
@@ -180,6 +179,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
 }
 
 /**
+ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
+					  struct sk_buff *skb,
+					  struct idpf_tx_buf *first, u16 idx)
+{
+	struct libeth_sq_napi_stats ss = { };
+	struct libeth_cq_pp cp = {
+		.dev	= txq->dev,
+		.ss	= &ss,
+	};
+
+	u64_stats_update_begin(&txq->stats_sync);
+	u64_stats_inc(&txq->q_stats.dma_map_errs);
+	u64_stats_update_end(&txq->stats_sync);
+
+	/* clear dma mappings for failed tx_buf map */
+	for (;;) {
+		struct idpf_tx_buf *tx_buf;
+
+		tx_buf = &txq->tx_buf[idx];
+		libeth_tx_complete(tx_buf, &cp);
+		if (tx_buf == first)
+			break;
+		if (idx == 0)
+			idx = txq->desc_count;
+		idx--;
+	}
+
+	if (skb_is_gso(skb)) {
+		union idpf_tx_flex_desc *tx_desc;
+
+		/* If we failed a DMA mapping for a TSO packet, we will have
+		 * used one additional descriptor for a context
+		 * descriptor. Reset that here.
+		 */
+		tx_desc = &txq->flex_tx[idx];
+		memset(tx_desc, 0, sizeof(*tx_desc));
+		if (idx == 0)
+			idx = txq->desc_count;
+		idx--;
+	}
+
+	/* Update tail in case netdev_xmit_more was previously true */
+	idpf_tx_buf_hw_update(txq, idx, false);
+}
+
+/**
  * idpf_tx_singleq_map - Build the Tx base descriptor
  * @tx_q: queue to send buffer on
  * @first: first buffer info buffer to use
@@ -219,8 +270,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
 
-		if (dma_mapping_error(tx_q->dev, dma))
-			return idpf_tx_dma_map_error(tx_q, skb, first, i);
+		if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+			return idpf_tx_singleq_dma_map_error(tx_q, skb,
+							     first, i);
 
 		/* record length, and DMA address */
 		dma_unmap_len_set(tx_buf, len, size);
@@ -362,11 +414,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 {
 	struct idpf_tx_offload_params offload = { };
 	struct idpf_tx_buf *first;
+	u32 count, buf_count = 1;
 	int csum, tso, needed;
-	unsigned int count;
 	__be16 protocol;
 
-	count = idpf_tx_desc_count_required(tx_q, skb);
+	count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
 	if (unlikely(!count))
 		return idpf_tx_drop_skb(tx_q, skb);
 
@@ -602,7 +654,7 @@ static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
 	bool ipv4, ipv6;
 
 	/* check if Rx checksum is enabled */
-	if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
+	if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
 		return;
 
 	/* check if HW has decoded the packet and checksum */
@@ -741,7 +793,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q,
 {
 	u64 mask, qw1;
 
-	if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
+	if (!libeth_rx_pt_has_hash(rx_q->xdp_rxq.dev, decoded))
 		return;
 
 	mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
@@ -769,7 +821,7 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
				      const union virtchnl2_rx_desc *rx_desc,
				      struct libeth_rx_pt decoded)
 {
-	if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
+	if (!libeth_rx_pt_has_hash(rx_q->xdp_rxq.dev, decoded))
 		return;
 
 	if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
@@ -781,7 +833,7 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
 }
 
 /**
- * idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
+ * __idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
  *				       descriptor
  * @rx_q: Rx ring being processed
 * @skb: pointer to current skb being populated
@@ -793,17 +845,14 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
 * other fields within the skb.
 */
 static void
-idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
-				   struct sk_buff *skb,
-				   const union virtchnl2_rx_desc *rx_desc,
-				   u16 ptype)
+__idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
+				     struct sk_buff *skb,
+				     const union virtchnl2_rx_desc *rx_desc,
+				     u16 ptype)
 {
	struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
	struct libeth_rx_csum csum_bits;
 
-	/* modifies the skb - consumes the enet header */
-	skb->protocol = eth_type_trans(skb, rx_q->netdev);
-
	/* Check if we're using base mode descriptor IDs */
	if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
		idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded);
@@ -814,7 +863,6 @@ idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
	}
 
	idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded);
-	skb_record_rx_queue(skb, rx_q->idx);
 }
 
 /**
@@ -950,6 +998,32 @@ idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
		idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
 }
 
+static bool
+idpf_rx_singleq_process_skb_fields(struct sk_buff *skb,
+				   const struct libeth_xdp_buff *xdp,
+				   struct libeth_rq_napi_stats *rs)
+{
+	struct libeth_rqe_info fields;
+	struct idpf_rx_queue *rxq;
+
+	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+	idpf_rx_singleq_extract_fields(rxq, xdp->desc, &fields);
+	__idpf_rx_singleq_process_skb_fields(rxq, skb, xdp->desc,
+					     fields.ptype);
+
+	return true;
+}
+
+static void idpf_xdp_run_pass(struct libeth_xdp_buff *xdp,
+			      struct napi_struct *napi,
+			      struct libeth_rq_napi_stats *rs,
+			      const union virtchnl2_rx_desc *desc)
+{
+	libeth_xdp_run_pass(xdp, NULL, napi, rs, desc, NULL,
+			    idpf_rx_singleq_process_skb_fields);
+}
+
 /**
  * idpf_rx_singleq_clean - Reclaim resources after receive completes
  * @rx_q: rx queue to clean
@@ -959,14 +1033,15 @@ idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
  */
 static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
 {
-	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
-	struct sk_buff *skb = rx_q->skb;
+	struct libeth_rq_napi_stats rs = { };
	u16 ntc = rx_q->next_to_clean;
+	LIBETH_XDP_ONSTACK_BUFF(xdp);
	u16 cleaned_count = 0;
-	bool failure = false;
+
+	libeth_xdp_init_buff(xdp, &rx_q->xdp, &rx_q->xdp_rxq);
 
	/* Process Rx packets bounded by budget */
-	while (likely(total_rx_pkts < (unsigned int)budget)) {
+	while (likely(rs.packets < budget)) {
		struct libeth_rqe_info fields = { };
		union virtchnl2_rx_desc *rx_desc;
		struct idpf_rx_buf *rx_buf;
@@ -993,73 +1068,41 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
		idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
 
		rx_buf = &rx_q->rx_buf[ntc];
-		if (!libeth_rx_sync_for_cpu(rx_buf, fields.len))
-			goto skip_data;
-
-		if (skb)
-			idpf_rx_add_frag(rx_buf, skb, fields.len);
-		else
-			skb = idpf_rx_build_skb(rx_buf, fields.len);
-
-		/* exit if we failed to retrieve a buffer */
-		if (!skb)
-			break;
-
-skip_data:
-		rx_buf->page = NULL;
+		libeth_xdp_process_buff(xdp, rx_buf, fields.len);
+		rx_buf->netmem = 0;
 
		IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
		cleaned_count++;
 
		/* skip if it is non EOP desc */
-		if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb))
+		if (idpf_rx_singleq_is_non_eop(rx_desc) ||
+		    unlikely(!xdp->data))
			continue;
 
 #define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
				  VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M)
		if (unlikely(idpf_rx_singleq_test_staterr(rx_desc,
							  IDPF_RXD_ERR_S))) {
-			dev_kfree_skb_any(skb);
-			skb = NULL;
+			libeth_xdp_return_buff_slow(xdp);
			continue;
		}
 
-		/* pad skb if needed (to make valid ethernet frame) */
-		if (eth_skb_pad(skb)) {
-			skb = NULL;
-			continue;
-		}
-
-		/* probably a little skewed due to removing CRC */
-		total_rx_bytes += skb->len;
-
-		/* protocol */
-		idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc,
-						   fields.ptype);
-
-		/* send completed skb up the stack */
-		napi_gro_receive(rx_q->pp->p.napi, skb);
-		skb = NULL;
-
-		/* update budget accounting */
-		total_rx_pkts++;
+		idpf_xdp_run_pass(xdp, rx_q->pp->p.napi, &rs, rx_desc);
	}
 
-	rx_q->skb = skb;
-
	rx_q->next_to_clean = ntc;
+	libeth_xdp_save_buff(&rx_q->xdp, xdp);
 
	page_pool_nid_changed(rx_q->pp, numa_mem_id());
	if (cleaned_count)
-		failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
+		idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
 
	u64_stats_update_begin(&rx_q->stats_sync);
-	u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts);
-	u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes);
+	u64_stats_add(&rx_q->q_stats.packets, rs.packets);
+	u64_stats_add(&rx_q->q_stats.bytes, rs.bytes);
	u64_stats_update_end(&rx_q->stats_sync);
 
-	/* guarantee a trip back through this routine if there was a failure */
-	return failure ? budget : (int)total_rx_pkts;
+	return rs.packets;
 }
 
 /**
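
One subtlety in the new idpf_tx_singleq_dma_map_error() above is the backwards
walk that releases already-mapped buffers: when the index hits 0 mid-packet it
must wrap to the top of the ring ("idx = txq->desc_count; idx--;"). Below is a
minimal, self-contained user-space sketch of just that wraparound walk; the
DESC_COUNT, first and idx values are hypothetical, and the printf stands in for
the driver's libeth_tx_complete() call.

#include <stdio.h>

#define DESC_COUNT 8	/* hypothetical ring size */

int main(void)
{
	/* hypothetical failure: DMA mapping failed at idx 1 for a packet
	 * whose first buffer sits at idx 6, i.e. the packet wrapped past
	 * the end of the ring
	 */
	unsigned int first = 6, idx = 1;

	for (;;) {
		/* the driver calls libeth_tx_complete() here */
		printf("release tx_buf[%u]\n", idx);
		if (idx == first)
			break;
		if (idx == 0)
			idx = DESC_COUNT;	/* wrap to the last slot */
		idx--;
	}

	return 0;
}

Running it prints releases for indices 1, 0, 7, 6, showing the unwind crossing
the ring boundary back to the packet's first buffer.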
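The shape of the converted Rx poll loop is easier to see with the descriptor
handling stripped away. The sketch below is a condensed reading of
idpf_rx_singleq_clean() as changed above, not a drop-in function: rx_buf,
fields, rx_desc and non_eop are placeholders for the elided descriptor logic,
and only helpers actually used by the patch appear.

/* Condensed shape of the converted Rx poll loop.  The on-stack buff
 * carries a partially built multi-buffer frame across NAPI polls via
 * init/save against the queue's rx_q->xdp slot.
 */
static int rx_poll_shape(struct idpf_rx_queue *rx_q, int budget)
{
	struct libeth_rq_napi_stats rs = { };
	LIBETH_XDP_ONSTACK_BUFF(xdp);

	/* resume any frame left incomplete by the previous poll */
	libeth_xdp_init_buff(xdp, &rx_q->xdp, &rx_q->xdp_rxq);

	while (rs.packets < budget) {
		/* ... fetch rx_desc, extract fields, pick rx_buf ... */

		/* attach the buffer as head or frag of the current frame */
		libeth_xdp_process_buff(xdp, rx_buf, fields.len);

		if (non_eop)
			continue;	/* more buffers follow for this frame */

		/* run the (absent) XDP program, then build and pass the skb;
		 * rs.packets/rs.bytes are updated by the helpers
		 */
		idpf_xdp_run_pass(xdp, rx_q->pp->p.napi, &rs, rx_desc);
	}

	/* stash an unfinished frame for the next poll */
	libeth_xdp_save_buff(&rx_q->xdp, xdp);

	return rs.packets;
}

This lifecycle is why the old rx_q->skb field and the failure/budget return
dance disappear: partial-frame state now lives in the saved libeth_xdp_buff,
and byte/packet accounting moves into the libeth stats structure.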