Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c')
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 82
1 file changed, 30 insertions(+), 52 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index fe64febf7436..98f26a4b835f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
#include "idpf.h"
@@ -224,6 +225,7 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_buf, dma, dma);
+ tx_buf->type = LIBETH_SQE_FRAG;
/* align size to end of page */
max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
@@ -245,6 +247,8 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
i = 0;
}
+ tx_q->tx_buf[i].type = LIBETH_SQE_EMPTY;
+
dma += max_data;
size -= max_data;
@@ -282,13 +286,13 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
- IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+ first->type = LIBETH_SQE_SKB;
+ first->rs_idx = i;
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
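
This hunk moves the publish point from a next_to_watch descriptor pointer to a type tag plus rs_idx, the ring index of the packet's last descriptor. The cleaning side pairs its read of type with an smp_rmb() (see the clean-path hunk further down). The following self-contained C11 sketch models that publish/consume ordering with release/acquire semantics; it is a portable userspace equivalent of the pattern, not what the kernel code literally does, and all names are hypothetical:

#include <stdatomic.h>

struct model_pub_buf {
	unsigned int rs_idx;	/* last descriptor of the packet */
	_Atomic int type;	/* written last: 0 = empty, 1 = skb head */
};

static void model_producer_publish(struct model_pub_buf *buf,
				   unsigned int last)
{
	buf->rs_idx = last;	/* plain write first */
	atomic_store_explicit(&buf->type, 1,
			      memory_order_release);	/* then publish */
}

static int model_consumer_fetch(struct model_pub_buf *buf,
				unsigned int *last)
{
	if (atomic_load_explicit(&buf->type, memory_order_acquire) != 1)
		return 0;	/* not published yet, no work pending */
	*last = buf->rs_idx;	/* safe: ordered after the type load */
	return 1;
}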
@@ -306,8 +310,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;
- memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[ntu].ctx_entry = true;
+ txq->tx_buf[ntu].type = LIBETH_SQE_CTX;
ctx_desc = &txq->base_ctx[ntu];
@@ -396,11 +399,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
first->skb = skb;
if (tso) {
- first->gso_segs = offload.tso_segs;
- first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+ first->packets = offload.tso_segs;
+ first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
} else {
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
- first->gso_segs = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
}
idpf_tx_singleq_map(tx_q, first, &offload);
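
The renamed fields match the stats the clean path now reports through libeth: bytes counts on-wire bytes, so for TSO each segment beyond the first adds another copy of the headers on top of skb->len. A compilable sketch of that accounting (the function name is made up for illustration):

/*
 * On-wire byte count for a TSO packet, modelling the hunk above:
 * skb->len already includes one copy of the headers, and each of the
 * remaining (segs - 1) segments carries its own copy on the wire.
 */
static unsigned int model_tso_wire_bytes(unsigned int skb_len,
					 unsigned int segs,
					 unsigned int hdr_len)
{
	return skb_len + (segs - 1) * hdr_len;
}

For example, a 4254-byte skb with 54-byte headers split into 3 segments is charged 4254 + 2 * 54 = 4362 bytes.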
@@ -420,10 +423,15 @@ out_drop:
static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
- unsigned int total_bytes = 0, total_pkts = 0;
+ struct libeth_sq_napi_stats ss = { };
struct idpf_base_tx_desc *tx_desc;
u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = &ss,
+ .napi = napi_budget,
+ };
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
struct netdev_queue *nq;
@@ -441,47 +449,23 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
* such. We can skip this descriptor since there is no buffer
* to clean.
*/
- if (tx_buf->ctx_entry) {
- /* Clear this flag here to avoid stale flag values when
- * this buffer is used for actual data in the future.
- * There are cases where the tx_buf struct / the flags
- * field will not be cleared before being reused.
- */
- tx_buf->ctx_entry = false;
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
+ tx_buf->type = LIBETH_SQE_EMPTY;
goto fetch_next_txq_desc;
}
- /* if next_to_watch is not set then no work pending */
- eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
- if (!eop_desc)
- break;
-
- /* prevent any other reads prior to eop_desc */
+ /* prevent any other reads prior to type */
smp_rmb();
+ eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
+
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->qw1 &
cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
break;
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
-
/* update the statistics for this packet */
- total_bytes += tx_buf->bytecount;
- total_pkts += tx_buf->gso_segs;
-
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- /* unmap skb header data */
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
- dma_unmap_len_set(tx_buf, len, 0);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
@@ -495,13 +479,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
}
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
/* update budget only if we did something */
@@ -521,11 +499,11 @@ fetch_next_txq_desc:
ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
- *cleaned += total_pkts;
+ *cleaned += ss.packets;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.packets, total_pkts);
- u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
+ u64_stats_add(&tx_q->q_stats.packets, ss.packets);
+ u64_stats_add(&tx_q->q_stats.bytes, ss.bytes);
u64_stats_update_end(&tx_q->stats_sync);
np = netdev_priv(tx_q->netdev);
@@ -533,7 +511,7 @@ fetch_next_txq_desc:
dont_wake = np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev);
- __netif_txq_completed_wake(nq, total_pkts, total_bytes,
+ __netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
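
For reference, here is a self-contained model of what the two libeth_tx_complete() call sites above delegate per slot: a type-dispatched release of whatever the slot owns, stats accumulated only on the SKB (end-of-packet) slot, and the slot reset so a stale type can never be re-read. Field names follow the patch and the enum comes from the sketch after the first hunk; the body is an assumption-level illustration, not the real <net/libeth/tx.h> implementation, which does the actual DMA unmapping and skb freeing via the kernel APIs noted in the comments:

/* Simplified completion model, reusing enum model_sqe_type above. */
struct model_stats { unsigned long packets, bytes; };

struct model_tx_buf {
	enum model_sqe_type type;
	unsigned int packets, bytes;	/* filled on the SKB slot only */
};

static void model_tx_complete(struct model_tx_buf *buf,
			      struct model_stats *ss)
{
	switch (buf->type) {
	case MODEL_SQE_SKB:
		ss->packets += buf->packets;	/* gso segs for TSO, else 1 */
		ss->bytes += buf->bytes;
		/* real code: unmap the head, napi_consume_skb() the skb */
		break;
	case MODEL_SQE_FRAG:
		/* real code: dma_unmap_page() for the fragment */
		break;
	default:
		break;			/* EMPTY or CTX: nothing owned */
	}
	buf->type = MODEL_SQE_EMPTY;	/* slot is clean for reuse */
}

This is also why the old memset() plus ctx_entry flag in idpf_tx_singleq_get_ctx_desc() could be dropped: every slot leaves the clean path tagged EMPTY, so no stale state survives into the next use.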