summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJakub Kicinski <kuba@kernel.org>2026-03-28 06:30:26 +0300
committerJakub Kicinski <kuba@kernel.org>2026-03-28 06:30:26 +0300
commitc11c731a684e5e4e377e7e22f9fc2f29ce1478c7 (patch)
treec46b66d5a070e7f181d0d1611839c73b9dc541bb
parent2edfa31769a4add828a7e604b21cb82aaaa05925 (diff)
parentf3567dd428b264b3f06f881e5e85a738c7c910df (diff)
downloadlinux-c11c731a684e5e4e377e7e22f9fc2f29ce1478c7.tar.xz
Merge branch 'fix-page-fragment-handling-when-page_size-4k'
Dimitri Daskalakis says: ==================== Fix page fragment handling when PAGE_SIZE > 4K FBNIC operates on fixed size descriptors (4K). When the OS supports pages larger than 4K, we fragment the page across multiple descriptors. During performance testing, I found several issues with our page fragment handling, resulting in low throughput and potential RX stalls. ==================== Link: https://patch.msgid.link/20260324195123.3486219-1-dimitri.daskalakis1@gmail.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c6
2 files changed, 4 insertions, 4 deletions
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
index 08270db2dee8..3c4563c8f403 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -197,7 +197,7 @@ static int fbnic_dbg_bdq_desc_seq_show(struct seq_file *s, void *v)
return 0;
}
- for (i = 0; i <= ring->size_mask; i++) {
+ for (i = 0; i < (ring->size_mask + 1) * FBNIC_BD_FRAG_COUNT; i++) {
u64 bd = le64_to_cpu(ring->desc[i]);
seq_printf(s, "%04x %#04llx %#014llx\n", i,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 9fb91d4f3971..9cd85a0d0c3a 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -927,7 +927,7 @@ static void fbnic_fill_bdq(struct fbnic_ring *bdq)
/* Force DMA writes to flush before writing to tail */
dma_wmb();
- writel(i, bdq->doorbell);
+ writel(i * FBNIC_BD_FRAG_COUNT, bdq->doorbell);
}
}
@@ -2564,7 +2564,7 @@ static void fbnic_enable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
hpq->tail = 0;
hpq->head = 0;
- log_size = fls(hpq->size_mask);
+ log_size = fls(hpq->size_mask) + ilog2(FBNIC_BD_FRAG_COUNT);
/* Store descriptor ring address and size */
fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma));
@@ -2576,7 +2576,7 @@ static void fbnic_enable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
if (!ppq->size_mask)
goto write_ctl;
- log_size = fls(ppq->size_mask);
+ log_size = fls(ppq->size_mask) + ilog2(FBNIC_BD_FRAG_COUNT);
/* Add enabling of PPQ to BDQ control */
bdq_ctl |= FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE;