author     Lorenzo Bianconi <lorenzo@kernel.org>    2022-11-12 18:40:36 +0300
committer  Felix Fietkau <nbd@nbd.name>             2022-12-01 19:29:14 +0300
commit     c601baaf5cd89deda3643beee4b8a57ef151985c (patch)
tree       57ae7fcb73f7c7570a8f4605c0fe3985e7ce06b6 /drivers/net/wireless
parent     cd372b8c99c5a5cf6a464acebb7e4a79af7ec8ae (diff)
download   linux-c601baaf5cd89deda3643beee4b8a57ef151985c.tar.xz
wifi: mt76: add WED RX support to mt76_dma_rx_fill
Introduce the capability to refill WED RX buffers in the mt76_dma_rx_fill utility routine.

Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c | 33
1 file changed, 27 insertions(+), 6 deletions(-)
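
For orientation before the patch itself, here is a minimal, self-contained userspace sketch of the selection logic the new mt76_dma_rx_get_frag_cache() helper introduces: a queue flagged as WED RX is refilled from the WED-owned page fragment cache rather than the per-queue one. All types, flag values, and helper names below (frag_cache, rx_get_frag_cache(), Q_FLAG_WED, ...) are simplified stand-ins for illustration, not the mt76 or kernel definitions.

/* Simplified model of the frag-cache selection added by this patch.
 * All structs and flag values are illustrative stand-ins. */
#include <stdio.h>

#define Q_FLAG_WED       (1u << 0)          /* queue is WED-managed (stand-in) */
#define Q_WED_TYPE_SHIFT 1
#define Q_WED_TYPE_MASK  (0x3u << Q_WED_TYPE_SHIFT)
#define Q_WED_TYPE_RX    1u                 /* stand-in for MT76_WED_Q_RX */

struct frag_cache { const char *name; };

struct rx_queue {
	unsigned int flags;
	struct frag_cache rx_page;          /* per-queue cache */
};

struct device {
	struct frag_cache wed_rx_page;      /* cache owned by the WED block */
};

/* Mirrors the idea of mt76_dma_rx_get_frag_cache(): use the WED cache
 * only for queues flagged as WED RX, otherwise keep the per-queue one. */
static struct frag_cache *
rx_get_frag_cache(struct device *dev, struct rx_queue *q)
{
	unsigned int type = (q->flags & Q_WED_TYPE_MASK) >> Q_WED_TYPE_SHIFT;

	if ((q->flags & Q_FLAG_WED) && type == Q_WED_TYPE_RX)
		return &dev->wed_rx_page;
	return &q->rx_page;
}

int main(void)
{
	struct device dev = { .wed_rx_page = { "wed.rx_buf_ring.rx_page" } };
	struct rx_queue plain = { .flags = 0, .rx_page = { "q->rx_page" } };
	struct rx_queue wed_rx = {
		.flags = Q_FLAG_WED | (Q_WED_TYPE_RX << Q_WED_TYPE_SHIFT),
		.rx_page = { "q->rx_page" },
	};

	printf("plain queue  -> %s\n", rx_get_frag_cache(&dev, &plain)->name);
	printf("WED RX queue -> %s\n", rx_get_frag_cache(&dev, &wed_rx)->name);
	return 0;
}
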
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 4239adde4cca..cb6e3b358aca 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -550,14 +550,26 @@ free_skb:
 	return ret;
 }
 
+static struct page_frag_cache *
+mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	struct page_frag_cache *rx_page = &q->rx_page;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	if ((q->flags & MT_QFLAG_WED) &&
+	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX)
+		rx_page = &dev->mmio.wed.rx_buf_ring.rx_page;
+#endif
+	return rx_page;
+}
+
 static int
 mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 {
-	dma_addr_t addr;
-	void *buf;
-	int frames = 0;
+	struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q);
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
-	int offset = q->buf_offset;
+	int frames = 0, offset = q->buf_offset;
+	dma_addr_t addr;
 
 	if (!q->ndesc)
 		return 0;
@@ -565,9 +577,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
+		struct mt76_txwi_cache *t = NULL;
 		struct mt76_queue_buf qbuf;
+		void *buf = NULL;
+
+		if ((q->flags & MT_QFLAG_WED) &&
+		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+			t = mt76_get_rxwi(dev);
+			if (!t)
+				break;
+		}
 
-		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+		buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
 
@@ -580,7 +601,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		qbuf.addr = addr + offset;
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
 		frames++;
 	}
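
As a usage-level illustration of the reworked fill loop, the sketch below models the control flow the patch introduces: a WED RX queue first reserves an rxwi token and stops refilling once none is left, then allocates a buffer and queues the two together, while ordinary queues keep passing a NULL token as before. get_rxwi(), alloc_buf(), add_buf(), and rx_fill() are hypothetical stand-ins, not the real mt76 helpers.

/* Illustrative model of the refill loop after this patch; the helpers
 * are stand-ins, not mt76_get_rxwi()/page_frag_alloc()/mt76_dma_add_buf(). */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rxwi { int id; };

static int rxwi_left = 2;                   /* pretend token pool */

static struct rxwi *get_rxwi(void)          /* stand-in for mt76_get_rxwi() */
{
	if (rxwi_left <= 0)
		return NULL;

	struct rxwi *t = malloc(sizeof(*t));
	if (t)
		t->id = rxwi_left--;
	return t;
}

static void *alloc_buf(void)                /* stand-in for page_frag_alloc() */
{
	return malloc(2048);
}

static void add_buf(void *buf, struct rxwi *t)  /* stand-in for mt76_dma_add_buf() */
{
	printf("queued buf %p with rxwi id %d\n", buf, t ? t->id : -1);
}

static int rx_fill(bool wed_rx, int ndesc)
{
	int frames = 0;

	while (frames < ndesc - 1) {
		struct rxwi *t = NULL;
		void *buf;

		/* WED RX queues pair every buffer with an rxwi token;
		 * stop refilling once the token pool is exhausted. */
		if (wed_rx) {
			t = get_rxwi();
			if (!t)
				break;
		}

		buf = alloc_buf();
		if (!buf) {
			free(t);
			break;
		}

		add_buf(buf, t);            /* t stays NULL for normal queues */
		frames++;
	}
	return frames;
}

int main(void)
{
	printf("normal queue refilled %d buffers\n", rx_fill(false, 4));
	printf("WED RX queue refilled %d buffers\n", rx_fill(true, 8));
	return 0;
}

Note how, as in the diff, the token is reserved before the buffer allocation: if no rxwi is available the loop stops early, so a WED RX descriptor is never queued without a token to track it.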