Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/mt7996/dma.c')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/dma.c  326
1 file changed, 258 insertions(+), 68 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index c8bef0b2a144..659015f93d32 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -17,7 +17,7 @@ int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
idx -= MT_TXQ_ID(0);
- if (phy->mt76->band_idx == MT_BAND2)
+ if (wed == &dev->mt76.mmio.wed_hif2)
flags = MT_WED_Q_TX(0);
else
flags = MT_WED_Q_TX(idx);
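
[Editor's note] The hunk above switches the WED TX flag selection from the band index to the WED instance that actually services the queue. A minimal sketch of the new rule, assuming the MT_WED_Q_TX() encoding used throughout mt76 (the helper itself is hypothetical):

	/* Hypothetical helper mirroring the new condition: the second
	 * PCIe host's WED always drives its TX ring 0, while the
	 * primary WED keeps the per-queue index. */
	static u32 mt7996_wed_tx_flags(struct mt7996_dev *dev,
				       struct mtk_wed_device *wed, int idx)
	{
		if (wed == &dev->mt76.mmio.wed_hif2)
			return MT_WED_Q_TX(0);
		return MT_WED_Q_TX(idx);
	}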
@@ -83,36 +83,74 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
break;
}
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
/* band0 */
RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
MT7996_RXQ_RRO_BAND0);
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
- MT7996_RXQ_MSDU_PG_BAND0);
- RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
- MT7996_RXQ_TXFREE0);
- /* band1 */
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
- MT7996_RXQ_MSDU_PG_BAND1);
- /* band2 */
- RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
- MT7996_RXQ_RRO_BAND2);
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
- MT7996_RXQ_MSDU_PG_BAND2);
- RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
- MT7996_RXQ_TXFREE2);
-
- RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
- MT7996_RXQ_RRO_IND);
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND0,
+ MT7996_RXQ_MSDU_PG_BAND0);
+ if (is_mt7996(&dev->mt76)) {
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
+ MT_INT_RX_TXFREE_MAIN, MT7996_RXQ_TXFREE0);
+ /* band1 */
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND1,
+ MT7996_RXQ_MSDU_PG_BAND1);
+ /* band2 */
+ RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0,
+ MT_INT_RX_DONE_RRO_BAND2,
+ MT7996_RXQ_RRO_BAND2);
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND2,
+ MT7996_RXQ_MSDU_PG_BAND2);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0,
+ MT_INT_RX_TXFREE_TRI, MT7996_RXQ_TXFREE2);
+ } else {
+ RXQ_CONFIG(MT_RXQ_RRO_BAND1, WFDMA0,
+ MT_INT_RX_DONE_RRO_BAND1,
+ MT7996_RXQ_RRO_BAND1);
+ }
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0,
+ MT_INT_RX_DONE_RRO_IND,
+ MT7996_RXQ_RRO_IND);
+ else
+ RXQ_CONFIG(MT_RXQ_RRO_RXDMAD_C, WFDMA0,
+ MT_INT_RX_DONE_RRO_RXDMAD_C,
+ MT7996_RXQ_RRO_RXDMAD_C);
}
/* data tx queue */
- TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
if (is_mt7996(&dev->mt76)) {
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
- TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
+ if (dev->hif2) {
+ /* default bn1:ring19 bn2:ring21 */
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2,
+ MT7996_TXQ_BAND2);
+ } else {
+ /* single pcie bn0/1:ring18 bn2:ring19 */
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ }
} else {
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
+ if (dev->hif2) {
+ /* bn0:ring18 bn1:ring21 */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND2,
+ MT7996_TXQ_BAND2);
+ } else {
+ /* single pcie bn0:ring18 bn1:ring19 */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ }
}
/* mcu tx queue */
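
[Editor's note] For reference, the TX data ring layout that the restructured block above encodes, condensed from its in-line comments (summary only, not driver code):

	/* TX data ring layout (derived from the in-line comments):
	 *
	 *   chip    dual PCIe?  band0    band1    band2
	 *   mt7996  yes         ring18   ring19   ring21
	 *   mt7996  no          ring18   ring18   ring19  (bn0/1 shared)
	 *   mt7992  yes         ring18   ring21   -
	 *   mt7992  no          ring18   ring19   -
	 */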
@@ -166,11 +204,12 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
/* Rx TxFreeDone From MAC Rings */
val = is_mt7996(&dev->mt76) ? 4 : 8;
- if (is_mt7990(&dev->mt76) || (is_mt7996(&dev->mt76) && dev->has_rro))
+ if ((is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev)) ||
+ is_mt7990(&dev->mt76))
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
if (is_mt7990(&dev->mt76) && dev->hif2)
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
- else if (is_mt7996(&dev->mt76) && dev->has_rro)
+ else if (is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev))
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));
/* Rx Data Rings */
@@ -179,7 +218,7 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
/* Rx RRO Rings */
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
@@ -288,11 +327,14 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
if (mt7996_band_valid(dev, MT_BAND0))
irq_mask |= MT_INT_BAND0_RX_DONE;
- if (mt7996_band_valid(dev, MT_BAND1))
+ if (mt7996_band_valid(dev, MT_BAND1)) {
irq_mask |= MT_INT_BAND1_RX_DONE;
+ if (is_mt7992(&dev->mt76) && dev->hif2)
+ irq_mask |= MT_INT_RX_TXFREE_BAND1_EXT;
+ }
if (mt7996_band_valid(dev, MT_BAND2))
- irq_mask |= MT_INT_BAND2_RX_DONE;
+ irq_mask |= MT_INT_BAND2_RX_DONE | MT_INT_TX_RX_DONE_EXT;
if (mtk_wed_device_active(wed) && wed_reset) {
u32 wed_irq_mask = irq_mask;
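
[Editor's note] The interrupt-mask changes above can be read as the following assembly rule (hypothetical helper; the bit and predicate names are those used in the patch):

	static u32 build_rx_irq_mask(struct mt7996_dev *dev)
	{
		u32 mask = 0;

		if (mt7996_band_valid(dev, MT_BAND0))
			mask |= MT_INT_BAND0_RX_DONE;
		if (mt7996_band_valid(dev, MT_BAND1)) {
			mask |= MT_INT_BAND1_RX_DONE;
			/* mt7992 with a second PCIe host also needs the
			 * band1 tx-free interrupt on the hif2 bit */
			if (is_mt7992(&dev->mt76) && dev->hif2)
				mask |= MT_INT_RX_TXFREE_BAND1_EXT;
		}
		if (mt7996_band_valid(dev, MT_BAND2))
			mask |= MT_INT_BAND2_RX_DONE | MT_INT_TX_RX_DONE_EXT;

		return mask;
	}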
@@ -378,13 +420,48 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
mt76_set(dev, MT_WFDMA_HOST_CONFIG,
- MT_WFDMA_HOST_CONFIG_PDMA_BAND |
- MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
+
+ mt76_clear(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND0_PCIE1 |
+ MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 |
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+
+ if (is_mt7996(&dev->mt76))
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+ else
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND1_PCIE1);
/* AXI read outstanding number */
mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);
+ if (dev->hif2->speed < PCIE_SPEED_5_0GT ||
+ (dev->hif2->speed == PCIE_SPEED_5_0GT &&
+ dev->hif2->width < PCIE_LNK_X2)) {
+ mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ 0x1));
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
+ MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ 0x1));
+ } else if (dev->hif2->speed < PCIE_SPEED_8_0GT ||
+ (dev->hif2->speed == PCIE_SPEED_8_0GT &&
+ dev->hif2->width < PCIE_LNK_X2)) {
+ mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ 0x2));
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
+ MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ 0x2));
+ }
+
/* WFDMA rx threshold */
mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
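
[Editor's note] The new block above derates the AXI read outstanding number when the second PCIe link is slow or narrow. A standalone sketch of the rule, using the link speed and width enums from <linux/pci.h> (the helper is illustrative, not the driver's):

	#include <linux/pci.h>

	/* Returns the outstanding-read value to program, or 0 to keep
	 * the hardware default. Thresholds mirror the patch: below
	 * 5GT/s x2 use 1, below 8GT/s x2 use 2. */
	static u32 axi_outstanding_for_link(enum pci_bus_speed speed,
					    enum pcie_link_width width)
	{
		if (speed < PCIE_SPEED_5_0GT ||
		    (speed == PCIE_SPEED_5_0GT && width < PCIE_LNK_X2))
			return 0x1;
		if (speed < PCIE_SPEED_8_0GT ||
		    (speed == PCIE_SPEED_8_0GT && width < PCIE_LNK_X2))
			return 0x2;
		return 0;
	}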
@@ -397,27 +474,58 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
* so, redirect pcie0 rx ring3 interrupt to pcie1
*/
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
- dev->has_rro)
+ mt7996_has_hwrro(dev)) {
+ u32 intr = is_mt7996(&dev->mt76) ?
+ MT_WFDMA0_RX_INT_SEL_RING6 :
+ MT_WFDMA0_RX_INT_SEL_RING9 |
+ MT_WFDMA0_RX_INT_SEL_RING5;
+
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
- MT_WFDMA0_RX_INT_SEL_RING6);
- else
+ intr);
+ } else {
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
MT_WFDMA0_RX_INT_SEL_RING3);
+ }
}
mt7996_dma_start(dev, reset, true);
}
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u32 irq_mask;
int ret;
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ /* rxdmad_c */
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags = MT_WED_RRO_Q_RXDMAD_C;
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].wed = &mdev->mmio.wed;
+ else
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags |= MT_QFLAG_EMI_EN;
+ ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
+ MT_RXQ_ID(MT_RXQ_RRO_RXDMAD_C),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RRO_AP_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* We need to set cpu idx pointer before resetting the EMI
+ * queues.
+ */
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].emi_cpu_idx =
+ &dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
+ mt76_queue_reset(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C], true);
+ goto start_hw_rro;
+ }
+
/* ind cmd */
mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
- mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
MT_RXQ_ID(MT_RXQ_RRO_IND),
MT7996_RX_RING_SIZE,
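
[Editor's note] In the V3.1 branch above, the rxdmad_c ring falls back to an EMI-backed CPU index when no WED device is active, and the hunk's comment stresses the ordering: emi_cpu_idx must point at the shared ring slot before the queue is reset. Condensed from the patch (same field names as the hunk):

	struct mt76_queue *q = &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C];

	/* No active WED: the CPU index is read from shared memory, so
	 * wire up the pointer first, then reset the queue. */
	q->flags |= MT_QFLAG_EMI_EN;
	q->emi_cpu_idx = &dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
	mt76_queue_reset(dev, q, true);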
@@ -428,7 +536,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
/* rx msdu page queue for band0 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
MT7996_RX_RING_SIZE,
@@ -437,11 +547,13 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
if (ret)
return ret;
- if (mt7996_band_valid(dev, MT_BAND1)) {
+ if (mt7996_band_valid(dev, MT_BAND1) && is_mt7996(&dev->mt76)) {
/* rx msdu page queue for band1 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
MT7996_RX_RING_SIZE,
@@ -455,7 +567,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
/* rx msdu page queue for band2 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
MT7996_RX_RING_SIZE,
@@ -465,15 +579,42 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
return ret;
}
- irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
- MT_INT_TX_DONE_BAND2;
- mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
- mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
- mt7996_irq_enable(dev, irq_mask);
+start_hw_rro:
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ irq_mask = mdev->mmio.irqmask |
+ MT_INT_TX_DONE_BAND2;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
+ mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
+ mt7996_irq_enable(dev, irq_mask);
+ } else {
+ if (is_mt7996(&dev->mt76)) {
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2,
+ mt76_dma_rx_poll);
+ } else {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1,
+ mt76_dma_rx_poll);
+ }
+
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll);
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_RXDMAD_C,
+ mt76_dma_rx_poll);
+ } else {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_IND,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0,
+ mt76_dma_rx_poll);
+ }
+ mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE);
+ }
return 0;
}
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
int mt7996_dma_init(struct mt7996_dev *dev)
{
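
[Editor's note] Taken together, the rro-init hunks above split queue setup by hardware RRO mode, and only hand the interrupt mask to mtk_wed_device_start_hw_rro() when WED is active; otherwise the rings are polled by the host via mt76_queue_rx_init(..., mt76_dma_rx_poll). A condensed dispatch sketch (the setup_* helpers are hypothetical names for the inlined blocks):

	static int mt7996_rro_queues_sketch(struct mt7996_dev *dev)
	{
		switch (dev->mt76.hwrro_mode) {
		case MT76_HWRRO_V3_1:
			/* one rxdmad_c ring; EMI CPU index when WED is off */
			return setup_rxdmad_c(dev);		/* hypothetical */
		default:	/* MT76_HWRRO_V3 */
			/* ind-cmd ring plus per-band msdu page rings */
			return setup_ind_and_msdu_pg(dev);	/* hypothetical */
		}
	}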
@@ -560,7 +701,9 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* tx free notify event from WA for band0 */
- if (mtk_wed_device_active(wed) && !dev->has_rro) {
+ if (mtk_wed_device_active(wed) &&
+ ((is_mt7996(&dev->mt76) && !mt7996_has_hwrro(dev)) ||
+ (is_mt7992(&dev->mt76)))) {
dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
}
@@ -615,7 +758,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
/* tx free notify event from WA for mt7996 band2
* use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
*/
- if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
+ if (mtk_wed_device_active(wed_hif2) && !mt7996_has_hwrro(dev)) {
dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
}
@@ -630,6 +773,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
} else if (mt7996_band_valid(dev, MT_BAND1)) {
/* rx data queue for mt7992 band1 */
rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1].flags = MT_WED_Q_RX(1);
+ dev->mt76.q_rx[MT_RXQ_BAND1].wed = wed;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
MT_RXQ_ID(MT_RXQ_BAND1),
MT7996_RX_RING_SIZE,
@@ -641,6 +789,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
/* tx free notify event from WA for mt7992 band1 */
if (mt7996_has_wa(dev)) {
rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
+ if (mtk_wed_device_active(wed_hif2)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1_WA].flags =
+ MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_BAND1_WA].wed = wed_hif2;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
MT_RXQ_ID(MT_RXQ_BAND1_WA),
MT7996_RX_MCU_RING_SIZE,
@@ -651,12 +805,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
}
}
- if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
- dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
/* rx rro data queue for band0 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
- dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
MT_RXQ_ID(MT_RXQ_RRO_BAND0),
MT7996_RX_RING_SIZE,
@@ -665,23 +819,44 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret)
return ret;
- /* tx free notify event from WA for band0 */
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ if (is_mt7992(&dev->mt76)) {
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].flags =
+ MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
+ if (mtk_wed_device_active(wed) &&
+ mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
+ ret = mt76_queue_alloc(dev,
+ &dev->mt76.q_rx[MT_RXQ_RRO_BAND1],
+ MT_RXQ_ID(MT_RXQ_RRO_BAND1),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND1) + hif1_ofs);
+ if (ret)
+ return ret;
+ } else {
+ if (mtk_wed_device_active(wed)) {
+ /* tx free notify event from WA for band0 */
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ }
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
- MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
- MT7996_RX_MCU_RING_SIZE,
- MT7996_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
- if (ret)
- return ret;
+ ret = mt76_queue_alloc(dev,
+ &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+ if (ret)
+ return ret;
+ }
if (mt7996_band_valid(dev, MT_BAND2)) {
/* rx rro data queue for band2 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
- dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
+ if (mtk_wed_device_active(wed) &&
+ mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
MT_RXQ_ID(MT_RXQ_RRO_BAND2),
MT7996_RX_RING_SIZE,
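
[Editor's note] Several hunks in this function repeat one guard before pointing a queue at WED. A hypothetical helper capturing the pattern (both predicates are existing mtk_wed API calls):

	static void maybe_attach_wed_rx(struct mt76_queue *q,
					struct mtk_wed_device *wed)
	{
		/* Only hand the ring to WED when the device is active
		 * and advertises RX offload capability. */
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			q->wed = wed;
	}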
@@ -752,6 +927,10 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
mt76_tx_status_check(&dev->mt76, true);
+ if (mt7996_has_hwrro(dev) &&
+ !mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt7996_rro_msdu_page_map_free(dev);
+
/* reset wfsys */
if (force)
mt7996_wfsys_reset(dev);
@@ -775,21 +954,32 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
- if (mtk_wed_device_active(&dev->mt76.mmio.wed))
- if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
- mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
- continue;
+ struct mt76_queue *q = &dev->mt76.q_rx[i];
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ if (mt76_queue_is_wed_rro(q) ||
+ mt76_queue_is_wed_tx_free(q)) {
+ if (force && mt76_queue_is_wed_rro_data(q))
+ mt76_queue_reset(dev, q, false);
+ continue;
+ }
+ }
+ mt76_queue_reset(dev, q, true);
}
mt76_tx_status_check(&dev->mt76, true);
- mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && force &&
+ (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+ mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i])))
+ continue;
+
mt76_queue_rx_reset(dev, i);
+ }
mt7996_dma_enable(dev, !force);
}
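
[Editor's note] The reworked reset loop above boils down to this per-queue decision (sketch using the patch's predicates; mt76_queue_reset() here takes the new three-argument form the patch introduces):

	static void reset_one_rx_queue(struct mt7996_dev *dev,
				       struct mt76_queue *q, bool force)
	{
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    (mt76_queue_is_wed_rro(q) ||
		     mt76_queue_is_wed_tx_free(q))) {
			/* WED owns the ring; on a forced reset, RRO data
			 * rings still get a descriptor-only reset. */
			if (force && mt76_queue_is_wed_rro_data(q))
				mt76_queue_reset(dev, q, false);
			return;
		}
		mt76_queue_reset(dev, q, true);
	}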