author    | Yu-Yen Ting <steventing@realtek.com> | 2021-04-15 11:47:02 +0300
committer | Kalle Valo <kvalo@codeaurora.org> | 2021-04-21 12:37:42 +0300
commit    | a548909d7ad7e334c6c923a71f0b694d60980232 (patch)
tree      | 7f6e254e8995482ae4a1ae60ec32691d0468c3a1 /drivers/net
parent    | 559f6cb318375e9deb01d7d0e957d0d90a2db63d (diff)
download  | linux-a548909d7ad7e334c6c923a71f0b694d60980232.tar.xz
rtw88: Fix potential unrecoverable tx queue stop
If there are lots of packets to be transmitted, the driver checks whether enough
descriptors are available according to the read/write pointers of the tx queue.
Once the available descriptors are insufficient, ieee80211_stop_queue is called.
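For reference, that availability check is circular-ring arithmetic on the write
and read pointers. A minimal sketch of such a helper (modeled on avail_desc() in
the driver's pci.h; the exact off-by-one convention is an assumption here, with
one slot kept unused so that wp == rp means the ring is empty) could look like:

	/* Free descriptors between the write pointer and the read pointer. */
	static inline u32 avail_desc(u32 wp, u32 rp, u32 len)
	{
		if (rp > wp)
			return rp - wp - 1;	/* free slots before the reader */
		return len - wp + rp - 1;	/* wrap around the end of the ring */
	}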
Meanwhile, the TX ISR releases the tx resources after the packets have been
transmitted. This routine may call ieee80211_wake_queue after checking the
available descriptors.
The potential queue stop problem occurs when the tx queue is stopped due to
heavy traffic. There is then no chance to wake the queue up, because the read
pointer is not updated immediately; as a result, no more packets can be
transmitted on this queue.
This patch makes sure that ieee80211_wake_queue is called properly and avoids
the race condition when ring->r.rp and ring->queue_stopped are updated.
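In outline, and only as a simplified sketch of the change (the names rtwpci,
ring->r.rp/wp/len, rp_idx and irq_lock follow the patch below; the dequeue and
cleanup details of the ISR loop are paraphrased, not verbatim driver code), the
fix serializes the stop decision with the ISR and lets the ISR count each
reclaimed descriptor through a local rp_idx, so the wake condition can become
true before ring->r.rp itself is updated at the end of the batch:

	/* TX write path: take irq_lock so the stop decision cannot race with
	 * the ISR updating ring->r.rp and ring->queue_stopped.
	 */
	spin_lock_bh(&rtwpci->irq_lock);
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}
	spin_unlock_bh(&rtwpci->irq_lock);

	/* TX ISR: walk the completed descriptors with a local rp_idx so every
	 * reclaimed slot is visible to the wake check immediately, instead of
	 * waiting for the final ring->r.rp update after the whole batch.
	 */
	rp_idx = ring->r.rp;
	while (count--) {
		skb = skb_dequeue(&ring->queue);	/* reclaim one completed skb */

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
			ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
			ring->queue_stopped = false;
		}

		if (++rp_idx >= ring->r.len)
			rp_idx = 0;

		/* ... unmap, free and account the skb ... */
	}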
Signed-off-by: Yu-Yen Ting <steventing@realtek.com>
Signed-off-by: Ping-Ke Shih <pkshih@realtek.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/20210415084703.27255-3-pkshih@realtek.com
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/wireless/realtek/rtw88/pci.c | 10
1 file changed, 8 insertions, 2 deletions
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index b8115b31839e..adea3c7551ab 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -950,10 +950,12 @@ static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
 		return ret;
 
 	ring = &rtwpci->tx_rings[queue];
+	spin_lock_bh(&rtwpci->irq_lock);
 	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
 		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
 		ring->queue_stopped = true;
 	}
+	spin_unlock_bh(&rtwpci->irq_lock);
 
 	return 0;
 }
@@ -968,7 +970,7 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 	struct sk_buff *skb;
 	u32 count;
 	u32 bd_idx_addr;
-	u32 bd_idx, cur_rp;
+	u32 bd_idx, cur_rp, rp_idx;
 	u16 q_map;
 
 	ring = &rtwpci->tx_rings[hw_queue];
@@ -977,6 +979,7 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
 	cur_rp = bd_idx >> 16;
 	cur_rp &= TRX_BD_IDX_MASK;
+	rp_idx = ring->r.rp;
 	if (cur_rp >= ring->r.rp)
 		count = cur_rp - ring->r.rp;
 	else
@@ -1000,12 +1003,15 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 		}
 
 		if (ring->queue_stopped &&
-		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
+		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
 			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
 			ring->queue_stopped = false;
 		}
 
+		if (++rp_idx >= ring->r.len)
+			rp_idx = 0;
+
 		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
 
 		info = IEEE80211_SKB_CB(skb);