author | Michal Kazior <michal.kazior@tieto.com> | 2016-05-19 11:37:48 +0300
---|---|---
committer | Johannes Berg <johannes.berg@intel.com> | 2016-06-09 12:31:27 +0300
commit | 80a83cfc434b1e3afe38974570b460db4898bec6 (patch) |
tree | f2774b8d416c3341ec57312fe39c6633d380be70 /net/mac80211/tx.c |
parent | 0662799023de4be686263b0a4f4b7910999172b9 (diff) |
download | linux-80a83cfc434b1e3afe38974570b460db4898bec6.tar.xz |
mac80211: skip netdev queue control with software queuing
Qdiscs are designed with no regard for 802.11
aggregation requirements and hand out packets
one at a time with no guarantee that they are
destined for the same tid. This does more harm
than good no matter how fairly a given qdisc
may behave on an Ethernet interface.
Software queuing used per-AC netdev subqueue
congestion control whenever a global AC limit
was hit. In practice this meant a single station
or tid queue could easily starve the others.
This could interact badly with qdiscs or simply
result in poor aggregation performance.
Increasing the AC limit would only increase the
induced latency, which is also bad.
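For reference, the per-AC congestion control being removed worked roughly as in the condensed sketch below, lifted from the lines deleted in the diff further down (not standalone code; sdata, txq and local belong to the old ieee80211_drv_tx()):

```c
/* Old enqueue path (see the deleted lines in the diff below): one global
 * per-AC counter gates the entire netdev subqueue, so a single busy
 * station/tid stalls every other station sharing that AC.
 */
ac = txq->ac;
txqi = to_txq_info(txq);
atomic_inc(&sdata->txqs_len[ac]);
if (atomic_read(&sdata->txqs_len[ac]) >= local->hw.txq_ac_max_pending)
	netif_stop_subqueue(sdata->dev, ac);
```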
Disabling qdiscs by default and performing
taildrop instead of netdev subqueue congestion
control, on the other hand, makes it possible
for tid queues to fill up "in the meantime"
while preventing stations from starving each
other. This increases aggregation opportunities
and should allow drivers based on software
queuing to achieve better performance by
utilizing airtime more efficiently with big
aggregates.
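The replacement policy is the per-queue taildrop added as ieee80211_txq_enqueue() in the diff below; condensed, it amounts to:

```c
/* New enqueue path: on overflow, only the newest frame of the offending
 * queue is freed; no netdev subqueue is ever stopped, so other
 * stations/tids keep flowing and can keep filling their queues.
 */
if (atomic_read(&sdata->num_tx_queued) >= TOTAL_MAX_TX_BUFFER ||
    txqi->queue.qlen >= STA_MAX_TX_BUFFER) {
	ieee80211_free_txskb(&local->hw, skb);	/* taildrop */
	return;
}

atomic_inc(&sdata->num_tx_queued);
txqi->byte_cnt += skb->len;
__skb_queue_tail(&txqi->queue, skb);
```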
Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r-- | net/mac80211/tx.c | 75
1 file changed, 43 insertions(+), 32 deletions(-)
```diff
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 203044379ce0..3e77da195ce8 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1236,27 +1236,21 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 	return TX_CONTINUE;
 }
 
-static void ieee80211_drv_tx(struct ieee80211_local *local,
-			     struct ieee80211_vif *vif,
-			     struct ieee80211_sta *pubsta,
-			     struct sk_buff *skb)
+static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
+					  struct ieee80211_vif *vif,
+					  struct ieee80211_sta *pubsta,
+					  struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_tx_control control = {
-		.sta = pubsta,
-	};
 	struct ieee80211_txq *txq = NULL;
-	struct txq_info *txqi;
-	u8 ac;
 
 	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
 	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
-		goto tx_normal;
+		return NULL;
 
 	if (!ieee80211_is_data(hdr->frame_control))
-		goto tx_normal;
+		return NULL;
 
 	if (pubsta) {
 		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
@@ -1267,25 +1261,28 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
 	}
 
 	if (!txq)
-		goto tx_normal;
+		return NULL;
 
-	ac = txq->ac;
-	txqi = to_txq_info(txq);
-	atomic_inc(&sdata->txqs_len[ac]);
-	if (atomic_read(&sdata->txqs_len[ac]) >= local->hw.txq_ac_max_pending)
-		netif_stop_subqueue(sdata->dev, ac);
+	return to_txq_info(txq);
+}
 
-	spin_lock_bh(&txqi->queue.lock);
-	txqi->byte_cnt += skb->len;
-	__skb_queue_tail(&txqi->queue, skb);
-	spin_unlock_bh(&txqi->queue.lock);
+static void ieee80211_txq_enqueue(struct ieee80211_local *local,
+				  struct txq_info *txqi,
+				  struct sk_buff *skb)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txqi->txq.vif);
 
-	drv_wake_tx_queue(local, txqi);
+	lockdep_assert_held(&txqi->queue.lock);
 
-	return;
+	if (atomic_read(&sdata->num_tx_queued) >= TOTAL_MAX_TX_BUFFER ||
+	    txqi->queue.qlen >= STA_MAX_TX_BUFFER) {
+		ieee80211_free_txskb(&local->hw, skb);
+		return;
+	}
 
-tx_normal:
-	drv_tx(local, &control, skb);
+	atomic_inc(&sdata->num_tx_queued);
+	txqi->byte_cnt += skb->len;
+	__skb_queue_tail(&txqi->queue, skb);
 }
 
 struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
@@ -1296,7 +1293,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 	struct txq_info *txqi = container_of(txq, struct txq_info, txq);
 	struct ieee80211_hdr *hdr;
 	struct sk_buff *skb = NULL;
-	u8 ac = txq->ac;
 
 	spin_lock_bh(&txqi->queue.lock);
 
@@ -1307,12 +1303,9 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 	if (!skb)
 		goto out;
 
+	atomic_dec(&sdata->num_tx_queued);
 	txqi->byte_cnt -= skb->len;
 
-	atomic_dec(&sdata->txqs_len[ac]);
-	if (__netif_subqueue_stopped(sdata->dev, ac))
-		ieee80211_propagate_queue_wake(local, sdata->vif.hw_queue[ac]);
-
 	hdr = (struct ieee80211_hdr *)skb->data;
 	if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) {
 		struct sta_info *sta = container_of(txq->sta, struct sta_info,
@@ -1343,7 +1336,9 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
 			       struct sk_buff_head *skbs,
 			       bool txpending)
 {
+	struct ieee80211_tx_control control = {};
 	struct sk_buff *skb, *tmp;
+	struct txq_info *txqi;
 	unsigned long flags;
 
 	skb_queue_walk_safe(skbs, skb, tmp) {
@@ -1358,6 +1353,21 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
 		}
 #endif
 
+		txqi = ieee80211_get_txq(local, vif, sta, skb);
+		if (txqi) {
+			info->control.vif = vif;
+
+			__skb_unlink(skb, skbs);
+
+			spin_lock_bh(&txqi->queue.lock);
+			ieee80211_txq_enqueue(local, txqi, skb);
+			spin_unlock_bh(&txqi->queue.lock);
+
+			drv_wake_tx_queue(local, txqi);
+
+			continue;
+		}
+
 		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 		if (local->queue_stop_reasons[q] ||
 		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
@@ -1400,9 +1410,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
 		info->control.vif = vif;
+		control.sta = sta;
 
 		__skb_unlink(skb, skbs);
-		ieee80211_drv_tx(local, vif, sta, skb);
+		drv_tx(local, &control, skb);
 	}
 
 	return true;
```
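On the driver side, the interface is unchanged by this patch: a driver that implements the wake_tx_queue op is notified (via drv_wake_tx_queue()) when frames are enqueued and pulls them with ieee80211_tx_dequeue() at its own pace. A minimal sketch of that consumer, where every mydrv_* identifier is a hypothetical placeholder rather than anything from this patch:

```c
#include <net/mac80211.h>

/* Hypothetical hand-off to the hardware; a real driver would build
 * A-MPDUs here or push descriptors to firmware.
 */
static void mydrv_queue_for_hw(struct ieee80211_hw *hw,
			       struct ieee80211_sta *sta, u8 tid,
			       struct sk_buff *skb);

/* Called by mac80211 (through drv_wake_tx_queue()) whenever frames land
 * on a software queue. The driver decides when and how much to drain,
 * which is what creates the aggregation opportunities described above.
 */
static void mydrv_wake_tx_queue(struct ieee80211_hw *hw,
				struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	/* ieee80211_tx_dequeue() returns NULL once the queue is empty. */
	while ((skb = ieee80211_tx_dequeue(hw, txq)))
		mydrv_queue_for_hw(hw, txq->sta, txq->tid, skb);
}

static const struct ieee80211_ops mydrv_ops = {
	/* .tx, .start, .stop, ... */
	.wake_tx_queue = mydrv_wake_tx_queue,
};
```

Note the split of responsibilities: mac80211 only taildrops at enqueue time under the per-queue lock, while dequeue timing is entirely up to the driver.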