author     Samuel Ortiz <sameo@linux.intel.com>          2009-11-24 06:33:31 +0300
committer  John W. Linville <linville@tuxdriver.com>     2009-11-28 23:04:44 +0300
commit     a7af530d45969a63e20708417b70c547596ce3a9 (patch)
tree       c643269ad98d2689e1a011e13e2568615f01b0df /drivers/net/wireless/iwmc3200wifi/tx.c
parent     2351178c52fedf1846c84b35418f4102487ec00e (diff)
download   linux-a7af530d45969a63e20708417b70c547596ce3a9.tar.xz
iwmc3200wifi: 802.11n Tx aggregation support
To support 802.11n Tx aggregation with iwmc3200 wifi, we have to handle
the UMAC_CMD_OPCODE_STOP_RESUME_STA_TX notification from the UMAC.
Before sending an AddBA, the UMAC synchronizes with the host in order to
know the last Tx frame it is supposed to receive before it can start the
actual aggregation session.
We thus have to keep track of the last sequence number scheduled for
transmission on a particular RAxTID and answer the UMAC with that
sequence number. The UMAC then does the BA negotiation and, once it is
done, sends a new UMAC_CMD_OPCODE_STOP_RESUME_STA_TX notification to let
us know that we can resume the Tx flow on the specified RAxTID.
Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
Reviewed-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
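
As an illustration only (not part of this patch), here is a minimal C
sketch of the bookkeeping described above: a per-RAxTID record of the
last scheduled Tx sequence number that is frozen on a stop notification
and reported back so the firmware knows which frame closes the
pre-aggregation window. All struct and function names below are
hypothetical.

#include <stdbool.h>
#include <stdint.h>

/* Per-RAxTID Tx state: illustrative only, not the driver's struct. */
struct tid_tx_state {
        bool stopped;           /* set on STOP, cleared on RESUME */
        uint16_t last_seq_num;  /* last Tx sequence number scheduled so far */
};

/*
 * Called for a STOP_RESUME-style notification.  On STOP, freeze the RAxTID
 * and hand back the last scheduled sequence number so the firmware knows
 * which frame closes the pre-aggregation window; on RESUME, unblock Tx.
 */
static uint16_t handle_stop_resume(struct tid_tx_state *tid, bool stop)
{
        if (stop) {
                tid->stopped = true;
                return tid->last_seq_num;   /* answer sent back to the UMAC */
        }

        tid->stopped = false;   /* BA session is set up, Tx may flow again */
        return 0;
}

/*
 * Tx path: refuse to schedule a frame on a stopped RAxTID (the caller parks
 * it instead), otherwise record the sequence number it was built with.
 */
static bool tx_schedule_frame(struct tid_tx_state *tid, uint16_t seq_num)
{
        if (tid->stopped)
                return false;

        tid->last_seq_num = seq_num;
        return true;
}

In the driver itself this state lives in struct iwm_tid_info; the diff
below records it through tid_info->last_seq_num and checks
tid_info->stopped in the Tx worker.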
Diffstat (limited to 'drivers/net/wireless/iwmc3200wifi/tx.c')
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/tx.c  57
1 file changed, 50 insertions, 7 deletions
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index e3b4f7902daf..01cc2101e682 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -329,7 +329,7 @@ static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
 
         memcpy(buf + sizeof(*hdr), skb->data, skb->len);
 
-        return 0;
+        return umac_cmd.seq_num;
 }
 
 static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
@@ -361,9 +361,10 @@ void iwm_tx_worker(struct work_struct *work)
         struct iwm_priv *iwm;
         struct iwm_tx_info *tx_info = NULL;
         struct sk_buff *skb;
-        int cmdlen, ret;
         struct iwm_tx_queue *txq;
-        int pool_id;
+        struct iwm_sta_info *sta_info;
+        struct iwm_tid_info *tid_info;
+        int cmdlen, ret, pool_id;
 
         txq = container_of(work, struct iwm_tx_queue, worker);
         iwm = container_of(txq, struct iwm_priv, txq[txq->id]);
@@ -373,8 +374,40 @@ void iwm_tx_worker(struct work_struct *work)
         while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
                !skb_queue_empty(&txq->queue)) {
 
+                spin_lock_bh(&txq->lock);
                 skb = skb_dequeue(&txq->queue);
+                spin_unlock_bh(&txq->lock);
+
                 tx_info = skb_to_tx_info(skb);
+                sta_info = &iwm->sta_table[tx_info->sta];
+                if (!sta_info->valid) {
+                        IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
+                        kfree_skb(skb);
+                        continue;
+                }
+
+                tid_info = &sta_info->tid_info[tx_info->tid];
+
+                mutex_lock(&tid_info->mutex);
+
+                /*
+                 * If the RAxTID is stopped, we queue the skb to the stopped
+                 * queue.
+                 * Whenever we'll get a UMAC notification to resume the tx flow
+                 * for this RAxTID, we'll merge back the stopped queue into the
+                 * regular queue. See iwm_ntf_stop_resume_tx() from rx.c.
+                 */
+                if (tid_info->stopped) {
+                        IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
+                                   tx_info->sta, tx_info->tid);
+                        spin_lock_bh(&txq->lock);
+                        skb_queue_tail(&txq->stopped_queue, skb);
+                        spin_unlock_bh(&txq->lock);
+
+                        mutex_unlock(&tid_info->mutex);
+                        continue;
+                }
+
                 cmdlen = IWM_UDMA_HDR_LEN + skb->len;
 
                 IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
@@ -393,13 +426,20 @@ void iwm_tx_worker(struct work_struct *work)
                 if (ret) {
                         IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
                                    "%d, Tx worker stopped\n", txq->id);
+                        spin_lock_bh(&txq->lock);
                         skb_queue_head(&txq->queue, skb);
+                        spin_unlock_bh(&txq->lock);
+
+                        mutex_unlock(&tid_info->mutex);
                         break;
                 }
 
                 txq->concat_ptr = txq->concat_buf + txq->concat_count;
-                iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
+                tid_info->last_seq_num =
+                        iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
                 txq->concat_count += ALIGN(cmdlen, 16);
+
+                mutex_unlock(&tid_info->mutex);
 #endif
                 kfree_skb(skb);
         }
@@ -419,14 +459,14 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         struct iwm_priv *iwm = ndev_to_iwm(netdev);
         struct net_device *ndev = iwm_to_ndev(iwm);
         struct wireless_dev *wdev = iwm_to_wdev(iwm);
-        u8 *dst_addr;
         struct iwm_tx_info *tx_info;
         struct iwm_tx_queue *txq;
         struct iwm_sta_info *sta_info;
-        u8 sta_id;
+        u8 *dst_addr, sta_id;
         u16 queue;
         int ret;
 
+
         if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
                 IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
                            "not associated\n");
@@ -440,7 +480,8 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         txq = &iwm->txq[queue];
 
         /* No free space for Tx, tx_worker is too slow */
-        if (skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) {
+        if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
+            (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
                 IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
                 netif_stop_subqueue(netdev, queue);
                 return NETDEV_TX_BUSY;
@@ -477,7 +518,9 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         else
                 tx_info->tid = IWM_UMAC_MGMT_TID;
 
+        spin_lock_bh(&iwm->txq[queue].lock);
         skb_queue_tail(&iwm->txq[queue].queue, skb);
+        spin_unlock_bh(&iwm->txq[queue].lock);
 
         queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
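
The comment in the Tx worker above refers to iwm_ntf_stop_resume_tx() in
rx.c for the resume path, which is not part of this diff. As a rough
sketch under that assumption, and reusing the iwm_tx_queue fields touched
above (queue, stopped_queue, lock, wq, worker), resuming a RAxTID amounts
to splicing the parked frames back in front of the regular queue and
kicking the worker; the helper name here is hypothetical:

static void resume_stopped_tid(struct iwm_tx_queue *txq)
{
        /* Put the parked frames back in front of anything queued since the
         * stop, so the original Tx ordering on the RAxTID is preserved. */
        spin_lock_bh(&txq->lock);
        skb_queue_splice_init(&txq->stopped_queue, &txq->queue);
        spin_unlock_bh(&txq->lock);

        /* Let the Tx worker drain the merged queue again. */
        queue_work(txq->wq, &txq->worker);
}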