Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-rx.c')
-rw-r--r--   drivers/net/wireless/iwlwifi/iwl-rx.c   296
1 file changed, 109 insertions, 187 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index b774517aa9fa..8e314003b63a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -41,183 +41,6 @@
 #include "iwl-agn-calib.h"
 #include "iwl-agn.h"
 
-/******************************************************************************
- *
- * RX path functions
- *
- ******************************************************************************/
-
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC. These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC. The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- *   detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- *   were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_rx_queue_alloc()   Allocates rx_free
- * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                        iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- *                        queue, updates firmware pointers, and updates
- *                        the WRITE index. If insufficient rx_free buffers
- *                        are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
- *                        READ INDEX, detaching the SKB from the pool.
- *                        Moves the packet buffer from queue to rx_used.
- *                        Calls iwl_rx_queue_restock to refill any empty
- *                        slots.
- * ...
- *
- */
-
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
- */
-int iwl_rx_queue_space(const struct iwl_rx_queue *q)
-{
-	int s = q->read - q->write;
-	if (s <= 0)
-		s += RX_QUEUE_SIZE;
-	/* keep some buffer to not confuse full and empty queue */
-	s -= 2;
-	if (s < 0)
-		s = 0;
-	return s;
-}
-
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
-{
-	unsigned long flags;
-	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
-	u32 reg;
-
-	spin_lock_irqsave(&q->lock, flags);
-
-	if (q->need_update == 0)
-		goto exit_unlock;
-
-	if (priv->cfg->base_params->shadow_reg_enable) {
-		/* shadow register enabled */
-		/* Device expects a multiple of 8 */
-		q->write_actual = (q->write & ~0x7);
-		iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
-	} else {
-		/* If power-saving is in use, make sure device is awake */
-		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
-			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-				IWL_DEBUG_INFO(priv,
-					"Rx queue requesting wakeup,"
-					" GP1 = 0x%x\n", reg);
-				iwl_set_bit(priv, CSR_GP_CNTRL,
-					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-				goto exit_unlock;
-			}
-
-			q->write_actual = (q->write & ~0x7);
-			iwl_write_direct32(priv, rx_wrt_ptr_reg,
-					q->write_actual);
-
-		/* Else device is assumed to be awake */
-		} else {
-			/* Device expects a multiple of 8 */
-			q->write_actual = (q->write & ~0x7);
-			iwl_write_direct32(priv, rx_wrt_ptr_reg,
-				q->write_actual);
-		}
-	}
-	q->need_update = 0;
-
- exit_unlock:
-	spin_unlock_irqrestore(&q->lock, flags);
-}
-
-int iwl_rx_queue_alloc(struct iwl_priv *priv)
-{
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	struct device *dev = &priv->pci_dev->dev;
-	int i;
-
-	spin_lock_init(&rxq->lock);
-	INIT_LIST_HEAD(&rxq->rx_free);
-	INIT_LIST_HEAD(&rxq->rx_used);
-
-	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
-	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
-				     GFP_KERNEL);
-	if (!rxq->bd)
-		goto err_bd;
-
-	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
-					  &rxq->rb_stts_dma, GFP_KERNEL);
-	if (!rxq->rb_stts)
-		goto err_rb;
-
-	/* Fill the rx_used queue with _all_ of the Rx buffers */
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
-		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-
-	/* Set us so that we have processed and used all buffers, but have
-	 * not restocked the Rx queue with fresh buffers */
-	rxq->read = rxq->write = 0;
-	rxq->write_actual = 0;
-	rxq->free_count = 0;
-	rxq->need_update = 0;
-	return 0;
-
-err_rb:
-	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-			  rxq->bd_dma);
-err_bd:
-	return -ENOMEM;
-}
 
 /******************************************************************************
  *
@@ -347,7 +170,7 @@ static bool iwl_good_ack_health(struct iwl_priv *priv,
 	int actual_delta, expected_delta, ba_timeout_delta;
 	struct statistics_tx *old;
 
-	if (priv->_agn.agg_tids_count)
+	if (priv->agg_tids_count)
 		return true;
 
 	old = &priv->statistics.tx;
@@ -665,8 +488,8 @@ static void iwl_rx_statistics(struct iwl_priv *priv,
 		iwl_rx_calc_noise(priv);
 		queue_work(priv->workqueue, &priv->run_time_calib_work);
 	}
-	if (priv->cfg->ops->lib->temp_ops.temperature && change)
-		priv->cfg->ops->lib->temp_ops.temperature(priv);
+	if (priv->cfg->lib->temperature && change)
+		priv->cfg->lib->temperature(priv);
 }
 
 static void iwl_rx_reply_statistics(struct iwl_priv *priv,
@@ -769,8 +592,8 @@ static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
-	priv->_agn.last_phy_res_valid = true;
-	memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
+	priv->last_phy_res_valid = true;
+	memcpy(&priv->last_phy_res, pkt->u.raw,
 	       sizeof(struct iwl_rx_phy_res));
 }
 
@@ -943,6 +766,47 @@ static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
 	return decrypt_out;
 }
 
+/* Calc max signal level (dBm) among 3 possible receivers */
+static int iwlagn_calc_rssi(struct iwl_priv *priv,
+			    struct iwl_rx_phy_res *rx_resp)
+{
+	/* data from PHY/DSP regarding signal strength, etc.,
+	 * contents are always there, not configurable by host
+	 */
+	struct iwlagn_non_cfg_phy *ncphy =
+		(struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+	u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
+	u8 agc;
+
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
+	agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;
+
+	/* Find max rssi among 3 possible receivers.
+	 * These values are measured by the digital signal processor (DSP).
+	 * They should stay fairly constant even as the signal strength varies,
+	 * if the radio's automatic gain control (AGC) is working right.
+	 * AGC value (see below) will provide the "interesting" info.
+	 */
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
+	rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
+		IWLAGN_OFDM_RSSI_A_BIT_POS;
+	rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
+		IWLAGN_OFDM_RSSI_B_BIT_POS;
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
+	rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
+		IWLAGN_OFDM_RSSI_C_BIT_POS;
+
+	max_rssi = max_t(u32, rssi_a, rssi_b);
+	max_rssi = max_t(u32, max_rssi, rssi_c);
+
+	IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+		rssi_a, rssi_b, rssi_c, max_rssi, agc);
+
+	/* dBm = max_rssi dB - agc dB - constant.
+	 * Higher AGC (higher radio gain) means lower signal. */
+	return max_rssi - agc - IWLAGN_RSSI_OFFSET;
+}
+
 /* Called for REPLY_RX (legacy ABG frames), or
  * REPLY_RX_MPDU_CMD (HT high-throughput N frames).
  */
 static void iwl_rx_reply_rx(struct iwl_priv *priv,
@@ -977,11 +841,11 @@ static void iwl_rx_reply_rx(struct iwl_priv *priv,
 			phy_res->cfg_phy_cnt + len);
 		ampdu_status = le32_to_cpu(rx_pkt_status);
 	} else {
-		if (!priv->_agn.last_phy_res_valid) {
+		if (!priv->last_phy_res_valid) {
 			IWL_ERR(priv, "MPDU frame without cached PHY data\n");
 			return;
 		}
-		phy_res = &priv->_agn.last_phy_res;
+		phy_res = &priv->last_phy_res;
 		amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
 		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
 		len = le16_to_cpu(amsdu->byte_count);
@@ -1024,7 +888,7 @@ static void iwl_rx_reply_rx(struct iwl_priv *priv,
 	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
 
 	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
-	rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
+	rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
 
 	iwl_dbg_log_rx_data_frame(priv, len, header);
 	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
@@ -1102,6 +966,64 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
 	/* block ack */
 	handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
 
-	/* Set up hardware specific Rx handlers */
-	priv->cfg->ops->lib->rx_handler_setup(priv);
+	/* init calibration handlers */
+	priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
+					iwlagn_rx_calib_result;
+	priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+
+	/* set up notification wait support */
+	spin_lock_init(&priv->notif_wait_lock);
+	INIT_LIST_HEAD(&priv->notif_waits);
+	init_waitqueue_head(&priv->notif_waitq);
+
+	/* Set up BT Rx handlers */
+	if (priv->cfg->lib->bt_rx_handler_setup)
+		priv->cfg->lib->bt_rx_handler_setup(priv);
+
+}
+
+void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+	/*
+	 * Do the notification wait before RX handlers so
+	 * even if the RX handler consumes the RXB we have
+	 * access to it in the notification wait entry.
+	 */
+	if (!list_empty(&priv->notif_waits)) {
+		struct iwl_notification_wait *w;
+
+		spin_lock(&priv->notif_wait_lock);
+		list_for_each_entry(w, &priv->notif_waits, list) {
+			if (w->cmd != pkt->hdr.cmd)
+				continue;
+			IWL_DEBUG_RX(priv,
+				"Notif: %s, 0x%02x - wake the callers up\n",
+				get_cmd_string(pkt->hdr.cmd),
+				pkt->hdr.cmd);
+			w->triggered = true;
+			if (w->fn)
+				w->fn(priv, pkt, w->fn_data);
+		}
+		spin_unlock(&priv->notif_wait_lock);
+
+		wake_up_all(&priv->notif_waitq);
+	}
+
+	if (priv->pre_rx_handler)
+		priv->pre_rx_handler(priv, rxb);
+
+	/* Based on type of command response or notification,
+	 *   handle those that need handling via function in
+	 *   rx_handlers table.  See iwl_setup_rx_handlers() */
+	if (priv->rx_handlers[pkt->hdr.cmd]) {
+		priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+		priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
+	} else {
+		/* No handling needed */
+		IWL_DEBUG_RX(priv,
+			"No handler needed for %s, 0x%02x\n",
+			get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+	}
 }
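
Note on the removed ring-buffer helper: iwl_rx_queue_space() is the arithmetic behind the "queue is empty if WRITE = READ - 1, full if WRITE = READ" rule in the deleted theory-of-operation comment. Below is a minimal standalone sketch of that calculation, not driver code; QUEUE_SIZE and ring_free_slots() are illustrative stand-ins for RX_QUEUE_SIZE and the removed function.

#include <stdio.h>

#define QUEUE_SIZE 256			/* stand-in for RX_QUEUE_SIZE */

/* Free slots between the read and write indexes, with wraparound.
 * Two slots are always held back so a completely full ring can never
 * look identical to an empty one. */
static int ring_free_slots(unsigned int read, unsigned int write)
{
	int s = (int)read - (int)write;

	if (s <= 0)
		s += QUEUE_SIZE;	/* wrap around the circular buffer */
	s -= 2;				/* guard slots: keep full and empty distinct */
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	printf("%d\n", ring_free_slots(10, 10));	/* 254: indexes equal, whole ring minus guard */
	printf("%d\n", ring_free_slots(12, 10));	/* 0: only the two guard slots remain */
	return 0;
}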
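The iwlagn_calc_rssi() added by this patch replaces the old per-subdriver calc_rssi ops hook, and the conversion it performs is simply max(rssi_a, rssi_b, rssi_c) - agc - offset. A hedged, self-contained sketch of that arithmetic follows; the constant 44 is assumed to match IWLAGN_RSSI_OFFSET, and the function name and sample values are illustrative only.

#include <stdio.h>

#define RSSI_OFFSET 44	/* assumed value of IWLAGN_RSSI_OFFSET; illustrative */

/* dBm = strongest of the three per-chain DSP readings, minus the AGC gain,
 * minus a fixed offset.  Higher AGC (more radio gain) means a weaker signal. */
static int calc_signal_dbm(unsigned int rssi_a, unsigned int rssi_b,
			   unsigned int rssi_c, unsigned int agc)
{
	unsigned int max_rssi = rssi_a;

	if (rssi_b > max_rssi)
		max_rssi = rssi_b;
	if (rssi_c > max_rssi)
		max_rssi = rssi_c;

	return (int)max_rssi - (int)agc - RSSI_OFFSET;
}

int main(void)
{
	/* example chain readings 45/38/0 with 54 dB of AGC gain -> -53 dBm */
	printf("%d dBm\n", calc_signal_dbm(45, 38, 0, 54));
	return 0;
}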
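The new iwl_rx_dispatch() demultiplexes every received buffer through a 256-entry table of handler pointers indexed by the command byte in the packet header, with a fallback debug message when nothing is registered. A reduced sketch of that dispatch pattern is below; struct rx_packet, dispatch() and the command ids are invented for illustration and are not the driver's types.

#include <stdio.h>

struct rx_packet {
	unsigned char cmd;		/* command / notification id from the device */
};

typedef void (*rx_handler_t)(const struct rx_packet *pkt);

static rx_handler_t rx_handlers[256];	/* one slot per possible cmd byte */

static void handle_statistics(const struct rx_packet *pkt)
{
	printf("statistics notification, cmd 0x%02x\n", pkt->cmd);
}

/* Mirrors the tail of iwl_rx_dispatch(): look the command up in the table
 * and run the registered handler, or note that none is needed. */
static void dispatch(const struct rx_packet *pkt)
{
	if (rx_handlers[pkt->cmd])
		rx_handlers[pkt->cmd](pkt);
	else
		printf("no handler needed for cmd 0x%02x\n", pkt->cmd);
}

int main(void)
{
	struct rx_packet stats = { .cmd = 0x9d };	/* arbitrary ids for the demo */
	struct rx_packet other = { .cmd = 0x42 };

	rx_handlers[0x9d] = handle_statistics;		/* cf. iwl_setup_rx_handlers() */
	dispatch(&stats);
	dispatch(&other);
	return 0;
}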