Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie')
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c       |  55
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h  |   2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c        | 123
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c     |  73
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c        |  40
5 files changed, 196 insertions, 97 deletions
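For orientation, the largest drv.c change below wraps the PCI ID table in Kconfig guards so that DVM-only and MVM-only device entries are compiled in only when the corresponding driver option is enabled. A minimal sketch of that guard pattern follows; the table name is a placeholder and the two device IDs are just representative entries, only the DEFINE_PCI_DEVICE_TABLE()/IS_ENABLED() usage mirrors the patch.

/*
 * Illustrative only: compile DVM- and MVM-specific PCI IDs into the
 * device table only when the matching Kconfig option is enabled, as the
 * drv.c hunk below does for iwl_hw_card_ids.
 */
#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_card_ids) = {
#if IS_ENABLED(CONFIG_IWLDVM)
	{PCI_DEVICE(0x8086, 0x4232)},	/* DVM firmware family (5100) */
#endif	/* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
	{PCI_DEVICE(0x8086, 0x08B1)},	/* MVM firmware family (7260) */
#endif	/* CONFIG_IWLMVM */
	{0}				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_card_ids);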
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 8cb53ec2b77b..ff13458efc27 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -78,6 +78,7 @@ /* Hardware specific file defines the PCI IDs table for that hardware module */ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { +#if IS_ENABLED(CONFIG_IWLDVM) {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ @@ -129,6 +130,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ @@ -253,13 +255,60 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, +#endif /* CONFIG_IWLDVM */ +#if IS_ENABLED(CONFIG_IWLMVM) /* 7000 Series */ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, + +/* 3160 Series */ + {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0060, 
iwl3160_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, +#endif /* CONFIG_IWLMVM */ {0} }; diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 148843e7f34f..b654dcdd048a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -217,6 +217,7 @@ struct iwl_pcie_txq_scratch_buf { * @trans_pcie: pointer back to transport (for timer) * @need_update: indicates need to update read/write index * @active: stores if queue is active + * @ampdu: true if this queue is an ampdu queue for an specific RA/TID * * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame * descriptors) and required locking structures. @@ -232,6 +233,7 @@ struct iwl_txq { struct iwl_trans_pcie *trans_pcie; u8 need_update; u8 active; + bool ampdu; }; static inline dma_addr_t diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 567e67ad1f61..f600e68a410a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -110,9 +110,10 @@ /* * iwl_rxq_space - Return number of free slots available in queue. */ -static int iwl_rxq_space(const struct iwl_rxq *q) +static int iwl_rxq_space(const struct iwl_rxq *rxq) { - int s = q->read - q->write; + int s = rxq->read - rxq->write; + if (s <= 0) s += RX_QUEUE_SIZE; /* keep some buffer to not confuse full and empty queue */ @@ -143,21 +144,22 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans) /* * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue */ -static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q) +static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_rxq *rxq) { unsigned long flags; u32 reg; - spin_lock_irqsave(&q->lock, flags); + spin_lock_irqsave(&rxq->lock, flags); - if (q->need_update == 0) + if (rxq->need_update == 0) goto exit_unlock; if (trans->cfg->base_params->shadow_reg_enable) { /* shadow register enabled */ /* Device expects a multiple of 8 */ - q->write_actual = (q->write & ~0x7); - iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual); + rxq->write_actual = (rxq->write & ~0x7); + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); } else { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -175,22 +177,22 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q) goto exit_unlock; } - q->write_actual = (q->write & ~0x7); + rxq->write_actual = (rxq->write & ~0x7); iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR, - q->write_actual); + rxq->write_actual); /* Else device is assumed to be awake */ } else { /* Device expects a multiple of 8 */ - q->write_actual = (q->write & ~0x7); + rxq->write_actual = (rxq->write & ~0x7); iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR, - q->write_actual); + rxq->write_actual); } } - q->need_update = 0; + rxq->need_update = 0; exit_unlock: - spin_unlock_irqrestore(&q->lock, flags); + spin_unlock_irqrestore(&rxq->lock, flags); } /* @@ -355,19 +357,16 @@ static void 
iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) struct iwl_rxq *rxq = &trans_pcie->rxq; int i; - /* Fill the rx_used queue with _all_ of the Rx buffers */ + lockdep_assert_held(&rxq->lock); + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { - /* In the reset function, these buffers may have been allocated - * to an SKB, so we need to unmap and free potential storage */ - if (rxq->pool[i].page != NULL) { - dma_unmap_page(trans->dev, rxq->pool[i].page_dma, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - __free_pages(rxq->pool[i].page, - trans_pcie->rx_page_order); - rxq->pool[i].page = NULL; - } - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + if (!rxq->pool[i].page) + continue; + dma_unmap_page(trans->dev, rxq->pool[i].page_dma, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); + rxq->pool[i].page = NULL; } } @@ -491,6 +490,20 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); } +static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) +{ + int i; + + lockdep_assert_held(&rxq->lock); + + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + rxq->free_count = 0; + + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add(&rxq->pool[i].list, &rxq->rx_used); +} + int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -505,13 +518,12 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) } spin_lock_irqsave(&rxq->lock, flags); - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - INIT_WORK(&trans_pcie->rx_replenish, - iwl_pcie_rx_replenish_work); + INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work); + /* free all first - we might be reconfigured for a different size */ iwl_pcie_rxq_free_rbs(trans); + iwl_pcie_rx_init_rxb_lists(rxq); for (i = 0; i < RX_QUEUE_SIZE; i++) rxq->queue[i] = NULL; @@ -520,7 +532,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) * not restocked the Rx queue with fresh buffers */ rxq->read = rxq->write = 0; rxq->write_actual = 0; - rxq->free_count = 0; memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); spin_unlock_irqrestore(&rxq->lock, flags); @@ -802,9 +813,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) u32 handled = 0; unsigned long flags; u32 i; -#ifdef CONFIG_IWLWIFI_DEBUG - u32 inta_mask; -#endif lock_map_acquire(&trans->sync_cmd_lockdep_map); @@ -826,14 +834,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) inta = trans_pcie->inta; -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_have_debug_level(IWL_DL_ISR)) { - /* just for debug */ - inta_mask = iwl_read32(trans, CSR_INT_MASK); + if (iwl_have_debug_level(IWL_DL_ISR)) IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", - inta, inta_mask); - } -#endif + inta, iwl_read32(trans, CSR_INT_MASK)); /* saved interrupt in inta variable now we can reset trans_pcie->inta */ trans_pcie->inta = 0; @@ -855,12 +858,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) goto out; } -#ifdef CONFIG_IWLWIFI_DEBUG if (iwl_have_debug_level(IWL_DL_ISR)) { /* NIC fires this, but we don't use it, redundant with WAKEUP */ if (inta & CSR_INT_BIT_SCD) { - IWL_DEBUG_ISR(trans, "Scheduler finished to transmit " - "the frame/frames.\n"); + IWL_DEBUG_ISR(trans, + "Scheduler finished to transmit the frame/frames.\n"); isr_stats->sch++; } @@ -870,7 +872,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) 
isr_stats->alive++; } } -#endif + /* Safely ignore these bits for debug checks below */ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); @@ -886,6 +888,14 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); if (hw_rfkill) { + /* + * Clear the interrupt in APMG if the NIC is going down. + * Note that when the NIC exits RFkill (else branch), we + * can't access prph and the NIC will be reset in + * start_hw anyway. + */ + iwl_write_prph(trans, APMG_RTC_INT_STT_REG, + APMG_RTC_INT_STT_RFKILL); set_bit(STATUS_RFKILL, &trans_pcie->status); if (test_and_clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) @@ -1118,9 +1128,6 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data) struct iwl_trans *trans = data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 inta, inta_mask; -#ifdef CONFIG_IWLWIFI_DEBUG - u32 inta_fh; -#endif lockdep_assert_held(&trans_pcie->irq_lock); @@ -1159,13 +1166,11 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data) return IRQ_HANDLED; } -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_have_debug_level(IWL_DL_ISR)) { - inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS); - IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, " - "fh 0x%08x\n", inta, inta_mask, inta_fh); - } -#endif + if (iwl_have_debug_level(IWL_DL_ISR)) + IWL_DEBUG_ISR(trans, + "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, + iwl_read32(trans, CSR_FH_INT_STATUS)); trans_pcie->inta |= inta; /* the thread will service interrupts and re-enable them */ @@ -1198,7 +1203,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) { struct iwl_trans *trans = data; struct iwl_trans_pcie *trans_pcie; - u32 inta, inta_mask; + u32 inta; u32 val = 0; u32 read; unsigned long flags; @@ -1226,7 +1231,6 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) * If we have something to service, the tasklet will re-enable ints. * If we *don't* have something, we'll re-enable before leaving here. */ - inta_mask = iwl_read32(trans, CSR_INT_MASK); iwl_write32(trans, CSR_INT_MASK, 0x00000000); /* Ignore interrupt if there's nothing in NIC to service. 
@@ -1271,8 +1275,11 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) val |= 0x8000; inta = (0xff & val) | ((0xff00 & val) << 16); - IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", - inta, inta_mask, val); + IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n", + inta, trans_pcie->inta_mask, val); + if (iwl_have_debug_level(IWL_DL_ISR)) + IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n", + iwl_read32(trans, CSR_INT_MASK)); inta &= trans_pcie->inta_mask; trans_pcie->inta |= inta; diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 50ba0a468f94..96cfcdd39079 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -405,20 +405,27 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, { u8 *v_addr; dma_addr_t p_addr; - u32 offset; + u32 offset, chunk_sz = section->len; int ret = 0; IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", section_num); - v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL); - if (!v_addr) - return -ENOMEM; + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, + GFP_KERNEL | __GFP_NOWARN); + if (!v_addr) { + IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); + chunk_sz = PAGE_SIZE; + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, + &p_addr, GFP_KERNEL); + if (!v_addr) + return -ENOMEM; + } - for (offset = 0; offset < section->len; offset += PAGE_SIZE) { + for (offset = 0; offset < section->len; offset += chunk_sz) { u32 copy_size; - copy_size = min_t(u32, PAGE_SIZE, section->len - offset); + copy_size = min_t(u32, chunk_sz, section->len - offset); memcpy(v_addr, (u8 *)section->data + offset, copy_size); ret = iwl_pcie_load_firmware_chunk(trans, @@ -432,7 +439,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, } } - dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr); + dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); return ret; } @@ -571,13 +578,17 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) clear_bit(STATUS_RFKILL, &trans_pcie->status); } -static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans) +static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) { - /* let the ucode operate on its own */ - iwl_write32(trans, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - iwl_disable_interrupts(trans); + + /* + * in testing mode, the host stays awake and the + * hardware won't be reset (not even partially) + */ + if (test) + return; + iwl_pcie_disable_ict(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, @@ -596,11 +607,18 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans) } static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, - enum iwl_d3_status *status) + enum iwl_d3_status *status, + bool test) { u32 val; int ret; + if (test) { + iwl_enable_interrupts(trans); + *status = IWL_D3_STATUS_ALIVE; + return 0; + } + iwl_pcie_set_pwr(trans, false); val = iwl_read32(trans, CSR_RESET); @@ -636,9 +654,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return ret; } - iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - *status = IWL_D3_STATUS_ALIVE; return 0; } @@ -655,6 +670,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) return err; } + /* Reset the entire device */ + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + usleep_range(10, 15); + iwl_pcie_apm_init(trans); /* 
From now on, the op_mode will be kept updated about RF kill state */ @@ -823,8 +843,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, unsigned long *flags) { int ret; - struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans); - spin_lock_irqsave(&pcie_trans->reg_lock, *flags); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + spin_lock_irqsave(&trans_pcie->reg_lock, *flags); /* this bit wakes up the NIC */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, @@ -860,7 +881,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, WARN_ONCE(1, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", val); - spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags); + spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); return false; } } @@ -869,22 +890,22 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, * Fool sparse by faking we release the lock - sparse will * track nic_access anyway. */ - __release(&pcie_trans->reg_lock); + __release(&trans_pcie->reg_lock); return true; } static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, unsigned long *flags) { - struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - lockdep_assert_held(&pcie_trans->reg_lock); + lockdep_assert_held(&trans_pcie->reg_lock); /* * Fool sparse by faking we acquiring the lock - sparse will * track nic_access anyway. */ - __acquire(&pcie_trans->reg_lock); + __acquire(&trans_pcie->reg_lock); __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); @@ -895,7 +916,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, * scheduled on different CPUs (after we drop reg_lock). 
*/ mmiowb(); - spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags); + spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); } static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, @@ -917,11 +938,11 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, } static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) + const void *buf, int dwords) { unsigned long flags; int offs, ret = 0; - u32 *vals = buf; + const u32 *vals = buf; if (iwl_trans_grab_nic_access(trans, false, &flags)) { iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index c5e30294c5ac..c47c92165aba 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -224,13 +224,13 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, switch (sec_ctl & TX_CMD_SEC_MSK) { case TX_CMD_SEC_CCM: - len += CCMP_MIC_LEN; + len += IEEE80211_CCMP_MIC_LEN; break; case TX_CMD_SEC_TKIP: - len += TKIP_ICV_LEN; + len += IEEE80211_TKIP_ICV_LEN; break; case TX_CMD_SEC_WEP: - len += WEP_IV_LEN + WEP_ICV_LEN; + len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; break; } @@ -576,10 +576,16 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) spin_lock_bh(&txq->lock); while (q->write_ptr != q->read_ptr) { + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", + txq_id, q->read_ptr); iwl_pcie_txq_free_tfd(trans, txq); q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); } + txq->active = false; spin_unlock_bh(&txq->lock); + + /* just in case - this queue may have been stopped */ + iwl_wake_queue(trans, txq); } /* @@ -927,6 +933,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, spin_lock_bh(&txq->lock); + if (!txq->active) { + IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", + txq_id, ssn); + goto out; + } + if (txq->q.read_ptr == tfd_num) goto out; @@ -1045,6 +1057,10 @@ static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans, (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); } +/* Receiver address (actually, Rx station's index into station table), + * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ +#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, int sta_id, int tid, int frame_limit, u16 ssn) { @@ -1069,6 +1085,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, /* enable aggregations for the queue */ iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); + trans_pcie->txq[txq_id].ampdu = true; } else { /* * disable aggregations for the queue, this will also make the @@ -1103,6 +1120,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | (1 << SCD_QUEUE_STTS_REG_POS_WSL) | SCD_QUEUE_STTS_REG_MSK); + trans_pcie->txq[txq_id].active = true; IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", txq_id, fifo, ssn & 0xff); } @@ -1125,6 +1143,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) ARRAY_SIZE(zero_val)); iwl_pcie_txq_unmap(trans, txq_id); + trans_pcie->txq[txq_id].ampdu = false; IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } @@ -1518,11 +1537,13 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) { IWL_ERR(trans, "FW error in SYNC CMD 
%s\n", get_cmd_string(trans_pcie, cmd->id)); + dump_stack(); ret = -EIO; goto cancel; } - if (test_bit(STATUS_RFKILL, &trans_pcie->status)) { + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans_pcie->status)) { IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); ret = -ERFKILL; goto cancel; @@ -1564,7 +1585,8 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) return -EIO; - if (test_bit(STATUS_RFKILL, &trans_pcie->status)) { + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans_pcie->status)) { IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", cmd->id); return -ERFKILL; @@ -1592,7 +1614,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, u8 wait_write_ptr = 0; __le16 fc = hdr->frame_control; u8 hdr_len = ieee80211_hdrlen(fc); - u16 __maybe_unused wifi_seq; + u16 wifi_seq; txq = &trans_pcie->txq[txq_id]; q = &txq->q; @@ -1609,13 +1631,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, * the BA. * Check here that the packets are in the right place on the ring. */ -#ifdef CONFIG_IWLWIFI_DEBUG wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && - ((wifi_seq & 0xff) != q->write_ptr), + WARN_ONCE(trans_pcie->txq[txq_id].ampdu && + (wifi_seq & 0xff) != q->write_ptr, "Q: %d WiFi Seq %d tfdNum %d", txq_id, wifi_seq, q->write_ptr); -#endif /* Set up driver data for this TFD */ txq->entries[q->write_ptr].skb = skb; |