Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c   |  281
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c         |   24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h    |   98
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c          |   59
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c  |  374
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c       |  287
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c     | 1018
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c          |  237
8 files changed, 2023 insertions, 355 deletions
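The bulk of the change is the new gen2 TX path in tx-gen2.c (1018 lines). As a rough orientation before the patch body, below is a minimal, standalone sketch (not part of the commit) of how the new iwl_pcie_gen2_update_byte_tbl() in that file encodes a byte-count table entry: the frame length in dwords goes in the low 12 bits, and the number of extra 64-byte TFD fetch chunks goes in the top 4 bits. The two struct-size constants are illustrative assumptions, not values taken from the kernel headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative assumptions, not values from the kernel headers. */
#define TFH_TFD_TBS_OFFSET   8   /* assumed offsetof(struct iwl_tfh_tfd, tbs) */
#define TFH_TB_SIZE         10   /* assumed sizeof(struct iwl_tfh_tb) */

/*
 * Encode one byte-count table entry the way iwl_pcie_gen2_update_byte_tbl()
 * in the patch does: the low 12 bits hold the frame length in dwords, the
 * top 4 bits hold the number of 64-byte chunks the HW must fetch beyond
 * the first one to read the whole TFD.
 */
static uint16_t bc_entry(uint16_t byte_cnt, unsigned int num_tbs)
{
        uint16_t len_dwords = (byte_cnt + 3) / 4;         /* DIV_ROUND_UP(len, 4) */
        unsigned int filled = TFH_TFD_TBS_OFFSET + num_tbs * TFH_TB_SIZE;
        unsigned int chunks = (filled + 63) / 64 - 1;     /* 0 means a single chunk */

        if (len_dwords > 0xFFF)
                return 0;                                 /* driver WARNs and bails out here */

        return len_dwords | (uint16_t)(chunks << 12);
}

int main(void)
{
        /* a 1200-byte frame described by 3 TBs still fits in one 64-byte fetch chunk */
        printf("bc_ent = 0x%04x\n", bc_entry(1200, 3));
        return 0;
}

With the assumed sizes, three TBs occupy 38 bytes of the TFD, so only one 64-byte chunk is fetched, matching the "only one chunk of 64 bytes should be fetched" comment in the patch.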
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c new file mode 100644 index 000000000000..b1f43397bb59 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -0,0 +1,281 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include "iwl-trans.h" +#include "iwl-fh.h" +#include "iwl-context-info.h" +#include "internal.h" +#include "iwl-prph.h" + +static int iwl_pcie_get_num_sections(const struct fw_img *fw, + int start) +{ + int i = 0; + + while (start < fw->num_sec && + fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && + fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { + start++; + i++; + } + + return i; +} + +static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, + const struct fw_desc *sec, + struct iwl_dram_data *dram) +{ + dram->block = dma_alloc_coherent(trans->dev, sec->len, + &dram->physical, + GFP_KERNEL); + if (!dram->block) + return -ENOMEM; + + dram->size = sec->len; + memcpy(dram->block, sec->data, sec->len); + + return 0; +} + +static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + int i; + + if (!dram->fw) { + WARN_ON(dram->fw_cnt); + return; + } + + for (i = 0; i < dram->fw_cnt; i++) + dma_free_coherent(trans->dev, dram->fw[i].size, + dram->fw[i].block, dram->fw[i].physical); + + kfree(dram->fw); + dram->fw_cnt = 0; +} + +void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + int i; + + if (!dram->paging) { + WARN_ON(dram->paging_cnt); + return; + } + + /* free paging*/ + for (i = 0; i < dram->paging_cnt; i++) + dma_free_coherent(trans->dev, dram->paging[i].size, + dram->paging[i].block, + dram->paging[i].physical); + + kfree(dram->paging); + dram->paging_cnt = 0; +} + +static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans, + const struct fw_img *fw, + struct iwl_context_info *ctxt_info) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram; + int i, ret, lmac_cnt, umac_cnt, paging_cnt; + + lmac_cnt = iwl_pcie_get_num_sections(fw, 0); + /* add 1 due to separator */ + umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1); + /* add 2 due to separators */ + paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2); + + dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL); + if (!dram->fw) + return -ENOMEM; + dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL); + if (!dram->paging) + return -ENOMEM; + + /* initialize lmac sections */ + for (i = 0; i < lmac_cnt; i++) { + ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i], + &dram->fw[dram->fw_cnt]); + if (ret) + return ret; + ctxt_dram->lmac_img[i] = + cpu_to_le64(dram->fw[dram->fw_cnt].physical); + dram->fw_cnt++; + } + + /* initialize umac sections */ + for (i = 0; i < umac_cnt; i++) { + /* access FW with +1 to make up for lmac separator */ + ret = iwl_pcie_ctxt_info_alloc_dma(trans, + &fw->sec[dram->fw_cnt + 1], + &dram->fw[dram->fw_cnt]); + if (ret) + return ret; + ctxt_dram->umac_img[i] = + cpu_to_le64(dram->fw[dram->fw_cnt].physical); + dram->fw_cnt++; + } + + /* + * Initialize paging. + * Paging memory isn't stored in dram->fw as the umac and lmac - it is + * stored separately. + * This is since the timing of its release is different - + * while fw memory can be released on alive, the paging memory can be + * freed only when the device goes down. 
+ * Given that, the logic here in accessing the fw image is a bit + * different - fw_cnt isn't changing so loop counter is added to it. + */ + for (i = 0; i < paging_cnt; i++) { + /* access FW with +2 to make up for lmac & umac separators */ + int fw_idx = dram->fw_cnt + i + 2; + + ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx], + &dram->paging[i]); + if (ret) + return ret; + + ctxt_dram->virtual_img[i] = + cpu_to_le64(dram->paging[i].physical); + dram->paging_cnt++; + } + + return 0; +} + +int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, + const struct fw_img *fw) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_context_info *ctxt_info; + struct iwl_context_info_rbd_cfg *rx_cfg; + u32 control_flags = 0; + int ret; + + ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info), + &trans_pcie->ctxt_info_dma_addr, + GFP_KERNEL); + if (!ctxt_info) + return -ENOMEM; + + ctxt_info->version.version = 0; + ctxt_info->version.mac_id = + cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); + /* size is in DWs */ + ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4); + + BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF); + control_flags = IWL_CTXT_INFO_RB_SIZE_4K | + IWL_CTXT_INFO_TFD_FORMAT_LONG | + RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) << + IWL_CTXT_INFO_RB_CB_SIZE_POS; + ctxt_info->control.control_flags = cpu_to_le32(control_flags); + + /* initialize RX default queue */ + rx_cfg = &ctxt_info->rbd_cfg; + rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); + rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); + rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); + + /* initialize TX command queue */ + ctxt_info->hcmd_cfg.cmd_queue_addr = + cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); + ctxt_info->hcmd_cfg.cmd_queue_size = + TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX); + + /* allocate ucode sections in dram and set addresses */ + ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info); + if (ret) { + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), + ctxt_info, trans_pcie->ctxt_info_dma_addr); + return ret; + } + + trans_pcie->ctxt_info = ctxt_info; + + iwl_enable_interrupts(trans); + + /* Configure debug, if exists */ + if (trans->dbg_dest_tlv) + iwl_pcie_apply_destination(trans); + + /* kick FW self load */ + iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr); + iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); + + /* Context info will be released upon alive or failure to get one */ + + return 0; +} + +void iwl_pcie_ctxt_info_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!trans_pcie->ctxt_info) + return; + + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), + trans_pcie->ctxt_info, + trans_pcie->ctxt_info_dma_addr); + trans_pcie->ctxt_info_dma_addr = 0; + trans_pcie->ctxt_info = NULL; + + iwl_pcie_ctxt_info_free_fw_img(trans); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index ba8a81cb0e2b..e51760e752d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -501,6 +501,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)}, + 
{IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, /* 9000 Series */ @@ -533,7 +537,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)}, /* a000 Series */ - {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2722, 0x0A10, iwla000_2ac_cfg_hr)}, #endif /* CONFIG_IWLMVM */ {0} @@ -667,18 +672,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans->cfg = cfg_7265d; } - if (iwl_trans->cfg->rf_id) { - if (cfg == &iwl9460_2ac_cfg && - iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) { - cfg = &iwl9000lc_2ac_cfg; - iwl_trans->cfg = cfg; - } - - if (cfg == &iwla000_2ac_cfg_hr && - iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) { - cfg = &iwla000_2ac_cfg_jf; - iwl_trans->cfg = cfg; - } + if (iwl_trans->cfg->rf_id && + (cfg == &iwla000_2ac_cfg_hr || cfg == &iwla000_2ac_cfg_hr_cdb) && + iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) { + cfg = &iwla000_2ac_cfg_jf; + iwl_trans->cfg = cfg; } #endif diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 10937309641a..fd4faaaa1484 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -2,7 +2,7 @@ * * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -205,11 +205,11 @@ struct iwl_cmd_meta { * into the buffer regardless of whether it should be mapped or not. * This indicates how big the first TB must be to include the scratch buffer * and the assigned PN. - * Since PN location is 16 bytes at offset 24, it's 40 now. + * Since PN location is 8 bytes at offset 12, it's 20 now. * If we make it bigger then allocations will be bigger and copy slower, so * that's probably not useful. 
*/ -#define IWL_FIRST_TB_SIZE 40 +#define IWL_FIRST_TB_SIZE 20 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) struct iwl_pcie_txq_entry { @@ -237,11 +237,11 @@ struct iwl_pcie_first_tb_buf { * @stuck_timer: timer that fires if queue gets stuck * @trans_pcie: pointer back to transport (for timer) * @need_update: indicates need to update read/write index - * @active: stores if queue is active * @ampdu: true if this queue is an ampdu queue for an specific RA/TID * @wd_timeout: queue watchdog timeout (jiffies) - per queue * @frozen: tx stuck queue timer is frozen * @frozen_expiry_remainder: remember how long until the timer fires + * @bc_tbl: byte count table of the queue (relevant only for gen2 transport) * @write_ptr: 1-st empty entry (index) host_w * @read_ptr: last used entry (index) host_r * @dma_addr: physical addr for BD's @@ -277,11 +277,11 @@ struct iwl_txq { struct iwl_trans_pcie *trans_pcie; bool need_update; bool frozen; - u8 active; bool ampdu; int block; unsigned long wd_timeout; struct sk_buff_head overflow_q; + struct iwl_dma_ptr bc_tbl; int write_ptr; int read_ptr; @@ -315,11 +315,43 @@ enum iwl_shared_irq_flags { }; /** + * struct iwl_dram_data + * @physical: page phy pointer + * @block: pointer to the allocated block/page + * @size: size of the block/page + */ +struct iwl_dram_data { + dma_addr_t physical; + void *block; + int size; +}; + +/** + * struct iwl_self_init_dram - dram data used by self init process + * @fw: lmac and umac dram data + * @fw_cnt: total number of items in array + * @paging: paging dram data + * @paging_cnt: total number of items in array + */ +struct iwl_self_init_dram { + struct iwl_dram_data *fw; + int fw_cnt; + struct iwl_dram_data *paging; + int paging_cnt; +}; + +/** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing + * @ctxt_info: context information for FW self init + * @ctxt_info_dma_addr: dma addr of context information + * @init_dram: DRAM data of firmware image (including paging). + * Context information addresses will be taken from here. + * This is driver's local copy for keeping track of size and + * count for allocating and freeing the memory. 
* @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM * @scd_bc_tbls: pointer to the byte count table of the scheduler @@ -357,6 +389,9 @@ struct iwl_trans_pcie { struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE]; struct iwl_rb_allocator rba; + struct iwl_context_info *ctxt_info; + dma_addr_t ctxt_info_dma_addr; + struct iwl_self_init_dram init_dram; struct iwl_trans *trans; struct net_device napi_dev; @@ -378,9 +413,10 @@ struct iwl_trans_pcie { struct iwl_dma_ptr scd_bc_tbls; struct iwl_dma_ptr kw; - struct iwl_txq *txq; - unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; - unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + struct iwl_txq *txq_memory; + struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; + unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; /* PCI bus related data */ struct pci_dev *pci_dev; @@ -454,6 +490,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans); * RX ******************************************************/ int iwl_pcie_rx_init(struct iwl_trans *trans); +int iwl_pcie_gen2_rx_init(struct iwl_trans *trans); irqreturn_t iwl_pcie_msix_isr(int irq, void *data); irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id); @@ -474,6 +511,7 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans); * TX / HCMD ******************************************************/ int iwl_pcie_tx_init(struct iwl_trans *trans); +int iwl_pcie_gen2_tx_init(struct iwl_trans *trans); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); void iwl_pcie_tx_free(struct iwl_trans *trans); @@ -484,7 +522,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, bool configure_scd); void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode); -dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq); void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, @@ -616,6 +653,12 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) } } +static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, + struct iwl_txq *txq, int idx) +{ + return txq->tfds + trans_pcie->tfd_size * idx; +} + static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -719,4 +762,41 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); +/* common functions that are used by gen2 transport */ +void iwl_pcie_apm_config(struct iwl_trans *trans); +int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); +void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); +bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans); +void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); +int iwl_queue_space(const struct iwl_txq *q); +int iwl_pcie_apm_stop_master(struct iwl_trans *trans); +void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); +int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue); +int iwl_pcie_txq_alloc(struct iwl_trans *trans, + struct iwl_txq *txq, int slots_num, bool cmd_queue); +int iwl_pcie_alloc_dma_ptr(struct 
iwl_trans *trans, + struct iwl_dma_ptr *ptr, size_t size); +void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); +void iwl_pcie_apply_destination(struct iwl_trans *trans); + +/* transport gen 2 exported functions */ +int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill); +void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); +int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, + struct iwl_tx_queue_cfg_cmd *cmd, + int cmd_id, + unsigned int timeout); +void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); +int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_cmd *dev_cmd, int txq_id); +int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd); +void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, + bool low_power); +void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power); +void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id); +void iwl_pcie_gen2_tx_free(struct iwl_trans *trans); +void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans); #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index de94dfdf2ec9..1da2de205cdf 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -2,7 +2,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -880,7 +880,7 @@ static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) return 0; } -int iwl_pcie_rx_init(struct iwl_trans *trans) +static int _iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *def_rxq; @@ -958,20 +958,40 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); + return 0; +} + +int iwl_pcie_rx_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret = _iwl_pcie_rx_init(trans); + + if (ret) + return ret; + if (trans->cfg->mq_rx_supported) iwl_pcie_rx_mq_hw_init(trans); else - iwl_pcie_rx_hw_init(trans, def_rxq); + iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); - iwl_pcie_rxq_restock(trans, def_rxq); + iwl_pcie_rxq_restock(trans, trans_pcie->rxq); - spin_lock(&def_rxq->lock); - iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); - spin_unlock(&def_rxq->lock); + spin_lock(&trans_pcie->rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq); + spin_unlock(&trans_pcie->rxq->lock); return 0; } +int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) +{ + /* + * We don't configure the RFH. + * Restock will be done at alive, after firmware configured the RFH. 
+ */ + return _iwl_pcie_rx_init(trans); +} + void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1074,7 +1094,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, bool emergency) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; bool page_stolen = false; int max_len = PAGE_SIZE << trans_pcie->rx_page_order; u32 offset = 0; @@ -1127,7 +1147,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, * Ucode should set SEQ_RX_FRAME bit if ucode-originated, * but apparently a few don't get set; catch them here. */ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); - if (reclaim) { + if (reclaim && !pkt->hdr.group_id) { int i; for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { @@ -1393,17 +1413,17 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) return; } - iwl_pcie_dump_csr(trans); - iwl_dump_fh(trans, NULL); - local_bh_disable(); /* The STATUS_FW_ERROR bit is set in this function. This must happen * before we wake up the command caller, to ensure a proper cleanup. */ iwl_trans_fw_error(trans); local_bh_enable(); - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) - del_timer(&trans_pcie->txq[i].stuck_timer); + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { + if (!trans_pcie->txq[i]) + continue; + del_timer(&trans_pcie->txq[i]->stuck_timer); + } clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); wake_up(&trans_pcie->wait_command_queue); @@ -1597,6 +1617,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) if (inta & CSR_INT_BIT_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; + if (trans->cfg->gen2) { + /* + * We can restock, since firmware configured + * the RFH + */ + iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); + } } } @@ -1933,6 +1960,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; + if (trans->cfg->gen2) { + /* We can restock, since firmware configured the RFH */ + iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); + } } /* uCode wakes up after power-down sleep */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c new file mode 100644 index 000000000000..ac60a282d6de --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -0,0 +1,374 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "iwl-trans.h" +#include "iwl-context-info.h" +#include "internal.h" + +/* + * Start up NIC's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) +{ + int ret = 0; + + IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); + + /* + * Use "set_bit" below rather than "write", to preserve any hardware + * bits already set by default after reset. + */ + + /* + * Disable L0s without affecting L1; + * don't wait for ICH L0s (ICH bug W/A) + */ + iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + /* Set FH wait threshold to maximum (HW error during stress W/A) */ + iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); + + /* + * Enable HAP INTA (interrupt from management bus) to + * wake device's PCI Express link L1a -> L0s + */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + + iwl_pcie_apm_config(trans); + + /* + * Set "initialization complete" bit to move adapter from + * D0U* --> D0A* (powered-up active) state. + */ + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* + * Wait for clock stabilization; once stabilized, access to + * device-internal resources is supported, e.g. iwl_write_prph() + * and accesses to uCode SRAM. 
+ */ + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + if (ret < 0) { + IWL_DEBUG_INFO(trans, "Failed to init the card\n"); + return ret; + } + + set_bit(STATUS_DEVICE_ENABLED, &trans->status); + + return 0; +} + +static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) +{ + IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); + + if (op_mode_leave) { + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + iwl_pcie_gen2_apm_init(trans); + + /* inform ME that we are leaving */ + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE | + CSR_HW_IF_CONFIG_REG_ENABLE_PME); + mdelay(1); + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + mdelay(5); + } + + clear_bit(STATUS_DEVICE_ENABLED, &trans->status); + + /* Stop device's DMA activity */ + iwl_pcie_apm_stop_master(trans); + + /* Reset the entire device */ + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + usleep_range(1000, 2000); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. + */ + iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); +} + +void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool hw_rfkill, was_hw_rfkill; + + lockdep_assert_held(&trans_pcie->mutex); + + if (trans_pcie->is_down) + return; + + trans_pcie->is_down = true; + + was_hw_rfkill = iwl_is_rfkill_set(trans); + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(trans); + + /* device going down, Stop using ICT table */ + iwl_pcie_disable_ict(trans); + + /* + * If a HW restart happens during firmware loading, + * then the firmware loading might call this function + * and later it might be called again due to the + * restart. So don't process again if the device is + * already dead. + */ + if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { + IWL_DEBUG_INFO(trans, + "DEVICE_ENABLED bit was set and is now cleared\n"); + iwl_pcie_gen2_tx_stop(trans); + iwl_pcie_rx_stop(trans); + } + + iwl_pcie_ctxt_info_free_paging(trans); + iwl_pcie_ctxt_info_free(trans); + + /* Make sure (redundant) we've released our request to stay awake */ + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* Stop the device, and put it in low power state */ + iwl_pcie_gen2_apm_stop(trans, false); + + /* stop and reset the on-board processor */ + iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + usleep_range(1000, 2000); + + /* + * Upon stop, the IVAR table gets erased, so msi-x won't + * work. This causes a bug in RF-KILL flows, since the interrupt + * that enables radio won't fire on the correct irq, and the + * driver won't be able to handle the interrupt. + * Configure the IVAR table again after reset. + */ + iwl_pcie_conf_msix_hw(trans_pcie); + + /* + * Upon stop, the APM issues an interrupt if HW RF kill is set. + * This is a bug in certain verions of the hardware. + * Certain devices also keep sending HW RF kill interrupt all + * the time, unless the interrupt is ACKed even if the interrupt + * should be masked. Re-ACK all the interrupts here. 
+ */ + iwl_disable_interrupts(trans); + + /* clear all status bits */ + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + clear_bit(STATUS_INT_ENABLED, &trans->status); + clear_bit(STATUS_TPOWER_PMI, &trans->status); + clear_bit(STATUS_RFKILL, &trans->status); + + /* + * Even if we stop the HW, we still want the RF kill + * interrupt + */ + iwl_enable_rfkill_int(trans); + + /* + * Check again since the RF kill state may have changed while + * all the interrupts were disabled, in this case we couldn't + * receive the RF kill interrupt and update the state in the + * op_mode. + * Don't call the op_mode if the rkfill state hasn't changed. + * This allows the op_mode to call stop_device from the rfkill + * notification without endless recursion. Under very rare + * circumstances, we might have a small recursion if the rfkill + * state changed exactly now while we were called from stop_device. + * This is very unlikely but can happen and is supported. + */ + hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans->status); + else + clear_bit(STATUS_RFKILL, &trans->status); + if (hw_rfkill != was_hw_rfkill) + iwl_trans_pcie_rf_kill(trans, hw_rfkill); + + /* re-take ownership to prevent other users from stealing the device */ + iwl_pcie_prepare_card_hw(trans); +} + +void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + mutex_lock(&trans_pcie->mutex); + _iwl_trans_pcie_gen2_stop_device(trans, low_power); + mutex_unlock(&trans_pcie->mutex); +} + +static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + /* TODO: most of the logic can be removed in A0 - but not in Z0 */ + spin_lock(&trans_pcie->irq_lock); + iwl_pcie_gen2_apm_init(trans); + spin_unlock(&trans_pcie->irq_lock); + + iwl_op_mode_nic_config(trans->op_mode); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (iwl_pcie_gen2_rx_init(trans)) + return -ENOMEM; + + /* Allocate or reset and init all Tx and Command queues */ + if (iwl_pcie_gen2_tx_init(trans)) + return -ENOMEM; + + /* enable shadow regs in HW */ + iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); + IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); + + return 0; +} + +void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_pcie_reset_ict(trans); + + /* make sure all queue are not stopped/used */ + memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); + memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + + /* now that we got alive we can free the fw image & the context info. + * paging memory cannot be freed included since FW will still use it + */ + iwl_pcie_ctxt_info_free(trans); +} + +int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool hw_rfkill; + int ret; + + /* This may fail if AMT took ownership of the device */ + if (iwl_pcie_prepare_card_hw(trans)) { + IWL_WARN(trans, "Exit HW not ready\n"); + ret = -EIO; + goto out; + } + + iwl_enable_rfkill_int(trans); + + iwl_write32(trans, CSR_INT, 0xFFFFFFFF); + + /* + * We enabled the RF-Kill interrupt and the handler may very + * well be running. 
Disable the interrupts to make sure no other + * interrupt can be fired. + */ + iwl_disable_interrupts(trans); + + /* Make sure it finished running */ + iwl_pcie_synchronize_irqs(trans); + + mutex_lock(&trans_pcie->mutex); + + /* If platform's RF_KILL switch is NOT set to KILL */ + hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + if (hw_rfkill && !run_in_rfkill) { + ret = -ERFKILL; + goto out; + } + + /* Someone called stop_device, don't try to start_fw */ + if (trans_pcie->is_down) { + IWL_WARN(trans, + "Can't start_fw since the HW hasn't been started\n"); + ret = -EIO; + goto out; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(trans, CSR_INT, 0xFFFFFFFF); + + ret = iwl_pcie_gen2_nic_init(trans); + if (ret) { + IWL_ERR(trans, "Unable to init nic\n"); + goto out; + } + + ret = iwl_pcie_ctxt_info_init(trans, fw); + if (ret) + goto out; + + /* re-check RF-Kill state since we may have missed the interrupt */ + hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + if (hw_rfkill && !run_in_rfkill) + ret = -ERFKILL; + +out: + mutex_unlock(&trans_pcie->mutex); + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 7f05fc56587a..70acf850a9f1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -7,7 +7,7 @@ * * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -201,7 +201,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 -static void iwl_pcie_apm_config(struct iwl_trans *trans) +void iwl_pcie_apm_config(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 lctl; @@ -448,7 +448,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); } -static int iwl_pcie_apm_stop_master(struct iwl_trans *trans) +int iwl_pcie_apm_stop_master(struct iwl_trans *trans) { int ret = 0; @@ -567,7 +567,7 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) } /* Note: returns standard 0/-ERROR code */ -static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) +int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) { int ret; int t = 0; @@ -636,29 +636,6 @@ static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans, FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); } -static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans, - u32 dst_addr, dma_addr_t phy_addr, - u32 byte_cnt) -{ - /* Stop DMA channel */ - iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0); - - /* Configure SRAM address */ - iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR, - dst_addr); - - /* Configure DRAM address - 64 bit */ - iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr); - - /* Configure byte count to transfer */ - iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt); - - /* Enable the DRAM2SRAM to start */ - iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP | - TFH_SRV_DMA_TO_DRIVER | - TFH_SRV_DMA_START); -} - static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) @@ -672,12 +649,8 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, if (!iwl_trans_grab_nic_access(trans, &flags)) return -EIO; - if (trans->cfg->use_tfh) - iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr, - byte_cnt); - else - iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, - byte_cnt); + iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, + byte_cnt); iwl_trans_release_nic_access(trans, &flags); ret = wait_event_timeout(trans_pcie->ucode_write_waitq, @@ -747,47 +720,6 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, return ret; } -/* - * Driver Takes the ownership on secure machine before FW load - * and prevent race with the BT load. - * W/A for ROM bug. (should be remove in the next Si step) - */ -static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans) -{ - u32 val, loop = 1000; - - /* - * Check the RSA semaphore is accessible. - * If the HW isn't locked and the rsa semaphore isn't accessible, - * we are in trouble. 
- */ - val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); - if (val & (BIT(1) | BIT(17))) { - IWL_DEBUG_INFO(trans, - "can't access the RSA semaphore it is write protected\n"); - return 0; - } - - /* take ownership on the AUX IF */ - iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK); - iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK); - - do { - iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1); - val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS); - if (val == 0x1) { - iwl_write_prph(trans, RSA_ENABLE, 0); - return 0; - } - - udelay(10); - loop--; - } while (loop > 0); - - IWL_ERR(trans, "Failed to take ownership on secure machine\n"); - return -EIO; -} - static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, const struct fw_img *image, int cpu, @@ -828,15 +760,10 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, return ret; /* Notify ucode of loaded section number and status */ - if (trans->cfg->use_tfh) { - val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS); - val = val | (sec_num << shift_param); - iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val); - } else { - val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); - val = val | (sec_num << shift_param); - iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); - } + val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); + val = val | (sec_num << shift_param); + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); + sec_num = (sec_num << 1) | 0x1; } @@ -904,7 +831,7 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, return 0; } -static void iwl_pcie_apply_destination(struct iwl_trans *trans) +void iwl_pcie_apply_destination(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv; @@ -1042,10 +969,15 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, if (trans->dbg_dest_tlv) iwl_pcie_apply_destination(trans); - /* TODO: remove in the next Si step */ - ret = iwl_pcie_rsa_race_bug_wa(trans); - if (ret) - return ret; + IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n", + iwl_read_prph(trans, WFPM_GP2)); + + /* + * Set default value. On resume reading the values that were + * zeored can provide debug data on the resume flow. + * This is for debugging only and has no functional impact. 
+ */ + iwl_write_prph(trans, WFPM_GP2, 0x01010101); /* configure the ucode to be ready to get the secured image */ /* release CPU reset */ @@ -1062,7 +994,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, &first_ucode_section); } -static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans) +bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans) { bool hw_rfkill = iwl_is_rfkill_set(trans); @@ -1147,7 +1079,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); } -static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) +void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) { struct iwl_trans *trans = trans_pcie->trans; @@ -1299,7 +1231,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_prepare_card_hw(trans); } -static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) +void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1423,8 +1355,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) lockdep_assert_held(&trans_pcie->mutex); - if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) - _iwl_trans_pcie_stop_device(trans, true); + if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { + if (trans->cfg->gen2) + _iwl_trans_pcie_gen2_stop_device(trans, true); + else + _iwl_trans_pcie_stop_device(trans, true); + } } static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, @@ -1527,6 +1463,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, } } + IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", + iwl_read_prph(trans, WFPM_GP2)); + val = iwl_read32(trans, CSR_RESET); if (val & CSR_RESET_REG_FLAG_NEVO_RESET) *status = IWL_D3_STATUS_RESET; @@ -1828,7 +1767,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_synchronize_irqs(trans); - iwl_pcie_tx_free(trans); + if (trans->cfg->gen2) + iwl_pcie_gen2_tx_free(trans); + else + iwl_pcie_tx_free(trans); iwl_pcie_rx_free(trans); if (trans_pcie->msix_enabled) { @@ -1998,7 +1940,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, int queue; for_each_set_bit(queue, &txqs, BITS_PER_LONG) { - struct iwl_txq *txq = &trans_pcie->txq[queue]; + struct iwl_txq *txq = trans_pcie->txq[queue]; unsigned long now; spin_lock_bh(&txq->lock); @@ -2050,7 +1992,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) int i; for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = &trans_pcie->txq[i]; + struct iwl_txq *txq = trans_pcie->txq[i]; if (i == trans_pcie->cmd_queue) continue; @@ -2075,48 +2017,32 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 scd_sram_addr; - u8 buf[16]; - int cnt; + u32 txq_id = txq->id; + u32 status; + bool active; + u8 fifo; - IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", - txq->read_ptr, txq->write_ptr); - - if (trans->cfg->use_tfh) + if (trans->cfg->use_tfh) { + IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, + txq->read_ptr, txq->write_ptr); /* TODO: access new SCD registers and dump them */ return; + } - scd_sram_addr = trans_pcie->scd_base_addr + - SCD_TX_STTS_QUEUE_OFFSET(txq->id); - iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); - - 
iwl_print_hex_error(trans, buf, sizeof(buf)); - - for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++) - IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt, - iwl_read_direct32(trans, FH_TX_TRB_REG(cnt))); - - for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { - u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt)); - u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - u32 tbl_dw = - iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE(cnt)); - - if (cnt & 0x1) - tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; - else - tbl_dw = tbl_dw & 0x0000FFFF; + status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); + fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; + active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - IWL_ERR(trans, - "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", - cnt, active ? "" : "in", fifo, tbl_dw, - iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) & - (TFD_QUEUE_SIZE_MAX - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); - } + IWL_ERR(trans, + "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", + txq_id, active ? "" : "in", fifo, + jiffies_to_msecs(txq->wd_timeout), + txq->read_ptr, txq->write_ptr, + iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & + (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & + (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); } static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) @@ -2139,7 +2065,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) continue; IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt); - txq = &trans_pcie->txq[cnt]; + txq = trans_pcie->txq[cnt]; wr_ptr = ACCESS_ONCE(txq->write_ptr); while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) && @@ -2330,7 +2256,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues; - if (!trans_pcie->txq) + if (!trans_pcie->txq_memory) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); @@ -2338,7 +2264,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, return -ENOMEM; for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { - txq = &trans_pcie->txq[cnt]; + txq = trans_pcie->txq[cnt]; pos += scnprintf(buf + pos, bufsz - pos, "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", cnt, txq->read_ptr, txq->write_ptr, @@ -2755,7 +2681,7 @@ static struct iwl_trans_dump_data { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; - struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; u32 len, num_rbs; @@ -2890,21 +2816,43 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans) } #endif /* CONFIG_PM_SLEEP */ +#define IWL_TRANS_COMMON_OPS \ + .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ + .write8 = iwl_trans_pcie_write8, \ + .write32 = iwl_trans_pcie_write32, \ + .read32 = iwl_trans_pcie_read32, \ + .read_prph = iwl_trans_pcie_read_prph, \ + .write_prph = iwl_trans_pcie_write_prph, \ + .read_mem = iwl_trans_pcie_read_mem, \ + .write_mem = iwl_trans_pcie_write_mem, \ + .configure = iwl_trans_pcie_configure, \ + .set_pmi = iwl_trans_pcie_set_pmi, \ + .grab_nic_access = 
iwl_trans_pcie_grab_nic_access, \ + .release_nic_access = iwl_trans_pcie_release_nic_access, \ + .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ + .ref = iwl_trans_pcie_ref, \ + .unref = iwl_trans_pcie_unref, \ + .dump_data = iwl_trans_pcie_dump_data, \ + .wait_tx_queues_empty = iwl_trans_pcie_wait_txq_empty, \ + .d3_suspend = iwl_trans_pcie_d3_suspend, \ + .d3_resume = iwl_trans_pcie_d3_resume + +#ifdef CONFIG_PM_SLEEP +#define IWL_TRANS_PM_OPS \ + .suspend = iwl_trans_pcie_suspend, \ + .resume = iwl_trans_pcie_resume, +#else +#define IWL_TRANS_PM_OPS +#endif /* CONFIG_PM_SLEEP */ + static const struct iwl_trans_ops trans_ops_pcie = { + IWL_TRANS_COMMON_OPS, + IWL_TRANS_PM_OPS .start_hw = iwl_trans_pcie_start_hw, - .op_mode_leave = iwl_trans_pcie_op_mode_leave, .fw_alive = iwl_trans_pcie_fw_alive, .start_fw = iwl_trans_pcie_start_fw, .stop_device = iwl_trans_pcie_stop_device, - .d3_suspend = iwl_trans_pcie_d3_suspend, - .d3_resume = iwl_trans_pcie_d3_resume, - -#ifdef CONFIG_PM_SLEEP - .suspend = iwl_trans_pcie_suspend, - .resume = iwl_trans_pcie_resume, -#endif /* CONFIG_PM_SLEEP */ - .send_cmd = iwl_trans_pcie_send_hcmd, .tx = iwl_trans_pcie_tx, @@ -2913,31 +2861,27 @@ static const struct iwl_trans_ops trans_ops_pcie = { .txq_disable = iwl_trans_pcie_txq_disable, .txq_enable = iwl_trans_pcie_txq_enable, - .get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table, - .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, - .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, +}; + +static const struct iwl_trans_ops trans_ops_pcie_gen2 = { + IWL_TRANS_COMMON_OPS, + IWL_TRANS_PM_OPS + .start_hw = iwl_trans_pcie_start_hw, + .fw_alive = iwl_trans_pcie_gen2_fw_alive, + .start_fw = iwl_trans_pcie_gen2_start_fw, + .stop_device = iwl_trans_pcie_gen2_stop_device, - .write8 = iwl_trans_pcie_write8, - .write32 = iwl_trans_pcie_write32, - .read32 = iwl_trans_pcie_read32, - .read_prph = iwl_trans_pcie_read_prph, - .write_prph = iwl_trans_pcie_write_prph, - .read_mem = iwl_trans_pcie_read_mem, - .write_mem = iwl_trans_pcie_write_mem, - .configure = iwl_trans_pcie_configure, - .set_pmi = iwl_trans_pcie_set_pmi, - .grab_nic_access = iwl_trans_pcie_grab_nic_access, - .release_nic_access = iwl_trans_pcie_release_nic_access, - .set_bits_mask = iwl_trans_pcie_set_bits_mask, - - .ref = iwl_trans_pcie_ref, - .unref = iwl_trans_pcie_unref, - - .dump_data = iwl_trans_pcie_dump_data, + .send_cmd = iwl_trans_pcie_gen2_send_hcmd, + + .tx = iwl_trans_pcie_gen2_tx, + .reclaim = iwl_trans_pcie_reclaim, + + .txq_alloc = iwl_trans_pcie_dyn_txq_alloc, + .txq_free = iwl_trans_pcie_dyn_txq_free, }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, @@ -2952,8 +2896,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, if (ret) return ERR_PTR(ret); - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, cfg, &trans_ops_pcie, 0); + if (cfg->gen2) + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), + &pdev->dev, cfg, &trans_ops_pcie_gen2); + else + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), + &pdev->dev, cfg, &trans_ops_pcie); if (!trans) return ERR_PTR(-ENOMEM); @@ -3028,7 +2976,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - trans->dev = &pdev->dev; trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); diff --git 
a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c new file mode 100644 index 000000000000..9fb46a6f47cf --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -0,0 +1,1018 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include <linux/pm_runtime.h> + +#include "iwl-debug.h" +#include "iwl-csr.h" +#include "iwl-io.h" +#include "internal.h" +#include "mvm/fw-api.h" + + /* + * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels + */ +void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int txq_id; + + /* + * This function can be called before the op_mode disabled the + * queues. This happens when we have an rfkill interrupt. + * Since we stop Tx altogether - mark the queues as stopped. 
+ */ + memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); + memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + + /* Unmap DMA from host system and free skb's */ + for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) { + if (!trans_pcie->txq[txq_id]) + continue; + iwl_pcie_gen2_txq_unmap(trans, txq_id); + } +} + +/* + * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array + */ +static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt, + int num_tbs) +{ + struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; + int write_ptr = txq->write_ptr; + u8 filled_tfd_size, num_fetch_chunks; + u16 len = byte_cnt; + __le16 bc_ent; + + len = DIV_ROUND_UP(len, 4); + + if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) + return; + + filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + + num_tbs * sizeof(struct iwl_tfh_tb); + /* + * filled_tfd_size contains the number of filled bytes in the TFD. + * Dividing it by 64 will give the number of chunks to fetch + * to SRAM- 0 for one chunk, 1 for 2 and so on. + * If, for example, TFD contains only 3 TBs then 32 bytes + * of the TFD are used, and only one chunk of 64 bytes should + * be fetched + */ + num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; + + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); + scd_bc_tbl->tfd_offset[write_ptr] = bc_ent; +} + +/* + * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware + */ +static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_txq *txq) +{ + lockdep_assert_held(&txq->lock); + + IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); + + /* + * if not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). 
+ */ + iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); +} + +static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd) +{ + return le16_to_cpu(tfd->num_tbs) & 0x1f; +} + +static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans, + struct iwl_cmd_meta *meta, + struct iwl_tfh_tfd *tfd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i, num_tbs; + + /* Sanity check on number of chunks */ + num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd); + + if (num_tbs >= trans_pcie->max_tbs) { + IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); + return; + } + + /* first TB is never freed - it's the bidirectional DMA data */ + for (i = 1; i < num_tbs; i++) { + if (meta->tbs & BIT(i)) + dma_unmap_page(trans->dev, + le64_to_cpu(tfd->tbs[i].addr), + le16_to_cpu(tfd->tbs[i].tb_len), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + le64_to_cpu(tfd->tbs[i].addr), + le16_to_cpu(tfd->tbs[i].tb_len), + DMA_TO_DEVICE); + } + + tfd->num_tbs = 0; +} + +static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and + * idx is bounded by n_window + */ + int rd_ptr = txq->read_ptr; + int idx = get_cmd_index(txq, rd_ptr); + + lockdep_assert_held(&txq->lock); + + /* We have only q->n_window txq->entries, but we use + * TFD_QUEUE_SIZE_MAX tfds + */ + iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, + iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr)); + + /* free SKB */ + if (txq->entries) { + struct sk_buff *skb; + + skb = txq->entries[idx].skb; + + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; + } + } +} + +static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd, dma_addr_t addr, + u16 len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd); + struct iwl_tfh_tb *tb = &tfd->tbs[idx]; + + /* Each TFD can point to a maximum max_tbs Tx buffers */ + if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) { + IWL_ERR(trans, "Error can not send more than %d chunks\n", + trans_pcie->max_tbs); + return -EINVAL; + } + + put_unaligned_le64(addr, &tb->addr); + tb->tb_len = cpu_to_le16(len); + + tfd->num_tbs = cpu_to_le16(idx + 1); + + return idx; +} + +static +struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_tfh_tfd *tfd = + iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + dma_addr_t tb_phys; + int i, len, tb1_len, tb2_len, hdr_len; + void *tb1_addr; + + memset(tfd, 0, sizeof(*tfd)); + + tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr); + /* The first TB points to bi-directional DMA data */ + memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, + IWL_FIRST_TB_SIZE); + + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); + + /* there must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); + + /* + * The second TB 
(tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) + */ + len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) + + ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE; + + tb1_len = ALIGN(len, 4); + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; + tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); + + /* set up TFD's third entry to point to remainder of skb's head */ + hdr_len = ieee80211_hdrlen(hdr->frame_control); + tb2_len = skb_headlen(skb) - hdr_len; + + if (tb2_len > 0) { + tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, + tb2_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); + } + + /* set up the remaining entries to point to the data */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int tb_idx; + + if (!skb_frag_size(frag)) + continue; + + tb_phys = skb_frag_dma_map(trans->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, + skb_frag_size(frag)); + + out_meta->tbs |= BIT(tb_idx); + } + + trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, + IWL_FIRST_TB_SIZE + tb1_len, + skb->data + hdr_len, tb2_len); + trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len, + skb->len - hdr_len); + + return tfd; + +out_err: + iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); + return NULL; +} + +int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_cmd *dev_cmd, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; + struct iwl_cmd_meta *out_meta; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; + void *tfd; + + if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), + "TX on unused queue %d\n", txq_id)) + return -EINVAL; + + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) && + __skb_linearize(skb)) + return -ENOMEM; + + spin_lock(&txq->lock); + + /* Set up driver data for this TFD */ + txq->entries[txq->write_ptr].skb = skb; + txq->entries[txq->write_ptr].cmd = dev_cmd; + + dev_cmd->hdr.sequence = + cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(txq->write_ptr))); + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_meta = &txq->entries[txq->write_ptr].meta; + out_meta->flags = 0; + + tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); + if (!tfd) { + spin_unlock(&txq->lock); + return -1; + } + + /* Set up entry for this TFD in Tx byte-count array */ + iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len), + iwl_pcie_gen2_get_num_tbs(trans, tfd)); + + /* start timer if queue currently empty */ + if (txq->read_ptr == txq->write_ptr) { + if (txq->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id); + iwl_trans_ref(trans); + } + + /* Tell device the write index *just past* this latest filled TFD */ + txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); 
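[Editorial aside: the pointer arithmetic behind iwl_queue_inc_wrap() and iwl_queue_space(), used just above and below, relies on the ring size being a power of two. A minimal sketch, assuming a 256-entry ring for TFD_QUEUE_SIZE_MAX and ignoring the per-queue window cap that the real iwl_queue_space() also applies.]

#include <stdio.h>

#define QUEUE_SIZE	256	/* assumed TFD_QUEUE_SIZE_MAX */

static int queue_inc_wrap(int index)
{
	/* power-of-two size lets the wrap be a simple mask */
	return (index + 1) & (QUEUE_SIZE - 1);
}

static int queue_space(int read_ptr, int write_ptr)
{
	int used = (write_ptr - read_ptr) & (QUEUE_SIZE - 1);

	/* the driver additionally caps this by the queue window; omitted here */
	return (QUEUE_SIZE - 1) - used;
}

int main(void)
{
	int wr = 255;

	wr = queue_inc_wrap(wr);	/* wraps back to 0 */
	printf("write_ptr=%d space=%d\n", wr, queue_space(10, wr));
	return 0;
}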
+ iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); + if (iwl_queue_space(txq) < txq->high_mark) + iwl_stop_queue(trans, txq); + + /* + * At this point the frame is "transmitted" successfully + * and we will get a TX status notification eventually. + */ + spin_unlock(&txq->lock); + return 0; +} + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +/* + * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a pointer to the ucode command structure + * + * The function returns < 0 values to indicate the operation + * failed. On success, it returns the index (>= 0) of command in the + * command queue. + */ +static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + unsigned long flags; + void *dup_buf = NULL; + dma_addr_t phys_addr; + int idx, i, cmd_pos; + u16 copy_size, cmd_size, tb0_size; + bool had_nocopy = false; + u8 group_id = iwl_cmd_groupid(cmd->id); + const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; + u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; + struct iwl_tfh_tfd *tfd = + iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + + memset(tfd, 0, sizeof(*tfd)); + + copy_size = sizeof(struct iwl_cmd_header_wide); + cmd_size = sizeof(struct iwl_cmd_header_wide); + + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + cmddata[i] = cmd->data[i]; + cmdlen[i] = cmd->len[i]; + + if (!cmd->len[i]) + continue; + + /* need at least IWL_FIRST_TB_SIZE copied */ + if (copy_size < IWL_FIRST_TB_SIZE) { + int copy = IWL_FIRST_TB_SIZE - copy_size; + + if (copy > cmdlen[i]) + copy = cmdlen[i]; + cmdlen[i] -= copy; + cmddata[i] += copy; + copy_size += copy; + } + + if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { + had_nocopy = true; + if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { + idx = -EINVAL; + goto free_dup_buf; + } + } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { + /* + * This is also a chunk that isn't copied + * to the static buffer so set had_nocopy. + */ + had_nocopy = true; + + /* only allowed once */ + if (WARN_ON(dup_buf)) { + idx = -EINVAL; + goto free_dup_buf; + } + + dup_buf = kmemdup(cmddata[i], cmdlen[i], + GFP_ATOMIC); + if (!dup_buf) + return -ENOMEM; + } else { + /* NOCOPY must not be followed by normal! */ + if (WARN_ON(had_nocopy)) { + idx = -EINVAL; + goto free_dup_buf; + } + copy_size += cmdlen[i]; + } + cmd_size += cmd->len[i]; + } + + /* + * If any of the command structures end up being larger than the + * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into + * separate TFDs, then we will need to increase the size of the buffers + */ + if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, + "Command %s (%#x) is too large (%d bytes)\n", + iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) { + idx = -EINVAL; + goto free_dup_buf; + } + + spin_lock_bh(&txq->lock); + + if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { + spin_unlock_bh(&txq->lock); + + IWL_ERR(trans, "No space in command queue\n"); + iwl_op_mode_cmd_queue_full(trans->op_mode); + idx = -ENOSPC; + goto free_dup_buf; + } + + idx = get_cmd_index(txq, txq->write_ptr); + out_cmd = txq->entries[idx].cmd; + out_meta = &txq->entries[idx].meta; + + /* re-initialize to NULL */ + memset(out_meta, 0, sizeof(*out_meta)); + if (cmd->flags & CMD_WANT_SKB) + out_meta->source = cmd; + + /* set up the header */ + out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr_wide.group_id = group_id; + out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); + out_cmd->hdr_wide.length = + cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); + out_cmd->hdr_wide.reserved = 0; + out_cmd->hdr_wide.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(txq->write_ptr)); + + cmd_pos = sizeof(struct iwl_cmd_header_wide); + copy_size = sizeof(struct iwl_cmd_header_wide); + + /* and copy the data that needs to be copied */ + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + int copy; + + if (!cmd->len[i]) + continue; + + /* copy everything if not nocopy/dup */ + if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP))) { + copy = cmd->len[i]; + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + copy_size += copy; + continue; + } + + /* + * Otherwise we need at least IWL_FIRST_TB_SIZE copied + * in total (for bi-directional DMA), but copy up to what + * we can fit into the payload for debug dump purposes. + */ + copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + + /* However, treat copy_size the proper way, we need it below */ + if (copy_size < IWL_FIRST_TB_SIZE) { + copy = IWL_FIRST_TB_SIZE - copy_size; + + if (copy > cmd->len[i]) + copy = cmd->len[i]; + copy_size += copy; + } + } + + IWL_DEBUG_HC(trans, + "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", + iwl_get_cmd_string(trans, cmd->id), group_id, + out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); + + /* start the TFD with the minimum copy bytes */ + tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); + iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx), + tb0_size); + + /* map first command fragment, if any remains */ + if (copy_size > tb0_size) { + phys_addr = dma_map_single(trans->dev, + ((u8 *)&out_cmd->hdr) + tb0_size, + copy_size - tb0_size, + DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + idx = -ENOMEM; + iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); + goto out; + } + iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, + copy_size - tb0_size); + } + + /* map the remaining (adjusted) nocopy/dup fragments */ + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + const void *data = cmddata[i]; + + if (!cmdlen[i]) + continue; + if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP))) + continue; + if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) + data = dup_buf; + phys_addr = dma_map_single(trans->dev, (void *)data, + cmdlen[i], DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + idx = -ENOMEM; + iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); + goto out; + } + iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]); + } + + BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); + out_meta->flags = cmd->flags; + if 
(WARN_ON_ONCE(txq->entries[idx].free_buf)) + kzfree(txq->entries[idx].free_buf); + txq->entries[idx].free_buf = dup_buf; + + trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); + + /* start timer if queue currently empty */ + if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + if (!(cmd->flags & CMD_SEND_IN_IDLE) && + !trans_pcie->ref_cmd_in_flight) { + trans_pcie->ref_cmd_in_flight = true; + IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); + iwl_trans_ref(trans); + } + /* Increment and update queue's write index */ + txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); + iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + +out: + spin_unlock_bh(&txq->lock); +free_dup_buf: + if (idx < 0) + kfree(dup_buf); + return idx; +} + +#define HOST_COMPLETE_TIMEOUT (2 * HZ) + +static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + int cmd_idx; + int ret; + + IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); + + if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + "Command %s: a command is already active!\n", cmd_str)) + return -EIO; + + IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); + + if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { + ret = wait_event_timeout(trans_pcie->d0i3_waitq, + pm_runtime_active(&trans_pcie->pci_dev->dev), + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); + if (!ret) { + IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); + return -ETIMEDOUT; + } + } + + cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", + cmd_str, ret); + return ret; + } + + ret = wait_event_timeout(trans_pcie->wait_command_queue, + !test_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + IWL_ERR(trans, "Error sending %s: time out after %dms.\n", + cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", + txq->read_ptr, txq->write_ptr); + + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", + cmd_str); + ret = -ETIMEDOUT; + + iwl_force_nmi(trans); + iwl_trans_fw_error(trans); + + goto cancel; + } + + if (test_bit(STATUS_FW_ERROR, &trans->status)) { + IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); + dump_stack(); + ret = -EIO; + goto cancel; + } + + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans->status)) { + IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); + ret = -ERFKILL; + goto cancel; + } + + if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { + IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); + ret = -EIO; + goto cancel; + } + + return 0; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). 
+ */ + txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; + } + + if (cmd->resp_pkt) { + iwl_free_resp(cmd); + cmd->resp_pkt = NULL; + } + + return ret; +} + +int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans->status)) { + IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", + cmd->id); + return -ERFKILL; + } + + if (cmd->flags & CMD_ASYNC) { + int ret; + + /* An asynchronous command can not expect an SKB to be set. */ + if (WARN_ON(cmd->flags & CMD_WANT_SKB)) + return -EINVAL; + + ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); + if (ret < 0) { + IWL_ERR(trans, + "Error sending %s: enqueue_hcmd failed: %d\n", + iwl_get_cmd_string(trans, cmd->id), ret); + return ret; + } + return 0; + } + + return iwl_pcie_gen2_send_hcmd_sync(trans, cmd); +} + +/* + * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's + */ +void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txq[txq_id]; + + spin_lock_bh(&txq->lock); + while (txq->write_ptr != txq->read_ptr) { + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", + txq_id, txq->read_ptr); + + iwl_pcie_gen2_free_tfd(trans, txq); + txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); + + if (txq->read_ptr == txq->write_ptr) { + unsigned long flags; + + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + if (txq_id != trans_pcie->cmd_queue) { + IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", + txq->id); + iwl_trans_unref(trans); + } else if (trans_pcie->ref_cmd_in_flight) { + trans_pcie->ref_cmd_in_flight = false; + IWL_DEBUG_RPM(trans, + "clear ref_cmd_in_flight\n"); + iwl_trans_unref(trans); + } + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + } + } + spin_unlock_bh(&txq->lock); + + /* just in case - this queue may have been stopped */ + iwl_wake_queue(trans, txq); +} + +static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, + struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct device *dev = trans->dev; + + /* De-alloc circular buffer of TFDs */ + if (txq->tfds) { + dma_free_coherent(dev, + trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX, + txq->tfds, txq->dma_addr); + dma_free_coherent(dev, + sizeof(*txq->first_tb_bufs) * txq->n_window, + txq->first_tb_bufs, txq->first_tb_dma); + } + + kfree(txq->entries); + iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl); + kfree(txq); +} + +/* + * iwl_pcie_txq_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. 
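[Editorial aside: the sequence values built with QUEUE_TO_SEQ()/INDEX_TO_SEQ() in the Tx and host-command paths above pack a queue id and a ring index into one 16-bit field. The sketch below assumes the 5-bit queue / 8-bit index split used elsewhere in the driver; treat the exact field widths as an assumption.]

#include <stdio.h>

static unsigned short make_sequence(unsigned int queue, unsigned int index)
{
	/* queue id in bits 8..12, ring index in bits 0..7 (assumed layout) */
	return ((queue & 0x1f) << 8) | (index & 0xff);
}

int main(void)
{
	unsigned short seq = make_sequence(9, 42);

	printf("seq=0x%04x queue=%u index=%u\n",
	       seq, (seq >> 8) & 0x1f, seq & 0xff);
	return 0;
}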
+ */ +static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txq[txq_id]; + int i; + + if (WARN_ON(!txq)) + return; + + iwl_pcie_gen2_txq_unmap(trans, txq_id); + + /* De-alloc array of command/tx buffers */ + if (txq_id == trans_pcie->cmd_queue) + for (i = 0; i < txq->n_window; i++) { + kzfree(txq->entries[i].cmd); + kzfree(txq->entries[i].free_buf); + } + del_timer_sync(&txq->stuck_timer); + + iwl_pcie_gen2_txq_free_memory(trans, txq); + + trans_pcie->txq[txq_id] = NULL; + + clear_bit(txq_id, trans_pcie->queue_used); +} + +int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, + struct iwl_tx_queue_cfg_cmd *cmd, + int cmd_id, + unsigned int timeout) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tx_queue_cfg_rsp *rsp; + struct iwl_txq *txq; + struct iwl_host_cmd hcmd = { + .id = cmd_id, + .len = { sizeof(*cmd) }, + .data = { cmd, }, + .flags = CMD_WANT_SKB, + }; + int ret, qid; + + txq = kzalloc(sizeof(*txq), GFP_KERNEL); + if (!txq) + return -ENOMEM; + ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl, + sizeof(struct iwlagn_scd_bc_tbl)); + if (ret) { + IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); + kfree(txq); + return -ENOMEM; + } + + ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false); + if (ret) { + IWL_ERR(trans, "Tx queue alloc failed\n"); + goto error; + } + ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false); + if (ret) { + IWL_ERR(trans, "Tx queue init failed\n"); + goto error; + } + + txq->wd_timeout = msecs_to_jiffies(timeout); + + cmd->tfdq_addr = cpu_to_le64(txq->dma_addr); + cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); + cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX)); + + ret = iwl_trans_send_cmd(trans, &hcmd); + if (ret) + goto error; + + if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) { + ret = -EINVAL; + goto error; + } + + rsp = (void *)hcmd.resp_pkt->data; + qid = le16_to_cpu(rsp->queue_number); + + if (qid > ARRAY_SIZE(trans_pcie->txq)) { + WARN_ONCE(1, "queue index %d unsupported", qid); + ret = -EIO; + goto error; + } + + if (test_and_set_bit(qid, trans_pcie->queue_used)) { + WARN_ONCE(1, "queue %d already used", qid); + ret = -EIO; + goto error; + } + + txq->id = qid; + trans_pcie->txq[qid] = txq; + + /* Place first TFD at index corresponding to start sequence number */ + txq->read_ptr = le16_to_cpu(rsp->write_pointer); + txq->write_ptr = le16_to_cpu(rsp->write_pointer); + iwl_write_direct32(trans, HBUS_TARG_WRPTR, + (txq->write_ptr) | (qid << 16)); + IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); + + return qid; + +error: + iwl_pcie_gen2_txq_free_memory(trans, txq); + return ret; +} + +void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + /* + * Upon HW Rfkill - we stop the device, and then stop the queues + * in the op_mode. Just for the sake of the simplicity of the op_mode, + * allow the op_mode to call txq_disable after it already called + * stop_device. 
+ */ + if (!test_and_clear_bit(queue, trans_pcie->queue_used)) { + WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), + "queue %d not used", queue); + return; + } + + iwl_pcie_gen2_txq_unmap(trans, queue); + + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); +} + +void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + + /* Free all TX queues */ + for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) { + if (!trans_pcie->txq[i]) + continue; + + iwl_pcie_gen2_txq_free(trans, i); + } +} + +int iwl_pcie_gen2_tx_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *cmd_queue; + int txq_id = trans_pcie->cmd_queue, ret; + + /* alloc and init the command queue */ + if (!trans_pcie->txq[txq_id]) { + cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL); + if (!cmd_queue) { + IWL_ERR(trans, "Not enough memory for command queue\n"); + return -ENOMEM; + } + trans_pcie->txq[txq_id] = cmd_queue; + ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true); + if (ret) { + IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); + goto error; + } + } else { + cmd_queue = trans_pcie->txq[txq_id]; + } + + ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true); + if (ret) { + IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); + goto error; + } + trans_pcie->txq[txq_id]->id = txq_id; + set_bit(txq_id, trans_pcie->queue_used); + + return 0; + +error: + iwl_pcie_gen2_tx_free(trans); + return ret; +} + diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 911cf9868107..386950a2d616 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -2,7 +2,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -71,7 +71,7 @@ * ***************************************************/ -static int iwl_queue_space(const struct iwl_txq *q) +int iwl_queue_space(const struct iwl_txq *q) { unsigned int max; unsigned int used; @@ -102,10 +102,9 @@ static int iwl_queue_space(const struct iwl_txq *q) /* * iwl_queue_init - Initialize queue's high/low-water and read/write indexes */ -static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id) +static int iwl_queue_init(struct iwl_txq *q, int slots_num) { q->n_window = slots_num; - q->id = id; /* slots_num must be power-of-two size, otherwise * get_cmd_index is broken. 
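[Editorial aside: the comment above insists on a power-of-two slots_num because the driver folds a ring index into the smaller entries[] window with a mask rather than a modulo, in the spirit of get_cmd_index(). A minimal sketch of why the mask only works for power-of-two windows:]

#include <stdio.h>

static int cmd_index(int index, int n_window)
{
	/* equals index % n_window only when n_window is a power of two */
	return index & (n_window - 1);
}

int main(void)
{
	/* with n_window = 32 the mask works; with e.g. 24 it would not */
	printf("index 200 maps to slot %d\n", cmd_index(200, 32));
	return 0;
}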
*/ @@ -126,8 +125,8 @@ static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id) return 0; } -static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, - struct iwl_dma_ptr *ptr, size_t size) +int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, + struct iwl_dma_ptr *ptr, size_t size) { if (WARN_ON(ptr->addr)) return -EINVAL; @@ -140,8 +139,7 @@ static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, return 0; } -static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, - struct iwl_dma_ptr *ptr) +void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) { if (unlikely(!ptr->addr)) return; @@ -164,9 +162,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) } spin_unlock(&txq->lock); - IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id, - jiffies_to_msecs(txq->wd_timeout)); - iwl_trans_pcie_log_scd_error(trans, txq); iwl_force_nmi(trans); @@ -188,6 +183,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, __le16 bc_ent; struct iwl_tx_cmd *tx_cmd = (void *)txq->entries[txq->write_ptr].cmd->payload; + u8 sta_id = tx_cmd->sta_id; scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; @@ -210,26 +206,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) return; - if (trans->cfg->use_tfh) { - u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + - num_tbs * sizeof(struct iwl_tfh_tb); - /* - * filled_tfd_size contains the number of filled bytes in the - * TFD. - * Dividing it by 64 will give the number of chunks to fetch - * to SRAM- 0 for one chunk, 1 for 2 and so on. - * If, for example, TFD contains only 3 TBs then 32 bytes - * of the TFD are used, and only one chunk of 64 bytes should - * be fetched - */ - u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; - - bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - } else { - u8 sta_id = tx_cmd->sta_id; - - bc_ent = cpu_to_le16(len | (sta_id << 12)); - } + bc_ent = cpu_to_le16(len | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; @@ -319,23 +296,17 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) int i; for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = &trans_pcie->txq[i]; + struct iwl_txq *txq = trans_pcie->txq[i]; spin_lock_bh(&txq->lock); - if (trans_pcie->txq[i].need_update) { + if (txq->need_update) { iwl_pcie_txq_inc_wr_ptr(trans, txq); - trans_pcie->txq[i].need_update = false; + txq->need_update = false; } spin_unlock_bh(&txq->lock); } } -static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, - struct iwl_txq *txq, int idx) -{ - return txq->tfds + trans_pcie->tfd_size * idx; -} - static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, void *_tfd, u8 idx) { @@ -368,28 +339,17 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, u8 idx, dma_addr_t addr, u16 len) { - if (trans->cfg->use_tfh) { - struct iwl_tfh_tfd *tfd_fh = (void *)tfd; - struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx]; + struct iwl_tfd *tfd_fh = (void *)tfd; + struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; - put_unaligned_le64(addr, &tb->addr); - tb->tb_len = cpu_to_le16(len); + u16 hi_n_len = len << 4; - tfd_fh->num_tbs = cpu_to_le16(idx + 1); - } else { - struct iwl_tfd *tfd_fh = (void *)tfd; - struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; - - u16 hi_n_len = len << 4; - - put_unaligned_le32(addr, &tb->lo); - if (sizeof(dma_addr_t) > 
sizeof(u32)) - hi_n_len |= ((addr >> 16) >> 16) & 0xF; + put_unaligned_le32(addr, &tb->lo); + hi_n_len |= iwl_get_dma_hi_addr(addr); - tb->hi_n_len = cpu_to_le16(hi_n_len); + tb->hi_n_len = cpu_to_le16(hi_n_len); - tfd_fh->num_tbs = idx + 1; - } + tfd_fh->num_tbs = idx + 1; } static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) @@ -460,7 +420,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, * Does NOT advance any TFD circular buffer read/write indexes * Does NOT free the TFD itself (which is within circular buffer) */ -static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) +void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window @@ -522,9 +482,8 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, return num_tbs; } -static int iwl_pcie_txq_alloc(struct iwl_trans *trans, - struct iwl_txq *txq, int slots_num, - u32 txq_id) +int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX; @@ -547,7 +506,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, if (!txq->entries) goto error; - if (txq_id == trans_pcie->cmd_queue) + if (cmd_queue) for (i = 0; i < slots_num; i++) { txq->entries[i].cmd = kmalloc(sizeof(struct iwl_device_cmd), @@ -573,13 +532,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, if (!txq->first_tb_bufs) goto err_free_tfds; - txq->id = txq_id; - return 0; err_free_tfds: dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); error: - if (txq->entries && txq_id == trans_pcie->cmd_queue) + if (txq->entries && cmd_queue) for (i = 0; i < slots_num; i++) kfree(txq->entries[i].cmd); kfree(txq->entries); @@ -589,10 +546,9 @@ error: } -static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, - int slots_num, u32 txq_id) +int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; txq->need_update = false; @@ -602,13 +558,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); /* Initialize queue's high/low-water marks, and head/tail indexes */ - ret = iwl_queue_init(txq, slots_num, txq_id); + ret = iwl_queue_init(txq, slots_num); if (ret) return ret; spin_lock_init(&txq->lock); - if (txq_id == trans_pcie->cmd_queue) { + if (cmd_queue) { static struct lock_class_key iwl_pcie_cmd_queue_lock_class; lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); @@ -616,18 +572,6 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, __skb_queue_head_init(&txq->overflow_q); - /* - * Tell nic where to find circular buffer of Tx Frame Descriptors for - * given Tx queue, and enable the DMA channel used for that queue. 
- * Circular buffer (TFD queue in DRAM) physical base address */ - if (trans->cfg->use_tfh) - iwl_write_direct64(trans, - FH_MEM_CBBC_QUEUE(trans, txq_id), - txq->dma_addr); - else - iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), - txq->dma_addr >> 8); - return 0; } @@ -672,7 +616,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { @@ -704,7 +648,6 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } } - txq->active = false; while (!skb_queue_empty(&txq->overflow_q)) { struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); @@ -729,7 +672,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; struct device *dev = trans->dev; int i; @@ -780,9 +723,6 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); - if (trans->cfg->use_tfh) - return; - trans_pcie->scd_base_addr = iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); @@ -832,9 +772,16 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int txq_id; + /* + * we should never get here in gen2 trans mode return early to avoid + * having invalid accesses + */ + if (WARN_ON_ONCE(trans->cfg->gen2)) + return; + for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; if (trans->cfg->use_tfh) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), @@ -914,7 +861,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans) memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); /* This can happen: start_hw, stop_device */ - if (!trans_pcie->txq) + if (!trans_pcie->txq_memory) return 0; /* Unmap DMA from host system and free skb's */ @@ -935,15 +882,20 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) int txq_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + /* Tx queues */ - if (trans_pcie->txq) { + if (trans_pcie->txq_memory) { for (txq_id = 0; - txq_id < trans->cfg->base_params->num_of_queues; txq_id++) + txq_id < trans->cfg->base_params->num_of_queues; + txq_id++) { iwl_pcie_txq_free(trans, txq_id); + trans_pcie->txq[txq_id] = NULL; + } } - kfree(trans_pcie->txq); - trans_pcie->txq = NULL; + kfree(trans_pcie->txq_memory); + trans_pcie->txq_memory = NULL; iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); @@ -965,7 +917,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) /*It is not allowed to alloc twice, so warn when this happens. 
* We cannot rely on the previous allocation, so free and fail */ - if (WARN_ON(trans_pcie->txq)) { + if (WARN_ON(trans_pcie->txq_memory)) { ret = -EINVAL; goto error; } @@ -984,9 +936,9 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) goto error; } - trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, - sizeof(struct iwl_txq), GFP_KERNEL); - if (!trans_pcie->txq) { + trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues, + sizeof(struct iwl_txq), GFP_KERNEL); + if (!trans_pcie->txq_memory) { IWL_ERR(trans, "Not enough memory for txq\n"); ret = -ENOMEM; goto error; @@ -995,14 +947,17 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { - slots_num = (txq_id == trans_pcie->cmd_queue) ? - TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id], - slots_num, txq_id); + bool cmd_queue = (txq_id == trans_pcie->cmd_queue); + + slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; + ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], + slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } + trans_pcie->txq[txq_id]->id = txq_id; } return 0; @@ -1012,6 +967,7 @@ error: return ret; } + int iwl_pcie_tx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1019,7 +975,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) int txq_id, slots_num; bool alloc = false; - if (!trans_pcie->txq) { + if (!trans_pcie->txq_memory) { ret = iwl_pcie_tx_alloc(trans); if (ret) goto error; @@ -1040,22 +996,24 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { - slots_num = (txq_id == trans_pcie->cmd_queue) ? - TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id], - slots_num, txq_id); + bool cmd_queue = (txq_id == trans_pcie->cmd_queue); + + slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], + slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; } - } - if (trans->cfg->use_tfh) { - iwl_write_direct32(trans, TFH_TRANSFER_MODE, - TFH_TRANSFER_MAX_PENDING_REQ | - TFH_CHUNK_SIZE_128 | - TFH_CHUNK_SPLIT_MODE); - return 0; + /* + * Tell nic where to find circular buffer of TFDs for a + * given Tx queue, and enable the DMA channel used for that + * queue. 
+ * Circular buffer (TFD queue in DRAM) physical base address + */ + iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), + trans_pcie->txq[txq_id]->dma_addr >> 8); } iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); @@ -1100,7 +1058,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); int last_to_free; @@ -1110,7 +1068,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, spin_lock_bh(&txq->lock); - if (!txq->active) { + if (!test_bit(txq_id, trans_pcie->queue_used)) { IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", txq_id, ssn); goto out; @@ -1257,7 +1215,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; unsigned long flags; int nfreed = 0; @@ -1324,15 +1282,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, unsigned int wdg_timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; int fifo = -1; if (test_and_set_bit(txq_id, trans_pcie->queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); - if (cfg && trans->cfg->use_tfh) - WARN_ONCE(1, "Expected no calls to SCD configuration"); - txq->wd_timeout = msecs_to_jiffies(wdg_timeout); if (cfg) { @@ -1414,27 +1369,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, "Activate queue %d WrPtr: %d\n", txq_id, ssn & 0xff); } - - txq->active = true; } void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; txq->ampdu = !shared_mode; } -dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - return trans_pcie->scd_bc_tbls.dma + - txq * sizeof(struct iwlagn_scd_bc_tbl); -} - void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, bool configure_scd) { @@ -1443,8 +1388,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, SCD_TX_STTS_QUEUE_OFFSET(txq_id); static const u32 zero_val[4] = {}; - trans_pcie->txq[txq_id].frozen_expiry_remainder = 0; - trans_pcie->txq[txq_id].frozen = false; + trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0; + trans_pcie->txq[txq_id]->frozen = false; /* * Upon HW Rfkill - we stop the device, and then stop the queues @@ -1458,9 +1403,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, return; } - if (configure_scd && trans->cfg->use_tfh) - WARN_ONCE(1, "Expected no calls to SCD configuration"); - if (configure_scd) { iwl_scd_txq_set_inactive(trans, txq_id); @@ -1469,7 +1411,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, } iwl_pcie_txq_unmap(trans, txq_id); - trans_pcie->txq[txq_id].ampdu = false; + trans_pcie->txq[txq_id]->ampdu = false; 
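[Editorial aside: the FH_MEM_CBBC_QUEUE write restored in the iwl_pcie_tx_init() hunk above stores the TFD ring base address shifted right by 8, i.e. in 256-byte units, which implies the ring base must be 256-byte aligned. A sketch of that encoding; the address used is purely illustrative.]

#include <stdio.h>

int main(void)
{
	unsigned long long dma_addr = 0x12345600ULL;	/* 256-byte aligned base */
	unsigned int reg_val = (unsigned int)(dma_addr >> 8);	/* value written to the register */

	printf("reg=0x%08x base=0x%llx\n",
	       reg_val, (unsigned long long)reg_val << 8);
	return 0;
}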
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } @@ -1489,7 +1431,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; unsigned long flags; @@ -1774,16 +1716,15 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; /* If a Tx command is being handled and it isn't in the actual * command queue then there a command routing bug has been introduced * in the queue management code. */ if (WARN(txq_id != trans_pcie->cmd_queue, "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", - txq_id, trans_pcie->cmd_queue, sequence, - trans_pcie->txq[trans_pcie->cmd_queue].read_ptr, - trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) { + txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr, + txq->write_ptr)) { iwl_print_hex_error(trans, pkt, 32); return; } @@ -1867,6 +1808,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; int cmd_idx; int ret; @@ -1907,8 +1849,6 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, &trans->status), HOST_COMPLETE_TIMEOUT); if (!ret) { - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; - IWL_ERR(trans, "Error sending %s: time out after %dms.\n", iwl_get_cmd_string(trans, cmd->id), jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); @@ -1959,8 +1899,7 @@ cancel: * in later, it will possibly set an invalid * address (cmd->meta.source). */ - trans_pcie->txq[trans_pcie->cmd_queue]. - entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; + txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; } if (cmd->resp_pkt) { @@ -2314,7 +2253,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, u16 wifi_seq; bool amsdu; - txq = &trans_pcie->txq[txq_id]; + txq = trans_pcie->txq[txq_id]; if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), "TX on unused queue %d\n", txq_id)) |