Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie')
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h |   8
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c       |  14
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c    | 309
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c       |  79
 4 files changed, 310 insertions(+), 100 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index d91d2e8c62f5..20735a008cab 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -222,8 +222,6 @@ struct iwl_txq {
* @rx_replenish: work that will be called when buffers need to be allocated
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
- * @irq - the irq number for the device
- * @irq_requested: true when the irq has been requested
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
* @kw: keep warm address
@@ -234,6 +232,7 @@ struct iwl_txq {
* @status - transport specific status flags
* @cmd_queue - command queue number
* @rx_buf_size_8k: 8 kB RX buffer size
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @rx_page_order: page order for receive buffer size
* @wd_timeout: queue watchdog timeout (jiffies)
*/
@@ -249,11 +248,9 @@ struct iwl_trans_pcie {
int ict_index;
u32 inta;
bool use_ict;
- bool irq_requested;
struct tasklet_struct irq_tasklet;
struct isr_statistics isr_stats;
- unsigned int irq;
spinlock_t irq_lock;
u32 inta_mask;
u32 scd_base_addr;
@@ -279,6 +276,7 @@ struct iwl_trans_pcie {
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
bool rx_buf_size_8k;
+ bool bc_table_dword;
u32 rx_page_order;
const char **command_names;
@@ -359,6 +357,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+
/*****************************************************
* Error handling
******************************************************/
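The new bc_table_dword flag above changes the unit of the scheduler
byte-count table entries from bytes to DWORDs. A minimal sketch of the
resulting entry computation, following the iwl_pcie_txq_update_byte_cnt_tbl()
hunk in tx.c further down; the helper name is hypothetical:

    static __le16 bc_table_entry(u16 len, u8 sta_id, bool bc_table_dword)
    {
            /* newer devices expect the length in DWORDs, not bytes */
            if (bc_table_dword)
                    len = DIV_ROUND_UP(len, 4);

            /* length in the low 12 bits, station id in the top 4 */
            return cpu_to_le16(len | (sta_id << 12));
    }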
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 8389cd38338b..4e6591d24e61 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -436,7 +436,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
err_rb_stts:
dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd_dma = 0;
rxq->bd = NULL;
err_bd:
return -ENOMEM;
@@ -455,6 +455,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
/* Stop Rx DMA */
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ /* reset and flush pointers */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
/* Reset driver's Rx queue write index */
iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
@@ -491,7 +495,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
-
int i, err;
unsigned long flags;
@@ -518,6 +521,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
rxq->free_count = 0;
+ memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock_irqrestore(&rxq->lock, flags);
iwl_pcie_rx_replenish(trans);
@@ -545,13 +549,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
return;
}
+ cancel_work_sync(&trans_pcie->rx_replenish);
+
spin_lock_irqsave(&rxq->lock, flags);
iwl_pcie_rxq_free_rbs(trans);
spin_unlock_irqrestore(&rxq->lock, flags);
dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd_dma = 0;
rxq->bd = NULL;
if (rxq->rb_stts)
@@ -560,7 +566,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
rxq->rb_stts, rxq->rb_stts_dma);
else
IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
- memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+ rxq->rb_stts_dma = 0;
rxq->rb_stts = NULL;
}
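The memset()-to-assignment changes in this file are behavior-neutral:
bd_dma and rb_stts_dma are scalar dma_addr_t values (u32 or u64 depending
on CONFIG_ARCH_DMA_ADDR_T_64BIT), so a plain store is the clearer reset.
A minimal sketch of the idiom, with field names as in the hunks above:

    /* dma_addr_t is an integer type; assigning 0 resets it without
     * the memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)) contortion.
     */
    rxq->bd_dma = 0;
    rxq->bd = NULL;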
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 35708b959ad6..c57641eb83d5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -75,21 +75,16 @@
#include "iwl-agn-hw.h"
#include "internal.h"
-static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
+static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
- iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
+ if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ else
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
@@ -259,7 +254,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- iwl_pcie_set_pwr_vmain(trans);
+ iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@@ -435,7 +430,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw)
+ const struct fw_img *fw, bool run_in_rfkill)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
@@ -454,7 +449,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- if (hw_rfkill)
+ if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
@@ -534,12 +529,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
iwl_enable_rfkill_int(trans);
- /* wait to make sure we flush pending tasklet*/
- synchronize_irq(trans_pcie->irq);
- tasklet_kill(&trans_pcie->irq_tasklet);
-
- cancel_work_sync(&trans_pcie->rx_replenish);
-
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
@@ -551,46 +540,87 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
clear_bit(STATUS_RFKILL, &trans_pcie->status);
}
-static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
+static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans)
{
/* let the ucode operate on its own */
iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
iwl_disable_interrupts(trans);
+ iwl_pcie_disable_ict(trans);
+
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * reset TX queues -- some of their registers reset during S3
+ * so if we don't reset everything here the D3 image would try
+ * to execute some invalid memory upon resume
+ */
+ iwl_trans_pcie_tx_reset(trans);
+
+ iwl_pcie_set_pwr(trans, true);
}
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int err;
- bool hw_rfkill;
+ u32 val;
+ int ret;
- trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ iwl_pcie_set_pwr(trans, false);
- if (!trans_pcie->irq_requested) {
- tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
- iwl_pcie_tasklet, (unsigned long)trans);
+ val = iwl_read32(trans, CSR_RESET);
+ if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
+ *status = IWL_D3_STATUS_RESET;
+ return 0;
+ }
- iwl_pcie_alloc_ict(trans);
+ /*
+ * Also enables interrupts - none will happen as the device doesn't
+ * know we're waking it up, only when the opmode actually tells it
+ * after this call.
+ */
+ iwl_pcie_reset_ict(trans);
- err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
- IRQF_SHARED, DRV_NAME, trans);
- if (err) {
- IWL_ERR(trans, "Error allocating IRQ %d\n",
- trans_pcie->irq);
- goto error;
- }
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
- trans_pcie->irq_requested = true;
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (ret) {
+ IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
+ return ret;
}
+ iwl_trans_pcie_tx_reset(trans);
+
+ ret = iwl_pcie_rx_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
+ return ret;
+ }
+
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ *status = IWL_D3_STATUS_ALIVE;
+ return 0;
+}
+
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+{
+ bool hw_rfkill;
+ int err;
+
err = iwl_pcie_prepare_card_hw(trans);
if (err) {
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
- goto err_free_irq;
+ return err;
}
iwl_pcie_apm_init(trans);
@@ -601,15 +631,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- return err;
-
-err_free_irq:
- trans_pcie->irq_requested = false;
- free_irq(trans_pcie->irq, trans);
-error:
- iwl_pcie_free_ict(trans);
- tasklet_kill(&trans_pcie->irq_tasklet);
- return err;
+ return 0;
}
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
@@ -703,19 +725,21 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
trans_pcie->command_names = trans_cfg->command_names;
+ trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ synchronize_irq(trans_pcie->pci_dev->irq);
+ tasklet_kill(&trans_pcie->irq_tasklet);
+
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
- if (trans_pcie->irq_requested == true) {
- free_irq(trans_pcie->irq, trans);
- iwl_pcie_free_ict(trans);
- }
+ free_irq(trans_pcie->pci_dev->irq, trans);
+ iwl_pcie_free_ict(trans);
pci_disable_msi(trans_pcie->pci_dev);
iounmap(trans_pcie->hw_base);
@@ -751,13 +775,112 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- if (!hw_rfkill)
- iwl_enable_interrupts(trans);
-
return 0;
}
#endif /* CONFIG_PM_SLEEP */
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
+{
+ int ret;
+
+ lockdep_assert_held(&trans->reg_lock);
+
+ /* this bit wakes up the NIC */
+ __iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * 3945 and 4965 have volatile SRAM, and must save/restore contents
+ * to/from host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+ * series of register accesses are expected (e.g. reading Event Log),
+ * to keep device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP check is a
+ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+ *
+ * 5000 series and later (including 1000 series) have non-volatile SRAM,
+ * and do not save/restore SRAM when power cycling.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ if (unlikely(ret < 0)) {
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ if (!silent) {
+ u32 val = iwl_read32(trans, CSR_GP_CNTRL);
+ WARN_ONCE(1,
+ "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
+ val);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+{
+ lockdep_assert_held(&trans->reg_lock);
+ __iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ /*
+ * Above we read the CSR_GP_CNTRL register, which will flush
+ * any previous writes, but we need the write that clears the
+ * MAC_ACCESS_REQ bit to be performed before any other writes
+ * scheduled on different CPUs (after we drop reg_lock).
+ */
+ mmiowb();
+}
+
+static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ unsigned long flags;
+ int offs, ret = 0;
+ u32 *vals = buf;
+
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ if (iwl_trans_grab_nic_access(trans, false)) {
+ iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+ iwl_trans_release_nic_access(trans);
+ } else {
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
+ return ret;
+}
+
+static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ unsigned long flags;
+ int offs, ret = 0;
+ u32 *vals = buf;
+
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ if (iwl_trans_grab_nic_access(trans, false)) {
+ iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ iwl_write32(trans, HBUS_TARG_MEM_WDAT,
+ vals ? vals[offs] : 0);
+ iwl_trans_release_nic_access(trans);
+ } else {
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
+ return ret;
+}
+
#define IWL_FLUSH_WAIT_MS 2000
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
@@ -767,6 +890,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
+ u32 scd_sram_addr;
+ u8 buf[16];
int ret = 0;
/* waiting for all the tx frames complete might take a while */
@@ -780,11 +905,50 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
msleep(1);
if (q->read_ptr != q->write_ptr) {
- IWL_ERR(trans, "fail to flush all tx fifo queues\n");
+ IWL_ERR(trans,
+ "fail to flush all tx fifo queues Q %d\n", cnt);
ret = -ETIMEDOUT;
break;
}
}
+
+ if (!ret)
+ return 0;
+
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+ txq->q.read_ptr, txq->q.write_ptr);
+
+ scd_sram_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+
+ iwl_print_hex_error(trans, buf, sizeof(buf));
+
+ for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
+ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
+ iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
+
+ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
+ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+ u32 tbl_dw =
+ iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
+
+ if (cnt & 0x1)
+ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+ else
+ tbl_dw = tbl_dw & 0x0000FFFF;
+
+ IWL_ERR(trans,
+ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+ cnt, active ? "" : "in", fifo, tbl_dw,
+ iwl_read_prph(trans,
+ SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
+ }
+
return ret;
}
@@ -1212,7 +1376,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
- .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
+ .d3_suspend = iwl_trans_pcie_d3_suspend,
+ .d3_resume = iwl_trans_pcie_d3_resume,
.send_cmd = iwl_trans_pcie_send_hcmd,
@@ -1235,8 +1400,12 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.read32 = iwl_trans_pcie_read32,
.read_prph = iwl_trans_pcie_read_prph,
.write_prph = iwl_trans_pcie_write_prph,
+ .read_mem = iwl_trans_pcie_read_mem,
+ .write_mem = iwl_trans_pcie_write_mem,
.configure = iwl_trans_pcie_configure,
.set_pmi = iwl_trans_pcie_set_pmi,
+ .grab_nic_access = iwl_trans_pcie_grab_nic_access,
+ .release_nic_access = iwl_trans_pcie_release_nic_access
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -1318,7 +1487,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
trans->dev = &pdev->dev;
- trans_pcie->irq = pdev->irq;
trans_pcie->pci_dev = pdev;
trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
@@ -1344,8 +1512,27 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (!trans->dev_cmd_pool)
goto out_pci_disable_msi;
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
+ tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
+ iwl_pcie_tasklet, (unsigned long)trans);
+
+ if (iwl_pcie_alloc_ict(trans))
+ goto out_free_cmd_pool;
+
+ err = request_irq(pdev->irq, iwl_pcie_isr_ict,
+ IRQF_SHARED, DRV_NAME, trans);
+ if (err) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
+ goto out_free_ict;
+ }
+
return trans;
+out_free_ict:
+ iwl_pcie_free_ict(trans);
+out_free_cmd_pool:
+ kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
pci_disable_msi(pdev);
out_pci_release_regions:
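trans.c replaces the one-way wowlan_suspend op with a d3_suspend/d3_resume
pair. A sketch of how an op mode would drive the pair across system sleep;
the iwl_trans_d3_*() wrapper names assume the usual iwl_trans inline-wrapper
convention, and the recovery path is hypothetical:

    enum iwl_d3_status d3_status;
    int ret;

    iwl_trans_d3_suspend(trans);          /* ucode keeps running on V_AUX */

    /* ... system suspends, then resumes ... */

    ret = iwl_trans_d3_resume(trans, &d3_status);
    if (ret || d3_status == IWL_D3_STATUS_RESET)
            restart_fw(trans);            /* hypothetical: D3 image was lost */
    /* IWL_D3_STATUS_ALIVE: the D3 image survived S3, resume normally */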
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 6c5b867c353a..a93f06762b96 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -160,7 +160,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
- iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
@@ -173,9 +173,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
u32 tbl_dw =
- iwl_read_targ_mem(trans,
- trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(i));
+ iwl_trans_read_mem32(trans,
+ trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(i));
if (i & 0x1)
tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
@@ -237,7 +237,10 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
break;
}
- bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+ if (trans_pcie->bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -306,6 +309,9 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
return;
}
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
+ txq->q.write_ptr);
+
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
@@ -612,7 +618,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
if (txq->q.n_bd) {
dma_free_coherent(dev, sizeof(struct iwl_tfd) *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
- memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+ txq->q.dma_addr = 0;
}
kfree(txq->entries);
@@ -638,9 +644,11 @@ static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 a;
+ int nq = trans->cfg->base_params->num_of_queues;
int chan;
u32 reg_val;
+ int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
+ SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
/* make sure all queue are not stopped/used */
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
@@ -652,20 +660,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
WARN_ON(scd_base_addr != 0 &&
scd_base_addr != trans_pcie->scd_base_addr);
- a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
- /* reset conext data memory */
- for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- /* reset tx status memory */
- for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- for (; a < trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(
- trans->cfg->base_params->num_of_queues);
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
+ /* reset context data, TX status and translation data */
+ iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_MEM_LOWER_BOUND,
+ NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
@@ -697,6 +695,29 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+ iwl_pcie_txq_unmap(trans, txq_id);
+ txq->q.read_ptr = 0;
+ txq->q.write_ptr = 0;
+ }
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
+}
+
/*
* iwl_pcie_tx_stop - Stop all Tx DMA channels
*/
@@ -1002,14 +1023,14 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
tbl_dw_addr = trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
- tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
+ tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
if (txq_id & 0x1)
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
else
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
- iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
+ iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
return 0;
}
@@ -1068,9 +1089,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
/* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
@@ -1101,8 +1122,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_set_inactive(trans, txq_id);
- _iwl_write_targ_mem_dwords(trans, stts_addr,
- zero_val, ARRAY_SIZE(zero_val));
+ iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+ ARRAY_SIZE(zero_val));
iwl_pcie_txq_unmap(trans, txq_id);
@@ -1642,10 +1663,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
- IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
- le16_to_cpu(dev_cmd->hdr.sequence));
- IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
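Both the wait_txq_empty() dump in trans.c and the stuck-queue timer above
decode the same per-queue scheduler state. A condensed sketch of that
decode, with field positions taken from those hunks; the helper name is
hypothetical:

    static void decode_scd_queue(struct iwl_trans *trans, int q,
                                 u32 status, u32 tbl_dw)
    {
            u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
            bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
            /* the RA/TID translation table packs two queues per DWORD */
            u16 ra_tid = (q & 0x1) ? tbl_dw >> 16 : tbl_dw & 0xFFFF;

            IWL_ERR(trans, "Q %d is %sactive, fifo %d, ra_tid 0x%04x\n",
                    q, active ? "" : "in", fifo, ra_tid);
    }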