Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans-pcie.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | 144
1 file changed, 72 insertions(+), 72 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 80f531844f3e..a1ee2e4b0ced 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -80,7 +80,7 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct device *dev = bus(trans)->dev;
+	struct device *dev = trans->dev;
 
 	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
@@ -124,7 +124,7 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
 		/* In the reset function, these buffers may have been allocated
 		 * to an SKB, so we need to unmap and free potential storage */
 		if (rxq->pool[i].page != NULL) {
-			dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
+			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
 				PAGE_SIZE << hw_params(trans).rx_page_order,
 				DMA_FROM_DEVICE);
 			__free_pages(rxq->pool[i].page,
@@ -148,17 +148,17 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
 
 	/* Stop Rx DMA */
-	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 
 	/* Reset driver's Rx queue write index */
-	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
 
 	/* Tell device where to find RBD circular buffer in DRAM */
-	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));
 
 	/* Tell device where in DRAM to update its Rx status */
-	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
 			   rxq->rb_stts_dma >> 4);
 
 	/* Enable Rx DMA
@@ -169,7 +169,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
 	 * RB timeout 0x10
 	 * 256 RBDs
 	 */
-	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
 			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
 			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
 			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
@@ -179,7 +179,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
 			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
 
 	/* Set interrupt coalescing timer to default (2048 usecs) */
-	iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
 }
 
 static int iwl_rx_init(struct iwl_trans *trans)
@@ -244,13 +244,13 @@ static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
 	iwl_trans_rxq_free_rx_bufs(trans);
 	spin_unlock_irqrestore(&rxq->lock, flags);
 
-	dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
 			  rxq->bd, rxq->bd_dma);
 	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
 	rxq->bd = NULL;
 
 	if (rxq->rb_stts)
-		dma_free_coherent(bus(trans)->dev,
+		dma_free_coherent(trans->dev,
 				  sizeof(struct iwl_rb_status),
 				  rxq->rb_stts, rxq->rb_stts_dma);
 	else
@@ -263,8 +263,8 @@ static int iwl_trans_rx_stop(struct iwl_trans *trans)
 {
 	/* stop Rx DMA */
-	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-	return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
 			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
 }
@@ -274,7 +274,7 @@ static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
 	if (WARN_ON(ptr->addr))
 		return -EINVAL;
 
-	ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
+	ptr->addr = dma_alloc_coherent(trans->dev, size,
 				       &ptr->dma, GFP_KERNEL);
 	if (!ptr->addr)
 		return -ENOMEM;
@@ -288,7 +288,7 @@ static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
 	if (unlikely(!ptr->addr))
 		return;
 
-	dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
+	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
 	memset(ptr, 0, sizeof(*ptr));
 }
@@ -335,7 +335,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
 
 	/* Circular buffer of transmit frame descriptors (TFDs),
 	 * shared with device */
-	txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
+	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
 				       &txq->q.dma_addr, GFP_KERNEL);
 	if (!txq->tfds) {
 		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
@@ -391,7 +391,7 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 	 * given Tx queue, and enable the DMA channel used for that queue.
 	 * Circular buffer (TFD queue in DRAM) physical base address */
-	iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
+	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
 			     txq->q.dma_addr >> 8);
 
 	return 0;
@@ -445,7 +445,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
-	struct device *dev = bus(trans)->dev;
+	struct device *dev = trans->dev;
 	int i;
 
 	if (WARN_ON(!txq))
 		return;
@@ -586,10 +586,10 @@ static int iwl_tx_init(struct iwl_trans *trans)
 	spin_lock_irqsave(&trans->shrd->lock, flags);
 
 	/* Turn off all Tx DMA fifos */
-	iwl_write_prph(bus(trans), SCD_TXFACT, 0);
+	iwl_write_prph(trans, SCD_TXFACT, 0);
 
 	/* Tell NIC where to find the "keep warm" buffer */
-	iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
+	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
 			   trans_pcie->kw.dma >> 4);
 
 	spin_unlock_irqrestore(&trans->shrd->lock, flags);
@@ -621,12 +621,12 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
 	 * to set power to V_AUX, do:
 
 		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
-			iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
+			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
 					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
 					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 	 */
 
-	iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
+	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
 			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
 			       ~APMG_PS_CTRL_MSK_PWR_SRC);
 }
@@ -640,7 +640,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
 	iwl_apm_init(priv(trans));
 
 	/* Set interrupt coalescing calibration timer to default (512 usecs) */
-	iwl_write8(bus(trans), CSR_INT_COALESCING,
+	iwl_write8(trans, CSR_INT_COALESCING,
 		   IWL_HOST_INT_CALIB_TIMEOUT_DEF);
 
 	spin_unlock_irqrestore(&trans->shrd->lock, flags);
@@ -660,7 +660,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
 
 	if (hw_params(trans).shadow_reg_enable) {
 		/* enable shadow regs in HW */
-		iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
+		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
 			    0x800FFFFF);
 	}
@@ -676,11 +676,11 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
 {
 	int ret;
 
-	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
 
 	/* See if we got it */
-	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
 			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
 			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
 			   HW_READY_TIMEOUT);
@@ -701,10 +701,10 @@ static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
 		return 0;
 
 	/* If HW is not ready, prepare the conditions to check again */
-	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 		    CSR_HW_IF_CONFIG_REG_PREPARE);
 
-	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
 			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
 			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
@@ -794,7 +794,7 @@ static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
 	}
 
 	/* If platform's RF_KILL switch is NOT set to KILL */
-	if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
+	if (iwl_read32(trans, CSR_GP_CNTRL) &
 			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
 		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
 	else
@@ -806,7 +806,7 @@ static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
 		return -ERFKILL;
 	}
 
-	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
+	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
 	ret = iwl_nic_init(trans);
 	if (ret) {
@@ -815,17 +815,17 @@ static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
 	}
 
 	/* make sure rfkill handshake bits are cleared */
-	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
 		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
 
 	/* clear (again), then enable host interrupts */
-	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
+	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 	iwl_enable_interrupts(trans);
 
 	/* really make sure rfkill handshake bits are cleared */
-	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
 
 	return 0;
 }
@@ -836,7 +836,7 @@ static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
  */
 static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
 {
-	iwl_write_prph(bus(trans), SCD_TXFACT, mask);
+	iwl_write_prph(trans, SCD_TXFACT, mask);
 }
 
 static void iwl_tx_start(struct iwl_trans *trans)
@@ -852,46 +852,46 @@ static void iwl_tx_start(struct iwl_trans *trans)
 	spin_lock_irqsave(&trans->shrd->lock, flags);
 
 	trans_pcie->scd_base_addr =
-		iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
+		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
 	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
 	/* reset conext data memory */
 	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
 		a += 4)
-		iwl_write_targ_mem(bus(trans), a, 0);
+		iwl_write_targ_mem(trans, a, 0);
 	/* reset tx status memory */
 	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
 		a += 4)
-		iwl_write_targ_mem(bus(trans), a, 0);
+		iwl_write_targ_mem(trans, a, 0);
 	for (; a < trans_pcie->scd_base_addr +
 	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
 	       a += 4)
-		iwl_write_targ_mem(bus(trans), a, 0);
+		iwl_write_targ_mem(trans, a, 0);
 
-	iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
+	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
 		       trans_pcie->scd_bc_tbls.dma >> 10);
 
 	/* Enable DMA channel */
 	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
-		iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
 				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
 				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
 
 	/* Update FH chicken bits */
-	reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
-	iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
+	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
 			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
 
-	iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
+	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
 		       SCD_QUEUECHAIN_SEL_ALL(trans));
-	iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);
+	iwl_write_prph(trans, SCD_AGGR_SEL, 0);
 
 	/* initiate the queues */
 	for (i = 0; i < hw_params(trans).max_txq_num; i++) {
-		iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
-		iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
-		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
+		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
+		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
 				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
-		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
 				SCD_CONTEXT_QUEUE_OFFSET(i) +
 				sizeof(u32),
 				((SCD_WIN_SIZE <<
@@ -902,7 +902,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
 			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
 	}
 
-	iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
+	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
 			IWL_MASK(0, hw_params(trans).max_txq_num));
 
 	/* Activate all Tx DMA/FIFO channels */
@@ -948,7 +948,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
 	spin_unlock_irqrestore(&trans->shrd->lock, flags);
 
 	/* Enable L1-Active */
-	iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
+	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
 			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 }
@@ -974,14 +974,14 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
 	/* Stop each Tx DMA channel, and wait for it to be idle */
 	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
-		iwl_write_direct32(bus(trans),
+		iwl_write_direct32(trans,
 				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
-		if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
+		if (iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
 				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
 				1000))
 			IWL_ERR(trans, "Failing on timeout while stopping"
 			    " DMA channel %d [0x%08x]", ch,
-			    iwl_read_direct32(bus(trans),
+			    iwl_read_direct32(trans,
 					      FH_TSSR_TX_STATUS_REG));
 	}
 	spin_unlock_irqrestore(&trans->shrd->lock, flags);
@@ -1024,13 +1024,13 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 		iwl_trans_rx_stop(trans);
 #endif
 		/* Power-down device's busmaster DMA clocks */
-		iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
+		iwl_write_prph(trans, APMG_CLK_DIS_REG,
 			       APMG_CLK_VAL_DMA_CLK_RQT);
 		udelay(5);
 	}
 
 	/* Make sure (redundant) we've released our request to stay awake */
-	iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
 			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 
 	/* Stop the device, and put it in low power state */
@@ -1048,7 +1048,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	tasklet_kill(&trans_pcie->irq_tasklet);
 
 	/* stop and reset the on-board processor */
-	iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
 }
 
 static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -1145,10 +1145,10 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
-	txcmd_phys = dma_map_single(bus(trans)->dev,
+	txcmd_phys = dma_map_single(trans->dev,
 				    &dev_cmd->hdr, firstlen,
 				    DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
+	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
 		return -1;
 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
 	dma_unmap_len_set(out_meta, len, firstlen);
@@ -1164,10 +1164,10 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	 * if any (802.11 null frames have no payload). */
 	secondlen = skb->len - hdr_len;
 	if (secondlen > 0) {
-		phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
+		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
 					   secondlen, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
-			dma_unmap_single(bus(trans)->dev,
+		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
+			dma_unmap_single(trans->dev,
 					 dma_unmap_addr(out_meta, mapping),
 					 dma_unmap_len(out_meta, len),
 					 DMA_BIDIRECTIONAL);
@@ -1185,7 +1185,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		offsetof(struct iwl_tx_cmd, scratch);
 
 	/* take back ownership of DMA buffer to enable update */
-	dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
+	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
 			DMA_BIDIRECTIONAL);
 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
@@ -1199,7 +1199,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
-	dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
+	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
 			DMA_BIDIRECTIONAL);
 
 	trace_iwlwifi_dev_tx(priv(trans),
@@ -1232,7 +1232,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
 {
 	/* Remove all resets to allow NIC to operate */
-	iwl_write32(bus(trans), CSR_RESET, 0);
+	iwl_write32(trans, CSR_RESET, 0);
 }
 
 static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
@@ -1355,7 +1355,7 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 		iwl_apm_stop(priv(trans));
 	} else {
 		iwl_disable_interrupts(trans);
-		iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
+		iwl_clear_bit(trans, CSR_GP_CNTRL,
 			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 	}
@@ -1368,7 +1368,7 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
 
 	iwl_enable_interrupts(trans);
 
-	if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
+	if (!(iwl_read32(trans, CSR_GP_CNTRL) &
 				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
 		hw_rfkill = true;
@@ -1464,9 +1464,9 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
 		IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
 			q->read_ptr, q->write_ptr);
 		IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
-			iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
+			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt))
 				& (TFD_QUEUE_SIZE_MAX - 1),
-			iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
+			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
 		return 1;
 	}
@@ -1520,7 +1520,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
 			pos += scnprintf(*buf + pos, bufsz - pos,
 				" %34s: 0X%08x\n",
 				get_fh_string(fh_tbl[i]),
-				iwl_read_direct32(bus(trans), fh_tbl[i]));
+				iwl_read_direct32(trans, fh_tbl[i]));
 		}
 		return pos;
 	}
@@ -1529,7 +1529,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
 	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
 		IWL_ERR(trans, " %34s: 0X%08x\n",
 			get_fh_string(fh_tbl[i]),
-			iwl_read_direct32(bus(trans), fh_tbl[i]));
+			iwl_read_direct32(trans, fh_tbl[i]));
 	}
 	return 0;
 }
@@ -1599,7 +1599,7 @@ void iwl_dump_csr(struct iwl_trans *trans)
 	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
 		IWL_ERR(trans, " %25s: 0X%08x\n",
 			get_csr_string(csr_tbl[i]),
-			iwl_read32(bus(trans), csr_tbl[i]));
+			iwl_read32(trans, csr_tbl[i]));
	}
 }
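Every hunk in this commit applies the same mechanical substitution: code that used to reach the PCI device through a bus abstraction (bus(trans) for register helpers, bus(trans)->dev for DMA calls) now goes through the transport object itself. Below is a minimal userspace sketch of the pointer-caching half of that pattern; the struct layouts are simplified stand-ins invented for illustration, not the real definitions from iwl-trans.h or iwl-bus.h, and the actual commit additionally changes the first parameter of the iwl_read*/iwl_write* helpers from the bus to the transport in the same spirit.

	#include <stdio.h>

	/* Simplified placeholders; the real structs carry far more state. */
	struct device { const char *name; };

	struct iwl_bus {
		struct device *dev;      /* the PCI device lives here */
	};

	struct iwl_trans {
		struct iwl_bus *bus;     /* old route to the device */
		struct device *dev;      /* new: cached once at init */
	};

	/* Before: every DMA call chased two pointers, trans -> bus -> dev. */
	static struct device *dev_via_bus(struct iwl_trans *trans)
	{
		return trans->bus->dev;  /* the old bus(trans)->dev idiom */
	}

	/* After: one direct load, no detour through the bus layer. */
	static struct device *dev_direct(struct iwl_trans *trans)
	{
		return trans->dev;
	}

	int main(void)
	{
		struct device pci_dev = { "0000:02:00.0" };
		struct iwl_bus bus = { &pci_dev };
		struct iwl_trans trans = { &bus, bus.dev }; /* cache at init */

		/* Both routes name the same device; the diff swaps the
		 * former for the latter at every call site. */
		printf("via bus: %s, direct: %s\n",
		       dev_via_bus(&trans)->name, dev_direct(&trans)->name);
		return 0;
	}

Besides saving a pointer dereference on every register and DMA access, caching dev on the transport loosens this file's coupling to the iwl_bus layer, which fits the direction of the surrounding transport-abstraction refactoring.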