Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie/trans.c')
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 302
1 file changed, 160 insertions, 142 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index b10e3633df1a..7f05fc56587a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -805,7 +805,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
 		(*first_ucode_section)++;
 	}
 
-	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+	for (i = *first_ucode_section; i < image->num_sec; i++) {
 		last_read_idx = i;
 
 		/*
@@ -868,19 +868,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 			      int cpu,
 			      int *first_ucode_section)
 {
-	int shift_param;
 	int i, ret = 0;
 	u32 last_read_idx = 0;
 
-	if (cpu == 1) {
-		shift_param = 0;
+	if (cpu == 1)
 		*first_ucode_section = 0;
-	} else {
-		shift_param = 16;
+	else
 		(*first_ucode_section)++;
-	}
 
-	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+	for (i = *first_ucode_section; i < image->num_sec; i++) {
 		last_read_idx = i;
 
 		/*
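The two hunks above replace the compile-time IWL_UCODE_SECTION_MAX bound with the section count carried by the firmware image itself (image->num_sec), so the loader only walks sections that actually exist in the loaded file. A minimal standalone sketch of that walk follows; the types and helpers (fw_img_sketch, SECTION_SEPARATOR, load_section) are illustrative stand-ins for the driver's real structures, not iwlwifi API:

    /* sketch.c - per-CPU ucode section walk, simplified */
    #include <stddef.h>

    #define SECTION_SEPARATOR NULL  /* stand-in for the CPU1/CPU2 separator entry */

    struct fw_img_sketch {
            int num_sec;            /* real section count, parsed from the fw file */
            const void **sec;       /* section payloads; NULL marks a separator */
    };

    static void load_section(const void *data)
    {
            (void)data;             /* the driver DMAs the section to the device here */
    }

    static void load_cpu_sections(const struct fw_img_sketch *image,
                                  int cpu, int *first_ucode_section)
    {
            int i;

            if (cpu == 1)
                    *first_ucode_section = 0;       /* CPU1 sections lead the image */
            else
                    (*first_ucode_section)++;       /* CPU2 sections follow the separator */

            /* bound by the image's real section count, not a fixed maximum */
            for (i = *first_ucode_section; i < image->num_sec; i++) {
                    if (image->sec[i] == SECTION_SEPARATOR)
                            break;                  /* next CPU's sections start after this */
                    load_section(image->sec[i]);
            }
            *first_ucode_section = i;
    }

With the per-image bound, the walk cannot run past the number of sections the firmware file actually declares, and it no longer depends on a driver-side maximum matching the file layout.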
@@ -1066,6 +1062,137 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
 					       &first_ucode_section);
 }
 
+static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
+{
+	bool hw_rfkill = iwl_is_rfkill_set(trans);
+
+	if (hw_rfkill)
+		set_bit(STATUS_RFKILL, &trans->status);
+	else
+		clear_bit(STATUS_RFKILL, &trans->status);
+
+	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+	return hw_rfkill;
+}
+
+struct iwl_causes_list {
+	u32 cause_num;
+	u32 mask_reg;
+	u8 addr;
+};
+
+static struct iwl_causes_list causes_list[] = {
+	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,  CSR_MSIX_FH_INT_MASK_AD, 0},
+	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,  CSR_MSIX_FH_INT_MASK_AD, 0x1},
+	{MSIX_FH_INT_CAUSES_S2D,          CSR_MSIX_FH_INT_MASK_AD, 0x3},
+	{MSIX_FH_INT_CAUSES_FH_ERR,       CSR_MSIX_FH_INT_MASK_AD, 0x5},
+	{MSIX_HW_INT_CAUSES_REG_ALIVE,    CSR_MSIX_HW_INT_MASK_AD, 0x10},
+	{MSIX_HW_INT_CAUSES_REG_WAKEUP,   CSR_MSIX_HW_INT_MASK_AD, 0x11},
+	{MSIX_HW_INT_CAUSES_REG_CT_KILL,  CSR_MSIX_HW_INT_MASK_AD, 0x16},
+	{MSIX_HW_INT_CAUSES_REG_RF_KILL,  CSR_MSIX_HW_INT_MASK_AD, 0x17},
+	{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
+	{MSIX_HW_INT_CAUSES_REG_SW_ERR,   CSR_MSIX_HW_INT_MASK_AD, 0x29},
+	{MSIX_HW_INT_CAUSES_REG_SCD,      CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+	{MSIX_HW_INT_CAUSES_REG_FH_TX,    CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+	{MSIX_HW_INT_CAUSES_REG_HW_ERR,   CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+	{MSIX_HW_INT_CAUSES_REG_HAP,      CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
+static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
+	int i;
+
+	/*
+	 * Access all non RX causes and map them to the default irq.
+	 * In case we are missing at least one interrupt vector,
+	 * the first interrupt vector will serve non-RX and FBQ causes.
+	 */
+	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
+		iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
+		iwl_clear_bit(trans, causes_list[i].mask_reg,
+			      causes_list[i].cause_num);
+	}
+}
+
+static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 offset =
+		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+	u32 val, idx;
+
+	/*
+	 * The first RX queue - fallback queue, which is designated for
+	 * management frame, command responses etc, is always mapped to the
+	 * first interrupt vector. The other RX queues are mapped to
+	 * the other (N - 2) interrupt vectors.
+	 */
+	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
+	for (idx = 1; idx < trans->num_rx_queues; idx++) {
+		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
+			   MSIX_FH_INT_CAUSES_Q(idx - offset));
+		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
+	}
+	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
+
+	val = MSIX_FH_INT_CAUSES_Q(0);
+	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
+	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
+
+	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
+}
+
+static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
+{
+	struct iwl_trans *trans = trans_pcie->trans;
+
+	if (!trans_pcie->msix_enabled) {
+		if (trans->cfg->mq_rx_supported &&
+		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+			iwl_write_prph(trans, UREG_CHICK,
+				       UREG_CHICK_MSI_ENABLE);
+		return;
+	}
+	/*
+	 * The IVAR table needs to be configured again after reset,
+	 * but if the device is disabled, we can't write to
+	 * prph.
+	 */
+	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+		iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
+
+	/*
+	 * Each cause from the causes list above and the RX causes is
+	 * represented as a byte in the IVAR table. The first nibble
+	 * represents the bound interrupt vector of the cause, the second
+	 * represents no auto clear for this cause. This will be set if its
+	 * interrupt vector is bound to serve other causes.
+	 */
+	iwl_pcie_map_rx_causes(trans);
+
+	iwl_pcie_map_non_rx_causes(trans);
+}
+
+static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
+{
+	struct iwl_trans *trans = trans_pcie->trans;
+
+	iwl_pcie_conf_msix_hw(trans_pcie);
+
+	if (!trans_pcie->msix_enabled)
+		return;
+
+	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
+	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
+	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+}
+
 static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
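The comment in iwl_pcie_conf_msix_hw above describes the IVAR layout: one byte per interrupt cause, where a nibble encodes the bound MSI-X vector and a separate flag marks the cause as non-auto-clear when its vector is shared with other causes. A small user-space sketch of that packing for the RX queues follows; the two constants are illustrative stand-ins (the real values live in the driver's CSR definitions) and the flag's exact bit position is an assumption:

    /* ivar_sketch.c - how one IVAR byte per RX queue might be packed */
    #include <stdio.h>

    /* Illustrative stand-ins, not the real iwl-csr.h values */
    #define MSIX_NON_AUTO_CLEAR_CAUSE (1u << 7)  /* assumed: "vector is shared" flag */
    #define MSIX_FH_INT_CAUSES_Q(q)   (q)        /* assumed: RX queue q -> vector q */

    int main(void)
    {
            unsigned num_rx_queues = 4;
            int shared_vec_non_rx = 1;  /* first vector also serves non-RX causes */
            unsigned offset = 0;        /* 1 when the first vector also does RSS */
            unsigned idx, val;

            /* Default queue (mgmt frames, command responses) -> vector 0,
             * flagged non-auto-clear because vector 0 serves other causes too. */
            val = MSIX_FH_INT_CAUSES_Q(0);
            if (shared_vec_non_rx)
                    val |= MSIX_NON_AUTO_CLEAR_CAUSE;
            printf("RX IVAR[0] = 0x%02x\n", val);

            /* Remaining RX queues spread across the other vectors */
            for (idx = 1; idx < num_rx_queues; idx++)
                    printf("RX IVAR[%u] = 0x%02x\n", idx,
                           (unsigned)MSIX_FH_INT_CAUSES_Q(idx - offset));
            return 0;
    }

Note how the hunk pairs each mapping with an unmask: after writing the per-queue IVAR entries, iwl_pcie_map_rx_causes writes ~val to CSR_MSIX_FH_INT_MASK_AD, enabling exactly the cause bits it just bound and no others.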
@@ -1119,6 +1246,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 	usleep_range(1000, 2000);
 
 	/*
+	 * Upon stop, the IVAR table gets erased, so msi-x won't
+	 * work. This causes a bug in RF-KILL flows, since the interrupt
+	 * that enables radio won't fire on the correct irq, and the
+	 * driver won't be able to handle the interrupt.
+	 * Configure the IVAR table again after reset.
+	 */
+	iwl_pcie_conf_msix_hw(trans_pcie);
+
+	/*
 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
 	 * This is a bug in certain versions of the hardware.
 	 * Certain devices also keep sending HW RF kill interrupt all
@@ -1208,12 +1344,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 	mutex_lock(&trans_pcie->mutex);
 
 	/* If platform's RF_KILL switch is NOT set to KILL */
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
 	if (hw_rfkill && !run_in_rfkill) {
 		ret = -ERFKILL;
 		goto out;
 	}
@@ -1261,13 +1392,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 	ret = iwl_pcie_load_given_ucode(trans, fw);
 
 	/* re-check RF-Kill state since we may have missed the interrupt */
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-
-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
 	if (hw_rfkill && !run_in_rfkill)
 		ret = -ERFKILL;
@@ -1347,6 +1472,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 				    enum iwl_d3_status *status,
 				    bool test, bool reset)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 val;
 	int ret;
 
@@ -1359,11 +1485,15 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 		iwl_pcie_enable_rx_wake(trans, true);
 
 	/*
-	 * Also enables interrupts - none will happen as the device doesn't
-	 * know we're waking it up, only when the opmode actually tells it
-	 * after this call.
+	 * Reconfigure IVAR table in case of MSIX or reset ict table in
+	 * MSI mode since HW reset erased it.
+	 * Also enables interrupts - none will happen as
+	 * the device doesn't know we're waking it up, only when
+	 * the opmode actually tells it after this call.
 	 */
-	iwl_pcie_reset_ict(trans);
+	iwl_pcie_conf_msix_hw(trans_pcie);
+	if (!trans_pcie->msix_enabled)
+		iwl_pcie_reset_ict(trans);
 	iwl_enable_interrupts(trans);
 
 	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1406,109 +1536,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 	return 0;
 }
 
-struct iwl_causes_list {
-	u32 cause_num;
-	u32 mask_reg;
-	u8 addr;
-};
-
-static struct iwl_causes_list causes_list[] = {
-	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,  CSR_MSIX_FH_INT_MASK_AD, 0},
-	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,  CSR_MSIX_FH_INT_MASK_AD, 0x1},
-	{MSIX_FH_INT_CAUSES_S2D,          CSR_MSIX_FH_INT_MASK_AD, 0x3},
-	{MSIX_FH_INT_CAUSES_FH_ERR,       CSR_MSIX_FH_INT_MASK_AD, 0x5},
-	{MSIX_HW_INT_CAUSES_REG_ALIVE,    CSR_MSIX_HW_INT_MASK_AD, 0x10},
-	{MSIX_HW_INT_CAUSES_REG_WAKEUP,   CSR_MSIX_HW_INT_MASK_AD, 0x11},
-	{MSIX_HW_INT_CAUSES_REG_CT_KILL,  CSR_MSIX_HW_INT_MASK_AD, 0x16},
-	{MSIX_HW_INT_CAUSES_REG_RF_KILL,  CSR_MSIX_HW_INT_MASK_AD, 0x17},
-	{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
-	{MSIX_HW_INT_CAUSES_REG_SW_ERR,   CSR_MSIX_HW_INT_MASK_AD, 0x29},
-	{MSIX_HW_INT_CAUSES_REG_SCD,      CSR_MSIX_HW_INT_MASK_AD, 0x2A},
-	{MSIX_HW_INT_CAUSES_REG_FH_TX,    CSR_MSIX_HW_INT_MASK_AD, 0x2B},
-	{MSIX_HW_INT_CAUSES_REG_HW_ERR,   CSR_MSIX_HW_INT_MASK_AD, 0x2D},
-	{MSIX_HW_INT_CAUSES_REG_HAP,      CSR_MSIX_HW_INT_MASK_AD, 0x2E},
-};
-
-static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
-	int i;
-
-	/*
-	 * Access all non RX causes and map them to the default irq.
-	 * In case we are missing at least one interrupt vector,
-	 * the first interrupt vector will serve non-RX and FBQ causes.
-	 */
-	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
-		iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
-		iwl_clear_bit(trans, causes_list[i].mask_reg,
-			      causes_list[i].cause_num);
-	}
-}
-
-static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u32 offset =
-		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
-	u32 val, idx;
-
-	/*
-	 * The first RX queue - fallback queue, which is designated for
-	 * management frame, command responses etc, is always mapped to the
-	 * first interrupt vector. The other RX queues are mapped to
-	 * the other (N - 2) interrupt vectors.
-	 */
-	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
-	for (idx = 1; idx < trans->num_rx_queues; idx++) {
-		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
-			   MSIX_FH_INT_CAUSES_Q(idx - offset));
-		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
-	}
-	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
-
-	val = MSIX_FH_INT_CAUSES_Q(0);
-	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
-		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
-	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
-
-	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
-		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
-}
-
-static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
-{
-	struct iwl_trans *trans = trans_pcie->trans;
-
-	if (!trans_pcie->msix_enabled) {
-		if (trans->cfg->mq_rx_supported)
-			iwl_write_prph(trans, UREG_CHICK,
-				       UREG_CHICK_MSI_ENABLE);
-		return;
-	}
-
-	iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
-
-	/*
-	 * Each cause from the causes list above and the RX causes is
-	 * represented as a byte in the IVAR table. The first nibble
-	 * represents the bound interrupt vector of the cause, the second
-	 * represents no auto clear for this cause. This will be set if its
-	 * interrupt vector is bound to serve other causes.
-	 */
-	iwl_pcie_map_rx_causes(trans);
-
-	iwl_pcie_map_non_rx_causes(trans);
-
-	trans_pcie->fh_init_mask =
-		~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
-	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
-	trans_pcie->hw_init_mask =
-		~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
-	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
-}
-
 static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
 					struct iwl_trans *trans)
 {
@@ -1659,7 +1686,6 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	bool hw_rfkill;
 	int err;
 
 	lockdep_assert_held(&trans_pcie->mutex);
@@ -1677,19 +1703,15 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 	iwl_pcie_apm_init(trans);
 
 	iwl_pcie_init_msix(trans_pcie);
+
 	/* From now on, the op_mode will be kept updated about RF kill state */
 	iwl_enable_rfkill_int(trans);
 
 	/* Set is_down to false here so that...*/
 	trans_pcie->is_down = false;
 
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-	/* ... rfkill can call stop_device and set it false if needed */
-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	/* ...rfkill can call stop_device and set it false if needed */
+	iwl_trans_check_hw_rf_kill(trans);
 
 	/* Make sure we sync here, because we'll need full access later */
 	if (low_power)
@@ -2960,16 +2982,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 					       PCIE_LINK_STATE_CLKPM);
 	}
 
-	if (cfg->mq_rx_supported)
-		addr_size = 64;
-	else
-		addr_size = 36;
-
 	if (cfg->use_tfh) {
+		addr_size = 64;
 		trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
 		trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
 	} else {
+		addr_size = 36;
 		trans_pcie->max_tbs = IWL_NUM_OF_TBS;
 		trans_pcie->tfd_size = sizeof(struct iwl_tfd);
 	}
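The last hunk ties addr_size to the TFD format rather than to mq_rx_supported: extended TFDs (use_tfh) carry 64-bit DMA addresses, while legacy TFDs are limited to 36 bits. A sketch of how such a width would feed the PCI DMA mask follows; set_trans_dma_mask is an illustrative helper, not the driver's, and the 32-bit fallback is the common PCI-driver pattern rather than code quoted from this file:

    /* dma_mask_sketch.c - choosing the DMA mask from the TFD format
     * (illustrative; the real selection sits in iwl_trans_pcie_alloc) */
    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int set_trans_dma_mask(struct pci_dev *pdev, bool use_tfh)
    {
            /* extended TFDs address 64 bits, legacy TFDs 36 bits */
            int addr_size = use_tfh ? 64 : 36;
            int ret;

            ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
            if (ret)
                    /* common fallback when the wider mask is refused */
                    ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            return ret;
    }

Keying the mask to the TFD format is the safer invariant: it is the descriptor layout, not the RX-queue scheme, that determines how many address bits the device can actually consume.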