Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm')
29 files changed, 1748 insertions, 648 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c index 7cb68f6ed1b0..2e0ed080457f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -82,6 +84,19 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt; int i, ret; u32 status; + int size; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) { + size = sizeof(cmd); + if (phyctxt->channel->band == NL80211_BAND_2GHZ || + !iwl_mvm_is_cdb_supported(mvm)) + cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX); + else + cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX); + } else { + size = IWL_BINDING_CMD_SIZE_V1; + } memset(&cmd, 0, sizeof(cmd)); @@ -99,7 +114,7 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, - sizeof(cmd), &cmd, &status); + size, &cmd, &status); if (ret) { IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n", action, ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 5bdb6c2c8390..49b4418e6c35 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -756,7 +756,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * Rssi update while not associated - can happen since the statistics * are handled asynchronously */ - if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) + if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) return; /* No BT - reports should be disabled */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index c7eb1983c4f9..119a3bd92c50 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -665,6 +665,19 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_binding_cmd binding_cmd = {}; struct iwl_time_quota_cmd quota_cmd = {}; u32 status; + int size; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) { + size = sizeof(binding_cmd); + if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ || + !iwl_mvm_is_cdb_supported(mvm)) + binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX); + else + binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX); + } else { + size = IWL_BINDING_CMD_SIZE_V1; + } /* add back the PHY */ if (WARN_ON(!mvmvif->phy_ctxt)) @@ -711,8 +724,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, - sizeof(binding_cmd), &binding_cmd, - &status); + size, &binding_cmd, &status); if (ret) { IWL_ERR(mvm, "Failed to add binding: %d\n", ret); return ret; @@ -986,7 +998,9 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, goto out; } - if 
(key_data.use_tkip) { + if (key_data.use_tkip && + !fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) { ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TKIP_PARAM, cmd_flags, sizeof(tkip_cmd), @@ -1194,7 +1208,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) { + if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { /* if we're not associated, this must be netdetect */ if (!wowlan->nd_config) { ret = 1; @@ -2102,6 +2116,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) */ iwl_mvm_update_changed_regdom(mvm); + if (!unified_image) + /* Re-configure default SAR profile */ + iwl_mvm_sar_select_profile(mvm, 1, 1); + if (mvm->net_detect) { /* If this is a non-unified image, we restart the FW, * so no need to stop the netdetect scan. If that diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index f4d75ffe3d8a..5d475b4850ae 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -280,7 +280,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file, mvmvif->queue_params[i].uapsd); if (vif->type == NL80211_IFTYPE_STATION && - ap_sta_id != IWL_MVM_STATION_COUNT) { + ap_sta_id != IWL_MVM_INVALID_STA) { struct iwl_mvm_sta *mvm_sta; mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 077bfd8f4c0c..402846650cbe 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -330,7 +330,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h index 480a54af4534..d3cdd889c85c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h @@ -73,7 +73,9 @@ #define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1) #define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) -#define IWL_MVM_STATION_COUNT 16 +#define IWL_MVM_STATION_COUNT 16 +#define IWL_MVM_INVALID_STA 0xFF + #define IWL_MVM_TDLS_STA_COUNT 4 enum iwl_ac { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h index 3fa43d1348a2..750510aff70b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -351,6 +351,45 @@ struct iwl_dev_tx_power_cmd { u8 reserved[3]; } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ +#define IWL_NUM_GEO_PROFILES 3 + +/** + * enum iwl_geo_per_chain_offset_operation - type of operation + * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW. + * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table. + */ +enum iwl_geo_per_chain_offset_operation { + IWL_PER_CHAIN_OFFSET_SET_TABLES, + IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE, +}; /* GEO_TX_POWER_LIMIT FLAGS TYPE */ + +/** + * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT. + * @max_tx_power: maximum allowed tx power. + * @chain_a: tx power offset for chain a. + * @chain_b: tx power offset for chain b. + */ +struct iwl_per_chain_offset { + __le16 max_tx_power; + u8 chain_a; + u8 chain_b; +} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */ + +struct iwl_per_chain_offset_group { + struct iwl_per_chain_offset lb; + struct iwl_per_chain_offset hb; +} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */ + +/** + * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd. + * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation + * @table: offset profile per band. + */ +struct iwl_geo_tx_power_profiles_cmd { + __le32 ops; + struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; +} __packed; /* GEO_TX_POWER_LIMIT */ + /** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h index c78a0c499459..3178eb96e395 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h @@ -516,7 +516,7 @@ struct iwl_scan_dwell { * scan_config_channel_flag * @channel_array: default supported channels */ -struct iwl_scan_config { +struct iwl_scan_config_v1 { __le32 flags; __le32 tx_chains; __le32 rx_chains; @@ -532,7 +532,7 @@ struct iwl_scan_config { #define SCAN_TWO_LMACS 2 -struct iwl_scan_config_cdb { +struct iwl_scan_config { __le32 flags; __le32 tx_chains; __le32 rx_chains; @@ -669,7 +669,7 @@ struct iwl_scan_req_umac { u8 n_channels; __le16 reserved; u8 data[]; - } no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ + } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ struct { __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; @@ -679,13 +679,13 @@ struct iwl_scan_req_umac { u8 n_channels; __le16 reserved; u8 data[]; - } cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */ + } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ }; } __packed; -#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac) -#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(__le32)) +#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ + 2 * sizeof(__le32)) /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index 3b5150e9975d..e79df1c53d68 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -7,7 
+7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -179,7 +179,7 @@ enum iwl_sta_key_flag { * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx - * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_trigger_acs + * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count @@ -351,10 +351,12 @@ struct iwl_mvm_add_sta_cmd_v7 { * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP * mac-addr. * @beamform_flags: beam forming controls - * @tfd_queue_msk: tfd queues used by this station + * @tfd_queue_msk: tfd queues used by this station. + * Obselete for new TX API (9 and above). * @rx_ba_window: aggregation window size - * @scd_queue_bank: queue bank in used. Each bank contains 32 queues. 0 means - * that the queues used by this station are in the first 32. + * @sp_length: the size of the SP as it appears in the WME IE + * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver + * enabled ACs. 
* * The device contains an internal table of per-station information, with info * on security keys, aggregation parameters, and Tx rates for initial Tx @@ -384,33 +386,55 @@ struct iwl_mvm_add_sta_cmd { __le16 beamform_flags; __le32 tfd_queue_msk; __le16 rx_ba_window; - u8 scd_queue_bank; - u8 uapsd_trigger_acs; -} __packed; /* ADD_STA_CMD_API_S_VER_8 */ + u8 sp_length; + u8 uapsd_acs; +} __packed; /* ADD_STA_CMD_API_S_VER_9 */ /** - * struct iwl_mvm_add_sta_key_cmd - add/modify sta key + * struct iwl_mvm_add_sta_key_common - add/modify sta key common part * ( REPLY_ADD_STA_KEY = 0x17 ) * @sta_id: index of station in uCode's station table * @key_offset: key offset in key storage * @key_flags: type %iwl_sta_key_flag * @key: key material data * @rx_secur_seq_cnt: RX security sequence counter for the key - * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection - * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx */ -struct iwl_mvm_add_sta_key_cmd { +struct iwl_mvm_add_sta_key_common { u8 sta_id; u8 key_offset; __le16 key_flags; u8 key[32]; u8 rx_secur_seq_cnt[16]; +} __packed; + +/** + * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key + * @common: see &struct iwl_mvm_add_sta_key_common + * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection + * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx + */ +struct iwl_mvm_add_sta_key_cmd_v1 { + struct iwl_mvm_add_sta_key_common common; u8 tkip_rx_tsc_byte2; u8 reserved; __le16 tkip_rx_ttak[5]; } __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */ /** + * struct iwl_mvm_add_sta_key_cmd - add/modify sta key + * @common: see &struct iwl_mvm_add_sta_key_common + * @rx_mic_key: TKIP RX unicast or multicast key + * @tx_mic_key: TKIP TX key + * @transmit_seq_cnt: TSC, transmit packet number + */ +struct iwl_mvm_add_sta_key_cmd { + struct iwl_mvm_add_sta_key_common common; + __le64 rx_mic_key; + __le64 tx_mic_key; + __le64 transmit_seq_cnt; +} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */ + +/** * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command * @ADD_STA_SUCCESS: operation was executed successfully * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index b38cc073adcc..81b98915b1a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -124,6 +125,20 @@ enum iwl_tx_flags { }; /* TX_FLAGS_BITS_API_S_VER_1 */ /** + * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000 + * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command + * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs + * to a secured STA + * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate + * selection, retry limits and BT kill + */ +enum iwl_tx_cmd_flags { + IWL_TX_FLAGS_CMD_RATE = BIT(0), + IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1), + IWL_TX_FLAGS_HIGH_PRI = BIT(2), +}; /* TX_FLAGS_BITS_API_S_VER_3 */ + +/** * enum iwl_tx_pm_timeouts - pm timeout values in TX command * @PM_FRAME_NONE: no need to suspend sleep mode * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU @@ -159,7 +174,7 @@ enum iwl_tx_cmd_sec_ctrl { TX_CMD_SEC_EXT = 0x04, TX_CMD_SEC_GCMP = 0x05, TX_CMD_SEC_KEY128 = 0x08, - TX_CMD_SEC_KEY_FROM_TABLE = 0x08, + TX_CMD_SEC_KEY_FROM_TABLE = 0x10, }; /* TODO: how does these values are OK with only 16 bit variable??? */ @@ -301,6 +316,31 @@ struct iwl_tx_cmd { struct ieee80211_hdr hdr[0]; } __packed; /* TX_CMD_API_S_VER_6 */ +struct iwl_dram_sec_info { + __le32 pn_low; + __le16 pn_high; + __le16 aux_info; +} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */ + +/** + * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices + * ( TX_CMD = 0x1c ) + * @len: in bytes of the payload, see below for details + * @offload_assist: TX offload configuration + * @tx_flags: combination of &iwl_tx_cmd_flags + * @dram_info: FW internal DRAM storage + * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is + * cleared. Combination of RATE_MCS_* + */ +struct iwl_tx_cmd_gen2 { + __le16 len; + __le16 offload_assist; + __le32 flags; + struct iwl_dram_sec_info dram_info; + __le32 rate_n_flags; + struct ieee80211_hdr hdr[0]; +} __packed; /* TX_CMD_API_S_VER_7 */ + /* * TX response related data */ @@ -508,9 +548,11 @@ struct agg_tx_status { * @tlc_info: TLC rate info * @ra_tid: bits [3:0] = ra, bits [7:4] = tid * @frame_ctrl: frame control + * @tx_queue: TX queue for this response * @status: for non-agg: frame status TX_STATUS_* * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields * follow this one, up to frame_count. + * For version 6 TX response isn't received for aggregation at all. * * After the array of statuses comes the SSN of the SCD. Look at * %iwl_mvm_get_scd_ssn for more details. 
@@ -537,9 +579,17 @@ struct iwl_mvm_tx_resp { u8 tlc_info; u8 ra_tid; __le16 frame_ctrl; - - struct agg_tx_status status; -} __packed; /* TX_RSP_API_S_VER_3 */ + union { + struct { + struct agg_tx_status status; + } v3;/* TX_RSP_API_S_VER_3 */ + struct { + __le16 tx_queue; + __le16 reserved2; + struct agg_tx_status status; + } v6; + }; +} __packed; /* TX_RSP_API_S_VER_6 */ /** * struct iwl_mvm_ba_notif - notifies about reception of BA @@ -579,11 +629,14 @@ struct iwl_mvm_ba_notif { * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue * @q_num: TFD queue number * @tfd_index: Index of first un-acked frame in the TFD queue + * @scd_queue: For debug only - the physical queue the TFD queue is bound to */ struct iwl_mvm_compressed_ba_tfd { - u8 q_num; - u8 reserved; + __le16 q_num; __le16 tfd_index; + u8 scd_queue; + u8 reserved; + __le16 reserved2; } __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */ /** @@ -635,6 +688,10 @@ enum iwl_mvm_ba_resp_flags { * @tx_rate: the rate the aggregation was sent at * @tfd_cnt: number of TFD-Q elements * @ra_tid_cnt: number of RATID-Q elements + * @ba_tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd + * for details. + * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See + * &iwl_mvm_compressed_ba_ratid for more details. */ struct iwl_mvm_compressed_ba_notif { __le32 flags; @@ -646,6 +703,7 @@ struct iwl_mvm_compressed_ba_notif { __le16 query_frame_cnt; __le16 txed; __le16 done; + __le16 reserved; __le32 wireless_time; __le32 tx_rate; __le16 tfd_cnt; @@ -754,25 +812,6 @@ struct iwl_tx_path_flush_cmd { __le16 reserved; } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */ -/** - * iwl_mvm_get_scd_ssn - returns the SSN of the SCD - * @tx_resp: the Tx response from the fw (agg or non-agg) - * - * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since - * it can't know that everything will go well until the end of the AMPDU, it - * can't know in advance the number of MPDUs that will be sent in the current - * batch. This is why it writes the agg Tx response while it fetches the MPDUs. - * Hence, it can't know in advance what the SSN of the SCD will be at the end - * of the batch. This is why the SSN of the SCD is written at the end of the - * whole struct at a variable offset. This function knows how to cope with the - * variable offset and returns the SSN of the SCD. - */ -static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp) -{ - return le32_to_cpup((__le32 *)&tx_resp->status + - tx_resp->frame_count) & 0xfff; -} - /* Available options for the SCD_QUEUE_CFG HCMD */ enum iwl_scd_cfg_actions { SCD_CFG_DISABLE_QUEUE = 0x0, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index cf2b836f3888..f545c5f9e4e3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -320,12 +320,14 @@ enum iwl_phy_ops_subcmd_ids { CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, CTDP_CONFIG_CMD = 0x03, TEMP_REPORTING_THRESHOLDS_CMD = 0x04, + GEO_TX_POWER_LIMIT = 0x05, CT_KILL_NOTIFICATION = 0xFE, DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; enum iwl_system_subcmd_ids { SHARED_MEM_CFG_CMD = 0x0, + INIT_EXTENDED_CFG_CMD = 0x03, }; enum iwl_data_path_subcmd_ids { @@ -345,9 +347,10 @@ enum iwl_regulatory_and_nvm_subcmd_ids { NVM_ACCESS_COMPLETE = 0x0, }; -enum iwl_fmac_debug_cmds { +enum iwl_debug_cmds { LMAC_RD_WR = 0x0, UMAC_RD_WR = 0x1, + MFU_ASSERT_DUMP_NTF = 0xFE, }; /* command groups */ @@ -673,10 +676,8 @@ struct iwl_error_resp { /* Common PHY, MAC and Bindings definitions */ - #define MAX_MACS_IN_BINDING (3) #define MAX_BINDINGS (4) -#define AUX_BINDING_INDEX (3) /* Used to extract ID and color from the context dword */ #define FW_CTXT_ID_POS (0) @@ -689,7 +690,7 @@ struct iwl_error_resp { (_color << FW_CTXT_COLOR_POS)) /* Possible actions on PHYs, MACs and Bindings */ -enum { +enum iwl_phy_ctxt_action { FW_CTXT_ACTION_STUB = 0, FW_CTXT_ACTION_ADD, FW_CTXT_ACTION_MODIFY, @@ -960,6 +961,7 @@ struct iwl_time_event_notif { * @action: action to perform, one of FW_CTXT_ACTION_* * @macs: array of MAC id and colors which belong to the binding * @phy: PHY id and color which belongs to the binding + * @lmac_id: the lmac id the binding belongs to */ struct iwl_binding_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ @@ -968,7 +970,13 @@ struct iwl_binding_cmd { /* BINDING_DATA_API_S_VER_1 */ __le32 macs[MAX_MACS_IN_BINDING]; __le32 phy; -} __packed; /* BINDING_CMD_API_S_VER_1 */ + /* BINDING_CMD_API_S_VER_1 */ + __le32 lmac_id; +} __packed; /* BINDING_CMD_API_S_VER_2 */ + +#define IWL_BINDING_CMD_SIZE_V1 offsetof(struct iwl_binding_cmd, lmac_id) +#define IWL_LMAC_24G_INDEX 0 +#define IWL_LMAC_5G_INDEX 1 /* The maximal number of fragments in the FW's schedule session */ #define IWL_MVM_MAX_QUOTA 128 @@ -990,6 +998,9 @@ struct iwl_time_quota_data { * struct iwl_time_quota_cmd - configuration of time quota between bindings * ( TIME_QUOTA_CMD = 0x2c ) * @quotas: allocations per binding + * Note: on non-CDB the fourth one is the auxilary mac and is + * essentially zero. + * On CDB the fourth one is a regular binding. */ struct iwl_time_quota_cmd { struct iwl_time_quota_data quotas[MAX_BINDINGS]; @@ -1231,6 +1242,25 @@ struct iwl_mfuart_load_notif { } __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/ /** + * struct iwl_mfu_assert_dump_notif - mfuart dump logs + * ( MFU_ASSERT_DUMP_NTF = 0xfe ) + * @assert_id: mfuart assert id that cause the notif + * @curr_reset_num: number of asserts since uptime + * @index_num: current chunk id + * @parts_num: total number of chunks + * @data_size: number of data bytes sent + * @data: data buffer + */ +struct iwl_mfu_assert_dump_notif { + __le32 assert_id; + __le32 curr_reset_num; + __le16 index_num; + __le16 parts_num; + __le32 data_size; + __le32 data[0]; +} __packed; /*MFU_DUMP_ASSERT_API_S_VER_1*/ + +/** * struct iwl_set_calib_default_cmd - set default value for calibration. 
* ( SET_CALIB_DEFAULT_CMD = 0x8e ) * @calib_index: the calibration to set value for @@ -1998,19 +2028,48 @@ struct iwl_shared_mem_cfg_v1 { __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ +/** + * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration + * + * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) + * @txfifo_size: size of TX FIFOs + * @rxfifo1_addr: RXF1 addr + * @rxfifo1_size: RXF1 size + */ +struct iwl_shared_mem_lmac_cfg { + __le32 txfifo_addr; + __le32 txfifo_size[TX_FIFO_MAX_NUM]; + __le32 rxfifo1_addr; + __le32 rxfifo1_size; + +} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ + +/** + * Shared memory configuration information from the FW + * + * @shared_mem_addr: shared memory address + * @shared_mem_size: shared memory size + * @sample_buff_addr: internal sample (mon/adc) buff addr + * @sample_buff_size: internal sample buff size + * @rxfifo2_addr: start addr of RXF2 + * @rxfifo2_size: size of RXF2 + * @page_buff_addr: used by UMAC and performance debug (page miss analysis), + * when paging is not supported this should be 0 + * @page_buff_size: size of %page_buff_addr + * @lmac_num: number of LMACs (1 or 2) + * @lmac_smem: per - LMAC smem data + */ struct iwl_shared_mem_cfg { __le32 shared_mem_addr; __le32 shared_mem_size; __le32 sample_buff_addr; __le32 sample_buff_size; - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM]; - __le32 rxfifo_size[RX_FIFO_MAX_NUM]; + __le32 rxfifo2_addr; + __le32 rxfifo2_size; __le32 page_buff_addr; __le32 page_buff_size; - __le32 rxfifo_addr; - __le32 internal_txfifo_addr; - __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; + __le32 lmac_num; + struct iwl_shared_mem_lmac_cfg lmac_smem[2]; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ /** @@ -2178,4 +2237,26 @@ struct iwl_nvm_access_complete_cmd { __le32 reserved; } __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ +/** + * enum iwl_extended_cfg_flag - commands driver may send before + * finishing init flow + * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command + * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands + * @IWL_INIT_PHY: driver is going to send PHY_DB commands + */ +enum iwl_extended_cfg_flags { + IWL_INIT_DEBUG_CFG, + IWL_INIT_NVM, + IWL_INIT_PHY, +}; + +/** + * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for + * before finishing init flows + * @init_flags: values from iwl_extended_cfg_flags + */ +struct iwl_init_extended_cfg_cmd { + __le32 init_flags; +} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index a027b11bbdb3..7b86a4f1b574 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -99,10 +99,120 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm, iwl_trans_release_nic_access(mvm->trans, &flags); } +static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm, + struct iwl_fw_error_dump_data **dump_data, + int size, u32 offset, int fifo_num) +{ + struct iwl_fw_error_dump_fifo *fifo_hdr; + u32 *fifo_data; + u32 fifo_len; + int i; + + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = size; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + return; + + /* Add a TLV for the RXF */ + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); + (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(fifo_num); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_D_SPACE + offset)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_WR_PTR + offset)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_RD_PTR + offset)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_FENCE_PTR + offset)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_SET_FENCE_MODE + offset)); + + /* Lock fence */ + iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1); + /* Set fence pointer to the same place like WR pointer */ + iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1); + /* Set fence offset */ + iwl_trans_write_prph(mvm->trans, + RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (i = 0; i < fifo_len; i++) + fifo_data[i] = iwl_trans_read_prph(mvm->trans, + RXF_FIFO_RD_FENCE_INC + + offset); + *dump_data = iwl_fw_error_next_data(*dump_data); +} + +static void iwl_mvm_dump_txf(struct iwl_mvm *mvm, + struct iwl_fw_error_dump_data **dump_data, + int size, u32 offset, int fifo_num) +{ + struct iwl_fw_error_dump_fifo *fifo_hdr; + u32 *fifo_data; + u32 fifo_len; + int i; + + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = size; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + return; + + /* Add a TLV for the FIFO */ + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); + (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(fifo_num); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_FIFO_ITEM_CNT + offset)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_WR_PTR + offset)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_RD_PTR + offset)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_FENCE_PTR + offset)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_LOCK_FENCE + offset)); + + /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ + iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset, + TXF_WR_PTR + offset); + + /* Dummy-read to advance the read pointer to the head */ + iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (i = 0; i < fifo_len; i++) + 
fifo_data[i] = iwl_trans_read_prph(mvm->trans, + TXF_READ_MODIFY_DATA + + offset); + *dump_data = iwl_fw_error_next_data(*dump_data); +} + static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fw_error_dump_fifo *fifo_hdr; + struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg; u32 *fifo_data; u32 fifo_len; unsigned long flags; @@ -111,126 +221,47 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, if (!iwl_trans_grab_nic_access(mvm->trans, &flags)) return; - /* Pull RXF data from all RXFs */ - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) { - /* - * Keep aside the additional offset that might be needed for - * next RXF - */ - u32 offset_diff = RXF_DIFF_FROM_PREV * i; - - fifo_hdr = (void *)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->shared_mem_cfg.rxfifo_size[i]; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - continue; - - /* Add a TLV for the RXF */ - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); - (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(i); - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_D_SPACE + - offset_diff)); - fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_WR_PTR + - offset_diff)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_RD_PTR + - offset_diff)); - fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_FENCE_PTR + - offset_diff)); - fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_SET_FENCE_MODE + - offset_diff)); - - /* Lock fence */ - iwl_trans_write_prph(mvm->trans, - RXF_SET_FENCE_MODE + offset_diff, 0x1); - /* Set fence pointer to the same place like WR pointer */ - iwl_trans_write_prph(mvm->trans, - RXF_LD_WR2FENCE + offset_diff, 0x1); - /* Set fence offset */ - iwl_trans_write_prph(mvm->trans, - RXF_LD_FENCE_OFFSET_ADDR + offset_diff, - 0x0); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (j = 0; j < fifo_len; j++) - fifo_data[j] = iwl_trans_read_prph(mvm->trans, - RXF_FIFO_RD_FENCE_INC + - offset_diff); - *dump_data = iwl_fw_error_next_data(*dump_data); - } - - /* Pull TXF data from all TXFs */ - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) { + /* Pull RXF1 */ + iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); + /* Pull RXF2 */ + iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size, + RXF_DIFF_FROM_PREV, 1); + /* Pull LMAC2 RXF1 */ + if (mvm->smem_cfg.num_lmacs > 1) + iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size, + LMAC2_PRPH_OFFSET, 2); + + /* Pull TXF data from LMAC1 */ + for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i); + iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i], + 0, i); + } - fifo_hdr = (void *)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->shared_mem_cfg.txfifo_size[i]; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - continue; - - /* Add a TLV for the FIFO */ - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); - (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(i); - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_FIFO_ITEM_CNT)); - fifo_hdr->wr_ptr = - 
cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_WR_PTR)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_RD_PTR)); - fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_FENCE_PTR)); - fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_LOCK_FENCE)); - - /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ - iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR, - TXF_WR_PTR); - - /* Dummy-read to advance the read pointer to the head */ - iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (j = 0; j < fifo_len; j++) - fifo_data[j] = iwl_trans_read_prph(mvm->trans, - TXF_READ_MODIFY_DATA); - *dump_data = iwl_fw_error_next_data(*dump_data); + /* Pull TXF data from LMAC2 */ + if (mvm->smem_cfg.num_lmacs > 1) { + for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) { + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(mvm->trans, + TXF_LARC_NUM + LMAC2_PRPH_OFFSET, + i); + iwl_mvm_dump_txf(mvm, dump_data, + cfg->lmac[1].txfifo_size[i], + LMAC2_PRPH_OFFSET, + i + cfg->num_txfifo_entries); + } } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; - i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); + i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size); i++) { fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i]; + fifo_len = mvm->smem_cfg.internal_txfifo_size[i]; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) @@ -246,7 +277,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i + - ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size)); + mvm->smem_cfg.num_txfifo_entries); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(mvm->trans, @@ -553,31 +584,45 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg; + struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg; fifo_data_len = 0; - /* Count RXF size */ - for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) { - if (!mem_cfg->rxfifo_size[i]) - continue; - + /* Count RXF2 size */ + if (mem_cfg->rxfifo2_size) { /* Add header info */ - fifo_data_len += mem_cfg->rxfifo_size[i] + + fifo_data_len += mem_cfg->rxfifo2_size + sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_fifo); } - for (i = 0; i < mem_cfg->num_txfifo_entries; i++) { - if (!mem_cfg->txfifo_size[i]) + /* Count RXF1 sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + if (!mem_cfg->lmac[i].rxfifo1_size) continue; /* Add header info */ - fifo_data_len += mem_cfg->txfifo_size[i] + + fifo_data_len += mem_cfg->lmac[i].rxfifo1_size + sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_fifo); } + /* Count TXF sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + int j; + + for (j = 0; j < mem_cfg->num_txfifo_entries; j++) { + if (!mem_cfg->lmac[i].txfifo_size[j]) + continue; + + /* Add header info */ + fifo_data_len += + mem_cfg->lmac[i].txfifo_size[j] + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + } + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { for (i = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 45cb4f476e76..900f1e25b9da 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -271,6 +272,27 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) return 0; } +void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; + __le32 *dump_data = mfu_dump_notif->data; + int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); + int i; + + if (mfu_dump_notif->index_num == 0) + IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", + le32_to_cpu(mfu_dump_notif->assert_id)); + + for (i = 0; i < n_words; i++) + IWL_DEBUG_INFO(mvm, + "MFUART assert dump, dword %u: 0x%08x\n", + le16_to_cpu(mfu_dump_notif->index_num) * + n_words + i, + le32_to_cpu(dump_data[i])); +} + static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) { @@ -617,11 +639,18 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, MVM_UCODE_ALIVE_TIMEOUT); if (ret) { - if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + struct iwl_trans *trans = mvm->trans; + + if (trans->cfg->gen2) + IWL_ERR(mvm, + "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", + iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS), + iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS)); + else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", - iwl_read_prph(mvm->trans, SB_CPU_1_STATUS), - iwl_read_prph(mvm->trans, SB_CPU_2_STATUS)); + iwl_read_prph(trans, SB_CPU_1_STATUS), + iwl_read_prph(trans, SB_CPU_2_STATUS)); mvm->cur_ucode = old_type; return ret; } @@ -807,6 +836,9 @@ int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) { struct iwl_notification_wait init_wait; struct iwl_nvm_access_complete_cmd nvm_complete = {}; + struct iwl_init_extended_cfg_cmd init_cfg = { + .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)), + }; static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, }; @@ -828,10 +860,14 @@ int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) goto error; } - /* TODO: remove when integrating context info */ - ret = iwl_mvm_init_paging(mvm); + /* Send init config command to mark that we are sending NVM access + * commands + */ + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP, + INIT_EXTENDED_CFG_CMD), 0, + sizeof(init_cfg), &init_cfg); if (ret) { - IWL_ERR(mvm, "Failed to init paging: %d\n", + IWL_ERR(mvm, "Failed to run init config command: %d\n", ret); goto error; } @@ -876,24 +912,27 @@ static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_shared_mem_cfg 
*mem_cfg = (void *)pkt->data; - int i; + int i, lmac; + int lmac_num = le32_to_cpu(mem_cfg->lmac_num); - mvm->shared_mem_cfg.num_txfifo_entries = - ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); - for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) - mvm->shared_mem_cfg.txfifo_size[i] = - le32_to_cpu(mem_cfg->txfifo_size[i]); - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) - mvm->shared_mem_cfg.rxfifo_size[i] = - le32_to_cpu(mem_cfg->rxfifo_size[i]); + if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) + return; - BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) != - sizeof(mem_cfg->internal_txfifo_size)); + mvm->smem_cfg.num_lmacs = lmac_num; + mvm->smem_cfg.num_txfifo_entries = + ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size); + mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size); - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); - i++) - mvm->shared_mem_cfg.internal_txfifo_size[i] = - le32_to_cpu(mem_cfg->internal_txfifo_size[i]); + for (lmac = 0; lmac < lmac_num; lmac++) { + struct iwl_shared_mem_lmac_cfg *lmac_cfg = + &mem_cfg->lmac_smem[lmac]; + + for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) + mvm->smem_cfg.lmac[lmac].txfifo_size[i] = + le32_to_cpu(lmac_cfg->txfifo_size[i]); + mvm->smem_cfg.lmac[lmac].rxfifo1_size = + le32_to_cpu(lmac_cfg->rxfifo1_size); + } } static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm, @@ -902,25 +941,27 @@ static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm, struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data; int i; - mvm->shared_mem_cfg.num_txfifo_entries = - ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); + mvm->smem_cfg.num_lmacs = 1; + + mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size); for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) - mvm->shared_mem_cfg.txfifo_size[i] = + mvm->smem_cfg.lmac[0].txfifo_size[i] = le32_to_cpu(mem_cfg->txfifo_size[i]); - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) - mvm->shared_mem_cfg.rxfifo_size[i] = - le32_to_cpu(mem_cfg->rxfifo_size[i]); + + mvm->smem_cfg.lmac[0].rxfifo1_size = + le32_to_cpu(mem_cfg->rxfifo_size[0]); + mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]); /* new API has more data, from rxfifo_addr field and on */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { - BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) != + BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) != sizeof(mem_cfg->internal_txfifo_size)); for (i = 0; - i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); + i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size); i++) - mvm->shared_mem_cfg.internal_txfifo_size[i] = + mvm->smem_cfg.internal_txfifo_size[i] = le32_to_cpu(mem_cfg->internal_txfifo_size[i]); } } @@ -969,85 +1010,94 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) sizeof(cmd), &cmd); } -#define ACPI_WRDS_METHOD "WRDS" -#define ACPI_WRDS_WIFI (0x07) -#define ACPI_WRDS_TABLE_SIZE 10 +#ifdef CONFIG_ACPI +#define ACPI_WRDS_METHOD "WRDS" +#define ACPI_EWRD_METHOD "EWRD" +#define ACPI_WGDS_METHOD "WGDS" +#define ACPI_WIFI_DOMAIN (0x07) +#define ACPI_WRDS_WIFI_DATA_SIZE (IWL_MVM_SAR_TABLE_SIZE + 2) +#define ACPI_EWRD_WIFI_DATA_SIZE ((IWL_MVM_SAR_PROFILE_NUM - 1) * \ + IWL_MVM_SAR_TABLE_SIZE + 3) +#define ACPI_WGDS_WIFI_DATA_SIZE 18 +#define ACPI_WGDS_NUM_BANDS 2 +#define ACPI_WGDS_TABLE_SIZE 3 + +static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm, + union acpi_object *table, + struct iwl_mvm_sar_profile *profile, + 
bool enabled) +{ + int i; -struct iwl_mvm_sar_table { - bool enabled; - u8 values[ACPI_WRDS_TABLE_SIZE]; -}; + profile->enabled = enabled; -#ifdef CONFIG_ACPI -static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds, - struct iwl_mvm_sar_table *sar_table) + for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) { + if ((table[i].type != ACPI_TYPE_INTEGER) || + (table[i].integer.value > U8_MAX)) + return -EINVAL; + + profile->table[i] = table[i].integer.value; + } + + return 0; +} + +static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm, + union acpi_object *data, + int data_size) { - union acpi_object *data_pkg; - u32 i; + int i; + union acpi_object *wifi_pkg; - /* We need at least two packages, one for the revision and one + /* + * We need at least two packages, one for the revision and one * for the data itself. Also check that the revision is valid * (i.e. it is an integer set to 0). - */ - if (wrds->type != ACPI_TYPE_PACKAGE || - wrds->package.count < 2 || - wrds->package.elements[0].type != ACPI_TYPE_INTEGER || - wrds->package.elements[0].integer.value != 0) { - IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n"); - return -EINVAL; + */ + if (data->type != ACPI_TYPE_PACKAGE || + data->package.count < 2 || + data->package.elements[0].type != ACPI_TYPE_INTEGER || + data->package.elements[0].integer.value != 0) { + IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n"); + return ERR_PTR(-EINVAL); } /* loop through all the packages to find the one for WiFi */ - for (i = 1; i < wrds->package.count; i++) { + for (i = 1; i < data->package.count; i++) { union acpi_object *domain; - data_pkg = &wrds->package.elements[i]; + wifi_pkg = &data->package.elements[i]; /* Skip anything that is not a package with the right * amount of elements (i.e. domain_type, - * enabled/disabled plus the sar table size. + * enabled/disabled plus the actual data size. 
*/ - if (data_pkg->type != ACPI_TYPE_PACKAGE || - data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2) + if (wifi_pkg->type != ACPI_TYPE_PACKAGE || + wifi_pkg->package.count != data_size) continue; - domain = &data_pkg->package.elements[0]; + domain = &wifi_pkg->package.elements[0]; if (domain->type == ACPI_TYPE_INTEGER && - domain->integer.value == ACPI_WRDS_WIFI) + domain->integer.value == ACPI_WIFI_DOMAIN) break; - data_pkg = NULL; + wifi_pkg = NULL; } - if (!data_pkg) - return -ENOENT; + if (!wifi_pkg) + return ERR_PTR(-ENOENT); - if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) - return -EINVAL; - - sar_table->enabled = !!(data_pkg->package.elements[1].integer.value); - - for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) { - union acpi_object *entry; - - entry = &data_pkg->package.elements[i + 2]; - if ((entry->type != ACPI_TYPE_INTEGER) || - (entry->integer.value > U8_MAX)) - return -EINVAL; - - sar_table->values[i] = entry->integer.value; - } - - return 0; + return wifi_pkg; } -static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm, - struct iwl_mvm_sar_table *sar_table) +static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) { + union acpi_object *wifi_pkg, *table; acpi_handle root_handle; acpi_handle handle; struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; + bool enabled; int ret; root_handle = ACPI_HANDLE(mvm->dev); @@ -1072,62 +1122,301 @@ static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm, return -ENOENT; } - ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table); + wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer, + ACPI_WRDS_WIFI_DATA_SIZE); + if (IS_ERR(wifi_pkg)) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { + ret = -EINVAL; + goto out_free; + } + + enabled = !!(wifi_pkg->package.elements[1].integer.value); + + /* position of the actual table */ + table = &wifi_pkg->package.elements[2]; + + /* The profile from WRDS is officially profile 1, but goes + * into sar_profiles[0] (because we don't have a profile 0). 
+ */ + ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0], + enabled); + +out_free: kfree(wrds.pointer); + return ret; +} + +static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) +{ + union acpi_object *wifi_pkg; + acpi_handle root_handle; + acpi_handle handle; + struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + bool enabled; + int i, n_profiles, ret; + root_handle = ACPI_HANDLE(mvm->dev); + if (!root_handle) { + IWL_DEBUG_RADIO(mvm, + "Could not retrieve root port ACPI handle\n"); + return -ENOENT; + } + + /* Get the method's handle */ + status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD, + &handle); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_RADIO(mvm, "EWRD method not found\n"); + return -ENOENT; + } + + /* Call EWRD with no arguments */ + status = acpi_evaluate_object(handle, NULL, NULL, &ewrd); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status); + return -ENOENT; + } + + wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer, + ACPI_EWRD_WIFI_DATA_SIZE); + if (IS_ERR(wifi_pkg)) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) || + (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) { + ret = -EINVAL; + goto out_free; + } + + enabled = !!(wifi_pkg->package.elements[1].integer.value); + n_profiles = wifi_pkg->package.elements[2].integer.value; + + for (i = 0; i < n_profiles; i++) { + /* the tables start at element 3 */ + static int pos = 3; + + /* The EWRD profiles officially go from 2 to 4, but we + * save them in sar_profiles[1-3] (because we don't + * have profile 0). So in the array we start from 1. + */ + ret = iwl_mvm_sar_set_profile(mvm, + &wifi_pkg->package.elements[pos], + &mvm->sar_profiles[i + 1], + enabled); + if (ret < 0) + break; + + /* go to the next table */ + pos += IWL_MVM_SAR_TABLE_SIZE; + } + +out_free: + kfree(ewrd.pointer); return ret; } -#else /* CONFIG_ACPI */ -static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm, - struct iwl_mvm_sar_table *sar_table) + +static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm, + struct iwl_mvm_geo_table *geo_table) { - return -ENOENT; + union acpi_object *wifi_pkg; + acpi_handle root_handle; + acpi_handle handle; + struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + int i, ret; + + root_handle = ACPI_HANDLE(mvm->dev); + if (!root_handle) { + IWL_DEBUG_RADIO(mvm, + "Could not retrieve root port ACPI handle\n"); + return -ENOENT; + } + + /* Get the method's handle */ + status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD, + &handle); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_RADIO(mvm, "WGDS method not found\n"); + return -ENOENT; + } + + /* Call WGDS with no arguments */ + status = acpi_evaluate_object(handle, NULL, NULL, &wgds); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status); + return -ENOENT; + } + + wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer, + ACPI_WGDS_WIFI_DATA_SIZE); + if (IS_ERR(wifi_pkg)) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + for (i = 0; i < ACPI_WGDS_WIFI_DATA_SIZE; i++) { + union acpi_object *entry; + + entry = &wifi_pkg->package.elements[i + 1]; + if ((entry->type != ACPI_TYPE_INTEGER) || + (entry->integer.value > U8_MAX)) + return -EINVAL; + + geo_table->values[i] = entry->integer.value; + } + ret = 0; +out_free: + kfree(wgds.pointer); + return ret; } -#endif /* CONFIG_ACPI */ -static int iwl_mvm_sar_init(struct 
iwl_mvm *mvm) +int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { - struct iwl_mvm_sar_table sar_table; struct iwl_dev_tx_power_cmd cmd = { .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), }; - int ret, i, j, idx; + int i, j, idx; + int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b }; int len = sizeof(cmd); + BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2); + BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS != + IWL_MVM_SAR_TABLE_SIZE); + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(cmd.v3); - ret = iwl_mvm_sar_get_table(mvm, &sar_table); + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { + struct iwl_mvm_sar_profile *prof; + + /* don't allow SAR to be disabled (profile 0 means disable) */ + if (profs[i] == 0) + return -EPERM; + + /* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */ + if (profs[i] > IWL_MVM_SAR_PROFILE_NUM) + return -EINVAL; + + /* profiles go from 1 to 4, so decrement to access the array */ + prof = &mvm->sar_profiles[profs[i] - 1]; + + /* if the profile is disabled, do nothing */ + if (!prof->enabled) { + IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n", + profs[i]); + /* if one of the profiles is disabled, we fail all */ + return -ENOENT; + } + + IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i); + for (j = 0; j < IWL_NUM_SUB_BANDS; j++) { + idx = (i * IWL_NUM_SUB_BANDS) + j; + cmd.v3.per_chain_restriction[i][j] = + cpu_to_le16(prof->table[idx]); + IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n", + j, prof->table[idx]); + } + } + + IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); + + return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); +} + +static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) +{ + struct iwl_mvm_geo_table geo_table; + struct iwl_geo_tx_power_profiles_cmd cmd = { + .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES), + }; + int ret, i, j, idx; + u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); + + ret = iwl_mvm_sar_get_wgds_table(mvm, &geo_table); if (ret < 0) { IWL_DEBUG_RADIO(mvm, - "SAR BIOS table invalid or unavailable. (%d)\n", + "Geo SAR BIOS table invalid or unavailable. 
(%d)\n", ret); /* we don't fail if the table is not available */ return 0; } - if (!sar_table.enabled) - return 0; + IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); - IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); + BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * + ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE); - BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS != - ACPI_WRDS_TABLE_SIZE); + for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) { + struct iwl_per_chain_offset *chain = + (struct iwl_per_chain_offset *)&cmd.table[i]; - for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { - IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i); - for (j = 0; j < IWL_NUM_SUB_BANDS; j++) { - idx = (i * IWL_NUM_SUB_BANDS) + j; - cmd.v3.per_chain_restriction[i][j] = - cpu_to_le16(sar_table.values[idx]); - IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n", - j, sar_table.values[idx]); + for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) { + u8 *value; + + idx = i * ACPI_WGDS_NUM_BANDS * ACPI_WGDS_TABLE_SIZE + + j * ACPI_WGDS_TABLE_SIZE; + value = &geo_table.values[idx]; + chain[j].max_tx_power = cpu_to_le16(value[0]); + chain[j].chain_a = value[1]; + chain[j].chain_b = value[2]; + IWL_DEBUG_RADIO(mvm, + "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", + i, j, value[1], value[2], value[0]); } } + return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd); +} - ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); - if (ret) - IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret); +#else /* CONFIG_ACPI */ +static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) +{ + return -ENOENT; +} + +static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) +{ + return -ENOENT; +} + +static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) +{ + return 0; +} +#endif /* CONFIG_ACPI */ + +static int iwl_mvm_sar_init(struct iwl_mvm *mvm) +{ + int ret; + + ret = iwl_mvm_sar_get_wrds_table(mvm); + if (ret < 0) { + IWL_DEBUG_RADIO(mvm, + "WRDS SAR BIOS table invalid or unavailable. (%d)\n", + ret); + /* if not available, don't fail and don't bother with EWRD */ + return 0; + } + + ret = iwl_mvm_sar_get_ewrd_table(mvm); + /* if EWRD is not available, we can still use WRDS, so don't fail */ + if (ret < 0) + IWL_DEBUG_RADIO(mvm, + "EWRD SAR BIOS table invalid or unavailable. 
(%d)\n", + ret); + + /* choose profile 1 (WRDS) as default for both chains */ + ret = iwl_mvm_sar_select_profile(mvm, 1, 1); + + /* if we don't have profile 0 from BIOS, just skip it */ + if (ret == -ENOENT) + return 0; return ret; } @@ -1219,7 +1508,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } /* Init RSS configuration */ - if (iwl_mvm_has_new_rx_api(mvm)) { + /* TODO - remove a000 disablement when we have RXQ config API */ + if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", @@ -1229,10 +1519,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } /* init the fw <-> mac80211 STA mapping */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); - mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); @@ -1313,10 +1603,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - if (iwl_mvm_is_csum_supported(mvm) && - mvm->cfg->features & NETIF_F_RXCSUM) - iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3); - /* allow FW/transport low power modes if not during restart */ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -1325,6 +1611,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; + ret = iwl_mvm_sar_geo_init(mvm); + if (ret) + goto error; + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: @@ -1362,7 +1652,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) goto error; /* init the fw <-> mac80211 STA mapping */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); /* Add auxiliary station for scanning */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index c5734e1a02d2..9e69b9d2012c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -472,8 +472,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; } - mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT; - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; @@ -1442,6 +1443,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_mvm_tx_resp *beacon_notify_hdr; struct ieee80211_vif *csa_vif; struct ieee80211_vif *tx_blocked_vif; + struct agg_tx_status *agg_status; u16 status; lockdep_assert_held(&mvm->mutex); @@ -1449,7 +1451,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, beacon_notify_hdr = &beacon->beacon_notify_hdr; mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); - status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK; + agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr); + status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK; IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n", status, beacon_notify_hdr->failure_frame, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 841bfdff8750..5cdd95775ba6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -6,8 +6,8 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,7 +33,8 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -766,7 +767,7 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm, goto out; mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (mvmsta->sta_id == IWL_MVM_STATION_COUNT || + if (mvmsta->sta_id == IWL_MVM_INVALID_STA || mvmsta->sta_id != mvm->d0i3_ap_sta_id) goto out; @@ -1010,7 +1011,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->uploaded = false; - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); @@ -1053,7 +1054,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); mvm->p2p_device_vif = NULL; - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); @@ -1351,6 +1352,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, goto out_release; } + if (iwl_mvm_is_dqa_supported(mvm)) { + /* + * Only queue for this station is the mcast queue, + * which shouldn't be in TFD mask anyway + */ + ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, + 0, vif->type); + if (ret) + goto out_release; + } + iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; } @@ -1516,6 +1528,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvm->noa_duration = 0; } #endif + iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); iwl_mvm_dealloc_bcast_sta(mvm, vif); goto out_release; } @@ -1952,7 +1965,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_MVM_SMPS_REQ_PROT, IEEE80211_SMPS_DYNAMIC); } - } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { /* * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. 
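A minimal standalone sketch of the (chain, sub-band) flattening done by iwl_mvm_sar_select_profile earlier in this patch. The 2-chain by 5-sub-band shape is an assumption that merely satisfies the BUILD_BUG_ON checks together with IWL_MVM_SAR_TABLE_SIZE == 10 (the real constants live in the firmware API headers), and the limit values are invented:

#include <stdio.h>

/* Assumed shape: 2 chains x 5 sub-bands == 10 table entries. */
#define NUM_CHAIN_LIMITS 2
#define NUM_SUB_BANDS    5

int main(void)
{
	/* One invented, enabled profile; limits are in units of 1/8 dBm. */
	unsigned char table[NUM_CHAIN_LIMITS * NUM_SUB_BANDS] = {
		88, 88, 80, 80, 80,	/* chain A, sub-bands 0..4 */
		88, 88, 80, 80, 80,	/* chain B, sub-bands 0..4 */
	};
	int i, j;

	for (i = 0; i < NUM_CHAIN_LIMITS; i++) {
		printf("Chain[%d]:\n", i);
		for (j = 0; j < NUM_SUB_BANDS; j++) {
			/* same flattening as the driver loop: idx = i * NUM_SUB_BANDS + j */
			int idx = i * NUM_SUB_BANDS + j;

			printf("  Band[%d] = %d * .125dBm\n", j, table[idx]);
		}
	}
	return 0;
}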
@@ -1966,8 +1979,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_ERR(mvm, "failed to remove AP station\n"); if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; /* remove quota for this interface */ ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) @@ -2104,6 +2117,10 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (ret) goto out_unbind; + ret = iwl_mvm_add_mcast_sta(mvm, vif); + if (ret) + goto out_rm_bcast; + /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; @@ -2131,6 +2148,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, out_quota_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; + iwl_mvm_rm_mcast_sta(mvm, vif); +out_rm_bcast: iwl_mvm_send_rm_bcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); @@ -2177,6 +2196,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, false, NULL); + iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); @@ -2343,6 +2363,9 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) continue; + if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE) + continue; + __set_bit(tid_data->txq_id, &txqs); if (iwl_mvm_tid_queued(tid_data) == 0) @@ -2368,7 +2391,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, */ break; case STA_NOTIFY_AWAKE: - if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) break; if (txqs) @@ -3939,7 +3962,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, mvmvif = iwl_mvm_vif_from_mac80211(vif); /* flush the AP-station and all TDLS peers */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) @@ -4196,7 +4219,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_has_new_rx_api(mvm)) + /* TODO - remove a000 disablement when we have RXQ config API */ + if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm)) return; notif->cookie = mvm->queue_sync_cookie; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 73a216524af2..1938dfb44152 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -407,6 +407,7 @@ struct iwl_mvm_vif { struct iwl_mvm_time_event_data hs_time_event_data; struct iwl_mvm_int_sta bcast_sta; + struct iwl_mvm_int_sta mcast_sta; /* * Assigned while mac80211 has the interface in a channel context, @@ -603,10 +604,15 @@ enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_ACTIVE, }; +#define MAX_NUM_LMAC 2 struct iwl_mvm_shared_mem_cfg { + int num_lmacs; int num_txfifo_entries; - u32 txfifo_size[TX_FIFO_MAX_NUM]; - u32 rxfifo_size[RX_FIFO_MAX_NUM]; + struct { + u32 txfifo_size[TX_FIFO_MAX_NUM]; + u32 rxfifo1_size; + } lmac[MAX_NUM_LMAC]; + u32 rxfifo2_size; u32 internal_txfifo_addr; u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; }; @@ -625,6 +631,7 @@ struct iwl_mvm_shared_mem_cfg { * @reorder_timer: timer for frames are in the reorder buffer. For AMSDU * it is the time of last received sub-frame * @removed: prevent timer re-arming + * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state * @mvm: mvm pointer, needed for frame timer context */ @@ -640,6 +647,7 @@ struct iwl_mvm_reorder_buffer { unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF]; struct timer_list reorder_timer; bool removed; + bool valid; spinlock_t lock; struct iwl_mvm *mvm; } ____cacheline_aligned_in_smp; @@ -709,6 +717,21 @@ enum iwl_mvm_queue_status { #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) #define IWL_MVM_NUM_CIPHERS 10 +#ifdef CONFIG_ACPI +#define IWL_MVM_SAR_TABLE_SIZE 10 +#define IWL_MVM_SAR_PROFILE_NUM 4 +#define IWL_MVM_GEO_TABLE_SIZE 18 + +struct iwl_mvm_sar_profile { + bool enabled; + u8 table[IWL_MVM_SAR_TABLE_SIZE]; +}; + +struct iwl_mvm_geo_table { + u8 values[IWL_MVM_GEO_TABLE_SIZE]; +}; +#endif + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -975,7 +998,10 @@ struct iwl_mvm { #endif /* Tx queues */ - u8 aux_queue; + u16 aux_queue; + u16 probe_queue; + u16 p2p_dev_queue; + u8 first_agg_queue; u8 last_agg_queue; @@ -1018,7 +1044,7 @@ struct iwl_mvm { } peer; } tdls_cs; - struct iwl_mvm_shared_mem_cfg shared_mem_cfg; + struct iwl_mvm_shared_mem_cfg smem_cfg; u32 ciphers[IWL_MVM_NUM_CIPHERS]; struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; @@ -1035,6 +1061,9 @@ struct iwl_mvm { bool drop_bcn_ap_mode; struct delayed_work cs_tx_unblock_dwork; +#ifdef CONFIG_ACPI + struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM]; +#endif }; /* Extract MVM priv from op_mode and _hw */ @@ -1222,13 +1251,25 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) { /* * TODO: - * The issue of how to determine CDB support is still not well defined. - * It may be that it will be for all next HW devices and it may be per - * FW compilation and it may also differ between different devices. - * For now take a ride on the new TX API and get back to it when - * it is well defined. + * The issue of how to determine CDB APIs and usage is still not fully + * defined. + * There is a compilation for CDB and non-CDB FW, but there may + * be also runtime check. + * For now there is a TLV for checking compilation mode, but a + * runtime check will also have to be here - once defined. 
*/ - return iwl_mvm_has_new_tx_api(mvm); + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CDB_SUPPORT); +} + +static inline struct agg_tx_status* +iwl_mvm_get_agg_status(struct iwl_mvm *mvm, + struct iwl_mvm_tx_resp *tx_resp) +{ + if (iwl_mvm_has_new_tx_api(mvm)) + return &tx_resp->v6.status; + else + return &tx_resp->v3.status; } static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) @@ -1389,6 +1430,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); @@ -1668,6 +1711,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout); +int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, + u8 sta_id, u8 tid, unsigned int timeout); + /* * Disable a TXQ. * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. @@ -1701,7 +1747,8 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { - iwl_free_fw_paging(mvm); + if (!iwl_mvm_has_new_tx_api(mvm)) + iwl_free_fw_paging(mvm); mvm->ucode_loaded = false; iwl_trans_stop_device(mvm->trans); } @@ -1797,4 +1844,14 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, u32 duration, u32 timeout); bool iwl_mvm_lqm_active(struct iwl_mvm *mvm); +#ifdef CONFIG_ACPI +int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); +#else +static inline +int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) +{ + return -ENOENT; +} +#endif /* CONFIG_ACPI */ + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 4cd72d4cdc47..888053323c92 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -302,6 +302,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_SYNC), RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF, + iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, @@ -426,6 +428,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { */ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), + HCMD_NAME(INIT_EXTENDED_CFG_CMD), }; /* Please keep this array *SORTED* by hex value. 
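The iwl_mvm_get_agg_status helper added above hides the v3/v6 TX-response layout split behind one accessor, so callers such as iwl_mvm_rx_beacon_notif never pick a union member themselves. A self-contained sketch of that accessor pattern; the layouts and field sizes below are reduced and made up, not the real iwl_mvm_tx_resp:

#include <stdio.h>

struct status_block {
	unsigned short status;
};

/* Invented layouts: the status block sits at different offsets in the two
 * firmware API versions, which is why callers go through an accessor. */
struct tx_resp_v3 { unsigned char hdr[8];  struct status_block status; };
struct tx_resp_v6 { unsigned char hdr[16]; struct status_block status; };

struct tx_resp {
	union {
		struct tx_resp_v3 v3;
		struct tx_resp_v6 v6;
	};
};

static struct status_block *get_status(int new_tx_api, struct tx_resp *resp)
{
	/* mirrors iwl_mvm_get_agg_status(): pick the member by API version */
	return new_tx_api ? &resp->v6.status : &resp->v3.status;
}

int main(void)
{
	struct tx_resp resp = { .v6.status.status = 0x12 };

	printf("status=0x%x\n", (unsigned int)get_status(1, &resp)->status);
	return 0;
}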
@@ -444,6 +447,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), HCMD_NAME(CTDP_CONFIG_CMD), HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), + HCMD_NAME(GEO_TX_POWER_LIMIT), HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; @@ -452,6 +456,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { + HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), HCMD_NAME(STA_PM_NOTIF), @@ -462,6 +467,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ +static const struct iwl_hcmd_names iwl_mvm_debug_names[] = { + HCMD_NAME(MFU_ASSERT_DUMP_NTF), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { HCMD_NAME(STORED_BEACON_NTF), }; @@ -602,6 +614,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } } else { mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; + mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE; mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; } @@ -1256,7 +1270,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm, u8 tid; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION || - mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) + mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)) return false; mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); @@ -1344,7 +1358,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, struct ieee80211_sta *ap_sta; struct iwl_mvm_sta *mvm_ap_sta; - if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT) + if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA) return; rcu_read_lock(); @@ -1414,7 +1428,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading; } else { WARN_ON_ONCE(d0i3_iter_data.vif_count > 1); - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; mvm->d0i3_offloading = false; } @@ -1427,7 +1441,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) return ret; /* configure wowlan configuration only if needed */ - if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) { + if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) { /* wake on beacons only if beacon storing isn't supported */ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_STORING)) @@ -1504,7 +1518,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) spin_lock_bh(&mvm->d0i3_tx_lock); - if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT) + if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA) goto out; IWL_DEBUG_RPM(mvm, "re-enqueue packets\n"); @@ -1542,7 +1556,7 @@ out: } clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); wake_up(&mvm->d0i3_exit_waitq); - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; if (wake_queues) ieee80211_wake_queues(mvm->hw); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 95138830b9f8..d59efe804356 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 
2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -250,12 +251,30 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { + enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY; + lockdep_assert_held(&mvm->mutex); + /* In CDB mode we cannot modify PHY context between bands so... */ + if (iwl_mvm_has_new_tx_api(mvm) && + ctxt->channel->band != chandef->chan->band) { + int ret; + + /* ... remove it here ...*/ + ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + chains_static, chains_dynamic, + FW_CTXT_ACTION_REMOVE, 0); + if (ret) + return ret; + + /* ... and proceed to add it again */ + action = FW_CTXT_ACTION_ADD; + } + ctxt->channel = chandef->chan; return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, - FW_CTXT_ACTION_MODIFY, 0); + action, 0); } void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 20473df79c94..d4c0ca7ccb34 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -104,7 +104,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, u8 crypt_len, struct iwl_rx_cmd_buffer *rxb) { - unsigned int hdrlen, fraglen; + unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + unsigned int fraglen; + + /* + * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len, + * but those are all multiples of 4 long) all goes away, but we + * want the *end* of it, which is going to be the start of the IP + * header, to be aligned when it gets pulled in. + * The beginning of the skb->data is aligned on at least a 4-byte + * boundary after allocation. Everything here is aligned at least + * on a 2-byte boundary so we can just take hdrlen & 3 and pad by + * the result. + */ + skb_reserve(skb, hdrlen & 3); /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and @@ -118,8 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, * If the latter changes (there are efforts in the standards group * to do so) we should revisit this and ieee80211_data_to_8023(). */ - hdrlen = (len <= skb_tailroom(skb)) ? len : - sizeof(*hdr) + crypt_len + 8; + hdrlen = (len <= skb_tailroom(skb)) ? 
len : hdrlen + crypt_len + 8; memcpy(skb_put(skb, hdrlen), hdr, hdrlen); fraglen = len - hdrlen; @@ -339,7 +351,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT; - if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) { + if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) { sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); if (IS_ERR(sta)) sta = NULL; @@ -448,9 +460,16 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->vht_flag |= RX_VHT_FLAG_BF; } else { - rx_status->rate_idx = - iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, - rx_status->band); + int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + rx_status->band); + + if (WARN(rate < 0 || rate > 0xFF, + "Invalid rate flags 0x%x, band %d,\n", + rate_n_flags, rx_status->band)) { + kfree_skb(skb); + return; + } + rx_status->rate_idx = rate; } #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -637,6 +656,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, .mvm = mvm, }; int expected_size; + int i; + u8 *energy; + __le32 *bytes, *air_time; if (iwl_mvm_is_cdb_supported(mvm)) expected_size = sizeof(*stats); @@ -645,8 +667,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, else expected_size = sizeof(struct iwl_notif_statistics_v10); - if (iwl_rx_packet_payload_len(pkt) != expected_size) - goto invalid; + if (iwl_rx_packet_payload_len(pkt) != expected_size) { + IWL_ERR(mvm, "received invalid statistics size (%d)!\n", + iwl_rx_packet_payload_len(pkt)); + return; + } data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = @@ -662,38 +687,6 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, le64_to_cpu(stats->general.common.on_time_scan); data.general = &stats->general; - if (iwl_mvm_has_new_rx_api(mvm)) { - int i; - u8 *energy; - __le32 *bytes, *air_time; - - if (!iwl_mvm_is_cdb_supported(mvm)) { - struct iwl_notif_statistics_v11 *v11 = - (void *)&pkt->data; - - energy = (void *)&v11->load_stats.avg_energy; - bytes = (void *)&v11->load_stats.byte_count; - air_time = (void *)&v11->load_stats.air_time; - } else { - energy = (void *)&stats->load_stats.avg_energy; - bytes = (void *)&stats->load_stats.byte_count; - air_time = (void *)&stats->load_stats.air_time; - } - - rcu_read_lock(); - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - struct iwl_mvm_sta *sta; - - if (!energy[i]) - continue; - - sta = iwl_mvm_sta_from_staid_rcu(mvm, i); - if (!sta) - continue; - sta->avg_energy = energy[i]; - } - rcu_read_unlock(); - } iwl_mvm_rx_stats_check_trigger(mvm, pkt); @@ -701,10 +694,36 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator, &data); - return; - invalid: - IWL_ERR(mvm, "received invalid statistics size (%d)!\n", - iwl_rx_packet_payload_len(pkt)); + + if (!iwl_mvm_has_new_rx_api(mvm)) + return; + + if (!iwl_mvm_is_cdb_supported(mvm)) { + struct iwl_notif_statistics_v11 *v11 = + (void *)&pkt->data; + + energy = (void *)&v11->load_stats.avg_energy; + bytes = (void *)&v11->load_stats.byte_count; + air_time = (void *)&v11->load_stats.air_time; + } else { + energy = (void *)&stats->load_stats.avg_energy; + bytes = (void *)&stats->load_stats.byte_count; + air_time = (void *)&stats->load_stats.air_time; + } + + rcu_read_lock(); + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { + struct iwl_mvm_sta *sta; + + if (!energy[i]) + continue; + + sta = iwl_mvm_sta_from_staid_rcu(mvm, i); + if (!sta) + continue; + 
sta->avg_energy = energy[i]; + } + rcu_read_unlock(); } void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index d79e9c2a2654..24c4fbe139a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -462,6 +462,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data) int i; u16 sn = 0, index = 0; bool expired = false; + bool cont = false; spin_lock(&buf->lock); @@ -473,12 +474,21 @@ void iwl_mvm_reorder_timer_expired(unsigned long data) for (i = 0; i < buf->buf_size ; i++) { index = (buf->head_sn + i) % buf->buf_size; - if (skb_queue_empty(&buf->entries[index])) + if (skb_queue_empty(&buf->entries[index])) { + /* + * If there is a hole and the next frame didn't expire + * we want to break and not advance SN + */ + cont = false; continue; - if (!time_after(jiffies, buf->reorder_time[index] + - RX_REORDER_BUF_TIMEOUT_MQ)) + } + if (!cont && !time_after(jiffies, buf->reorder_time[index] + + RX_REORDER_BUF_TIMEOUT_MQ)) break; + expired = true; + /* continue until next hole after this expired frames */ + cont = true; sn = ieee80211_sn_add(buf->head_sn, i + 1); } @@ -626,9 +636,13 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, return false; baid_data = rcu_dereference(mvm->baid_map[baid]); - if (WARN(!baid_data, - "Received baid %d, but no data exists for this BAID\n", baid)) + if (!baid_data) { + WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN), + "Received baid %d, but no data exists for this BAID\n", + baid); return false; + } + if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id, "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n", baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id, @@ -643,6 +657,14 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, spin_lock_bh(&buffer->lock); + if (!buffer->valid) { + if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) { + spin_unlock_bh(&buffer->lock); + return false; + } + buffer->valid = true; + } + if (ieee80211_is_back_req(hdr->frame_control)) { iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn); goto drop; @@ -727,7 +749,8 @@ drop: return true; } -static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid) +static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, + u32 reorder_data, u8 baid) { unsigned long now = jiffies; unsigned long timeout; @@ -736,8 +759,10 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid) rcu_read_lock(); data = rcu_dereference(mvm->baid_map[baid]); - if (WARN_ON(!data)) + if (!data) { + WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN)); goto out; + } if (!data->timeout) goto out; @@ -831,7 +856,7 @@ void iwl_mvm_rx_mpdu_mq(struct 
iwl_mvm *mvm, struct napi_struct *napi, if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) { u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK; - if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) { + if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) { sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); if (IS_ERR(sta)) sta = NULL; @@ -893,26 +918,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { kfree_skb(skb); - rcu_read_unlock(); - return; + goto out; } /* * Our hardware de-aggregates AMSDUs but copies the mac header * as it to the de-aggregated MPDUs. We need to turn off the * AMSDU bit in the QoS control ourselves. + * In addition, HW reverses addr3 and addr4 - reverse it back. */ if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) { + int i; u8 *qc = ieee80211_get_qos_ctl(hdr); + u8 mac_addr[ETH_ALEN]; *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - if (!(desc->amsdu_info & - IWL_RX_MPDU_AMSDU_LAST_SUBFRAME)) - rx_status->flag |= RX_FLAG_AMSDU_MORE; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1]; + ether_addr_copy(hdr->addr3, mac_addr); + + if (ieee80211_has_a4(hdr->frame_control)) { + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = + hdr->addr4[ETH_ALEN - i - 1]; + ether_addr_copy(hdr->addr4, mac_addr); + } + } + if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) { + u32 reorder_data = le32_to_cpu(desc->reorder_data); + + iwl_mvm_agg_rx_received(mvm, reorder_data, baid); } - if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) - iwl_mvm_agg_rx_received(mvm, baid); } /* Set up the HT phy flags */ @@ -953,9 +991,17 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->vht_flag |= RX_VHT_FLAG_BF; } else { - rx_status->rate_idx = - iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, - rx_status->band); + int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + rx_status->band); + + if (WARN(rate < 0 || rate > 0xFF, + "Invalid rate flags 0x%x, band %d,\n", + rate_n_flags, rx_status->band)) { + kfree_skb(skb); + goto out; + } + rx_status->rate_idx = rate; + } /* management stuff on default queue */ @@ -974,6 +1020,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); +out: rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 0a64efa844b7..9668f945b4e6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -966,11 +966,11 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels) channels[j] = band->channels[i].hw_value; } -static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, - u32 flags, u8 channel_flags) +static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, + u32 flags, u8 channel_flags) { enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); - struct iwl_scan_config *cfg = config; + struct iwl_scan_config_v1 *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); @@ -989,11 +989,11 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, iwl_mvm_fill_channels(mvm, cfg->channel_array); } -static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config, - u32 flags, u8 channel_flags) +static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, + u32 flags, u8 channel_flags) { enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); - struct iwl_scan_config_cdb *cfg = config; + struct iwl_scan_config *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); @@ -1001,10 +1001,14 @@ static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config, cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); cfg->out_of_channel_time[0] = cpu_to_le32(scan_timing[type].max_out_time); - cfg->out_of_channel_time[1] = - cpu_to_le32(scan_timing[type].max_out_time); cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time); - cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time); + + if (iwl_mvm_is_cdb_supported(mvm)) { + cfg->suspend_time[1] = + cpu_to_le32(scan_timing[type].suspend_time); + cfg->out_of_channel_time[1] = + cpu_to_le32(scan_timing[type].max_out_time); + } iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]); @@ -1033,16 +1037,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) return -ENOBUFS; - if (type == mvm->scan_type) { - IWL_DEBUG_SCAN(mvm, - "Ignoring UMAC scan config of the same type\n"); + if (type == mvm->scan_type) return 0; - } - if (iwl_mvm_is_cdb_supported(mvm)) - cmd_size = sizeof(struct iwl_scan_config_cdb); - else + if (iwl_mvm_has_new_tx_api(mvm)) cmd_size = sizeof(struct iwl_scan_config); + else + cmd_size = sizeof(struct iwl_scan_config_v1); cmd_size += mvm->fw->ucode_capa.n_scan_channels; cfg = kzalloc(cmd_size, GFP_KERNEL); @@ -1068,13 +1069,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) IWL_CHANNEL_FLAG_EBS_ADD | IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; - if (iwl_mvm_is_cdb_supported(mvm)) { + if (iwl_mvm_has_new_tx_api(mvm)) { flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ? 
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; - iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags); - } else { iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); + } else { + iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags); } cmd.data[0] = cfg; @@ -1119,16 +1120,20 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, } cmd->fragmented_dwell = timing->dwell_fragmented; - if (iwl_mvm_is_cdb_supported(mvm)) { - cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time); - cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time); - cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time); - cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time); - cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + if (iwl_mvm_has_new_tx_api(mvm)) { + cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time); + cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time); + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->v6.max_out_time[1] = + cpu_to_le32(timing->max_out_time); + cmd->v6.suspend_time[1] = + cpu_to_le32(timing->suspend_time); + } } else { - cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time); - cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time); - cmd->no_cdb.scan_priority = + cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time); + cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time); + cmd->v1.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } @@ -1207,8 +1212,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; - void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ? - (void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data; + void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ? 
+ (void *)&cmd->v6.data : (void *)&cmd->v1.data; struct iwl_scan_req_umac_tail *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; @@ -1245,12 +1250,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; - if (iwl_mvm_is_cdb_supported(mvm)) { - cmd->cdb.channel_flags = channel_flags; - cmd->cdb.n_channels = params->n_channels; + if (iwl_mvm_has_new_tx_api(mvm)) { + cmd->v6.channel_flags = channel_flags; + cmd->v6.n_channels = params->n_channels; } else { - cmd->no_cdb.channel_flags = channel_flags; - cmd->no_cdb.n_channels = params->n_channels; + cmd->v1.channel_flags = channel_flags; + cmd->v1.n_channels = params->n_channels; } iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap); @@ -1692,10 +1697,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) int iwl_mvm_scan_size(struct iwl_mvm *mvm) { - int base_size = IWL_SCAN_REQ_UMAC_SIZE; + int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; - if (iwl_mvm_is_cdb_supported(mvm)) - base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB; + if (iwl_mvm_has_new_tx_api(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) return base_size + diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index 101fb04a8573..539b06bf0803 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -235,7 +235,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; case SF_FULL_ON: - if (sta_id == IWL_MVM_STATION_COUNT) { + if (sta_id == IWL_MVM_INVALID_STA) { IWL_ERR(mvm, "No station: Cannot switch SF to FULL_ON\n"); return -EINVAL; @@ -276,12 +276,12 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, bool remove_vif) { enum iwl_sf_state new_state; - u8 sta_id = IWL_MVM_STATION_COUNT; + u8 sta_id = IWL_MVM_INVALID_STA; struct iwl_mvm_vif *mvmvif = NULL; struct iwl_mvm_active_iface_iterator_data data = { .ignore_vif = changed_vif, .sta_vif_state = SF_UNINIT, - .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT, + .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA, }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 9d28db7f56aa..a2a1fa06b781 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -98,7 +98,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, reserved_ids = BIT(0); /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ - for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) { + for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) { if (BIT(sta_id) & reserved_ids) continue; @@ -106,7 +106,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, lockdep_is_held(&mvm->mutex))) return sta_id; } - return IWL_MVM_STATION_COUNT; + return IWL_MVM_INVALID_STA; } /* send station add/update command to firmware */ @@ -127,11 +127,17 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u32 agg_size = 0, mpdu_dens = 0; if (!update || (flags & STA_MODIFY_QUEUES)) { - add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); - if (flags & STA_MODIFY_QUEUES) - add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; + if (!iwl_mvm_has_new_tx_api(mvm)) { + add_sta_cmd.tfd_queue_msk = + cpu_to_le32(mvm_sta->tfd_queue_msk); + + if (flags & STA_MODIFY_QUEUES) + add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; + } else { + WARN_ON(flags & STA_MODIFY_QUEUES); + } } switch (sta->bandwidth) { @@ -209,13 +215,15 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) - add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK); + add_sta_cmd.uapsd_acs |= BIT(AC_BK); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) - add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE); + add_sta_cmd.uapsd_acs |= BIT(AC_BE); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) - add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI); + add_sta_cmd.uapsd_acs |= BIT(AC_VI); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) - add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO); + add_sta_cmd.uapsd_acs |= BIT(AC_VO); + add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4; + add_sta_cmd.sp_length = sta->max_sp ? 
sta->max_sp * 2 : 128; } status = ADD_STA_SUCCESS; @@ -337,6 +345,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, u8 sta_id; int ret; + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + spin_lock_bh(&mvm->queue_info_lock); sta_id = mvm->queue_info[queue].ra_sta_id; spin_unlock_bh(&mvm->queue_info_lock); @@ -387,6 +398,9 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) lockdep_assert_held(&mvm->mutex); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + spin_lock_bh(&mvm->queue_info_lock); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; @@ -426,6 +440,9 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) lockdep_assert_held(&mvm->mutex); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + spin_lock_bh(&mvm->queue_info_lock); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; @@ -468,6 +485,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, lockdep_assert_held(&mvm->mutex); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + spin_lock_bh(&mvm->queue_info_lock); txq_curr_ac = mvm->queue_info[queue].mac80211_ac; sta_id = mvm->queue_info[queue].ra_sta_id; @@ -512,6 +532,8 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, int i; lockdep_assert_held(&mvm->queue_info_lock); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue)); @@ -596,6 +618,9 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, unsigned long mq; int ret; + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + /* * If the AC is lower than current one - FIFO needs to be redirected to * the lowest one of the streams in the queue. 
Check if this is needed @@ -677,6 +702,41 @@ out: return ret; } +static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, u8 ac, + int tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + u8 mac_queue = mvmsta->vif->hw_queue[ac]; + int queue = -1; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_TX_QUEUES(mvm, + "Allocating queue for sta %d on tid %d\n", + mvmsta->sta_id, tid); + queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid, + wdg_timeout); + if (queue < 0) + return queue; + + IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); + + spin_lock_bh(&mvmsta->lock); + mvmsta->tid_data[tid].txq_id = queue; + mvmsta->tid_data[tid].is_tid_active = true; + mvmsta->tfd_queue_msk |= BIT(queue); + spin_unlock_bh(&mvmsta->lock); + + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; + spin_unlock_bh(&mvm->queue_info_lock); + + return 0; +} + static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid, struct ieee80211_hdr *hdr) @@ -702,6 +762,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_has_new_tx_api(mvm)) + return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); + spin_lock_bh(&mvmsta->lock); tfd_queue_mask = mvmsta->tfd_queue_msk; spin_unlock_bh(&mvmsta->lock); @@ -880,6 +943,9 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) lockdep_assert_held(&mvm->mutex); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + spin_lock_bh(&mvm->queue_info_lock); tid_bitmap = mvm->queue_info[queue].tid_bitmap; spin_unlock_bh(&mvm->queue_info_lock); @@ -917,6 +983,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) int ssn; int ret = true; + /* queue sharing is disabled on new TX path */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->queue_info_lock); @@ -1199,18 +1269,30 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, ac = tid_to_mac80211_ac[i]; mac_queue = mvm_sta->vif->hw_queue[ac]; - cfg.tid = i; - cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac]; - cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || - txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE); + if (iwl_mvm_has_new_tx_api(mvm)) { + IWL_DEBUG_TX_QUEUES(mvm, + "Re-mapping sta %d tid %d\n", + mvm_sta->sta_id, i); + txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, + mvm_sta->sta_id, + i, wdg_timeout); + tid_data->txq_id = txq_id; + } else { + u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number); + + cfg.tid = i; + cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac]; + cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || + txq_id == + IWL_MVM_DQA_BSS_CLIENT_QUEUE); - IWL_DEBUG_TX_QUEUES(mvm, - "Re-mapping sta %d tid %d to queue %d\n", - mvm_sta->sta_id, i, txq_id); + IWL_DEBUG_TX_QUEUES(mvm, + "Re-mapping sta %d tid %d to queue %d\n", + mvm_sta->sta_id, i, txq_id); - iwl_mvm_enable_txq(mvm, txq_id, mac_queue, - IEEE80211_SEQ_TO_SN(tid_data->seq_number), - &cfg, wdg_timeout); + iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg, + wdg_timeout); + } mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } @@ -1235,7 +1317,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, else sta_id = mvm_sta->sta_id; - if (sta_id == IWL_MVM_STATION_COUNT) + if (sta_id == IWL_MVM_INVALID_STA) return -ENOSPC; spin_lock_init(&mvm_sta->lock); @@ -1317,10 +1399,10 @@ 
update_fw: if (vif->type == NL80211_IFTYPE_STATION) { if (!sta->tdls) { - WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT); + WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA); mvmvif->ap_sta_id = sta_id; } else { - WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT); + WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA); } } @@ -1571,11 +1653,11 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, return ret; /* unassoc - go ahead - remove the AP STA now */ - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; /* clear d0i3_ap_sta_id if no longer relevant */ if (mvm->d0i3_ap_sta_id == sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; } } @@ -1584,7 +1666,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, * before the STA is removed. */ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { - mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; cancel_delayed_work(&mvm->tdls_cs.dwork); } @@ -1641,7 +1723,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); - if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; } @@ -1652,12 +1734,11 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, return 0; } -static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *sta) +void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); - sta->sta_id = IWL_MVM_STATION_COUNT; + sta->sta_id = IWL_MVM_INVALID_STA; } static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, @@ -1676,7 +1757,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color)); - cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); + if (!iwl_mvm_has_new_tx_api(mvm)) + cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(0xffff); if (addr) @@ -1701,27 +1783,19 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) +static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) { unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 
mvm->cfg->base_params->wd_timeout : IWL_WATCHDOG_DISABLED; - int ret; - - lockdep_assert_held(&mvm->mutex); - - /* Map Aux queue to fifo - needs to happen before adding Aux station */ - if (!iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); - - /* Allocate aux station and assign to it the aux queue */ - ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), - NL80211_IFTYPE_UNSPECIFIED); - if (ret) - return ret; - if (iwl_mvm_is_dqa_supported(mvm)) { + if (iwl_mvm_has_new_tx_api(mvm)) { + int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue, + mvm->aux_sta.sta_id, + IWL_MAX_TID_COUNT, + wdg_timeout); + mvm->aux_queue = queue; + } else if (iwl_mvm_is_dqa_supported(mvm)) { struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_MCAST, .sta_id = mvm->aux_sta.sta_id, @@ -1732,14 +1806,43 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, wdg_timeout); + } else { + iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, + IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); } +} - ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, - MAC_INDEX_AUX, 0); +int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + /* Allocate aux station and assign to it the aux queue */ + ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), + NL80211_IFTYPE_UNSPECIFIED); if (ret) + return ret; + + /* Map Aux queue to fifo - needs to happen before adding Aux station */ + if (!iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_queue(mvm); + + ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, + MAC_INDEX_AUX, 0); + if (ret) { iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); - return ret; + return ret; + } + + /* + * For a000 firmware and on we cannot add queue to a station unknown + * to firmware so enable queue here - after the station was added + */ + if (iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_queue(mvm); + + return 0; } int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -1790,39 +1893,39 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; + int queue = 0; int ret; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = IWL_MVM_TX_FIFO_VO, + .sta_id = mvmvif->bcast_sta.sta_id, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_is_dqa_supported(mvm)) { - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = IWL_MVM_TX_FIFO_VO, - .sta_id = mvmvif->bcast_sta.sta_id, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); - int queue; - + if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) - queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + queue = mvm->probe_queue; else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) - queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; + queue = mvm->p2p_dev_queue; else if (WARN(1, "Missing required TXQ for adding bcast STA\n")) return -EINVAL; - iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg, - wdg_timeout); 
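/*
 * Sketch of the ordering rule that drives the rework of iwl_mvm_add_aux_sta
 * and iwl_mvm_send_add_bcast_sta: on the a000 TX API a queue can only be
 * attached to a station the firmware already knows about, so ADD_STA has to
 * run before the queue is allocated, while older firmware keeps the original
 * queue-first order. The helpers below are invented stand-ins; the driver
 * itself uses iwl_mvm_add_int_sta_common() and iwl_mvm_tvqm_enable_txq().
 */
#include <stdbool.h>
#include <stdio.h>

static int fw_add_station(int sta_id)
{
	printf("ADD_STA for station %d\n", sta_id);
	return 0;
}

static void fw_enable_queue_for_sta(int sta_id)
{
	printf("enable TX queue for station %d\n", sta_id);
}

static int add_aux_sta_like(bool new_tx_api)
{
	int sta_id = 1;
	int ret;

	/* pre-a000: the queue may exist before the station is added */
	if (!new_tx_api)
		fw_enable_queue_for_sta(sta_id);

	ret = fw_add_station(sta_id);
	if (ret)
		return ret;	/* the real code also frees the station slot here */

	/* a000+: only attach the queue once the firmware knows the station */
	if (new_tx_api)
		fw_enable_queue_for_sta(sta_id);

	return 0;
}

int main(void)
{
	return add_aux_sta_like(true);
}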
bsta->tfd_queue_msk |= BIT(queue); + + iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, + &cfg, wdg_timeout); } if (vif->type == NL80211_IFTYPE_ADHOC) baddr = vif->bss_conf.bssid; - if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, @@ -1831,27 +1934,20 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return ret; /* - * In AP vif type, we also need to enable the cab_queue. However, we - * have to enable it after the ADD_STA command is sent, otherwise the - * FW will throw an assert once we send the ADD_STA command (it'll - * detect a mismatch in the tfd_queue_msk, as we can't add the - * enabled-cab_queue to the mask) + * For a000 firmware and on we cannot add queue to a station unknown + * to firmware so enable queue here - after the station was added */ - if (iwl_mvm_is_dqa_supported(mvm) && - (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC)) { - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = IWL_MVM_TX_FIFO_MCAST, - .sta_id = mvmvif->bcast_sta.sta_id, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); + if (iwl_mvm_has_new_tx_api(mvm)) { + int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0], + bsta->sta_id, + IWL_MAX_TID_COUNT, + wdg_timeout); + if (vif->type == NL80211_IFTYPE_AP) + mvm->probe_queue = queue; + else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + mvm->p2p_dev_queue = queue; - iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, - 0, &cfg, wdg_timeout); + bsta->tfd_queue_msk |= BIT(queue); } return 0; @@ -1869,24 +1965,18 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, IWL_MAX_TID_COUNT, 0); - if (mvmvif->bcast_sta.tfd_queue_msk & - BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) { - iwl_mvm_disable_txq(mvm, - IWL_MVM_DQA_AP_PROBE_RESP_QUEUE, + if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) { + iwl_mvm_disable_txq(mvm, mvm->probe_queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0); - mvmvif->bcast_sta.tfd_queue_msk &= - ~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE); + mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue); } - if (mvmvif->bcast_sta.tfd_queue_msk & - BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) { - iwl_mvm_disable_txq(mvm, - IWL_MVM_DQA_P2P_DEVICE_QUEUE, + if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) { + iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0); - mvmvif->bcast_sta.tfd_queue_msk &= - ~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE); + mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue); } } @@ -1982,6 +2072,88 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return ret; } +/* + * Allocate a new station entry for the multicast station to the given vif, + * and send it to the FW. + * Note that each AP/GO mac should have its own multicast station. 
+ * + * @mvm: the mvm component + * @vif: the interface to which the multicast station is added + */ +int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta; + static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; + const u8 *maddr = _maddr; + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = IWL_MVM_TX_FIFO_MCAST, + .sta_id = msta->sta_id, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; + unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (!iwl_mvm_is_dqa_supported(mvm)) + return 0; + + if (WARN_ON(vif->type != NL80211_IFTYPE_AP)) + return -ENOTSUPP; + + ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, + mvmvif->id, mvmvif->color); + if (ret) { + iwl_mvm_dealloc_int_sta(mvm, msta); + return ret; + } + + /* + * Enable cab queue after the ADD_STA command is sent. + * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG + * command with unknown station id. + */ + if (iwl_mvm_has_new_tx_api(mvm)) { + int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, + msta->sta_id, + IWL_MAX_TID_COUNT, + timeout); + vif->cab_queue = queue; + } else { + iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, + &cfg, timeout); + } + + return 0; +} + +/* + * Send the FW a request to remove the station from it's internal data + * structures, and in addition remove it from the local data structure. + */ +int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (!iwl_mvm_is_dqa_supported(mvm)) + return 0; + + iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, + IWL_MAX_TID_COUNT, 0); + + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); + if (ret) + IWL_WARN(mvm, "Failed sending remove station\n"); + + return ret; +} + #define IWL_MAX_RX_BA_SESSIONS 16 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) @@ -2059,6 +2231,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->sta_id = sta_id; + reorder_buf->valid = false; for (j = 0; j < reorder_buf->buf_size; j++) __skb_queue_head_init(&reorder_buf->entries[j]); } @@ -2226,7 +2399,9 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); cmd.sta_id = mvm_sta->sta_id; cmd.add_modify = STA_MODE_MODIFY; - cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX; + if (!iwl_mvm_has_new_tx_api(mvm)) + cmd.modify_mask = STA_MODIFY_QUEUES; + cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); @@ -2427,6 +2602,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, */ if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) { /* + * On new TX API rs and BA manager are offloaded. + * For now though, just don't support being reconfigured + */ + if (iwl_mvm_has_new_tx_api(mvm)) + return -ENOTSUPP; + + /* * If reconfiguring an existing queue, it first must be * drained */ @@ -2675,7 +2857,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, * station ID, then use AP's station ID. 
*/ if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { u8 sta_id = mvmvif->ap_sta_id; sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], @@ -2697,68 +2879,97 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, - struct ieee80211_key_conf *keyconf, bool mcast, + struct ieee80211_key_conf *key, bool mcast, u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, u8 key_offset) { - struct iwl_mvm_add_sta_key_cmd cmd = {}; + union { + struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; + struct iwl_mvm_add_sta_key_cmd cmd; + } u = {}; __le16 key_flags; int ret; u32 status; u16 keyidx; - int i; - u8 sta_id = mvm_sta->sta_id; + u64 pn = 0; + int i, size; + bool new_api = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_TKIP_MIC_KEYS); - keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & + keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK; key_flags = cpu_to_le16(keyidx); key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); - switch (keyconf->cipher) { + switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); - cmd.tkip_rx_tsc_byte2 = tkip_iv32; - for (i = 0; i < 5; i++) - cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); - memcpy(cmd.key, keyconf->key, keyconf->keylen); + if (new_api) { + memcpy((void *)&u.cmd.tx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + memcpy((void *)&u.cmd.rx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + IWL_MIC_KEY_SIZE); + pn = atomic64_read(&key->tx_pn); + + } else { + u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; + for (i = 0; i < 5; i++) + u.cmd_v1.tkip_rx_ttak[i] = + cpu_to_le16(tkip_p1k[i]); + } + memcpy(u.cmd.common.key, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_CCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); - memcpy(cmd.key, keyconf->key, keyconf->keylen); + memcpy(u.cmd.common.key, key->key, key->keylen); + if (new_api) + pn = atomic64_read(&key->tx_pn); break; case WLAN_CIPHER_SUITE_WEP104: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); /* fall through */ case WLAN_CIPHER_SUITE_WEP40: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); - memcpy(cmd.key + 3, keyconf->key, keyconf->keylen); + memcpy(u.cmd.common.key + 3, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_GCMP_256: key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); /* fall through */ case WLAN_CIPHER_SUITE_GCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); - memcpy(cmd.key, keyconf->key, keyconf->keylen); + memcpy(u.cmd.common.key, key->key, key->keylen); + if (new_api) + pn = atomic64_read(&key->tx_pn); break; default: key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); - memcpy(cmd.key, keyconf->key, keyconf->keylen); + memcpy(u.cmd.common.key, key->key, key->keylen); } if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); - cmd.key_offset = key_offset; - cmd.key_flags = key_flags; - cmd.sta_id = sta_id; + u.cmd.common.key_offset = key_offset; + u.cmd.common.key_flags = key_flags; + u.cmd.common.sta_id = mvm_sta->sta_id; + + if (new_api) { + u.cmd.transmit_seq_cnt = cpu_to_le64(pn); + size = sizeof(u.cmd); + } else { + size = sizeof(u.cmd_v1); + } status = ADD_STA_SUCCESS; if (cmd_flags & CMD_ASYNC) - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, - sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, + &u.cmd); else - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, 
sizeof(cmd), - &cmd, &status); + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, + &u.cmd, &status); switch (status) { case ADD_STA_SUCCESS: @@ -2858,7 +3069,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, return sta->addr; if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { u8 sta_id = mvmvif->ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); @@ -2911,9 +3122,14 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, struct ieee80211_key_conf *keyconf, bool mcast) { - struct iwl_mvm_add_sta_key_cmd cmd = {}; + union { + struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; + struct iwl_mvm_add_sta_key_cmd cmd; + } u = {}; + bool new_api = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_TKIP_MIC_KEYS); __le16 key_flags; - int ret; + int ret, size; u32 status; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & @@ -2924,13 +3140,19 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); - cmd.key_flags = key_flags; - cmd.key_offset = keyconf->hw_key_idx; - cmd.sta_id = sta_id; + /* + * The fields assigned here are in the same location at the start + * of the command, so we can do this union trick. + */ + u.cmd.common.key_flags = key_flags; + u.cmd.common.key_offset = keyconf->hw_key_idx; + u.cmd.common.sta_id = sta_id; + + size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd), - &cmd, &status); + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, + &status); switch (status) { case ADD_STA_SUCCESS: @@ -3044,7 +3266,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; - u8 sta_id = IWL_MVM_STATION_COUNT; + u8 sta_id = IWL_MVM_INVALID_STA; int ret, i; lockdep_assert_held(&mvm->mutex); @@ -3301,7 +3523,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); /* Block/unblock all the stations of the given mvmvif */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 1927ce607798..a143a8757e27 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -532,10 +532,13 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, u32 qmask, enum nl80211_iftype iftype); void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta); int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int 
iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 9f160fc58cd0..df7cd87199ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -78,7 +80,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta || IS_ERR(sta) || !sta->tdls) @@ -101,7 +103,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta || IS_ERR(sta) || !sta->tdls) @@ -145,7 +147,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif) /* populate TDLS peer data */ cnt = 0; - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta) || !sta->tdls) @@ -251,7 +253,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); if (state == IWL_MVM_TDLS_SW_IDLE) - mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT; + mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA; } void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) @@ -305,7 +307,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, /* get the existing peer if it's there */ if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE && - mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { + mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { struct ieee80211_sta *sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], lockdep_is_held(&mvm->mutex)); @@ -523,7 +525,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); /* station might be gone, in that case do nothing */ - if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) + if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) goto out; sta = rcu_dereference_protected( @@ -573,7 +575,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, sta->addr, chandef->chan->center_freq, chandef->width); /* we only support a single peer for channel switching */ - if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) { + if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "Existing peer. 
Can't start switch with %pM\n", sta->addr); @@ -633,7 +635,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr); /* we only support a single peer for channel switching */ - if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) { + if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr); goto out; } @@ -654,7 +656,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) wait_for_phy = true; - mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; dev_kfree_skb(mvm->tdls_cs.peer.skb); mvm->tdls_cs.peer.skb = NULL; @@ -697,7 +699,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && params->status != 0 && mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && - mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { + mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { struct ieee80211_sta *cur_sta; /* make sure it's the same peer */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c index a1947d6f3a2c..16ce8a56b5b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c @@ -80,7 +80,7 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm) if (IWL_MVM_TOF_IS_RESPONDER) { tof_data->responder_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_RESPONDER_CONFIG_CMD); - tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT; + tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA; } #endif diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index bec7d9c46087..f9cbd197246f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -356,7 +356,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) struct iwl_mvm_sta *mvmsta; int i, err; - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i); if (!mvmsta) continue; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 1ba0a6f55503..8f737f6cdd80 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -475,6 +475,39 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, memset(dev_cmd, 0, sizeof(*dev_cmd)); dev_cmd->hdr.cmd = TX_CMD; + + if (iwl_mvm_has_new_tx_api(mvm)) { + struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; + u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info); + + /* padding is inserted later in transport */ + /* FIXME - check for AMSDU may need to be removed */ + if (ieee80211_hdrlen(hdr->frame_control) % 4 && + !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) + offload_assist |= BIT(TX_CMD_OFFLD_PAD); + + cmd->offload_assist |= cpu_to_le16(offload_assist); + + /* Total # bytes to be transmitted */ + cmd->len = cpu_to_le16((u16)skb->len); + + /* Copy MAC header from skb into command buffer */ + memcpy(cmd->hdr, hdr, hdrlen); + + if (!info->control.hw_key) + cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS); + + /* For data packets rate info comes from the fw */ + if (ieee80211_is_data(hdr->frame_control) && sta) + goto out; + + cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE); + cmd->rate_n_flags 
= + cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta)); + + goto out; + } + tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; if (info->control.hw_key) @@ -484,6 +517,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdrlen); + +out: return dev_cmd; } @@ -514,21 +551,21 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, */ if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) || ieee80211_is_deauth(fc)) - return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + return mvm->probe_queue; if (info->hw_queue == info->control.vif->cab_queue) return info->hw_queue; WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, "fc=0x%02x", le16_to_cpu(fc)); - return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + return mvm->probe_queue; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) - return IWL_MVM_DQA_P2P_DEVICE_QUEUE; + return mvm->p2p_dev_queue; if (info->hw_queue == info->control.vif->cab_queue) return info->hw_queue; WARN_ON_ONCE(1); - return IWL_MVM_DQA_P2P_DEVICE_QUEUE; + return mvm->p2p_dev_queue; default: WARN_ONCE(1, "Not a ctrl vif, no available queue\n"); return -1; @@ -541,7 +578,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info info; struct iwl_device_cmd *dev_cmd; - struct iwl_tx_cmd *tx_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); int queue; @@ -598,7 +634,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) is_multicast_ether_addr(hdr->addr1)) { u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); - if (ap_sta_id != IWL_MVM_STATION_COUNT) + if (ap_sta_id != IWL_MVM_INVALID_STA) sta_id = ap_sta_id; } else if (iwl_mvm_is_dqa_supported(mvm) && info.control.vif->type == NL80211_IFTYPE_STATION && @@ -616,11 +652,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); - tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdrlen); - if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); return -1; @@ -713,7 +744,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * fifo to be able to send bursts. 
*/ max_amsdu_len = min_t(unsigned int, max_amsdu_len, - mvm->shared_mem_cfg.txfifo_size[txf] - 256); + mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256); if (unlikely(dbg_max_amsdu_len)) max_amsdu_len = min_t(unsigned int, max_amsdu_len, @@ -862,6 +893,9 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) unsigned long now = jiffies; int tid; + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return false; + for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) @@ -881,7 +915,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; struct iwl_device_cmd *dev_cmd; - struct iwl_tx_cmd *tx_cmd; __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; @@ -896,7 +929,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(!mvmsta)) return -1; - if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, @@ -904,8 +937,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (!dev_cmd) goto drop; - tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; - /* * we handle that entirely ourselves -- for uAPSD the firmware * will always send a notification, and for PS-Poll responses @@ -926,18 +957,27 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) goto drop_unlock_sta; - seq_number = mvmsta->tid_data[tid].seq_number; - seq_number &= IEEE80211_SCTL_SEQ; - hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); - hdr->seq_ctrl |= cpu_to_le16(seq_number); is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; if (WARN_ON_ONCE(is_ampdu && mvmsta->tid_data[tid].state != IWL_AGG_ON)) goto drop_unlock_sta; + + seq_number = mvmsta->tid_data[tid].seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + + if (!iwl_mvm_has_new_tx_api(mvm)) { + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + /* update the tx_cmd hdr as it was already copied */ + tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; + } } if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu) txq_id = mvmsta->tid_data[tid].txq_id; + if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) { /* default to TID 0 for non-QoS packets */ u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid; @@ -945,9 +985,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; } - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdrlen); - WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); /* Check if TXQ needs to be allocated or re-activated */ @@ -1036,7 +1073,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(!mvmsta)) return -1; - if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; memcpy(&info, skb->cb, sizeof(info)); @@ -1245,6 +1282,26 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, } } +/** + * iwl_mvm_get_scd_ssn - returns the SSN of the SCD + * @tx_resp: the Tx response from the fw (agg or non-agg) + * + * When the fw sends an AMPDU, it fetches the MPDUs one after the other. 
Since + * it can't know that everything will go well until the end of the AMPDU, it + * can't know in advance the number of MPDUs that will be sent in the current + * batch. This is why it writes the agg Tx response while it fetches the MPDUs. + * Hence, it can't know in advance what the SSN of the SCD will be at the end + * of the batch. This is why the SSN of the SCD is written at the end of the + * whole struct at a variable offset. This function knows how to cope with the + * variable offset and returns the SSN of the SCD. + */ +static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm, + struct iwl_mvm_tx_resp *tx_resp) +{ + return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + + tx_resp->frame_count) & 0xfff; +} + static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -1254,8 +1311,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); - u32 status = le16_to_cpu(tx_resp->status.status); - u16 ssn = iwl_mvm_get_scd_ssn(tx_resp); + struct agg_tx_status *agg_status = + iwl_mvm_get_agg_status(mvm, tx_resp); + u32 status = le16_to_cpu(agg_status->status); + u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp); struct iwl_mvm_sta *mvmsta; struct sk_buff_head skbs; u8 skb_freed = 0; @@ -1264,6 +1323,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, __skb_queue_head_init(&skbs); + if (iwl_mvm_has_new_tx_api(mvm)) + txq_id = le16_to_cpu(tx_resp->v6.tx_queue); + seq_ctl = le16_to_cpu(tx_resp->seq_ctl); /* we can free until ssn % q.n_bd not inclusive */ @@ -1388,7 +1450,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, if (!IS_ERR(sta)) { mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (tid != IWL_TID_NON_QOS) { + if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; bool send_eosp_ndp = false; @@ -1520,7 +1582,8 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; - struct agg_tx_status *frame_status = &tx_resp->status; + struct agg_tx_status *frame_status = + iwl_mvm_get_agg_status(mvm, tx_resp); int i; for (i = 0; i < tx_resp->frame_count; i++) { @@ -1722,6 +1785,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) ba_info.status.status_driver_data[0] = (void *)(uintptr_t)ba_res->reduced_txp; + if (!le16_to_cpu(ba_res->tfd_cnt)) + goto out; + /* * TODO: * When supporting multi TID aggregations - we need to move @@ -1730,12 +1796,16 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) * This will go together with SN and AddBA offload and cannot * be handled properly for now. 
*/ - WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1); - iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid, - (int)ba_res->tfd[0].q_num, + WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1); + tid = ba_res->ra_tid[0].tid; + if (tid == IWL_MGMT_TID) + tid = IWL_MAX_TID_COUNT; + iwl_mvm_tx_reclaim(mvm, sta_id, tid, + (int)(le16_to_cpu(ba_res->tfd[0].q_num)), le16_to_cpu(ba_res->tfd[0].tfd_index), &ba_info, le32_to_cpu(ba_res->tx_rate)); +out: IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", sta_id, le32_to_cpu(ba_res->flags), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index dedea96a8e0f..1dde05697c29 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 Intel Deutschland GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -597,6 +598,9 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) return i; + if (iwl_mvm_has_new_tx_api(mvm)) + return -ENOSPC; + /* * If no free queue found - settle for an inactive one to reconfigure * Make sure that the inactive queue either already belongs to this STA, @@ -627,6 +631,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, }; int ret; + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + spin_lock_bh(&mvm->queue_info_lock); if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0, "Trying to reconfig unallocated queue %d\n", queue)) { @@ -644,20 +651,19 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, return ret; } -void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int wdg_timeout) +static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, + int mac80211_queue, u8 sta_id, u8 tid) { bool enable_queue = true; spin_lock_bh(&mvm->queue_info_lock); /* Make sure this TID isn't already enabled */ - if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) { + if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { spin_unlock_bh(&mvm->queue_info_lock); IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", - queue, cfg->tid); - return; + queue, tid); + return false; } /* Update mappings and refcounts */ @@ -666,17 +672,17 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue); mvm->queue_info[queue].hw_queue_refcount++; - mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); - mvm->queue_info[queue].ra_sta_id = cfg->sta_id; + mvm->queue_info[queue].tid_bitmap |= BIT(tid); + mvm->queue_info[queue].ra_sta_id = sta_id; if (enable_queue) { - if (cfg->tid != IWL_MAX_TID_COUNT) + if (tid != IWL_MAX_TID_COUNT) mvm->queue_info[queue].mac80211_ac = - 
tid_to_mac80211_ac[cfg->tid]; + tid_to_mac80211_ac[tid]; else mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; - mvm->queue_info[queue].txq_tid = cfg->tid; + mvm->queue_info[queue].txq_tid = tid; } IWL_DEBUG_TX_QUEUES(mvm, @@ -686,8 +692,49 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, spin_unlock_bh(&mvm->queue_info_lock); + return enable_queue; +} + +int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, + u8 sta_id, u8 tid, unsigned int timeout) +{ + struct iwl_tx_queue_cfg_cmd cmd = { + .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), + .sta_id = sta_id, + .tid = tid, + }; + int queue; + + if (cmd.tid == IWL_MAX_TID_COUNT) + cmd.tid = IWL_MGMT_TID; + queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd, + SCD_QUEUE_CFG, timeout); + + if (queue < 0) { + IWL_DEBUG_TX_QUEUES(mvm, + "Failed allocating TXQ for sta %d tid %d, ret: %d\n", + sta_id, tid, queue); + return queue; + } + + IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", + queue, sta_id, tid); + + iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, sta_id, tid); + + return queue; +} + +void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout) +{ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + /* Send the enabling command if we need to */ - if (enable_queue) { + if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, + cfg->sta_id, cfg->tid)) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_ENABLE_QUEUE, @@ -701,7 +748,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); - WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), + WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, + sizeof(struct iwl_scd_txq_cfg_cmd), &cmd), "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); @@ -716,7 +764,6 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, .action = SCD_CFG_DISABLE_QUEUE, }; bool remove_mac_queue = true; - int ret; spin_lock_bh(&mvm->queue_info_lock); @@ -787,14 +834,23 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, spin_unlock_bh(&mvm->queue_info_lock); - iwl_trans_txq_disable(mvm->trans, queue, false); - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, - sizeof(cmd), &cmd); - if (ret) - IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", - queue, ret); + if (iwl_mvm_has_new_tx_api(mvm)) { + iwl_trans_txq_free(mvm->trans, queue); + } else { + int ret; - return ret; + iwl_trans_txq_disable(mvm->trans, queue, false); + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, + sizeof(struct iwl_scd_txq_cfg_cmd), + &cmd); + + if (ret) + IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", + queue, ret); + return ret; + } + + return 0; } /** @@ -816,7 +872,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) .data = { lq, }, }; - if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; return iwl_mvm_send_cmd(mvm, &cmd); @@ -1088,6 +1144,9 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, lockdep_assert_held(&mvmsta->lock); lockdep_assert_held(&mvm->queue_info_lock); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + /* Go over all non-active TIDs, incl. 
IWL_MAX_TID_COUNT (for mgmt) */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { /* If some TFDs are still queued - don't mark TID as inactive */ @@ -1154,6 +1213,9 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) unsigned long now = jiffies; int i; + if (iwl_mvm_has_new_tx_api(mvm)) + return; + spin_lock_bh(&mvm->queue_info_lock); for (i = 0; i < IWL_MAX_HW_QUEUES; i++) if (mvm->queue_info[i].hw_queue_refcount > 0)
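
The ADD_STA_KEY handling above keeps one code path for two firmware command layouts: the v1 and current structs share a common prefix, the driver fills that prefix through a union, and only the number of bytes handed to iwl_mvm_send_cmd_pdu_status() depends on the IWL_UCODE_TLV_API_TKIP_MIC_KEYS flag. Below is a minimal standalone sketch of that pattern, assuming nothing about the real driver structures; every name in it (my_key_cmd_common, build_key_cmd, and so on) is made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct my_key_cmd_common {		/* fields shared by both layouts */
	uint8_t  sta_id;
	uint8_t  key_offset;
	uint16_t key_flags;
	uint8_t  key[32];
};

struct my_key_cmd_v1 {			/* what older firmware expects */
	struct my_key_cmd_common common;
	uint16_t tkip_rx_ttak[5];
};

struct my_key_cmd {			/* extended layout for newer firmware */
	struct my_key_cmd_common common;
	uint64_t transmit_seq_cnt;
	uint8_t  rx_mic_key[8];
	uint8_t  tx_mic_key[8];
};

union my_key_cmd_u {			/* "union trick": one buffer, two sizes */
	struct my_key_cmd_v1 v1;
	struct my_key_cmd cmd;
};

/* Fill the command once via the common prefix; return the size to send. */
static size_t build_key_cmd(union my_key_cmd_u *u, int new_api, uint8_t sta_id)
{
	memset(u, 0, sizeof(*u));

	/* The common fields sit at the same offsets in both layouts. */
	u->cmd.common.sta_id = sta_id;
	u->cmd.common.key_offset = 1;
	u->cmd.common.key_flags = 0x2;		/* placeholder flags */

	if (new_api) {
		u->cmd.transmit_seq_cnt = 0x112233445566ULL;	/* example PN */
		return sizeof(u->cmd);
	}
	return sizeof(u->v1);
}

int main(void)
{
	union my_key_cmd_u u;

	printf("v1 command: %zu bytes\n", build_key_cmd(&u, 0, 7));
	printf("new command: %zu bytes\n", build_key_cmd(&u, 1, 7));
	return 0;
}

Picking the size at the call site, rather than duplicating the fill code per version, is what lets the common assignments (key_flags, key_offset, sta_id) appear only once in the patch above.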